diff --git a/vertical-pod-autoscaler/.golangci.yaml b/vertical-pod-autoscaler/.golangci.yaml index 85dac3a4eb9d..e133a47b0c9a 100644 --- a/vertical-pod-autoscaler/.golangci.yaml +++ b/vertical-pod-autoscaler/.golangci.yaml @@ -26,6 +26,8 @@ linters: - name: empty-lines - name: use-errors-new - name: early-return + - name: redefines-builtin-id + - name: redundant-import-alias # Below lists the rules disabled diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/handler.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/handler.go index 3ea9d118b200..30988dde3f22 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/resource/handler.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/handler.go @@ -19,7 +19,7 @@ package resource import ( "context" - v1 "k8s.io/api/admission/v1" + apiv1 "k8s.io/api/admission/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics/admission" @@ -41,5 +41,5 @@ type Handler interface { // DisallowIncorrectObjects returns whether incorrect objects (eg. unparsable, not passing validations) should be disallowed by Admission Server. DisallowIncorrectObjects() bool // GetPatches returns patches for given AdmissionRequest - GetPatches(context.Context, *v1.AdmissionRequest) ([]PatchRecord, error) + GetPatches(context.Context, *apiv1.AdmissionRequest) ([]PatchRecord, error) } diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/handler_test.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/handler_test.go index 9cc69e876449..4a3145411259 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/handler_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/handler_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/assert" admissionv1 "k8s.io/api/admission/v1" apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" resource_admission "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource" @@ -184,7 +184,7 @@ func TestGetPatches(t *testing.T) { fvm := &fakeVpaMatcher{vpa: tc.vpa} h := NewResourceHandler(fppp, fvm, tc.calculators) patches, err := h.GetPatches(context.Background(), &admissionv1.AdmissionRequest{ - Resource: v1.GroupVersionResource{ + Resource: metav1.GroupVersionResource{ Version: "v1", }, Namespace: tc.namespace, diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler.go index cce3a288b127..f197530b50cb 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/handler.go @@ -22,8 +22,8 @@ import ( "errors" "fmt" - v1 "k8s.io/api/admission/v1" - corev1 "k8s.io/api/core/v1" + admissionv1 "k8s.io/api/admission/v1" + apiv1 "k8s.io/api/core/v1" apires "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" @@ -67,8 +67,8 @@ func (h *resourceHandler) DisallowIncorrectObjects() bool { } // GetPatches builds patches for VPA in given admission request. 
-func (h *resourceHandler) GetPatches(_ context.Context, ar *v1.AdmissionRequest) ([]resource.PatchRecord, error) { - raw, isCreate := ar.Object.Raw, ar.Operation == v1.Create +func (h *resourceHandler) GetPatches(_ context.Context, ar *admissionv1.AdmissionRequest) ([]resource.PatchRecord, error) { + raw, isCreate := ar.Object.Raw, ar.Operation == admissionv1.Create vpa, err := parseVPA(raw) if err != nil { return nil, err @@ -158,18 +158,18 @@ func ValidateVPA(vpa *vpa_types.VerticalPodAutoscaler, isCreate bool) error { return fmt.Errorf("unexpected Mode value %s", *mode) } } - for resource, min := range policy.MinAllowed { - if err := validateResourceResolution(resource, min); err != nil { + for resource, minAllowed := range policy.MinAllowed { + if err := validateResourceResolution(resource, minAllowed); err != nil { return fmt.Errorf("minAllowed: %v", err) } - max, found := policy.MaxAllowed[resource] - if found && max.Cmp(min) < 0 { + maxAllowed, found := policy.MaxAllowed[resource] + if found && maxAllowed.Cmp(minAllowed) < 0 { return fmt.Errorf("max resource for %v is lower than min", resource) } } - for resource, max := range policy.MaxAllowed { - if err := validateResourceResolution(resource, max); err != nil { + for resource, maxAllowed := range policy.MaxAllowed { + if err := validateResourceResolution(resource, maxAllowed); err != nil { return fmt.Errorf("maxAllowed: %v", err) } } @@ -193,11 +193,11 @@ func ValidateVPA(vpa *vpa_types.VerticalPodAutoscaler, isCreate bool) error { return nil } -func validateResourceResolution(name corev1.ResourceName, val apires.Quantity) error { +func validateResourceResolution(name apiv1.ResourceName, val apires.Quantity) error { switch name { - case corev1.ResourceCPU: + case apiv1.ResourceCPU: return validateCPUResolution(val) - case corev1.ResourceMemory: + case apiv1.ResourceMemory: return validateMemoryResolution(val) } return nil diff --git a/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/matcher_test.go b/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/matcher_test.go index 2b6785bdcb21..62ea33170b79 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/matcher_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/resource/vpa/matcher_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/assert" "go.uber.org/mock/gomock" appsv1 "k8s.io/api/apps/v1" - v1 "k8s.io/api/autoscaling/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -51,12 +51,12 @@ func TestGetMatchingVpa(t *testing.T) { Namespace: "default", }, } - targetRef := &v1.CrossVersionObjectReference{ + targetRef := &autoscalingv1.CrossVersionObjectReference{ Kind: sts.Kind, Name: sts.Name, APIVersion: sts.APIVersion, } - targetRefWithNoMatches := &v1.CrossVersionObjectReference{ + targetRefWithNoMatches := &autoscalingv1.CrossVersionObjectReference{ Kind: "ReplicaSet", Name: "rs", APIVersion: "apps/v1", diff --git a/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go index 0b27e9a6b425..65001ebb17f6 100644 --- a/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go +++ b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go @@ -18,7 +18,7 @@ package v1 import ( autoscaling "k8s.io/api/autoscaling/v1" - v1 "k8s.io/api/core/v1" + apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -127,7 +127,7 @@ type EvictionRequirement struct { // Resources is a list of one or more resources that the condition applies // to. If more than one resource is given, the EvictionRequirement is fulfilled // if at least one resource meets `changeRequirement`. - Resources []v1.ResourceName `json:"resources" protobuf:"bytes,1,name=resources"` + Resources []apiv1.ResourceName `json:"resources" protobuf:"bytes,1,name=resources"` ChangeRequirement EvictionChangeRequirement `json:"changeRequirement" protobuf:"bytes,2,name=changeRequirement"` } @@ -210,17 +210,17 @@ type ContainerResourcePolicy struct { // Specifies the minimal amount of resources that will be recommended // for the container. The default is no minimum. // +optional - MinAllowed v1.ResourceList `json:"minAllowed,omitempty" protobuf:"bytes,3,rep,name=minAllowed,casttype=ResourceList,castkey=ResourceName"` + MinAllowed apiv1.ResourceList `json:"minAllowed,omitempty" protobuf:"bytes,3,rep,name=minAllowed,casttype=ResourceList,castkey=ResourceName"` // Specifies the maximum amount of resources that will be recommended // for the container. The default is no maximum. // +optional - MaxAllowed v1.ResourceList `json:"maxAllowed,omitempty" protobuf:"bytes,4,rep,name=maxAllowed,casttype=ResourceList,castkey=ResourceName"` + MaxAllowed apiv1.ResourceList `json:"maxAllowed,omitempty" protobuf:"bytes,4,rep,name=maxAllowed,casttype=ResourceList,castkey=ResourceName"` // Specifies the type of recommendations that will be computed // (and possibly applied) by VPA. // If not specified, the default of [ResourceCPU, ResourceMemory] will be used. // +patchStrategy=merge - ControlledResources *[]v1.ResourceName `json:"controlledResources,omitempty" patchStrategy:"merge" protobuf:"bytes,5,rep,name=controlledResources"` + ControlledResources *[]apiv1.ResourceName `json:"controlledResources,omitempty" patchStrategy:"merge" protobuf:"bytes,5,rep,name=controlledResources"` // Specifies which resource values should be controlled. // The default is "RequestsAndLimits". @@ -298,17 +298,17 @@ type RecommendedContainerResources struct { // Name of the container. ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` // Recommended amount of resources. Observes ContainerResourcePolicy. - Target v1.ResourceList `json:"target" protobuf:"bytes,2,rep,name=target,casttype=ResourceList,castkey=ResourceName"` + Target apiv1.ResourceList `json:"target" protobuf:"bytes,2,rep,name=target,casttype=ResourceList,castkey=ResourceName"` // Minimum recommended amount of resources. Observes ContainerResourcePolicy. // This amount is not guaranteed to be sufficient for the application to operate in a stable way, however // running with less resources is likely to have significant impact on performance/availability. // +optional - LowerBound v1.ResourceList `json:"lowerBound,omitempty" protobuf:"bytes,3,rep,name=lowerBound,casttype=ResourceList,castkey=ResourceName"` + LowerBound apiv1.ResourceList `json:"lowerBound,omitempty" protobuf:"bytes,3,rep,name=lowerBound,casttype=ResourceList,castkey=ResourceName"` // Maximum recommended amount of resources. Observes ContainerResourcePolicy. // Any resources allocated beyond this value are likely wasted. This value may be larger than the maximum // amount of application is actually capable of consuming. 
// +optional - UpperBound v1.ResourceList `json:"upperBound,omitempty" protobuf:"bytes,4,rep,name=upperBound,casttype=ResourceList,castkey=ResourceName"` + UpperBound apiv1.ResourceList `json:"upperBound,omitempty" protobuf:"bytes,4,rep,name=upperBound,casttype=ResourceList,castkey=ResourceName"` // The most recent recommended resources target computed by the autoscaler // for the controlled pods, based only on actual resource usage, not taking // into account the ContainerResourcePolicy. @@ -317,7 +317,7 @@ type RecommendedContainerResources struct { // or higher that MaxAllowed). // Used only as status indication, will not affect actual resource assignment. // +optional - UncappedTarget v1.ResourceList `json:"uncappedTarget,omitempty" protobuf:"bytes,5,opt,name=uncappedTarget"` + UncappedTarget apiv1.ResourceList `json:"uncappedTarget,omitempty" protobuf:"bytes,5,opt,name=uncappedTarget"` } // VerticalPodAutoscalerConditionType are the valid conditions of @@ -348,7 +348,7 @@ type VerticalPodAutoscalerCondition struct { // type describes the current condition Type VerticalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` // status is the status of the condition (True, False, Unknown) - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + Status apiv1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional diff --git a/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/types.go b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/types.go index 70eab4ce3609..9e3c091574da 100644 --- a/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/types.go +++ b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1/types.go @@ -18,7 +18,7 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/api/core/v1" + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -131,11 +131,11 @@ type ContainerResourcePolicy struct { // Specifies the minimal amount of resources that will be recommended // for the container. The default is no minimum. // +optional - MinAllowed v1.ResourceList `json:"minAllowed,omitempty" protobuf:"bytes,3,rep,name=minAllowed,casttype=ResourceList,castkey=ResourceName"` + MinAllowed apiv1.ResourceList `json:"minAllowed,omitempty" protobuf:"bytes,3,rep,name=minAllowed,casttype=ResourceList,castkey=ResourceName"` // Specifies the maximum amount of resources that will be recommended // for the container. The default is no maximum. // +optional - MaxAllowed v1.ResourceList `json:"maxAllowed,omitempty" protobuf:"bytes,4,rep,name=maxAllowed,casttype=ResourceList,castkey=ResourceName"` + MaxAllowed apiv1.ResourceList `json:"maxAllowed,omitempty" protobuf:"bytes,4,rep,name=maxAllowed,casttype=ResourceList,castkey=ResourceName"` } const ( @@ -187,17 +187,17 @@ type RecommendedContainerResources struct { // Name of the container. ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` // Recommended amount of resources. Observes ContainerResourcePolicy. - Target v1.ResourceList `json:"target" protobuf:"bytes,2,rep,name=target,casttype=ResourceList,castkey=ResourceName"` + Target apiv1.ResourceList `json:"target" protobuf:"bytes,2,rep,name=target,casttype=ResourceList,castkey=ResourceName"` // Minimum recommended amount of resources. Observes ContainerResourcePolicy. 
// This amount is not guaranteed to be sufficient for the application to operate in a stable way, however // running with less resources is likely to have significant impact on performance/availability. // +optional - LowerBound v1.ResourceList `json:"lowerBound,omitempty" protobuf:"bytes,3,rep,name=lowerBound,casttype=ResourceList,castkey=ResourceName"` + LowerBound apiv1.ResourceList `json:"lowerBound,omitempty" protobuf:"bytes,3,rep,name=lowerBound,casttype=ResourceList,castkey=ResourceName"` // Maximum recommended amount of resources. Observes ContainerResourcePolicy. // Any resources allocated beyond this value are likely wasted. This value may be larger than the maximum // amount of application is actually capable of consuming. // +optional - UpperBound v1.ResourceList `json:"upperBound,omitempty" protobuf:"bytes,4,rep,name=upperBound,casttype=ResourceList,castkey=ResourceName"` + UpperBound apiv1.ResourceList `json:"upperBound,omitempty" protobuf:"bytes,4,rep,name=upperBound,casttype=ResourceList,castkey=ResourceName"` // The most recent recommended resources target computed by the autoscaler // for the controlled pods, based only on actual resource usage, not taking // into account the ContainerResourcePolicy. @@ -206,7 +206,7 @@ type RecommendedContainerResources struct { // or higher that MaxAllowed). // Used only as status indication, will not affect actual resource assignment. // +optional - UncappedTarget v1.ResourceList `json:"uncappedTarget,omitempty" protobuf:"bytes,5,opt,name=uncappedTarget"` + UncappedTarget apiv1.ResourceList `json:"uncappedTarget,omitempty" protobuf:"bytes,5,opt,name=uncappedTarget"` } // VerticalPodAutoscalerConditionType are the valid conditions of @@ -231,7 +231,7 @@ type VerticalPodAutoscalerCondition struct { // type describes the current condition Type VerticalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` // status is the status of the condition (True, False, Unknown) - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + Status apiv1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional diff --git a/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2/types.go b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2/types.go index 8cdac456d688..bbc030c26539 100644 --- a/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2/types.go +++ b/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2/types.go @@ -19,7 +19,7 @@ package v1beta2 import ( autoscaling "k8s.io/api/autoscaling/v1" - v1 "k8s.io/api/core/v1" + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -150,11 +150,11 @@ type ContainerResourcePolicy struct { // Specifies the minimal amount of resources that will be recommended // for the container. The default is no minimum. // +optional - MinAllowed v1.ResourceList `json:"minAllowed,omitempty" protobuf:"bytes,3,rep,name=minAllowed,casttype=ResourceList,castkey=ResourceName"` + MinAllowed apiv1.ResourceList `json:"minAllowed,omitempty" protobuf:"bytes,3,rep,name=minAllowed,casttype=ResourceList,castkey=ResourceName"` // Specifies the maximum amount of resources that will be recommended // for the container. The default is no maximum. 
// +optional - MaxAllowed v1.ResourceList `json:"maxAllowed,omitempty" protobuf:"bytes,4,rep,name=maxAllowed,casttype=ResourceList,castkey=ResourceName"` + MaxAllowed apiv1.ResourceList `json:"maxAllowed,omitempty" protobuf:"bytes,4,rep,name=maxAllowed,casttype=ResourceList,castkey=ResourceName"` } const ( @@ -207,17 +207,17 @@ type RecommendedContainerResources struct { // Name of the container. ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` // Recommended amount of resources. Observes ContainerResourcePolicy. - Target v1.ResourceList `json:"target" protobuf:"bytes,2,rep,name=target,casttype=ResourceList,castkey=ResourceName"` + Target apiv1.ResourceList `json:"target" protobuf:"bytes,2,rep,name=target,casttype=ResourceList,castkey=ResourceName"` // Minimum recommended amount of resources. Observes ContainerResourcePolicy. // This amount is not guaranteed to be sufficient for the application to operate in a stable way, however // running with less resources is likely to have significant impact on performance/availability. // +optional - LowerBound v1.ResourceList `json:"lowerBound,omitempty" protobuf:"bytes,3,rep,name=lowerBound,casttype=ResourceList,castkey=ResourceName"` + LowerBound apiv1.ResourceList `json:"lowerBound,omitempty" protobuf:"bytes,3,rep,name=lowerBound,casttype=ResourceList,castkey=ResourceName"` // Maximum recommended amount of resources. Observes ContainerResourcePolicy. // Any resources allocated beyond this value are likely wasted. This value may be larger than the maximum // amount of application is actually capable of consuming. // +optional - UpperBound v1.ResourceList `json:"upperBound,omitempty" protobuf:"bytes,4,rep,name=upperBound,casttype=ResourceList,castkey=ResourceName"` + UpperBound apiv1.ResourceList `json:"upperBound,omitempty" protobuf:"bytes,4,rep,name=upperBound,casttype=ResourceList,castkey=ResourceName"` // The most recent recommended resources target computed by the autoscaler // for the controlled pods, based only on actual resource usage, not taking // into account the ContainerResourcePolicy. @@ -226,7 +226,7 @@ type RecommendedContainerResources struct { // or higher that MaxAllowed). // Used only as status indication, will not affect actual resource assignment. // +optional - UncappedTarget v1.ResourceList `json:"uncappedTarget,omitempty" protobuf:"bytes,5,opt,name=uncappedTarget"` + UncappedTarget apiv1.ResourceList `json:"uncappedTarget,omitempty" protobuf:"bytes,5,opt,name=uncappedTarget"` } // VerticalPodAutoscalerConditionType are the valid conditions of @@ -257,7 +257,7 @@ type VerticalPodAutoscalerCondition struct { // type describes the current condition Type VerticalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` // status is the status of the condition (True, False, Unknown) - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + Status apiv1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional diff --git a/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1/types.go b/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1/types.go index 1c46d5057895..4ddc77c79b86 100644 --- a/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1/types.go +++ b/vertical-pod-autoscaler/pkg/apis/poc.autoscaling.k8s.io/v1alpha1/types.go @@ -18,7 +18,7 @@ limitations under the License. 
package v1alpha1 import ( - v1 "k8s.io/api/core/v1" + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -131,11 +131,11 @@ type ContainerResourcePolicy struct { // Specifies the minimal amount of resources that will be recommended // for the container. The default is no minimum. // +optional - MinAllowed v1.ResourceList `json:"minAllowed,omitempty" protobuf:"bytes,3,rep,name=minAllowed,casttype=ResourceList,castkey=ResourceName"` + MinAllowed apiv1.ResourceList `json:"minAllowed,omitempty" protobuf:"bytes,3,rep,name=minAllowed,casttype=ResourceList,castkey=ResourceName"` // Specifies the maximum amount of resources that will be recommended // for the container. The default is no maximum. // +optional - MaxAllowed v1.ResourceList `json:"maxAllowed,omitempty" protobuf:"bytes,4,rep,name=maxAllowed,casttype=ResourceList,castkey=ResourceName"` + MaxAllowed apiv1.ResourceList `json:"maxAllowed,omitempty" protobuf:"bytes,4,rep,name=maxAllowed,casttype=ResourceList,castkey=ResourceName"` } const ( @@ -187,17 +187,17 @@ type RecommendedContainerResources struct { // Name of the container. ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` // Recommended amount of resources. - Target v1.ResourceList `json:"target" protobuf:"bytes,2,rep,name=target,casttype=ResourceList,castkey=ResourceName"` + Target apiv1.ResourceList `json:"target" protobuf:"bytes,2,rep,name=target,casttype=ResourceList,castkey=ResourceName"` // Minimum recommended amount of resources. // This amount is not guaranteed to be sufficient for the application to operate in a stable way, however // running with less resources is likely to have significant impact on performance/availability. // +optional - LowerBound v1.ResourceList `json:"lowerBound,omitempty" protobuf:"bytes,3,rep,name=lowerBound,casttype=ResourceList,castkey=ResourceName"` + LowerBound apiv1.ResourceList `json:"lowerBound,omitempty" protobuf:"bytes,3,rep,name=lowerBound,casttype=ResourceList,castkey=ResourceName"` // Maximum recommended amount of resources. // Any resources allocated beyond this value are likely wasted. This value may be larger than the maximum // amount of application is actually capable of consuming. 
// +optional - UpperBound v1.ResourceList `json:"upperBound,omitempty" protobuf:"bytes,4,rep,name=upperBound,casttype=ResourceList,castkey=ResourceName"` + UpperBound apiv1.ResourceList `json:"upperBound,omitempty" protobuf:"bytes,4,rep,name=upperBound,casttype=ResourceList,castkey=ResourceName"` } // VerticalPodAutoscalerConditionType are the valid conditions of @@ -222,7 +222,7 @@ type VerticalPodAutoscalerCondition struct { // type describes the current condition Type VerticalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` // status is the status of the condition (True, False, Unknown) - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + Status apiv1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` // lastTransitionTime is the last time the condition transitioned from // one status to another // +optional diff --git a/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go b/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go index 9c7967462708..7d4ab5f42168 100644 --- a/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/checkpoint/checkpoint_writer_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/assert" autoscalingv1 "k8s.io/api/autoscaling/v1" - v1 "k8s.io/api/core/v1" + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -78,7 +78,7 @@ func addVpa(t *testing.T, cluster model.ClusterState, vpaID model.VpaID, selecto func TestMergeContainerStateForCheckpointDropsRecentMemoryPeak(t *testing.T) { cluster := model.NewClusterState(testGcPeriod) - cluster.AddOrUpdatePod(testPodID1, testLabels, v1.PodRunning) + cluster.AddOrUpdatePod(testPodID1, testLabels, apiv1.PodRunning) assert.NoError(t, cluster.AddOrUpdateContainer(testContainerID1, testRequest)) container := cluster.GetContainer(testContainerID1) @@ -120,7 +120,7 @@ func TestIsFetchingHistory(t *testing.T) { vpa.SetConditionsMap(map[vpa_types.VerticalPodAutoscalerConditionType]vpa_types.VerticalPodAutoscalerCondition{ vpa_types.FetchingHistory: { Type: vpa_types.FetchingHistory, - Status: v1.ConditionFalse, + Status: apiv1.ConditionFalse, }, }) return vpa @@ -133,7 +133,7 @@ func TestIsFetchingHistory(t *testing.T) { vpa.SetConditionsMap(map[vpa_types.VerticalPodAutoscalerConditionType]vpa_types.VerticalPodAutoscalerCondition{ vpa_types.FetchingHistory: { Type: vpa_types.FetchingHistory, - Status: v1.ConditionTrue, + Status: apiv1.ConditionTrue, }, }) return vpa @@ -214,7 +214,7 @@ func TestStoreCheckpointsMakesProgressEvenForCancelledContext(t *testing.T) { PodName: fmt.Sprintf("pod-%d", i), } podLabels := map[string]string{"app": fmt.Sprintf("pod-%d", i)} - clusterState.AddOrUpdatePod(podID, podLabels, v1.PodRunning) + clusterState.AddOrUpdatePod(podID, podLabels, apiv1.PodRunning) for j := range 2 { containerID := model.ContainerID{ PodID: podID, diff --git a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go index c84e5d0e6636..d4200a32139e 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder_test.go @@ -26,7 +26,7 @@ import ( "github.com/stretchr/testify/assert" "go.uber.org/mock/gomock" autoscalingv1 "k8s.io/api/autoscaling/v1" - v1 "k8s.io/api/core/v1" + apiv1 
"k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" @@ -106,7 +106,7 @@ func (cs *fakeClusterState) AddSample(sample *model.ContainerUsageSampleWithKey) return nil } -func (cs *fakeClusterState) AddOrUpdatePod(podID model.PodID, _ labels.Set, _ v1.PodPhase) { +func (cs *fakeClusterState) AddOrUpdatePod(podID model.PodID, _ labels.Set, _ apiv1.PodPhase) { cs.addedPods = append(cs.addedPods, podID) } @@ -858,7 +858,7 @@ func TestCanCleanupCheckpoints(t *testing.T) { client := fake.NewClientset() namespace := "testNamespace" - _, err := client.CoreV1().Namespaces().Create(tctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}) + _, err := client.CoreV1().Namespaces().Create(tctx, &apiv1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}) assert.NoError(t, err) vpaBuilder := test.VerticalPodAutoscaler().WithContainer("container").WithNamespace(namespace).WithTargetRef(&autoscalingv1.CrossVersionObjectReference{ diff --git a/vertical-pod-autoscaler/pkg/recommender/input/metrics/metrics_source.go b/vertical-pod-autoscaler/pkg/recommender/input/metrics/metrics_source.go index 3d848ea38d70..bc6eebc149c7 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/metrics/metrics_source.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/metrics/metrics_source.go @@ -21,7 +21,7 @@ import ( "time" k8sapiv1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" "k8s.io/client-go/rest" @@ -35,7 +35,7 @@ import ( // PodMetricsLister wraps both metrics-client and External Metrics type PodMetricsLister interface { - List(ctx context.Context, namespace string, opts v1.ListOptions) (*v1beta1.PodMetricsList, error) + List(ctx context.Context, namespace string, opts metav1.ListOptions) (*v1beta1.PodMetricsList, error) } // podMetricsSource is the metrics-client source of metrics. 
@@ -48,7 +48,7 @@ func NewPodMetricsesSource(source resourceclient.PodMetricsesGetter) PodMetricsL return podMetricsSource{metricsGetter: source} } -func (s podMetricsSource) List(ctx context.Context, namespace string, opts v1.ListOptions) (*v1beta1.PodMetricsList, error) { +func (s podMetricsSource) List(ctx context.Context, namespace string, opts metav1.ListOptions) (*v1beta1.PodMetricsList, error) { podMetricsInterface := s.metricsGetter.PodMetricses(namespace) return podMetricsInterface.List(ctx, opts) } @@ -81,7 +81,7 @@ func NewExternalClient(c *rest.Config, clusterState model.ClusterState, options } } -func (s *externalMetricsClient) List(ctx context.Context, namespace string, opts v1.ListOptions) (*v1beta1.PodMetricsList, error) { +func (s *externalMetricsClient) List(ctx context.Context, namespace string, opts metav1.ListOptions) (*v1beta1.PodMetricsList, error) { result := v1beta1.PodMetricsList{} for _, vpa := range s.clusterState.VPAs() { @@ -103,9 +103,9 @@ func (s *externalMetricsClient) List(ctx context.Context, namespace string, opts } selector := vpa.PodSelector.Add(*podNameReq) podMets := v1beta1.PodMetrics{ - TypeMeta: v1.TypeMeta{}, - ObjectMeta: v1.ObjectMeta{Namespace: vpa.ID.Namespace, Name: pod.PodName}, - Window: v1.Duration{}, + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{Namespace: vpa.ID.Namespace, Name: pod.PodName}, + Window: metav1.Duration{}, Containers: make([]v1beta1.ContainerMetrics, 0), } // Query each resource in turn, then assemble back to a single []ContainerMetrics. @@ -122,7 +122,7 @@ func (s *externalMetricsClient) List(ctx context.Context, namespace string, opts klog.V(4).InfoS("External Metrics Query for VPA", "vpa", klog.KRef(vpa.ID.Namespace, vpa.ID.VpaName), "resource", resourceName, "metric", metricName, "itemCount", len(m.Items), "firstItem", m.Items[0]) podMets.Timestamp = m.Items[0].Timestamp if m.Items[0].WindowSeconds != nil { - podMets.Window = v1.Duration{Duration: time.Duration(*m.Items[0].WindowSeconds) * time.Second} + podMets.Window = metav1.Duration{Duration: time.Duration(*m.Items[0].WindowSeconds) * time.Second} } for _, val := range m.Items { ctrName, hasCtrName := val.MetricLabels[s.options.ContainerNameLabel] diff --git a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go index 7f0dc706b610..69a64b8f4f08 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/oom/observer_test.go @@ -21,7 +21,7 @@ import ( "time" "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" + apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -34,7 +34,7 @@ var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) func init() { - utilruntime.Must(v1.AddToScheme(scheme)) + utilruntime.Must(apiv1.AddToScheme(scheme)) } const pod1Yaml = ` @@ -77,22 +77,22 @@ status: reason: OOMKilled ` -func newPod(yaml string) (*v1.Pod, error) { +func newPod(yaml string) (*apiv1.Pod, error) { decode := codecs.UniversalDeserializer().Decode obj, _, err := decode([]byte(yaml), nil, nil) if err != nil { return nil, err } - return obj.(*v1.Pod), nil + return obj.(*apiv1.Pod), nil } -func newEvent(yaml string) (*v1.Event, error) { +func newEvent(yaml string) (*apiv1.Event, error) { decode := codecs.UniversalDeserializer().Decode obj, _, err := decode([]byte(yaml), nil, 
nil) if err != nil { return nil, err } - return obj.(*v1.Event), nil + return obj.(*apiv1.Event), nil } func TestOOMReceived(t *testing.T) { @@ -105,8 +105,8 @@ func TestOOMReceived(t *testing.T) { testCases := []struct { desc string - oldPod *v1.Pod - newPod *v1.Pod + oldPod *apiv1.Pod + newPod *apiv1.Pod wantOOMInfo OomInfo }{ { @@ -127,7 +127,7 @@ func TestOOMReceived(t *testing.T) { }, { desc: "Old pod does not set memory requests", - oldPod: func() *v1.Pod { + oldPod: func() *apiv1.Pod { oldPod := p1.DeepCopy() oldPod.Spec.Containers[0].Resources.Requests = nil oldPod.Status.ContainerStatuses[0].Resources = nil @@ -148,11 +148,11 @@ func TestOOMReceived(t *testing.T) { }, { desc: "Old pod also set memory request in containerStatus, prefer info from containerStatus", - oldPod: func() *v1.Pod { + oldPod: func() *apiv1.Pod { oldPod := p1.DeepCopy() - oldPod.Status.ContainerStatuses[0].Resources = &v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceMemory: resource.MustParse("2048"), + oldPod.Status.ContainerStatuses[0].Resources = &apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("2048"), }, } return oldPod diff --git a/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client.go b/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client.go index 0076809e7ad3..d6249d545339 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client.go @@ -17,7 +17,7 @@ limitations under the License. package spec import ( - v1 "k8s.io/api/core/v1" + apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" v1lister "k8s.io/client-go/listers/core/v1" @@ -36,7 +36,7 @@ type BasicPodSpec struct { // List of init containers within this pod. InitContainers []BasicContainerSpec // PodPhase describing current life cycle phase of the Pod. - Phase v1.PodPhase + Phase apiv1.PodPhase } // BasicContainerSpec contains basic information defining a container. 
@@ -81,7 +81,7 @@ func (client *specClient) GetPodSpecs() ([]*BasicPodSpec, error) { return podSpecs, nil } -func newBasicPodSpec(pod *v1.Pod) *BasicPodSpec { +func newBasicPodSpec(pod *apiv1.Pod) *BasicPodSpec { containerSpecs := newContainerSpecs(pod, pod.Spec.Containers, false /* isInitContainer */) initContainerSpecs := newContainerSpecs(pod, pod.Spec.InitContainers, true /* isInitContainer */) @@ -95,7 +95,7 @@ func newBasicPodSpec(pod *v1.Pod) *BasicPodSpec { return basicPodSpec } -func newContainerSpecs(pod *v1.Pod, containers []v1.Container, isInitContainer bool) []BasicContainerSpec { +func newContainerSpecs(pod *apiv1.Pod, containers []apiv1.Container, isInitContainer bool) []BasicContainerSpec { var containerSpecs []BasicContainerSpec for _, container := range containers { containerSpec := newContainerSpec(pod, container, isInitContainer) @@ -104,7 +104,7 @@ func newContainerSpecs(pod *v1.Pod, containers []v1.Container, isInitContainer b return containerSpecs } -func newContainerSpec(pod *v1.Pod, container v1.Container, isInitContainer bool) BasicContainerSpec { +func newContainerSpec(pod *apiv1.Pod, container apiv1.Container, isInitContainer bool) BasicContainerSpec { containerSpec := BasicContainerSpec{ ID: model.ContainerID{ PodID: podID(pod), @@ -116,17 +116,17 @@ func newContainerSpec(pod *v1.Pod, container v1.Container, isInitContainer bool) return containerSpec } -func calculateRequestedResources(pod *v1.Pod, container v1.Container, isInitContainer bool) model.Resources { +func calculateRequestedResources(pod *apiv1.Pod, container apiv1.Container, isInitContainer bool) model.Resources { requestsAndLimitsFn := resourcehelpers.ContainerRequestsAndLimits if isInitContainer { requestsAndLimitsFn = resourcehelpers.InitContainerRequestsAndLimits } requests, _ := requestsAndLimitsFn(container.Name, pod) - cpuQuantity := requests[v1.ResourceCPU] + cpuQuantity := requests[apiv1.ResourceCPU] cpuMillicores := cpuQuantity.MilliValue() - memoryQuantity := requests[v1.ResourceMemory] + memoryQuantity := requests[apiv1.ResourceMemory] memoryBytes := memoryQuantity.Value() return model.Resources{ @@ -135,7 +135,7 @@ func calculateRequestedResources(pod *v1.Pod, container v1.Container, isInitCont } } -func podID(pod *v1.Pod) model.PodID { +func podID(pod *apiv1.Pod) model.PodID { return model.PodID{ PodName: pod.Name, Namespace: pod.Namespace, diff --git a/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go b/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go index 3e55e3cb87be..29749b2473b4 100644 --- a/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go +++ b/vertical-pod-autoscaler/pkg/recommender/input/spec/spec_client_test_util.go @@ -20,7 +20,7 @@ import ( "fmt" "github.com/stretchr/testify/mock" - v1 "k8s.io/api/core/v1" + apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -34,7 +34,7 @@ var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) func init() { - utilruntime.Must(v1.AddToScheme(scheme)) + utilruntime.Must(apiv1.AddToScheme(scheme)) } const pod1Yaml = ` @@ -123,9 +123,9 @@ type podListerMock struct { mock.Mock } -func (m *podListerMock) List(selector labels.Selector) (ret []*v1.Pod, err error) { +func (m *podListerMock) List(selector labels.Selector) (ret []*apiv1.Pod, err error) { args := m.Called() - return args.Get(0).([]*v1.Pod), args.Error(1) + return args.Get(0).([]*apiv1.Pod), 
args.Error(1) } func (m *podListerMock) Pods(namespace string) v1lister.PodNamespaceLister { @@ -196,19 +196,19 @@ func (tc *specClientTestCase) createFakeSpecClient() SpecClient { return NewSpecClient(podListerMock) } -func (tc *specClientTestCase) getFakePods() []*v1.Pod { - pods := []*v1.Pod{} +func (tc *specClientTestCase) getFakePods() []*apiv1.Pod { + pods := []*apiv1.Pod{} for _, yaml := range tc.podYamls { pods = append(pods, newPod(yaml)) } return pods } -func newPod(yaml string) *v1.Pod { +func newPod(yaml string) *apiv1.Pod { decode := codecs.UniversalDeserializer().Decode obj, _, err := decode([]byte(yaml), nil, nil) if err != nil { fmt.Printf("%#v", err) } - return obj.(*v1.Pod) + return obj.(*apiv1.Pod) } diff --git a/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go b/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go index fd0e2f79a7f6..be67d81ffff1 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/aggregate_container_state_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/labels" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/util" diff --git a/vertical-pod-autoscaler/pkg/recommender/model/cluster.go b/vertical-pod-autoscaler/pkg/recommender/model/cluster.go index 4bf55e867a06..4f0d792e47c0 100644 --- a/vertical-pod-autoscaler/pkg/recommender/model/cluster.go +++ b/vertical-pod-autoscaler/pkg/recommender/model/cluster.go @@ -24,7 +24,7 @@ import ( "time" apiv1 "k8s.io/api/core/v1" - labels "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/labels" "k8s.io/klog/v2" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go b/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go index ad79f5f0c86b..362cf3d27075 100644 --- a/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/routines/cpu_integer_post_processor_test.go @@ -20,7 +20,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" + apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -170,7 +170,7 @@ func equalRecommendedPodResources(a, b *vpa_types.RecommendedPodResources) bool return true } -func equalResourceList(rla, rlb v1.ResourceList) bool { +func equalResourceList(rla, rlb apiv1.ResourceList) bool { if len(rla) != len(rlb) { return false } @@ -192,40 +192,40 @@ func equalResourceList(rla, rlb v1.ResourceList) bool { func TestSetIntegerCPURecommendation(t *testing.T) { tests := []struct { name string - recommendation v1.ResourceList - expectedRecommendation v1.ResourceList + recommendation apiv1.ResourceList + expectedRecommendation apiv1.ResourceList }{ { name: "unchanged", - recommendation: map[v1.ResourceName]resource.Quantity{ - v1.ResourceCPU: resource.MustParse("8"), - v1.ResourceMemory: resource.MustParse("6Gi"), + recommendation: map[apiv1.ResourceName]resource.Quantity{ + apiv1.ResourceCPU: resource.MustParse("8"), + apiv1.ResourceMemory: resource.MustParse("6Gi"), }, - 
expectedRecommendation: map[v1.ResourceName]resource.Quantity{ - v1.ResourceCPU: resource.MustParse("8"), - v1.ResourceMemory: resource.MustParse("6Gi"), + expectedRecommendation: map[apiv1.ResourceName]resource.Quantity{ + apiv1.ResourceCPU: resource.MustParse("8"), + apiv1.ResourceMemory: resource.MustParse("6Gi"), }, }, { name: "round up from 0.1", - recommendation: map[v1.ResourceName]resource.Quantity{ - v1.ResourceCPU: resource.MustParse("8.1"), - v1.ResourceMemory: resource.MustParse("6Gi"), + recommendation: map[apiv1.ResourceName]resource.Quantity{ + apiv1.ResourceCPU: resource.MustParse("8.1"), + apiv1.ResourceMemory: resource.MustParse("6Gi"), }, - expectedRecommendation: map[v1.ResourceName]resource.Quantity{ - v1.ResourceCPU: resource.MustParse("9"), - v1.ResourceMemory: resource.MustParse("6Gi"), + expectedRecommendation: map[apiv1.ResourceName]resource.Quantity{ + apiv1.ResourceCPU: resource.MustParse("9"), + apiv1.ResourceMemory: resource.MustParse("6Gi"), }, }, { name: "round up from 0.9", - recommendation: map[v1.ResourceName]resource.Quantity{ - v1.ResourceCPU: resource.MustParse("8.9"), - v1.ResourceMemory: resource.MustParse("6Gi"), + recommendation: map[apiv1.ResourceName]resource.Quantity{ + apiv1.ResourceCPU: resource.MustParse("8.9"), + apiv1.ResourceMemory: resource.MustParse("6Gi"), }, - expectedRecommendation: map[v1.ResourceName]resource.Quantity{ - v1.ResourceCPU: resource.MustParse("9"), - v1.ResourceMemory: resource.MustParse("6Gi"), + expectedRecommendation: map[apiv1.ResourceName]resource.Quantity{ + apiv1.ResourceCPU: resource.MustParse("9"), + apiv1.ResourceMemory: resource.MustParse("6Gi"), }, }, } diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go b/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go index c86871fa952e..1883096c7fa0 100644 --- a/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go +++ b/vertical-pod-autoscaler/pkg/recommender/routines/recommender.go @@ -24,7 +24,7 @@ import ( "k8s.io/klog/v2" - v1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" + vpaautoscalingv1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" vpa_api "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/typed/autoscaling.k8s.io/v1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/checkpoint" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/input" @@ -80,7 +80,7 @@ func (r *recommender) GetClusterStateFeeder() input.ClusterStateFeeder { return r.clusterStateFeeder } -func processVPAUpdate(r *recommender, vpa *model.Vpa, observedVpa *v1.VerticalPodAutoscaler) { +func processVPAUpdate(r *recommender, vpa *model.Vpa, observedVpa *vpaautoscalingv1.VerticalPodAutoscaler) { resources := r.podResourceRecommender.GetRecommendedPodResources(GetContainerNameToAggregateStateMap(vpa)) had := vpa.HasRecommendation() @@ -120,7 +120,7 @@ func (r *recommender) UpdateVPAs() { defer cnt.Observe() // Create a channel to send VPA updates to workers - vpaUpdates := make(chan *v1.VerticalPodAutoscaler, len(r.clusterState.ObservedVPAs())) + vpaUpdates := make(chan *vpaautoscalingv1.VerticalPodAutoscaler, len(r.clusterState.ObservedVPAs())) // Create a wait group to wait for all workers to finish var wg sync.WaitGroup diff --git a/vertical-pod-autoscaler/pkg/recommender/routines/recommender_test.go b/vertical-pod-autoscaler/pkg/recommender/routines/recommender_test.go index 29d1dc2e13ee..5f63fdb01675 100644 --- 
a/vertical-pod-autoscaler/pkg/recommender/routines/recommender_test.go +++ b/vertical-pod-autoscaler/pkg/recommender/routines/recommender_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" + vpaautoscalingv1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" vpa_fake "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned/fake" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/logic" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/recommender/model" @@ -48,7 +48,7 @@ func TestProcessUpdateVPAsConcurrency(t *testing.T) { vpaCount := 1000 vpas := make(map[model.VpaID]*model.Vpa, vpaCount) - apiObjectVPAs := make([]*v1.VerticalPodAutoscaler, vpaCount) + apiObjectVPAs := make([]*vpaautoscalingv1.VerticalPodAutoscaler, vpaCount) fakedClient := make([]runtime.Object, vpaCount) for i := range vpaCount { @@ -97,7 +97,7 @@ func TestProcessUpdateVPAsConcurrency(t *testing.T) { defer cnt.Observe() // Create a channel to send VPA updates to workers - vpaUpdates := make(chan *v1.VerticalPodAutoscaler, len(apiObjectVPAs)) + vpaUpdates := make(chan *vpaautoscalingv1.VerticalPodAutoscaler, len(apiObjectVPAs)) var counter int64 @@ -196,7 +196,7 @@ func TestConcurrentAccessToSameVPA(t *testing.T) { err = r.clusterState.AddOrUpdateVpa(apiVpa, parsedSelector) assert.NoError(t, err, "Failed to add or update VPA in cluster state") - r.clusterState.SetObservedVPAs([]*v1.VerticalPodAutoscaler{apiVpa}) + r.clusterState.SetObservedVPAs([]*vpaautoscalingv1.VerticalPodAutoscaler{apiVpa}) // Now simulate multiple workers ALL processing the SAME VPA concurrently // This is the exact scenario that caused the production crash @@ -268,7 +268,7 @@ func TestConcurrentVPAMethodAccess(t *testing.T) { _ = vpa.AsStatus() _ = vpa.HasRecommendation() _ = vpa.HasMatchedPods() - _ = vpa.ConditionActive(v1.RecommendationProvided) + _ = vpa.ConditionActive(vpaautoscalingv1.RecommendationProvided) } }(w) } @@ -280,7 +280,7 @@ func TestConcurrentVPAMethodAccess(t *testing.T) { // by having multiple workers process overlapping sets of VPAs. func TestUpdateVPAsRaceCondition(t *testing.T) { vpaCount := 20 - apiObjectVPAs := make([]*v1.VerticalPodAutoscaler, vpaCount) + apiObjectVPAs := make([]*vpaautoscalingv1.VerticalPodAutoscaler, vpaCount) fakedClient := make([]runtime.Object, vpaCount) for i := range vpaCount { diff --git a/vertical-pod-autoscaler/pkg/recommender/util/histogram.go b/vertical-pod-autoscaler/pkg/recommender/util/histogram.go index eba0e46ee0dc..d4cd3517dd9b 100644 --- a/vertical-pod-autoscaler/pkg/recommender/util/histogram.go +++ b/vertical-pod-autoscaler/pkg/recommender/util/histogram.go @@ -228,14 +228,14 @@ func (h *histogram) SaveToChekpoint() (*vpa_types.HistogramCheckpoint, error) { } result.TotalWeight = h.totalWeight // Find max - max := 0. + maxWeight := 0. 
for bucket := h.minBucket; bucket <= h.maxBucket; bucket++ { - if h.bucketWeight[bucket] > max { - max = h.bucketWeight[bucket] + if h.bucketWeight[bucket] > maxWeight { + maxWeight = h.bucketWeight[bucket] } } // Compute ratio - ratio := float64(MaxCheckpointWeight) / max + ratio := float64(MaxCheckpointWeight) / maxWeight // Convert weights and drop near-zero weights for bucket := h.minBucket; bucket <= h.maxBucket; bucket++ { newWeight := uint32(round(h.bucketWeight[bucket] * ratio)) diff --git a/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_test.go b/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_test.go index 899e2cdc25ae..f7e000127425 100644 --- a/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_test.go +++ b/vertical-pod-autoscaler/pkg/target/controller_fetcher/controller_fetcher_test.go @@ -27,7 +27,7 @@ import ( appsv1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" + apiv1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -79,8 +79,8 @@ func simpleControllerFetcher() *controllerFetcher { // return not found if it tries to find the scale subresource on bah scaleNamespacer.AddReactor("get", "bah", func(action core.Action) (handled bool, ret runtime.Object, err error) { groupResource := schema.GroupResource{} - error := apierrors.NewNotFound(groupResource, "Foo") - return true, nil, error + err = apierrors.NewNotFound(groupResource, "Foo") + return true, nil, err }) // resource that can scale @@ -277,7 +277,7 @@ func TestControllerFetcher(t *testing.T) { name: "rc no parent", key: &ControllerKeyWithAPIVersion{ControllerKey: ControllerKey{ Name: testReplicationController, Kind: "ReplicationController", Namespace: testNamespace}}, - objects: []runtime.Object{&corev1.ReplicationController{ + objects: []runtime.Object{&apiv1.ReplicationController{ TypeMeta: metav1.TypeMeta{ Kind: "ReplicationController", }, diff --git a/vertical-pod-autoscaler/pkg/updater/logic/updater.go b/vertical-pod-autoscaler/pkg/updater/logic/updater.go index f5cd045b8890..f4b43fefe648 100644 --- a/vertical-pod-autoscaler/pkg/updater/logic/updater.go +++ b/vertical-pod-autoscaler/pkg/updater/logic/updater.go @@ -44,8 +44,8 @@ import ( "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target" controllerfetcher "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/controller_fetcher" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/priority" - restriction "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/restriction" - utils "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/utils" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/restriction" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/utils" metrics_updater "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics/updater" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/status" vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" diff --git a/vertical-pod-autoscaler/pkg/updater/logic/updater_test.go b/vertical-pod-autoscaler/pkg/updater/logic/updater_test.go index 9567adbc8c4e..b2f53a4594bf 100644 --- a/vertical-pod-autoscaler/pkg/updater/logic/updater_test.go +++ b/vertical-pod-autoscaler/pkg/updater/logic/updater_test.go @@ -26,7 +26,7 @@ import ( "github.com/stretchr/testify/assert" "go.uber.org/mock/gomock" "golang.org/x/time/rate" - v1 
"k8s.io/api/autoscaling/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,8 +40,8 @@ import ( controllerfetcher "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/controller_fetcher" target_mock "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/mock" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/priority" - restriction "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/restriction" - utils "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/utils" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/restriction" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/utils" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/status" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" ) @@ -252,7 +252,7 @@ func testRunOnceBase( podLister := &test.PodListerMock{} podLister.On("List").Return(pods, nil) - targetRef := &v1.CrossVersionObjectReference{ + targetRef := &autoscalingv1.CrossVersionObjectReference{ Kind: rc.Kind, Name: rc.Name, APIVersion: rc.APIVersion, @@ -393,7 +393,7 @@ func TestRunOnceIgnoreNamespaceMatchingPods(t *testing.T) { podLister := &test.PodListerMock{} podLister.On("List").Return(pods, nil) - targetRef := &v1.CrossVersionObjectReference{ + targetRef := &autoscalingv1.CrossVersionObjectReference{ Kind: rc.Kind, Name: rc.Name, APIVersion: rc.APIVersion, diff --git a/vertical-pod-autoscaler/pkg/updater/priority/scaling_direction_pod_eviction_admission_test.go b/vertical-pod-autoscaler/pkg/updater/priority/scaling_direction_pod_eviction_admission_test.go index e7039a8605d8..11d1b323d389 100644 --- a/vertical-pod-autoscaler/pkg/updater/priority/scaling_direction_pod_eviction_admission_test.go +++ b/vertical-pod-autoscaler/pkg/updater/priority/scaling_direction_pod_eviction_admission_test.go @@ -23,15 +23,15 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - v1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" + vpaautoscalingv1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" ) func TestLoopInit(t *testing.T) { - podEvictionRequirements := []*v1.EvictionRequirement{ + podEvictionRequirements := []*vpaautoscalingv1.EvictionRequirement{ { Resources: []corev1.ResourceName{corev1.ResourceCPU}, - ChangeRequirement: v1.TargetHigherThanRequests, + ChangeRequirement: vpaautoscalingv1.TargetHigherThanRequests, }, } container1Name := "test-container-1" @@ -44,7 +44,7 @@ func TestLoopInit(t *testing.T) { AddContainer(test.Container().WithName(container1Name).WithCPURequest(resource.MustParse("500m")).WithMemRequest(resource.MustParse("10Gi")).Get()). AddContainer(test.Container().WithName(container2Name).WithCPURequest(resource.MustParse("500m")).WithMemRequest(resource.MustParse("10Gi")).Get()). Get() - expectedEvictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{ + expectedEvictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{ pod: podEvictionRequirements, pod2: podEvictionRequirements, } @@ -53,7 +53,7 @@ func TestLoopInit(t *testing.T) { WithContainer(container1Name). WithEvictionRequirements(podEvictionRequirements). 
Get() - vpaToPodMap := map[*v1.VerticalPodAutoscaler][]*corev1.Pod{testVPA: {pod, pod2}} + vpaToPodMap := map[*vpaautoscalingv1.VerticalPodAutoscaler][]*corev1.Pod{testVPA: {pod, pod2}} t.Run("it should not require UpdateMode and EvictionRequirements.", func(t *testing.T) { sdpea := NewScalingDirectionPodEvictionAdmission() @@ -64,7 +64,7 @@ func TestLoopInit(t *testing.T) { WithContainer(container1Name). Get() - newVpaToPodMap := map[*v1.VerticalPodAutoscaler][]*corev1.Pod{newTestVPA: {pod, pod2}} + newVpaToPodMap := map[*vpaautoscalingv1.VerticalPodAutoscaler][]*corev1.Pod{newTestVPA: {pod, pod2}} sdpea.LoopInit(nil, newVpaToPodMap) assert.Len(t, sdpea.(*scalingDirectionPodEvictionAdmission).EvictionRequirements, 0) @@ -81,10 +81,10 @@ func TestLoopInit(t *testing.T) { sdpea := NewScalingDirectionPodEvictionAdmission() sdpea.LoopInit(nil, vpaToPodMap) - newPodEvictionRequirements := []*v1.EvictionRequirement{ + newPodEvictionRequirements := []*vpaautoscalingv1.EvictionRequirement{ { Resources: []corev1.ResourceName{corev1.ResourceMemory}, - ChangeRequirement: v1.TargetLowerThanRequests, + ChangeRequirement: vpaautoscalingv1.TargetLowerThanRequests, }, } newTestVPA := test.VerticalPodAutoscaler(). @@ -92,8 +92,8 @@ func TestLoopInit(t *testing.T) { WithContainer(container1Name). WithEvictionRequirements(newPodEvictionRequirements). Get() - newVpaToPodMap := map[*v1.VerticalPodAutoscaler][]*corev1.Pod{newTestVPA: {pod, pod2}} - newExpectedEvictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{ + newVpaToPodMap := map[*vpaautoscalingv1.VerticalPodAutoscaler][]*corev1.Pod{newTestVPA: {pod, pod2}} + newExpectedEvictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{ pod: newPodEvictionRequirements, pod2: newPodEvictionRequirements, } @@ -106,10 +106,10 @@ func TestLoopInit(t *testing.T) { sdpea := NewScalingDirectionPodEvictionAdmission() sdpea.LoopInit(nil, vpaToPodMap) - newPodEvictionRequirements := []*v1.EvictionRequirement{ + newPodEvictionRequirements := []*vpaautoscalingv1.EvictionRequirement{ { Resources: []corev1.ResourceName{corev1.ResourceMemory}, - ChangeRequirement: v1.TargetLowerThanRequests, + ChangeRequirement: vpaautoscalingv1.TargetLowerThanRequests, }, } newTestVPA := test.VerticalPodAutoscaler(). @@ -117,8 +117,8 @@ func TestLoopInit(t *testing.T) { WithContainer(container1Name). WithEvictionRequirements(newPodEvictionRequirements). 
             Get()
-        newVpaToPodMap := map[*v1.VerticalPodAutoscaler][]*corev1.Pod{newTestVPA: {pod2}}
-        newExpectedEvictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        newVpaToPodMap := map[*vpaautoscalingv1.VerticalPodAutoscaler][]*corev1.Pod{newTestVPA: {pod2}}
+        newExpectedEvictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             pod2: newPodEvictionRequirements,
         }
         sdpea.LoopInit(nil, newVpaToPodMap)
@@ -143,10 +143,10 @@ func TestAdmitForSingleContainer(t *testing.T) {
     })

     t.Run("it should admit a Pod for eviction if no resource request is present for a Pod", func(t *testing.T) {
-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{podWithoutRequests: {
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{podWithoutRequests: {
             {
                 Resources:         []corev1.ResourceName{corev1.ResourceCPU},
-                ChangeRequirement: v1.TargetHigherThanRequests,
+                ChangeRequirement: vpaautoscalingv1.TargetHigherThanRequests,
             },
         }}
         sdpea := NewScalingDirectionPodEvictionAdmission()
@@ -165,10 +165,10 @@ func TestAdmitForSingleContainer(t *testing.T) {
             WithMemRequest(resource.MustParse("1Gi")).
             WithMemLimit(resource.MustParse("1Gi")).Get()}

-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             podWithContainerStatus: {
                 {Resources: []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceMemory},
-                    ChangeRequirement: v1.TargetHigherThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetHigherThanRequests,
                 },
             },
         }
@@ -185,18 +185,18 @@ func TestAdmitForSingleContainer(t *testing.T) {

     t.Run("it should admit a Pod for eviction if no config is given", func(t *testing.T) {
         sdpea := NewScalingDirectionPodEvictionAdmission()
-        sdpea.(*scalingDirectionPodEvictionAdmission).EvictionRequirements = map[*corev1.Pod][]*v1.EvictionRequirement{pod: {}}
+        sdpea.(*scalingDirectionPodEvictionAdmission).EvictionRequirements = map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{pod: {}}

         recommendation := test.Recommendation().WithContainer(containerName).WithTarget("600m", "10Gi").Get()
         assert.Equal(t, true, sdpea.Admit(pod, recommendation))
     })

     t.Run("it should admit a Pod for eviction if Container CPU is scaled up and config allows scaling up CPU", func(t *testing.T) {
-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             pod: {
                 {
                     Resources:         []corev1.ResourceName{corev1.ResourceCPU},
-                    ChangeRequirement: v1.TargetHigherThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetHigherThanRequests,
                 },
             },
         }
@@ -208,11 +208,11 @@ func TestAdmitForSingleContainer(t *testing.T) {
     })

     t.Run("it should admit a Pod for eviction if Container CPU is scaled down and config allows scaling down CPU", func(t *testing.T) {
-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             pod: {
                 {
                     Resources:         []corev1.ResourceName{corev1.ResourceCPU},
-                    ChangeRequirement: v1.TargetLowerThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetLowerThanRequests,
                 },
             },
         }
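Every t.Run case in this test exercises the same rule: an EvictionRequirement pairs a list of resources with a direction (TargetHigherThanRequests or TargetLowerThanRequests), and a Pod is only admitted for eviction when at least one requirement is satisfied by the recommendation. A minimal standalone sketch of that directional check follows; it is illustrative only (the hypothetical met helper and plain int64 millicores are not code from this repository, which compares resource.Quantity values per container):

    package main

    import "fmt"

    type direction string

    const (
        higher direction = "TargetHigherThanRequests"
        lower  direction = "TargetLowerThanRequests"
    )

    // met reports whether one requirement is satisfied by a recommendation:
    // the target must move in the required direction relative to the request.
    func met(requestMilli, targetMilli int64, d direction) bool {
        if d == higher {
            return targetMilli > requestMilli
        }
        return targetMilli < requestMilli
    }

    func main() {
        // Request 500m, recommended target 600m: the recommendation scales up.
        fmt.Println(met(500, 600, higher)) // true  -> eviction admitted
        fmt.Println(met(500, 600, lower))  // false -> eviction not admitted
    }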
@@ -224,10 +224,10 @@ func TestAdmitForSingleContainer(t *testing.T) {
     })

     t.Run("it should not admit a Pod for eviction if Container CPU is scaled down and config allows only scaling up CPU", func(t *testing.T) {
-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             pod: {
                 {Resources: []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceMemory},
-                    ChangeRequirement: v1.TargetHigherThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetHigherThanRequests,
                 },
             },
         }
@@ -239,10 +239,10 @@ func TestAdmitForSingleContainer(t *testing.T) {
     })

     t.Run("it should admit a Pod for eviction even if Container CPU is scaled down and config allows only scaling up CPU, because memory is scaled up", func(t *testing.T) {
-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             pod: {
                 {Resources: []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceMemory},
-                    ChangeRequirement: v1.TargetHigherThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetHigherThanRequests,
                 },
             },
         }
@@ -254,11 +254,11 @@ func TestAdmitForSingleContainer(t *testing.T) {
     })

     t.Run("it should admit a Pod for eviction if Container memory is scaled up and config allows scaling up memory", func(t *testing.T) {
-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             pod: {
                 {
                     Resources:         []corev1.ResourceName{corev1.ResourceMemory},
-                    ChangeRequirement: v1.TargetHigherThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetHigherThanRequests,
                 },
             },
         }
@@ -270,11 +270,11 @@ func TestAdmitForSingleContainer(t *testing.T) {
     })

     t.Run("it should admit a Pod for eviction if Container memory is scaled down and config allows scaling down memory", func(t *testing.T) {
-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             pod: {
                 {
                     Resources:         []corev1.ResourceName{corev1.ResourceMemory},
-                    ChangeRequirement: v1.TargetLowerThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetLowerThanRequests,
                 },
             },
         }
@@ -286,11 +286,11 @@ func TestAdmitForSingleContainer(t *testing.T) {
     })

     t.Run("it should not admit a Pod for eviction if Container memory is scaled down and config allows only scaling up memory", func(t *testing.T) {
-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             pod: {
                 {
                     Resources:         []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceMemory},
-                    ChangeRequirement: v1.TargetHigherThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetHigherThanRequests,
                 },
             },
         }
@@ -302,15 +302,15 @@ func TestAdmitForSingleContainer(t *testing.T) {
     })

     t.Run("it should admit a Pod for eviction if Container CPU is scaled up, memory is scaled down and config allows scaling up CPU and scaling down memory", func(t *testing.T) {
-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             pod: {
                 {
                     Resources:         []corev1.ResourceName{corev1.ResourceCPU},
-                    ChangeRequirement: v1.TargetHigherThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetHigherThanRequests,
                 },
                 {
                     Resources:         []corev1.ResourceName{corev1.ResourceMemory},
-                    ChangeRequirement: v1.TargetLowerThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetLowerThanRequests,
                 },
             },
         }
@@ -322,11 +322,11 @@ func TestAdmitForSingleContainer(t *testing.T) {
     })

     t.Run("it should not admit a Pod for eviction if Container CPU is scaled up and config allows only scaling down CPU", func(t *testing.T) {
-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             pod: {
                 {
                     Resources:         []corev1.ResourceName{corev1.ResourceCPU},
-                    ChangeRequirement: v1.TargetLowerThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetLowerThanRequests,
                 },
             },
         }
@@ -338,15 +338,15 @@ func TestAdmitForSingleContainer(t *testing.T) {
     })

     t.Run("it should not admit a Pod for eviction if Container CPU is scaled up, memory is scaled down and config allows scaling up CPU and scaling up memory", func(t *testing.T) {
-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             pod: {
                 {
                     Resources:         []corev1.ResourceName{corev1.ResourceCPU},
-                    ChangeRequirement: v1.TargetHigherThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetHigherThanRequests,
                 },
                 {
                     Resources:         []corev1.ResourceName{corev1.ResourceMemory},
-                    ChangeRequirement: v1.TargetHigherThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetHigherThanRequests,
                 },
             },
         }
@@ -367,18 +367,18 @@ func TestAdmitForMultipleContainer(t *testing.T) {
         Get()

     t.Run("it should admit the Pod if both containers fulfill the EvictionRequirements", func(tt *testing.T) {
-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             pod: {
                 {
                     Resources:         []corev1.ResourceName{corev1.ResourceCPU},
-                    ChangeRequirement: v1.TargetHigherThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetHigherThanRequests,
                 },
             },
         }
         sdpea := NewScalingDirectionPodEvictionAdmission()
         sdpea.(*scalingDirectionPodEvictionAdmission).EvictionRequirements = evictionRequirements
-        recommendation := &v1.RecommendedPodResources{
-            ContainerRecommendations: []v1.RecommendedContainerResources{
+        recommendation := &vpaautoscalingv1.RecommendedPodResources{
+            ContainerRecommendations: []vpaautoscalingv1.RecommendedContainerResources{
                 test.Recommendation().WithContainer(container1Name).WithTarget("600m", "10Gi").GetContainerResources(),
                 test.Recommendation().WithContainer(container2Name).WithTarget("700m", "10Gi").GetContainerResources(),
             },
@@ -388,18 +388,18 @@ func TestAdmitForMultipleContainer(t *testing.T) {
     })

     t.Run("it should admit the Pod if only one container fulfills the EvictionRequirements", func(tt *testing.T) {
-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             pod: {
                 {
                     Resources:         []corev1.ResourceName{corev1.ResourceCPU},
-                    ChangeRequirement: v1.TargetHigherThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetHigherThanRequests,
                 },
             },
         }
         sdpea := NewScalingDirectionPodEvictionAdmission()
         sdpea.(*scalingDirectionPodEvictionAdmission).EvictionRequirements = evictionRequirements
-        recommendation := &v1.RecommendedPodResources{
-            ContainerRecommendations: []v1.RecommendedContainerResources{
+        recommendation := &vpaautoscalingv1.RecommendedPodResources{
+            ContainerRecommendations: []vpaautoscalingv1.RecommendedContainerResources{
                 test.Recommendation().WithContainer(container1Name).WithTarget("200m", "10Gi").GetContainerResources(),
                 test.Recommendation().WithContainer(container2Name).WithTarget("700m", "10Gi").GetContainerResources(),
             },
@@ -409,18 +409,18 @@ func TestAdmitForMultipleContainer(t *testing.T) {
     })

     t.Run("it should not admit the Pod if no container fulfills the EvictionRequirements", func(tt *testing.T) {
-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             pod: {
                 {
                     Resources:         []corev1.ResourceName{corev1.ResourceCPU},
-                    ChangeRequirement: v1.TargetHigherThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetHigherThanRequests,
                 },
             },
         }
         sdpea := NewScalingDirectionPodEvictionAdmission()
         sdpea.(*scalingDirectionPodEvictionAdmission).EvictionRequirements = evictionRequirements
-        recommendation := &v1.RecommendedPodResources{
-            ContainerRecommendations: []v1.RecommendedContainerResources{
+        recommendation := &vpaautoscalingv1.RecommendedPodResources{
+            ContainerRecommendations: []vpaautoscalingv1.RecommendedContainerResources{
                 test.Recommendation().WithContainer(container1Name).WithTarget("200m", "10Gi").GetContainerResources(),
                 test.Recommendation().WithContainer(container2Name).WithTarget("300m", "10Gi").GetContainerResources(),
             },
@@ -430,18 +430,18 @@ func TestAdmitForMultipleContainer(t *testing.T) {
     })

     t.Run("it should not admit the Pod even if there is a container that doesn't have a Recommendation and the other one doesn't fulfill the EvictionRequirements", func(tt *testing.T) {
-        evictionRequirements := map[*corev1.Pod][]*v1.EvictionRequirement{
+        evictionRequirements := map[*corev1.Pod][]*vpaautoscalingv1.EvictionRequirement{
             pod: {
                 {
                     Resources:         []corev1.ResourceName{corev1.ResourceCPU},
-                    ChangeRequirement: v1.TargetHigherThanRequests,
+                    ChangeRequirement: vpaautoscalingv1.TargetHigherThanRequests,
                 },
             },
         }
         sdpea := NewScalingDirectionPodEvictionAdmission()
         sdpea.(*scalingDirectionPodEvictionAdmission).EvictionRequirements = evictionRequirements
-        recommendation := &v1.RecommendedPodResources{
-            ContainerRecommendations: []v1.RecommendedContainerResources{
+        recommendation := &vpaautoscalingv1.RecommendedPodResources{
+            ContainerRecommendations: []vpaautoscalingv1.RecommendedContainerResources{
                 test.Recommendation().WithContainer(container2Name).WithTarget("300m", "10Gi").GetContainerResources(),
             },
         }
diff --git a/vertical-pod-autoscaler/pkg/updater/restriction/pods_inplace_restriction.go b/vertical-pod-autoscaler/pkg/updater/restriction/pods_inplace_restriction.go
index 36533f044f67..a4a289bc2a8a 100644
--- a/vertical-pod-autoscaler/pkg/updater/restriction/pods_inplace_restriction.go
+++ b/vertical-pod-autoscaler/pkg/updater/restriction/pods_inplace_restriction.go
@@ -35,7 +35,7 @@ import (
     "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/resource/pod/patch"
     vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
     "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/features"
-    utils "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/utils"
+    "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/utils"
 )

 // TODO: Make these configurable by flags
diff --git a/vertical-pod-autoscaler/pkg/updater/restriction/pods_inplace_restriction_test.go b/vertical-pod-autoscaler/pkg/updater/restriction/pods_inplace_restriction_test.go
index 0d02b0c4c934..d344c863ea4f 100644
--- a/vertical-pod-autoscaler/pkg/updater/restriction/pods_inplace_restriction_test.go
+++ b/vertical-pod-autoscaler/pkg/updater/restriction/pods_inplace_restriction_test.go
@@ -30,7 +30,7 @@ import (
     baseclocktest "k8s.io/utils/clock/testing"

     "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/features"
-    utils "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/utils"
+    "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/utils"
     "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
 )
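The two restriction hunks above are typical of the alias cleanups in this change: an import alias that merely repeats the imported package's own name adds nothing, which is exactly the shape revive's redundant-import-alias rule reports. A tiny self-contained illustration, using a standard-library package in place of the repository's own paths:

    package example

    // The alias below is legal Go but redundant: the package is already
    // named "strings", so revive's redundant-import-alias rule flags it.
    // The fix is the bare form `import "strings"`.
    import strings "strings"

    // Callers refer to the package the same way with or without the alias.
    var _ = strings.ToUpper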
diff --git a/vertical-pod-autoscaler/pkg/utils/annotations/vpa_observed_containers.go b/vertical-pod-autoscaler/pkg/utils/annotations/vpa_observed_containers.go
index ec705a6a6c5e..ad8ce2b0bbae 100644
--- a/vertical-pod-autoscaler/pkg/utils/annotations/vpa_observed_containers.go
+++ b/vertical-pod-autoscaler/pkg/utils/annotations/vpa_observed_containers.go
@@ -20,7 +20,7 @@ import (
     "fmt"
     "strings"

-    v1 "k8s.io/api/core/v1"
+    apiv1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/util/validation"
 )

@@ -31,7 +31,7 @@ const (
 )

 // GetVpaObservedContainersValue creates an annotation value for a given pod.
-func GetVpaObservedContainersValue(pod *v1.Pod) string {
+func GetVpaObservedContainersValue(pod *apiv1.Pod) string {
     containerNames := make([]string, len(pod.Spec.Containers))
     for i := range pod.Spec.Containers {
         containerNames[i] = pod.Spec.Containers[i].Name
diff --git a/vertical-pod-autoscaler/pkg/utils/annotations/vpa_observed_containers_test.go b/vertical-pod-autoscaler/pkg/utils/annotations/vpa_observed_containers_test.go
index c1738f720aee..d320a498c260 100644
--- a/vertical-pod-autoscaler/pkg/utils/annotations/vpa_observed_containers_test.go
+++ b/vertical-pod-autoscaler/pkg/utils/annotations/vpa_observed_containers_test.go
@@ -21,7 +21,7 @@ import (
     "testing"

     "github.com/stretchr/testify/assert"
-    v1 "k8s.io/api/core/v1"
+    apiv1 "k8s.io/api/core/v1"

     "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
 )

@@ -29,7 +29,7 @@ import (
 func TestGetVpaObservedContainersValue(t *testing.T) {
     tests := []struct {
         name string
-        pod  *v1.Pod
+        pod  *apiv1.Pod
         want string
     }{
         {
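Per its doc comment, GetVpaObservedContainersValue turns a Pod's container names into a single annotation value. A usage sketch under stated assumptions -- the ", " separator and the observedContainersValue name are assumptions for illustration; the hunk only shows that the value is built from pod.Spec.Containers names:

    package main

    import (
        "fmt"
        "strings"

        apiv1 "k8s.io/api/core/v1"
    )

    // observedContainersValue mirrors the helper above: collect the Pod's
    // container names and join them into one annotation value.
    func observedContainersValue(pod *apiv1.Pod) string {
        names := make([]string, len(pod.Spec.Containers))
        for i := range pod.Spec.Containers {
            names[i] = pod.Spec.Containers[i].Name
        }
        return strings.Join(names, ", ") // assumed separator
    }

    func main() {
        pod := &apiv1.Pod{Spec: apiv1.PodSpec{Containers: []apiv1.Container{
            {Name: "app"}, {Name: "sidecar"},
        }}}
        fmt.Println(observedContainersValue(pod)) // app, sidecar
    }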
diff --git a/vertical-pod-autoscaler/pkg/utils/resources/resourcehelpers.go b/vertical-pod-autoscaler/pkg/utils/resources/resourcehelpers.go
index 54c71b9faa99..aff4c4c58c8b 100644
--- a/vertical-pod-autoscaler/pkg/utils/resources/resourcehelpers.go
+++ b/vertical-pod-autoscaler/pkg/utils/resources/resourcehelpers.go
@@ -17,7 +17,7 @@ limitations under the License.
 package resourcehelpers

 import (
-    v1 "k8s.io/api/core/v1"
+    apiv1 "k8s.io/api/core/v1"
     "k8s.io/klog/v2"

     metrics_resources "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics/resources"
@@ -31,7 +31,7 @@ import (
 // - Otherwise, fallback to the resource requests defined in the pod spec.
 //
 // [1] https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/1287-in-place-update-pod-resources
-func ContainerRequestsAndLimits(containerName string, pod *v1.Pod) (v1.ResourceList, v1.ResourceList) {
+func ContainerRequestsAndLimits(containerName string, pod *apiv1.Pod) (apiv1.ResourceList, apiv1.ResourceList) {
     cs := containerStatusFor(containerName, pod)
     if cs != nil && cs.Resources != nil {
         metrics_resources.RecordGetResourcesCount(metrics_resources.ContainerStatus)
@@ -56,7 +56,7 @@
 // - Otherwise, fallback to the resource requests defined in the pod spec.
 //
 // [1] https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/1287-in-place-update-pod-resources
-func InitContainerRequestsAndLimits(initContainerName string, pod *v1.Pod) (v1.ResourceList, v1.ResourceList) {
+func InitContainerRequestsAndLimits(initContainerName string, pod *apiv1.Pod) (apiv1.ResourceList, apiv1.ResourceList) {
     cs := initContainerStatusFor(initContainerName, pod)
     if cs != nil && cs.Resources != nil {
         metrics_resources.RecordGetResourcesCount(metrics_resources.InitContainerStatus)
@@ -73,7 +73,7 @@ func InitContainerRequestsAndLimits(initContainerName string, pod *v1.Pod) (v1.R
     return nil, nil
 }

-func findContainer(containerName string, pod *v1.Pod) *v1.Container {
+func findContainer(containerName string, pod *apiv1.Pod) *apiv1.Container {
     for i, container := range pod.Spec.Containers {
         if container.Name == containerName {
             return &pod.Spec.Containers[i]
@@ -82,7 +82,7 @@ func findContainer(containerName string, pod *v1.Pod) *v1.Container {
     return nil
 }

-func findInitContainer(initContainerName string, pod *v1.Pod) *v1.Container {
+func findInitContainer(initContainerName string, pod *apiv1.Pod) *apiv1.Container {
     for i, initContainer := range pod.Spec.InitContainers {
         if initContainer.Name == initContainerName {
             return &pod.Spec.InitContainers[i]
@@ -91,7 +91,7 @@ func findInitContainer(initContainerName string, pod *v1.Pod) *v1.Container {
     return nil
 }

-func containerStatusFor(containerName string, pod *v1.Pod) *v1.ContainerStatus {
+func containerStatusFor(containerName string, pod *apiv1.Pod) *apiv1.ContainerStatus {
     for i, containerStatus := range pod.Status.ContainerStatuses {
         if containerStatus.Name == containerName {
             return &pod.Status.ContainerStatuses[i]
@@ -100,7 +100,7 @@ func containerStatusFor(containerName string, pod *v1.Pod) *v1.ContainerStatus {
     return nil
 }

-func initContainerStatusFor(initContainerName string, pod *v1.Pod) *v1.ContainerStatus {
+func initContainerStatusFor(initContainerName string, pod *apiv1.Pod) *apiv1.ContainerStatus {
     for i, initContainerStatus := range pod.Status.InitContainerStatuses {
         if initContainerStatus.Name == initContainerName {
             return &pod.Status.InitContainerStatuses[i]
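The doc comments on ContainerRequestsAndLimits and InitContainerRequestsAndLimits describe a two-step lookup: prefer the resources reported in the container status (populated when in-place pod resizing, KEP-1287, is in effect) and fall back to the pod spec. A simplified sketch of that order -- the real helpers also record metrics and return limits alongside requests, and the requestsFor name is hypothetical:

    package main

    import (
        "fmt"

        apiv1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    // requestsFor prefers ContainerStatus.Resources (the currently applied
    // values under in-place resize) and falls back to the spec.
    func requestsFor(name string, pod *apiv1.Pod) apiv1.ResourceList {
        for i := range pod.Status.ContainerStatuses {
            cs := &pod.Status.ContainerStatuses[i]
            if cs.Name == name && cs.Resources != nil {
                return cs.Resources.Requests
            }
        }
        for i := range pod.Spec.Containers {
            if pod.Spec.Containers[i].Name == name {
                return pod.Spec.Containers[i].Resources.Requests
            }
        }
        return nil
    }

    func main() {
        pod := &apiv1.Pod{}
        pod.Spec.Containers = []apiv1.Container{{
            Name: "app",
            Resources: apiv1.ResourceRequirements{
                Requests: apiv1.ResourceList{apiv1.ResourceCPU: resource.MustParse("500m")},
            },
        }}
        // No container status carries Resources, so the spec fallback wins.
        fmt.Println(requestsFor("app", pod))
    }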
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/client-go/listers/core/v1" + listersv1 "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/record" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" vpa_types_v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1" vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1" vpa_lister_v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1" - utils "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/utils" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/updater/utils" ) var ( @@ -145,11 +145,11 @@ type PodListerMock struct { } // Pods is a mock implementation of PodLister.Pods -func (m *PodListerMock) Pods(namespace string) v1.PodNamespaceLister { +func (m *PodListerMock) Pods(namespace string) listersv1.PodNamespaceLister { args := m.Called(namespace) - var returnArg v1.PodNamespaceLister + var returnArg listersv1.PodNamespaceLister if args.Get(0) != nil { - returnArg = args.Get(0).(v1.PodNamespaceLister) + returnArg = args.Get(0).(listersv1.PodNamespaceLister) } return returnArg } diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go index 43c9fdc54fae..70098b7a440a 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/api_test.go @@ -23,7 +23,7 @@ import ( "time" "github.com/stretchr/testify/assert" - v1 "k8s.io/api/autoscaling/v1" + apiv1 "k8s.io/api/autoscaling/v1" core "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" meta "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -163,7 +163,7 @@ func TestGetControllingVPAForPod(t *testing.T) { vpaA := vpaBuilder.WithCreationTimestamp(time.Unix(5, 0)).Get() vpaB := vpaBuilder.WithCreationTimestamp(time.Unix(10, 0)).Get() nonMatchingVPA := vpaBuilder.WithCreationTimestamp(time.Unix(2, 0)).Get() - vpaA.Spec.TargetRef = &v1.CrossVersionObjectReference{ + vpaA.Spec.TargetRef = &apiv1.CrossVersionObjectReference{ Kind: "StatefulSet", Name: "test-sts", APIVersion: "apps/v1", diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go index 565fd430d0bc..69b97ca8bba4 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go @@ -250,8 +250,8 @@ func maybeCapToPolicyMax(recommended resource.Quantity, resourceName apiv1.Resou } func maybeCapToMax(recommended resource.Quantity, resourceName apiv1.ResourceName, - max apiv1.ResourceList) (resource.Quantity, bool) { - maxResource, found := max[resourceName] + maxAllowed apiv1.ResourceList) (resource.Quantity, bool) { + maxResource, found := maxAllowed[resourceName] if found && !maxResource.IsZero() && recommended.Cmp(maxResource) > 0 { return maxResource, true } @@ -259,8 +259,8 @@ func maybeCapToMax(recommended resource.Quantity, resourceName apiv1.ResourceNam } func maybeCapToMin(recommended resource.Quantity, resourceName apiv1.ResourceName, - min apiv1.ResourceList) (resource.Quantity, bool) { - minResource, found := min[resourceName] + minAllowed apiv1.ResourceList) (resource.Quantity, bool) { + minResource, found := minAllowed[resourceName] if found && !minResource.IsZero() && recommended.Cmp(minResource) < 0 { return minResource, true } diff --git 
diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go b/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go
index 4e5dc93c6024..0a1315c54bca 100644
--- a/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go
+++ b/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go
@@ -17,7 +17,7 @@ limitations under the License.
 package api

 import (
-    v1 "k8s.io/api/core/v1"
+    apiv1 "k8s.io/api/core/v1"

     vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
 )
@@ -31,5 +31,5 @@ type RecommendationProcessor interface {
     // VPA policy and possibly other internal RecommendationProcessor context.
     // Must return a non-nil pointer to RecommendedPodResources or error.
     Apply(vpa *vpa_types.VerticalPodAutoscaler,
-        pod *v1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error)
+        pod *apiv1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error)
 }
diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor.go b/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor.go
index b9ccbaa989a0..edfe1605f6f1 100644
--- a/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor.go
+++ b/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor.go
@@ -19,7 +19,7 @@ package api
 import (
     "errors"

-    v1 "k8s.io/api/core/v1"
+    apiv1 "k8s.io/api/core/v1"

     vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
 )
@@ -36,7 +36,7 @@ type sequentialRecommendationProcessor struct {
 // Apply chains calls to underlying RecommendationProcessors in order provided on object construction
 func (p *sequentialRecommendationProcessor) Apply(
     vpa *vpa_types.VerticalPodAutoscaler,
-    pod *v1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) {
+    pod *apiv1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) {
     if vpa == nil {
         return nil, nil, errors.New("cannot process nil vpa")
     }
diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor_test.go
index 3f01e0fad55f..69dabedd821f 100644
--- a/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor_test.go
+++ b/vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor_test.go
@@ -20,7 +20,7 @@ import (
     "testing"

     "github.com/stretchr/testify/assert"
-    v1 "k8s.io/api/core/v1"
+    apiv1 "k8s.io/api/core/v1"

     vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
 )
@@ -30,7 +30,7 @@ type fakeProcessor struct {
 }

 func (p *fakeProcessor) Apply(vpa *vpa_types.VerticalPodAutoscaler,
-    pod *v1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) {
+    pod *apiv1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) {
     result := vpa.Status.Recommendation
     result.ContainerRecommendations[0].ContainerName += p.message
     containerToAnnotationsMap := ContainerToAnnotationsMap{"trace": []string{p.message}}