diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 820ac39..f24eeca 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -64,3 +64,12 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - batch
+  resources:
+  - cronjobs
+  verbs:
+  - get
+  - list
+  - patch
+  - update
diff --git a/internal/handlers/idler/service-kubernetes.go b/internal/handlers/idler/service-kubernetes.go
index e492738..ba5ae6a 100644
--- a/internal/handlers/idler/service-kubernetes.go
+++ b/internal/handlers/idler/service-kubernetes.go
@@ -10,6 +10,7 @@ import (
 	"github.com/go-logr/logr"
 	"github.com/uselagoon/aergia-controller/internal/handlers/metrics"
 	appsv1 "k8s.io/api/apps/v1"
+	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
 	networkv1 "k8s.io/api/networking/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -163,6 +164,7 @@ func (h *Idler) KubernetesServiceIdler(ctx context.Context, opLog logr.Logger, n
 			}
 			opLog.Info("Environment will be idled")
 			h.idleDeployments(ctx, opLog, deployments, forceIdle, forceScale)
+			h.idleCronjobs(ctx, opLog, namespace.Name, forceIdle, forceScale)
 		}
 	}
 }
@@ -217,6 +219,61 @@ func (h *Idler) idleDeployments(ctx context.Context, opLog logr.Logger, deployme
 	}
 }
 
+// idleCronjobs suspends any CronJobs in the given namespace that match the
+// service deployment selectors, labelling them so the unidler can resume them.
+func (h *Idler) idleCronjobs(ctx context.Context, opLog logr.Logger, namespace string, forceIdle, forceScale bool) {
+	labelRequirements := generateLabelRequirements(h.Selectors.Service.Deployments)
+	listOption := (&client.ListOptions{}).ApplyOptions([]client.ListOption{
+		client.InNamespace(namespace),
+		client.MatchingLabelsSelector{
+			Selector: labels.NewSelector().Add(labelRequirements...),
+		},
+	})
+	cronjobs := &batchv1.CronJobList{}
+	if err := h.Client.List(ctx, cronjobs, listOption); err != nil {
+		// if we can't get any cronjobs for this namespace, log it and move on to the next
+		opLog.Error(err, "Error getting cronjobs")
+		return
+	}
+	for _, cronjob := range cronjobs.Items {
+		// @TODO: use the patch method for the k8s client for now, this seems to work just fine
+		if !h.DryRun {
+			suspendCronjob := cronjob.DeepCopy()
+			cronjobLabels := map[string]string{
+				// add the watch label so that the unidler knows to look at it
+				"idling.amazee.io/watch": "true",
+				"idling.amazee.io/idled": "true",
+			}
+			if forceIdle {
+				cronjobLabels["idling.amazee.io/force-idled"] = "true"
+			}
+			if forceScale {
+				cronjobLabels["idling.amazee.io/force-scaled"] = "true"
+			}
+			mergePatch, _ := json.Marshal(map[string]interface{}{
+				"spec": map[string]interface{}{
+					"suspend": true, // suspend the cronjob
+				},
+				"metadata": map[string]interface{}{
+					"labels": cronjobLabels,
+					"annotations": map[string]string{
+						// add these annotations so user knows to look at them
+						"idling.amazee.io/idled-at": time.Now().Format(time.RFC3339),
+					},
+				},
+			})
+			if err := h.Client.Patch(ctx, suspendCronjob, client.RawPatch(types.MergePatchType, mergePatch)); err != nil {
+				// log it but try and suspend the rest of the cronjobs anyway (some idled is better than none?)
+				opLog.Info(fmt.Sprintf("Error suspending cronjob %s", cronjob.Name))
+			} else {
+				opLog.Info(fmt.Sprintf("Cronjob %s suspended", cronjob.Name))
+			}
+		} else {
+			opLog.Info(fmt.Sprintf("Cronjob %s would suspend", cronjob.Name))
+		}
+	}
+}
+
 /*
 patchIngress will patch any ingress with matching labels with the `custom-http-errors` annotation.
 this annotation is used by the unidler to make sure that the correct information is passed to the custom backend for
diff --git a/internal/handlers/idler/service.go b/internal/handlers/idler/service.go
index e2bd6c5..2779261 100644
--- a/internal/handlers/idler/service.go
+++ b/internal/handlers/idler/service.go
@@ -16,6 +16,7 @@ import (
 // +kubebuilder:rbac:groups=apps,resources=deployments,verbs=list;get;watch;patch
 // +kubebuilder:rbac:groups=*,resources=ingresses,verbs=get;list;watch;update;patch
 // +kubebuilder:rbac:groups=*,resources=ingress/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=list;get;update;patch
 
 // ServiceIdler will run the Service idler process.
 func (h *Idler) ServiceIdler() {
diff --git a/internal/handlers/unidler/unidler.go b/internal/handlers/unidler/unidler.go
index 30c83e1..251cb5d 100644
--- a/internal/handlers/unidler/unidler.go
+++ b/internal/handlers/unidler/unidler.go
@@ -13,6 +13,7 @@ import (
 	"github.com/go-logr/logr"
 	"github.com/uselagoon/aergia-controller/internal/handlers/metrics"
 	appsv1 "k8s.io/api/apps/v1"
+	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/selection"
@@ -183,6 +184,41 @@ func (h *Unidler) Unidle(ctx context.Context, namespace *corev1.Namespace, opLog
 		}
 	}
+	// unsuspend any cronjobs that were suspended when the environment was idled
+	cronjobs := &batchv1.CronJobList{}
+	if err := h.Client.List(ctx, cronjobs, listOption); err != nil {
+		// log it and fall through; the deployment pod-readiness wait below must still run
+		opLog.Info(fmt.Sprintf("Unable to get any cronjobs - %s", namespace.Name))
+	}
+	for _, cronjob := range cronjobs.Items {
+		// if the idled label is true
+		lv, lok := cronjob.Labels["idling.amazee.io/idled"]
+		if lok && lv == "true" {
+			opLog.Info(fmt.Sprintf("Cronjob %s - %s", cronjob.Name, namespace.Name))
+			mergePatch, _ := json.Marshal(map[string]interface{}{
+				"spec": map[string]interface{}{
+					"suspend": false, // unsuspend the cronjob
+				},
+				"metadata": map[string]interface{}{
+					"labels": map[string]interface{}{
+						"idling.amazee.io/idled":        "false",
+						"idling.amazee.io/force-idled":  nil,
+						"idling.amazee.io/force-scaled": nil,
+					},
+					"annotations": map[string]interface{}{
+						"idling.amazee.io/idled-at": nil,
+					},
+				},
+			})
+			unsuspendCronjob := cronjob.DeepCopy()
+			if err := h.Client.Patch(ctx, unsuspendCronjob, ctrlClient.RawPatch(types.MergePatchType, mergePatch)); err != nil {
+				// log it but try and unsuspend the rest of the cronjobs anyway (some unidled is better than none?)
+				opLog.Info(fmt.Sprintf("Error unsuspending cronjob %s - %s", cronjob.Name, namespace.Name))
+			} else {
+				opLog.Info(fmt.Sprintf("Cronjob %s unsuspended - %s", cronjob.Name, namespace.Name))
+			}
+		}
+	}
 	// now wait for the pods of these deployments to be ready
 	// this could still result in 503 for users until the resulting services/endpoints are active and receiving traffic
 	for _, deploy := range deployments.Items {