From 027f180b10679b4e2e3e12b021269a5fd4970387 Mon Sep 17 00:00:00 2001 From: Priyansh Saxena Date: Mon, 6 Oct 2025 02:33:33 +0530 Subject: [PATCH 01/34] feat: Add flexible topology configuration support to SliceConfig CRD MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added topology configuration API to SliceConfig CRD to enable hub-spoke, partial-mesh, and custom connectivity patterns, moving away from hardcoded full-mesh topology. Changes include: - Extended SliceConfig API with TopologyConfig struct supporting multiple topology types (auto, full-mesh, hub-spoke, partial-mesh, custom) - Added hub-spoke configuration with allowSpokeToSpoke flag - Added custom connectivity matrix for explicit cluster-to-cluster routing - Added cluster VPN role assignment (auto, server, client) - Added policy nodes for security-aware routing in auto topology mode - Updated CRD OpenAPI schema with validation rules and defaults - Added three sample SliceConfig manifests demonstrating topology patterns API changes are backward compatible - existing SliceConfigs without topologyConfig will default to 'auto' mode. 
Signed-off-by: Priyansh Saxena Signed-off-by: “Transcendental-Programmer” <“priyena.programming@gmail.com”> Signed-off-by: Priyansh Saxena Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- apis/controller/v1alpha1/sliceconfig_types.go | 44 +++++++++++++ .../controller.kubeslice.io_sliceconfigs.yaml | 62 +++++++++++++++++++ config/samples/topology-auto-secure.yaml | 16 +++++ config/samples/topology-custom-matrix.yaml | 22 +++++++ config/samples/topology-hub-spoke.yaml | 23 +++++++ 5 files changed, 167 insertions(+) create mode 100644 config/samples/topology-auto-secure.yaml create mode 100644 config/samples/topology-custom-matrix.yaml create mode 100644 config/samples/topology-hub-spoke.yaml diff --git a/apis/controller/v1alpha1/sliceconfig_types.go b/apis/controller/v1alpha1/sliceconfig_types.go index 38b7bbded..437977c74 100644 --- a/apis/controller/v1alpha1/sliceconfig_types.go +++ b/apis/controller/v1alpha1/sliceconfig_types.go @@ -74,6 +74,8 @@ type SliceConfigSpec struct { // RenewBefore is used for renew now! 
RenewBefore *metav1.Time `json:"renewBefore,omitempty"` VPNConfig *VPNConfiguration `json:"vpnConfig,omitempty"` + // TopologyConfig defines cluster connectivity patterns + TopologyConfig *TopologyConfig `json:"topologyConfig,omitempty"` } // ExternalGatewayConfig is the configuration for external gateways like 'istio', etc/ @@ -174,6 +176,48 @@ type VPNConfiguration struct { Cipher string `json:"cipher"` } +// +kubebuilder:validation:Enum:=auto;full-mesh;hub-spoke;partial-mesh;custom +type TopologyType string + +const ( + TopologyAuto TopologyType = "auto" + TopologyFullMesh TopologyType = "full-mesh" + TopologyHubSpoke TopologyType = "hub-spoke" + TopologyPartialMesh TopologyType = "partial-mesh" + TopologyCustom TopologyType = "custom" +) + +type TopologyConfig struct { + //+kubebuilder:default:=auto + TopologyType TopologyType `json:"topologyType,omitempty"` + HubSpoke *HubSpokeConfig `json:"hubSpoke,omitempty"` + ConnectivityMatrix []ConnectivityEntry `json:"connectivityMatrix,omitempty"` + ClusterRoles []ClusterRole `json:"clusterRoles,omitempty"` + PolicyNodes []string `json:"policyNodes,omitempty"` +} + +type HubSpokeConfig struct { + //+kubebuilder:validation:Required + HubClusters []string `json:"hubClusters"` + SpokeClusters []string `json:"spokeClusters,omitempty"` + AllowSpokeToSpoke bool `json:"allowSpokeToSpoke,omitempty"` +} + +type ConnectivityEntry struct { + //+kubebuilder:validation:Required + SourceCluster string `json:"sourceCluster"` + //+kubebuilder:validation:Required + TargetClusters []string `json:"targetClusters"` +} + +type ClusterRole struct { + //+kubebuilder:validation:Required + ClusterName string `json:"clusterName"` + //+kubebuilder:default:=auto + //+kubebuilder:validation:Enum:=auto;server;client + VPNRole string `json:"vpnRole,omitempty"` +} + type KubesliceEvent struct { // Type of the event. 
Can be one of Error, Success or InProgress Type string `json:"type,omitempty"` diff --git a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml index 70063d3b8..e7657966b 100644 --- a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml +++ b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml @@ -227,6 +227,68 @@ spec: type: string standardQosProfileName: type: string + topologyConfig: + description: TopologyConfig defines cluster connectivity patterns + properties: + clusterRoles: + items: + properties: + clusterName: + type: string + vpnRole: + default: auto + enum: + - auto + - server + - client + type: string + required: + - clusterName + type: object + type: array + connectivityMatrix: + items: + properties: + sourceCluster: + type: string + targetClusters: + items: + type: string + type: array + required: + - sourceCluster + - targetClusters + type: object + type: array + hubSpoke: + properties: + allowSpokeToSpoke: + type: boolean + hubClusters: + items: + type: string + type: array + spokeClusters: + items: + type: string + type: array + required: + - hubClusters + type: object + policyNodes: + items: + type: string + type: array + topologyType: + default: auto + enum: + - auto + - full-mesh + - hub-spoke + - partial-mesh + - custom + type: string + type: object vpnConfig: description: VPNConfiguration defines the additional (optional) VPN Configuration to customise diff --git a/config/samples/topology-auto-secure.yaml b/config/samples/topology-auto-secure.yaml new file mode 100644 index 000000000..8e108b920 --- /dev/null +++ b/config/samples/topology-auto-secure.yaml @@ -0,0 +1,16 @@ +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: demo-auto-secure + namespace: kubeslice-avesha +spec: + sliceSubnet: "10.2.0.0/16" + clusters: ["dmz", "gateway", "internal", "analytics"] + topologyConfig: + topologyType: auto + policyNodes: ["gateway"] + 
clusterRoles: + - clusterName: gateway + vpnRole: server + - clusterName: dmz + vpnRole: client \ No newline at end of file diff --git a/config/samples/topology-custom-matrix.yaml b/config/samples/topology-custom-matrix.yaml new file mode 100644 index 000000000..7574a0726 --- /dev/null +++ b/config/samples/topology-custom-matrix.yaml @@ -0,0 +1,22 @@ +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: demo-custom-matrix + namespace: kubeslice-avesha +spec: + sliceSubnet: "10.3.0.0/16" + clusters: ["dmz", "gateway", "internal", "database"] + topologyConfig: + topologyType: custom + connectivityMatrix: + - sourceCluster: dmz + targetClusters: ["gateway"] + - sourceCluster: gateway + targetClusters: ["internal", "dmz"] + - sourceCluster: internal + targetClusters: ["database", "gateway"] + clusterRoles: + - clusterName: gateway + vpnRole: server + - clusterName: internal + vpnRole: server \ No newline at end of file diff --git a/config/samples/topology-hub-spoke.yaml b/config/samples/topology-hub-spoke.yaml new file mode 100644 index 000000000..b40394149 --- /dev/null +++ b/config/samples/topology-hub-spoke.yaml @@ -0,0 +1,23 @@ +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: demo-hub-spoke + namespace: kubeslice-avesha +spec: + sliceSubnet: "10.1.0.0/16" + clusters: ["hub", "spoke1", "spoke2", "spoke3"] + topologyConfig: + topologyType: hub-spoke + hubSpoke: + hubClusters: ["hub"] + spokeClusters: ["spoke1", "spoke2", "spoke3"] + allowSpokeToSpoke: false + clusterRoles: + - clusterName: hub + vpnRole: server + - clusterName: spoke1 + vpnRole: client + - clusterName: spoke2 + vpnRole: client + - clusterName: spoke3 + vpnRole: client \ No newline at end of file From d78170196a533d6bd8943fa2fabf4b9e62ea96ad Mon Sep 17 00:00:00 2001 From: Priyansh Saxena Date: Tue, 14 Oct 2025 11:49:58 +0530 Subject: [PATCH 02/34] =?UTF-8?q?=EF=BB=BFfeat:=20Add=20flexible=20topolog?= 
=?UTF-8?q?y=20configuration=20to=20SliceConfig=20-=20Add=20TopologyConfig?= =?UTF-8?q?=20with=205=20topology=20types=20(full-mesh,=20hub-spoke,=20aut?= =?UTF-8?q?o,=20custom,=20isolated)=20-=20Implement=20AutoTopologyOptions?= =?UTF-8?q?=20with=20int-based=20relativeThresholdPercent=20(1-500=20=3D?= =?UTF-8?q?=200.1%-50.0%)=20-=20Add=20HubSpokeConfig=20with=20SpokeConnect?= =?UTF-8?q?ivity=20for=20selective=20spoke-to-spoke=20connections=20-=20Cr?= =?UTF-8?q?eate=20TopologyValidator=20service=20for=20comprehensive=20vali?= =?UTF-8?q?dation=20-=20Integrate=20validation=20into=20SliceConfig=20admi?= =?UTF-8?q?ssion=20webhook=20-=20Generate=20CRD=20with=20topology=20fields?= =?UTF-8?q?=20and=20validation=20constraints=20-=20Maintain=20backward=20c?= =?UTF-8?q?ompatibility=20(nil=20topologyConfig=20defaults=20to=20full-mes?= =?UTF-8?q?h)=20Tested:=20-=20=E2=9C=85=20Legacy=20SliceConfig=20without?= =?UTF-8?q?=20topology=20(backward=20compatible)=20-=20=E2=9C=85=20Hub-spo?= =?UTF-8?q?ke=20topology=20with=20spoke=20connectivity=20-=20=E2=9C=85=20A?= =?UTF-8?q?uto=20topology=20with=20integer=20threshold=20-=20=E2=9C=85=20C?= =?UTF-8?q?RD=20validation=20rejects=20out-of-range=20values=20(1000=20>?= =?UTF-8?q?=20500)=20Week=201-2=20deliverable:=20API=20specification=20and?= =?UTF-8?q?=20validation=20logic?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Priyansh Saxena Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- apis/controller/v1alpha1/sliceconfig_types.go | 42 +++-- .../v1alpha1/zz_generated.deepcopy.go | 129 +++++++++++++++ .../controller.kubeslice.io_sliceconfigs.yaml | 37 +++++ service/slice_config_webhook_validation.go | 16 ++ service/topology_validation.go | 147 ++++++++++++++++++ 5 files changed, 360 insertions(+), 11 deletions(-) create mode 100644 service/topology_validation.go diff --git a/apis/controller/v1alpha1/sliceconfig_types.go 
b/apis/controller/v1alpha1/sliceconfig_types.go index 437977c74..32c239c2e 100644 --- a/apis/controller/v1alpha1/sliceconfig_types.go +++ b/apis/controller/v1alpha1/sliceconfig_types.go @@ -189,25 +189,27 @@ const ( type TopologyConfig struct { //+kubebuilder:default:=auto - TopologyType TopologyType `json:"topologyType,omitempty"` - HubSpoke *HubSpokeConfig `json:"hubSpoke,omitempty"` - ConnectivityMatrix []ConnectivityEntry `json:"connectivityMatrix,omitempty"` - ClusterRoles []ClusterRole `json:"clusterRoles,omitempty"` - PolicyNodes []string `json:"policyNodes,omitempty"` + TopologyType TopologyType `json:"topologyType,omitempty"` + HubSpoke *HubSpokeConfig `json:"hubSpoke,omitempty"` + ConnectivityMatrix []ConnectivityEntry `json:"connectivityMatrix,omitempty"` + ClusterRoles []ClusterRole `json:"clusterRoles,omitempty"` + PolicyNodes []string `json:"policyNodes,omitempty"` + AutoOptions *AutoTopologyOptions `json:"autoOptions,omitempty"` } type HubSpokeConfig struct { //+kubebuilder:validation:Required - HubClusters []string `json:"hubClusters"` - SpokeClusters []string `json:"spokeClusters,omitempty"` - AllowSpokeToSpoke bool `json:"allowSpokeToSpoke,omitempty"` + HubClusters []string `json:"hubClusters"` + SpokeClusters []string `json:"spokeClusters,omitempty"` + AllowSpokeToSpoke bool `json:"allowSpokeToSpoke,omitempty"` + SpokeConnectivity []ConnectivityEntry `json:"spokeConnectivity,omitempty"` } type ConnectivityEntry struct { //+kubebuilder:validation:Required - SourceCluster string `json:"sourceCluster"` + SourceCluster string `json:"sourceCluster"` //+kubebuilder:validation:Required - TargetClusters []string `json:"targetClusters"` + TargetClusters []string `json:"targetClusters"` } type ClusterRole struct { @@ -215,7 +217,25 @@ type ClusterRole struct { ClusterName string `json:"clusterName"` //+kubebuilder:default:=auto //+kubebuilder:validation:Enum:=auto;server;client - VPNRole string `json:"vpnRole,omitempty"` + VPNRole string 
`json:"vpnRole,omitempty"` +} + +type AutoTopologyOptions struct { + //+kubebuilder:default:=false + EnableShortcuts bool `json:"enableShortcuts,omitempty"` + //+kubebuilder:default:=20 + //+kubebuilder:validation:Minimum=1 + //+kubebuilder:validation:Maximum=500 + RelativeThresholdPercent int `json:"relativeThresholdPercent,omitempty"` + //+kubebuilder:default:=3 + //+kubebuilder:validation:Minimum=1 + PersistenceWindows int `json:"persistenceWindows,omitempty"` + //+kubebuilder:default:=10 + //+kubebuilder:validation:Minimum=1 + //+kubebuilder:validation:Maximum=50 + MaxShortcuts int `json:"maxShortcuts,omitempty"` + //+kubebuilder:default:="5m" + TelemetryWindow string `json:"telemetryWindow,omitempty"` } type KubesliceEvent struct { diff --git a/apis/controller/v1alpha1/zz_generated.deepcopy.go b/apis/controller/v1alpha1/zz_generated.deepcopy.go index e7d11c185..2a394694d 100644 --- a/apis/controller/v1alpha1/zz_generated.deepcopy.go +++ b/apis/controller/v1alpha1/zz_generated.deepcopy.go @@ -24,6 +24,21 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoTopologyOptions) DeepCopyInto(out *AutoTopologyOptions) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoTopologyOptions. +func (in *AutoTopologyOptions) DeepCopy() *AutoTopologyOptions { + if in == nil { + return nil + } + out := new(AutoTopologyOptions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Cluster) DeepCopyInto(out *Cluster) { *out = *in @@ -122,6 +137,21 @@ func (in *ClusterProperty) DeepCopy() *ClusterProperty { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole. +func (in *ClusterRole) DeepCopy() *ClusterRole { + if in == nil { + return nil + } + out := new(ClusterRole) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = *in @@ -203,6 +233,26 @@ func (in *ComponentStatus) DeepCopy() *ComponentStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityEntry) DeepCopyInto(out *ConnectivityEntry) { + *out = *in + if in.TargetClusters != nil { + in, out := &in.TargetClusters, &out.TargetClusters + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityEntry. +func (in *ConnectivityEntry) DeepCopy() *ConnectivityEntry { + if in == nil { + return nil + } + out := new(ConnectivityEntry) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalGatewayConfig) DeepCopyInto(out *ExternalGatewayConfig) { *out = *in @@ -273,6 +323,38 @@ func (in *GeoLocation) DeepCopy() *GeoLocation { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HubSpokeConfig) DeepCopyInto(out *HubSpokeConfig) { + *out = *in + if in.HubClusters != nil { + in, out := &in.HubClusters, &out.HubClusters + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SpokeClusters != nil { + in, out := &in.SpokeClusters, &out.SpokeClusters + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SpokeConnectivity != nil { + in, out := &in.SpokeConnectivity, &out.SpokeConnectivity + *out = make([]ConnectivityEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSpokeConfig. +func (in *HubSpokeConfig) DeepCopy() *HubSpokeConfig { + if in == nil { + return nil + } + out := new(HubSpokeConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubernetesDashboard) DeepCopyInto(out *KubernetesDashboard) { *out = *in @@ -744,6 +826,11 @@ func (in *SliceConfigSpec) DeepCopyInto(out *SliceConfigSpec) { *out = new(VPNConfiguration) **out = **in } + if in.TopologyConfig != nil { + in, out := &in.TopologyConfig, &out.TopologyConfig + *out = new(TopologyConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SliceConfigSpec. @@ -933,6 +1020,48 @@ func (in *Telemetry) DeepCopy() *Telemetry { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopologyConfig) DeepCopyInto(out *TopologyConfig) { + *out = *in + if in.HubSpoke != nil { + in, out := &in.HubSpoke, &out.HubSpoke + *out = new(HubSpokeConfig) + (*in).DeepCopyInto(*out) + } + if in.ConnectivityMatrix != nil { + in, out := &in.ConnectivityMatrix, &out.ConnectivityMatrix + *out = make([]ConnectivityEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterRoles != nil { + in, out := &in.ClusterRoles, &out.ClusterRoles + *out = make([]ClusterRole, len(*in)) + copy(*out, *in) + } + if in.PolicyNodes != nil { + in, out := &in.PolicyNodes, &out.PolicyNodes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AutoOptions != nil { + in, out := &in.AutoOptions, &out.AutoOptions + *out = new(AutoTopologyOptions) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologyConfig. +func (in *TopologyConfig) DeepCopy() *TopologyConfig { + if in == nil { + return nil + } + out := new(TopologyConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VCPURestriction) DeepCopyInto(out *VCPURestriction) { *out = *in diff --git a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml index e7657966b..5d3fb46a1 100644 --- a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml +++ b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml @@ -230,6 +230,29 @@ spec: topologyConfig: description: TopologyConfig defines cluster connectivity patterns properties: + autoOptions: + properties: + enableShortcuts: + default: false + type: boolean + maxShortcuts: + default: 10 + maximum: 50 + minimum: 1 + type: integer + persistenceWindows: + default: 3 + minimum: 1 + type: integer + relativeThresholdPercent: + default: 20 + maximum: 500 + minimum: 1 + type: integer + telemetryWindow: + default: 5m + type: string + type: object clusterRoles: items: properties: @@ -272,6 +295,20 @@ spec: items: type: string type: array + spokeConnectivity: + items: + properties: + sourceCluster: + type: string + targetClusters: + items: + type: string + type: array + required: + - sourceCluster + - targetClusters + type: object + type: array required: - hubClusters type: object diff --git a/service/slice_config_webhook_validation.go b/service/slice_config_webhook_validation.go index 99ba653ae..37c2b79ea 100644 --- a/service/slice_config_webhook_validation.go +++ b/service/slice_config_webhook_validation.go @@ -59,6 +59,14 @@ func ValidateSliceConfigCreate(ctx context.Context, sliceConfig *controllerv1alp if err := validateMaxClusterCount(sliceConfig); err != nil { return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{err}) } + if sliceConfig.Spec.TopologyConfig != nil { + validator := NewTopologyValidator() + if err := validator.ValidateTopologyConfig(sliceConfig.Spec.TopologyConfig, sliceConfig.Spec.Clusters); err != nil { + return nil, 
apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{ + field.Invalid(field.NewPath("spec").Child("topologyConfig"), sliceConfig.Spec.TopologyConfig, err.Error()), + }) + } + } if sliceConfig.Spec.OverlayNetworkDeploymentMode != controllerv1alpha1.NONET { if err := validateSliceSubnet(sliceConfig); err != nil { return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{err}) @@ -106,6 +114,14 @@ func ValidateSliceConfigUpdate(ctx context.Context, sliceConfig *controllerv1alp if err := validateNamespaceIsolationProfile(sliceConfig); err != nil { return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{err}) } + if sliceConfig.Spec.TopologyConfig != nil { + validator := NewTopologyValidator() + if err := validator.ValidateTopologyConfig(sliceConfig.Spec.TopologyConfig, sliceConfig.Spec.Clusters); err != nil { + return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{ + field.Invalid(field.NewPath("spec").Child("topologyConfig"), sliceConfig.Spec.TopologyConfig, err.Error()), + }) + } + } // Validate single/multi overlay network deployment mode specific fields if sliceConfig.Spec.OverlayNetworkDeploymentMode != controllerv1alpha1.NONET { if err := validateSliceSubnet(sliceConfig); err != nil { diff --git a/service/topology_validation.go b/service/topology_validation.go new file mode 100644 index 000000000..83b47a886 --- /dev/null +++ b/service/topology_validation.go @@ -0,0 +1,147 @@ +package service + +import ( + "fmt" + controllerv1alpha1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1" +) + +type TopologyValidator struct{} + +func NewTopologyValidator() *TopologyValidator { + return &TopologyValidator{} +} + +func (v 
*TopologyValidator) ValidateTopologyConfig(topology *controllerv1alpha1.TopologyConfig, clusters []string) error { + if topology == nil { + return nil + } + + clusterSet := toSet(clusters) + + switch topology.TopologyType { + case controllerv1alpha1.TopologyHubSpoke: + if err := v.validateHubSpoke(topology.HubSpoke, clusterSet); err != nil { + return err + } + case controllerv1alpha1.TopologyCustom: + if err := v.validateCustom(topology.ConnectivityMatrix, clusterSet); err != nil { + return err + } + case controllerv1alpha1.TopologyAuto: + if err := v.validateAuto(topology, clusterSet); err != nil { + return err + } + case controllerv1alpha1.TopologyFullMesh, controllerv1alpha1.TopologyPartialMesh: + case "": + default: + return fmt.Errorf("invalid topology type: %s", topology.TopologyType) + } + + if err := v.validateClusterRoles(topology.ClusterRoles, clusterSet); err != nil { + return err + } + + return v.validatePolicyNodes(topology.PolicyNodes, clusterSet) +} + +func (v *TopologyValidator) validateHubSpoke(config *controllerv1alpha1.HubSpokeConfig, clusterSet map[string]struct{}) error { + if config == nil { + return fmt.Errorf("hubSpoke config required for hub-spoke topology") + } + + if len(config.HubClusters) == 0 { + return fmt.Errorf("at least one hub cluster required") + } + + for _, hub := range config.HubClusters { + if _, exists := clusterSet[hub]; !exists { + return fmt.Errorf("hub cluster %s not in spec.clusters", hub) + } + } + + hubSet := toSet(config.HubClusters) + for _, spoke := range config.SpokeClusters { + if _, exists := clusterSet[spoke]; !exists { + return fmt.Errorf("spoke cluster %s not in spec.clusters", spoke) + } + if _, isHub := hubSet[spoke]; isHub { + return fmt.Errorf("cluster %s cannot be both hub and spoke", spoke) + } + } + + if config.SpokeConnectivity != nil { + spokeSet := toSet(config.SpokeClusters) + for _, entry := range config.SpokeConnectivity { + if _, exists := spokeSet[entry.SourceCluster]; !exists { + return 
fmt.Errorf("spokeConnectivity source %s not a spoke cluster", entry.SourceCluster) + } + for _, target := range entry.TargetClusters { + if _, exists := spokeSet[target]; !exists { + return fmt.Errorf("spokeConnectivity target %s not a spoke cluster", target) + } + } + } + } + + return nil +} + +func (v *TopologyValidator) validateCustom(matrix []controllerv1alpha1.ConnectivityEntry, clusterSet map[string]struct{}) error { + if len(matrix) == 0 { + return fmt.Errorf("connectivityMatrix required for custom topology") + } + + for _, entry := range matrix { + if _, exists := clusterSet[entry.SourceCluster]; !exists { + return fmt.Errorf("connectivityMatrix source %s not in spec.clusters", entry.SourceCluster) + } + for _, target := range entry.TargetClusters { + if _, exists := clusterSet[target]; !exists { + return fmt.Errorf("connectivityMatrix target %s not in spec.clusters", target) + } + } + } + + return nil +} + +func (v *TopologyValidator) validateAuto(topology *controllerv1alpha1.TopologyConfig, clusterSet map[string]struct{}) error { + if topology.AutoOptions != nil { + if topology.AutoOptions.RelativeThresholdPercent < 1 || topology.AutoOptions.RelativeThresholdPercent > 500 { + return fmt.Errorf("relativeThresholdPercent must be between 1 and 500 (represents 0.1%% to 50.0%%)") + } + if topology.AutoOptions.PersistenceWindows < 1 { + return fmt.Errorf("persistenceWindows must be at least 1") + } + if topology.AutoOptions.MaxShortcuts < 1 || topology.AutoOptions.MaxShortcuts > 50 { + return fmt.Errorf("maxShortcuts must be between 1 and 50") + } + } + return nil +} + +func (v *TopologyValidator) validateClusterRoles(roles []controllerv1alpha1.ClusterRole, clusterSet map[string]struct{}) error { + for _, role := range roles { + if _, exists := clusterSet[role.ClusterName]; !exists { + return fmt.Errorf("clusterRole %s not in spec.clusters", role.ClusterName) + } + } + return nil +} + +func (v *TopologyValidator) validatePolicyNodes(policyNodes []string, 
clusterSet map[string]struct{}) error { + for _, node := range policyNodes { + if _, exists := clusterSet[node]; !exists { + return fmt.Errorf("policyNode %s not in spec.clusters", node) + } + } + return nil +} + +func toSet(items []string) map[string]struct{} { + set := make(map[string]struct{}, len(items)) + for _, item := range items { + set[item] = struct{}{} + } + return set +} From ca3e1d1e2bec6539cf5e676b122c4d30f59438a1 Mon Sep 17 00:00:00 2001 From: Priyansh Saxena Date: Wed, 15 Oct 2025 00:15:53 +0530 Subject: [PATCH 03/34] Week 3: Implement flexible topology service with auto, hub-spoke, custom, and full-mesh support Signed-off-by: Priyansh Saxena Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- service/topology_service.go | 210 ++++++++++++++++++++++++++++ service/topology_service_test.go | 229 +++++++++++++++++++++++++++++++ 2 files changed, 439 insertions(+) create mode 100644 service/topology_service.go create mode 100644 service/topology_service_test.go diff --git a/service/topology_service.go b/service/topology_service.go new file mode 100644 index 000000000..a5589b59f --- /dev/null +++ b/service/topology_service.go @@ -0,0 +1,210 @@ +package service + +import ( + "fmt" + controllerv1alpha1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1" +) + +type GatewayPair struct { + Source string + Target string + Bidirectional bool +} + +type TopologyService interface { + ResolveTopology(sliceConfig *controllerv1alpha1.SliceConfig) ([]GatewayPair, error) +} + +type DefaultTopologyService struct{} + +func NewTopologyService() TopologyService { + return &DefaultTopologyService{} +} + +func (s *DefaultTopologyService) ResolveTopology(sc *controllerv1alpha1.SliceConfig) ([]GatewayPair, error) { + if sc.Spec.TopologyConfig == nil { + return s.resolveFullMesh(sc.Spec.Clusters) + } + + switch sc.Spec.TopologyConfig.TopologyType { + case controllerv1alpha1.TopologyFullMesh, "": + return 
s.resolveFullMesh(sc.Spec.Clusters) + case controllerv1alpha1.TopologyHubSpoke: + return s.resolveHubSpoke(sc.Spec.Clusters, sc.Spec.TopologyConfig.HubSpoke) + case controllerv1alpha1.TopologyCustom: + return s.resolveCustom(sc.Spec.Clusters, sc.Spec.TopologyConfig.ConnectivityMatrix) + case controllerv1alpha1.TopologyAuto: + return s.resolveAuto(sc.Spec.Clusters, sc.Spec.TopologyConfig.PolicyNodes) + default: + return nil, fmt.Errorf("unknown topology type: %s", sc.Spec.TopologyConfig.TopologyType) + } +} + +func (s *DefaultTopologyService) resolveFullMesh(clusters []string) ([]GatewayPair, error) { + if len(clusters) < 2 { + return []GatewayPair{}, nil + } + + pairs := make([]GatewayPair, 0, len(clusters)*(len(clusters)-1)/2) + for i := 0; i < len(clusters); i++ { + for j := i + 1; j < len(clusters); j++ { + pairs = append(pairs, GatewayPair{ + Source: clusters[i], + Target: clusters[j], + Bidirectional: true, + }) + } + } + return pairs, nil +} + +func (s *DefaultTopologyService) resolveHubSpoke(clusters []string, cfg *controllerv1alpha1.HubSpokeConfig) ([]GatewayPair, error) { + if cfg == nil { + return nil, fmt.Errorf("hub-spoke config required") + } + + hubs := cfg.HubClusters + spokes := cfg.SpokeClusters + if len(spokes) == 0 { + spokes = s.getSpokes(clusters, hubs) + } + + if len(hubs) == 0 { + return nil, fmt.Errorf("at least one hub required") + } + + pairs := make([]GatewayPair, 0, len(hubs)*len(spokes)) + for _, hub := range hubs { + for _, spoke := range spokes { + pairs = append(pairs, GatewayPair{ + Source: hub, + Target: spoke, + Bidirectional: true, + }) + } + } + + if cfg.AllowSpokeToSpoke { + pairs = append(pairs, s.resolveSpokePairs(spokes, cfg.SpokeConnectivity)...) 
+ } + + return pairs, nil +} + +func (s *DefaultTopologyService) getSpokes(clusters []string, hubs []string) []string { + hubSet := s.toSet(hubs) + spokes := make([]string, 0, len(clusters)) + for _, cluster := range clusters { + if !hubSet[cluster] { + spokes = append(spokes, cluster) + } + } + return spokes +} + +func (s *DefaultTopologyService) resolveSpokePairs(spokes []string, connectivity []controllerv1alpha1.ConnectivityEntry) []GatewayPair { + if len(connectivity) > 0 { + return s.resolveSelectiveSpokes(connectivity) + } + + pairs := make([]GatewayPair, 0, len(spokes)*(len(spokes)-1)/2) + for i := 0; i < len(spokes); i++ { + for j := i + 1; j < len(spokes); j++ { + pairs = append(pairs, GatewayPair{ + Source: spokes[i], + Target: spokes[j], + Bidirectional: true, + }) + } + } + return pairs +} + +func (s *DefaultTopologyService) resolveSelectiveSpokes(entries []controllerv1alpha1.ConnectivityEntry) []GatewayPair { + pairs := make([]GatewayPair, 0) + for _, entry := range entries { + for _, target := range entry.TargetClusters { + pairs = append(pairs, GatewayPair{ + Source: entry.SourceCluster, + Target: target, + Bidirectional: true, + }) + } + } + return pairs +} + +func (s *DefaultTopologyService) resolveCustom(clusters []string, matrix []controllerv1alpha1.ConnectivityEntry) ([]GatewayPair, error) { + if len(matrix) == 0 { + return nil, fmt.Errorf("custom config with connectivity matrix required") + } + + clusterSet := s.toSet(clusters) + pairs := make([]GatewayPair, 0) + + for _, entry := range matrix { + if !clusterSet[entry.SourceCluster] { + return nil, fmt.Errorf("connectivity entry references unknown source cluster: %s", entry.SourceCluster) + } + for _, target := range entry.TargetClusters { + if !clusterSet[target] { + return nil, fmt.Errorf("connectivity entry references unknown target cluster: %s", target) + } + pairs = append(pairs, GatewayPair{ + Source: entry.SourceCluster, + Target: target, + Bidirectional: true, + }) + } + } + + return 
pairs, nil +} + +func (s *DefaultTopologyService) resolveAuto(clusters []string, policyNodes []string) ([]GatewayPair, error) { + allPairs, _ := s.resolveFullMesh(clusters) + + if len(policyNodes) == 0 { + return allPairs, nil + } + + forbidden := s.buildForbiddenSet(clusters, policyNodes) + return s.filterPairs(allPairs, forbidden), nil +} + +func (s *DefaultTopologyService) buildForbiddenSet(clusters []string, policyNodes []string) map[string]bool { + forbidden := make(map[string]bool) + for _, node := range policyNodes { + for _, cluster := range clusters { + if cluster != node { + forbidden[s.pairKey(node, cluster)] = true + } + } + } + return forbidden +} + +func (s *DefaultTopologyService) filterPairs(pairs []GatewayPair, forbidden map[string]bool) []GatewayPair { + filtered := make([]GatewayPair, 0, len(pairs)) + for _, p := range pairs { + if !forbidden[s.pairKey(p.Source, p.Target)] { + filtered = append(filtered, p) + } + } + return filtered +} + +func (s *DefaultTopologyService) toSet(items []string) map[string]bool { + set := make(map[string]bool, len(items)) + for _, item := range items { + set[item] = true + } + return set +} + +func (s *DefaultTopologyService) pairKey(a, b string) string { + if a < b { + return a + "-" + b + } + return b + "-" + a +} diff --git a/service/topology_service_test.go b/service/topology_service_test.go new file mode 100644 index 000000000..d3f73c0ce --- /dev/null +++ b/service/topology_service_test.go @@ -0,0 +1,229 @@ +package service + +import ( + "testing" + + controllerv1alpha1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestResolveTopology_Legacy(t *testing.T) { + svc := NewTopologyService() + sc := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster-1", "cluster-2", "cluster-3"}, + }, + } + + pairs, err := svc.ResolveTopology(sc) + require.NoError(t, 
err) + assert.Len(t, pairs, 3) + assertContainsPair(t, pairs, "cluster-1", "cluster-2", true) + assertContainsPair(t, pairs, "cluster-1", "cluster-3", true) + assertContainsPair(t, pairs, "cluster-2", "cluster-3", true) +} + +func TestResolveFullMesh(t *testing.T) { + tests := []struct { + name string + clusters []string + expected int + }{ + {"2 clusters", []string{"c1", "c2"}, 1}, + {"3 clusters", []string{"c1", "c2", "c3"}, 3}, + {"4 clusters", []string{"c1", "c2", "c3", "c4"}, 6}, + {"1 cluster", []string{"c1"}, 0}, + {"0 clusters", []string{}, 0}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + svc := &DefaultTopologyService{} + pairs, err := svc.resolveFullMesh(tt.clusters) + require.NoError(t, err) + assert.Len(t, pairs, tt.expected) + for _, p := range pairs { + assert.True(t, p.Bidirectional) + } + }) + } +} + +func TestResolveHubSpoke_SingleHub(t *testing.T) { + svc := &DefaultTopologyService{} + clusters := []string{"hub1", "spoke1", "spoke2", "spoke3"} + cfg := &controllerv1alpha1.HubSpokeConfig{ + HubClusters: []string{"hub1"}, + } + + pairs, err := svc.resolveHubSpoke(clusters, cfg) + require.NoError(t, err) + assert.Len(t, pairs, 3) + assertContainsPair(t, pairs, "hub1", "spoke1", true) + assertContainsPair(t, pairs, "hub1", "spoke2", true) + assertContainsPair(t, pairs, "hub1", "spoke3", true) +} + +func TestResolveHubSpoke_MultipleHubs(t *testing.T) { + svc := &DefaultTopologyService{} + clusters := []string{"hub1", "hub2", "spoke1", "spoke2"} + cfg := &controllerv1alpha1.HubSpokeConfig{ + HubClusters: []string{"hub1", "hub2"}, + } + + pairs, err := svc.resolveHubSpoke(clusters, cfg) + require.NoError(t, err) + assert.Len(t, pairs, 4) + assertContainsPair(t, pairs, "hub1", "spoke1", true) + assertContainsPair(t, pairs, "hub1", "spoke2", true) + assertContainsPair(t, pairs, "hub2", "spoke1", true) + assertContainsPair(t, pairs, "hub2", "spoke2", true) +} + +func TestResolveHubSpoke_AllowSpokeToSpokeAll(t *testing.T) { + 
svc := &DefaultTopologyService{} + clusters := []string{"hub1", "spoke1", "spoke2", "spoke3"} + cfg := &controllerv1alpha1.HubSpokeConfig{ + HubClusters: []string{"hub1"}, + AllowSpokeToSpoke: true, + } + + pairs, err := svc.resolveHubSpoke(clusters, cfg) + require.NoError(t, err) + assert.Len(t, pairs, 6) + assertContainsPair(t, pairs, "spoke1", "spoke2", true) + assertContainsPair(t, pairs, "spoke1", "spoke3", true) + assertContainsPair(t, pairs, "spoke2", "spoke3", true) +} + +func TestResolveHubSpoke_SelectiveSpokeConnectivity(t *testing.T) { + svc := &DefaultTopologyService{} + clusters := []string{"hub1", "spoke1", "spoke2", "spoke3"} + cfg := &controllerv1alpha1.HubSpokeConfig{ + HubClusters: []string{"hub1"}, + AllowSpokeToSpoke: true, + SpokeConnectivity: []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "spoke1", TargetClusters: []string{"spoke2"}}, + }, + } + + pairs, err := svc.resolveHubSpoke(clusters, cfg) + require.NoError(t, err) + assert.Len(t, pairs, 4) + assertContainsPair(t, pairs, "hub1", "spoke1", true) + assertContainsPair(t, pairs, "hub1", "spoke2", true) + assertContainsPair(t, pairs, "hub1", "spoke3", true) + assertContainsPair(t, pairs, "spoke1", "spoke2", true) +} + +func TestResolveHubSpoke_NoHubError(t *testing.T) { + svc := &DefaultTopologyService{} + clusters := []string{"spoke1", "spoke2"} + cfg := &controllerv1alpha1.HubSpokeConfig{ + HubClusters: []string{}, + } + + _, err := svc.resolveHubSpoke(clusters, cfg) + require.Error(t, err) + assert.Contains(t, err.Error(), "at least one hub required") +} + +func TestResolveCustom(t *testing.T) { + svc := &DefaultTopologyService{} + clusters := []string{"c1", "c2", "c3"} + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "c1", TargetClusters: []string{"c2"}}, + {SourceCluster: "c2", TargetClusters: []string{"c3"}}, + } + + pairs, err := svc.resolveCustom(clusters, matrix) + require.NoError(t, err) + assert.Len(t, pairs, 2) + assertContainsPair(t, pairs, "c1", 
"c2", true) + assertContainsPair(t, pairs, "c2", "c3", true) +} + +func TestResolveCustom_UnknownClusterError(t *testing.T) { + svc := &DefaultTopologyService{} + clusters := []string{"c1", "c2"} + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "c1", TargetClusters: []string{"c99"}}, + } + + _, err := svc.resolveCustom(clusters, matrix) + require.Error(t, err) + assert.Contains(t, err.Error(), "unknown") +} + +func TestResolveAuto_NoForbidden(t *testing.T) { + svc := &DefaultTopologyService{} + clusters := []string{"c1", "c2", "c3"} + policyNodes := []string{} + + pairs, err := svc.resolveAuto(clusters, policyNodes) + require.NoError(t, err) + assert.Len(t, pairs, 3) + assertContainsPair(t, pairs, "c1", "c2", true) + assertContainsPair(t, pairs, "c1", "c3", true) + assertContainsPair(t, pairs, "c2", "c3", true) +} + +func TestResolveAuto_WithForbiddenEdges(t *testing.T) { + svc := &DefaultTopologyService{} + clusters := []string{"c1", "c2", "c3", "c4"} + policyNodes := []string{"c1"} + + pairs, err := svc.resolveAuto(clusters, policyNodes) + require.NoError(t, err) + assert.Len(t, pairs, 3) + assertContainsPair(t, pairs, "c2", "c3", true) + assertContainsPair(t, pairs, "c2", "c4", true) + assertContainsPair(t, pairs, "c3", "c4", true) + assertNotContainsPair(t, pairs, "c1", "c2") + assertNotContainsPair(t, pairs, "c1", "c3") + assertNotContainsPair(t, pairs, "c1", "c4") +} + +func TestResolveAuto_MultipleForbiddenPolicies(t *testing.T) { + svc := &DefaultTopologyService{} + clusters := []string{"c1", "c2", "c3", "c4"} + policyNodes := []string{"c1", "c2"} + + pairs, err := svc.resolveAuto(clusters, policyNodes) + require.NoError(t, err) + assert.Len(t, pairs, 1) + assertContainsPair(t, pairs, "c3", "c4", true) + assertNotContainsPair(t, pairs, "c1", "c2") + assertNotContainsPair(t, pairs, "c1", "c3") + assertNotContainsPair(t, pairs, "c1", "c4") + assertNotContainsPair(t, pairs, "c2", "c3") + assertNotContainsPair(t, pairs, "c2", "c4") +} + 
+func TestPairKey(t *testing.T) { + svc := &DefaultTopologyService{} + assert.Equal(t, "a-b", svc.pairKey("a", "b")) + assert.Equal(t, "a-b", svc.pairKey("b", "a")) + assert.Equal(t, "cluster-1-cluster-2", svc.pairKey("cluster-1", "cluster-2")) + assert.Equal(t, "cluster-1-cluster-2", svc.pairKey("cluster-2", "cluster-1")) +} + +func assertContainsPair(t *testing.T, pairs []GatewayPair, source, target string, bidirectional bool) { + for _, p := range pairs { + if (p.Source == source && p.Target == target) || (p.Source == target && p.Target == source) { + if p.Bidirectional == bidirectional { + return + } + } + } + t.Errorf("Expected to find pair %s <-> %s (bidirectional=%v)", source, target, bidirectional) +} + +func assertNotContainsPair(t *testing.T, pairs []GatewayPair, source, target string) { + for _, p := range pairs { + if (p.Source == source && p.Target == target) || (p.Source == target && p.Target == source) { + t.Errorf("Expected NOT to find pair %s <-> %s", source, target) + } + } +} From a9622cf42c313b6829fb9107e84b1b2309342e9d Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Wed, 15 Oct 2025 02:46:55 +0000 Subject: [PATCH 04/34] fix: removed partial mesh as auto and custom already implement it's utility Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- apis/controller/v1alpha1/sliceconfig_types.go | 11 ++- .../controller.kubeslice.io_sliceconfigs.yaml | 1 - service/topology_service_test.go | 85 +++++++++++++++++++ service/topology_validation.go | 2 +- 4 files changed, 91 insertions(+), 8 deletions(-) diff --git a/apis/controller/v1alpha1/sliceconfig_types.go b/apis/controller/v1alpha1/sliceconfig_types.go index 32c239c2e..8689bddcb 100644 --- a/apis/controller/v1alpha1/sliceconfig_types.go +++ b/apis/controller/v1alpha1/sliceconfig_types.go @@ -176,15 +176,14 @@ type VPNConfiguration struct { Cipher string `json:"cipher"` } -// 
+kubebuilder:validation:Enum:=auto;full-mesh;hub-spoke;partial-mesh;custom +// +kubebuilder:validation:Enum:=auto;full-mesh;hub-spoke;custom type TopologyType string const ( - TopologyAuto TopologyType = "auto" - TopologyFullMesh TopologyType = "full-mesh" - TopologyHubSpoke TopologyType = "hub-spoke" - TopologyPartialMesh TopologyType = "partial-mesh" - TopologyCustom TopologyType = "custom" + TopologyAuto TopologyType = "auto" + TopologyFullMesh TopologyType = "full-mesh" + TopologyHubSpoke TopologyType = "hub-spoke" + TopologyCustom TopologyType = "custom" ) type TopologyConfig struct { diff --git a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml index 5d3fb46a1..d7623c62c 100644 --- a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml +++ b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml @@ -322,7 +322,6 @@ spec: - auto - full-mesh - hub-spoke - - partial-mesh - custom type: string type: object diff --git a/service/topology_service_test.go b/service/topology_service_test.go index d3f73c0ce..e379f5099 100644 --- a/service/topology_service_test.go +++ b/service/topology_service_test.go @@ -227,3 +227,88 @@ func assertNotContainsPair(t *testing.T, pairs []GatewayPair, source, target str } } } + +func TestResolveTopology_UnknownType(t *testing.T) { + svc := NewTopologyService() + sc := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"c1", "c2"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: "invalid-topology-type", + }, + }, + } + + _, err := svc.ResolveTopology(sc) + require.Error(t, err) + assert.Contains(t, err.Error(), "unknown topology type") +} + +func TestResolveTopology_EmptyClusters(t *testing.T) { + svc := NewTopologyService() + sc := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{}, + }, + } + + pairs, err := svc.ResolveTopology(sc) + 
require.NoError(t, err) + assert.Empty(t, pairs) +} + +func TestResolveTopology_SingleCluster(t *testing.T) { + svc := NewTopologyService() + sc := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"only-cluster"}, + }, + } + + pairs, err := svc.ResolveTopology(sc) + require.NoError(t, err) + assert.Empty(t, pairs) +} + +func TestResolveCustom_EmptyMatrix(t *testing.T) { + svc := &DefaultTopologyService{} + clusters := []string{"c1", "c2"} + matrix := []controllerv1alpha1.ConnectivityEntry{} + + _, err := svc.resolveCustom(clusters, matrix) + require.Error(t, err) + assert.Contains(t, err.Error(), "connectivity matrix required") +} + +func TestResolveHubSpoke_NilConfig(t *testing.T) { + svc := &DefaultTopologyService{} + clusters := []string{"c1", "c2"} + + _, err := svc.resolveHubSpoke(clusters, nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "hub-spoke config required") +} + +func TestResolveAuto_AllPolicyNodes(t *testing.T) { + svc := &DefaultTopologyService{} + clusters := []string{"c1", "c2", "c3"} + policyNodes := []string{"c1", "c2", "c3"} + + pairs, err := svc.resolveAuto(clusters, policyNodes) + require.NoError(t, err) + assert.Empty(t, pairs, "All edges should be forbidden when all clusters are policy nodes") +} + +func TestResolveCustom_DuplicateConnections(t *testing.T) { + svc := &DefaultTopologyService{} + clusters := []string{"c1", "c2", "c3"} + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "c1", TargetClusters: []string{"c2"}}, + {SourceCluster: "c2", TargetClusters: []string{"c1"}}, + } + + pairs, err := svc.resolveCustom(clusters, matrix) + require.NoError(t, err) + assert.Len(t, pairs, 2, "Should allow bidirectional explicit connections") +} + diff --git a/service/topology_validation.go b/service/topology_validation.go index 83b47a886..b336b83c7 100644 --- a/service/topology_validation.go +++ b/service/topology_validation.go @@ -31,7 +31,7 @@ func (v 
*TopologyValidator) ValidateTopologyConfig(topology *controllerv1alpha1. if err := v.validateAuto(topology, clusterSet); err != nil { return err } - case controllerv1alpha1.TopologyFullMesh, controllerv1alpha1.TopologyPartialMesh: + case controllerv1alpha1.TopologyFullMesh: case "": default: return fmt.Errorf("invalid topology type: %s", topology.TopologyType) From 0aca3ab5ee6aae9b4ab4e92788002dcc96839952 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Wed, 22 Oct 2025 10:46:28 +0000 Subject: [PATCH 05/34] feat(topology): add topology service and validation (auto/full-mesh/custom) Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- apis/controller/v1alpha1/sliceconfig_types.go | 12 +- .../v1alpha1/zz_generated.deepcopy.go | 37 -- .../controller.kubeslice.io_sliceconfigs.yaml | 30 -- service/topology_service.go | 181 +++++----- service/topology_service_test.go | 324 ++++-------------- service/topology_status.go | 0 service/topology_validation.go | 46 --- 7 files changed, 175 insertions(+), 455 deletions(-) create mode 100644 service/topology_status.go diff --git a/apis/controller/v1alpha1/sliceconfig_types.go b/apis/controller/v1alpha1/sliceconfig_types.go index 8689bddcb..898c89113 100644 --- a/apis/controller/v1alpha1/sliceconfig_types.go +++ b/apis/controller/v1alpha1/sliceconfig_types.go @@ -176,34 +176,24 @@ type VPNConfiguration struct { Cipher string `json:"cipher"` } -// +kubebuilder:validation:Enum:=auto;full-mesh;hub-spoke;custom +// +kubebuilder:validation:Enum:=auto;full-mesh;custom type TopologyType string const ( TopologyAuto TopologyType = "auto" TopologyFullMesh TopologyType = "full-mesh" - TopologyHubSpoke TopologyType = "hub-spoke" TopologyCustom TopologyType = "custom" ) type TopologyConfig struct { //+kubebuilder:default:=auto TopologyType TopologyType `json:"topologyType,omitempty"` - HubSpoke *HubSpokeConfig `json:"hubSpoke,omitempty"` 
ConnectivityMatrix []ConnectivityEntry `json:"connectivityMatrix,omitempty"` ClusterRoles []ClusterRole `json:"clusterRoles,omitempty"` PolicyNodes []string `json:"policyNodes,omitempty"` AutoOptions *AutoTopologyOptions `json:"autoOptions,omitempty"` } -type HubSpokeConfig struct { - //+kubebuilder:validation:Required - HubClusters []string `json:"hubClusters"` - SpokeClusters []string `json:"spokeClusters,omitempty"` - AllowSpokeToSpoke bool `json:"allowSpokeToSpoke,omitempty"` - SpokeConnectivity []ConnectivityEntry `json:"spokeConnectivity,omitempty"` -} - type ConnectivityEntry struct { //+kubebuilder:validation:Required SourceCluster string `json:"sourceCluster"` diff --git a/apis/controller/v1alpha1/zz_generated.deepcopy.go b/apis/controller/v1alpha1/zz_generated.deepcopy.go index 2a394694d..d75b0a229 100644 --- a/apis/controller/v1alpha1/zz_generated.deepcopy.go +++ b/apis/controller/v1alpha1/zz_generated.deepcopy.go @@ -323,38 +323,6 @@ func (in *GeoLocation) DeepCopy() *GeoLocation { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HubSpokeConfig) DeepCopyInto(out *HubSpokeConfig) { - *out = *in - if in.HubClusters != nil { - in, out := &in.HubClusters, &out.HubClusters - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SpokeClusters != nil { - in, out := &in.SpokeClusters, &out.SpokeClusters - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SpokeConnectivity != nil { - in, out := &in.SpokeConnectivity, &out.SpokeConnectivity - *out = make([]ConnectivityEntry, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSpokeConfig. 
-func (in *HubSpokeConfig) DeepCopy() *HubSpokeConfig { - if in == nil { - return nil - } - out := new(HubSpokeConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubernetesDashboard) DeepCopyInto(out *KubernetesDashboard) { *out = *in @@ -1023,11 +991,6 @@ func (in *Telemetry) DeepCopy() *Telemetry { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TopologyConfig) DeepCopyInto(out *TopologyConfig) { *out = *in - if in.HubSpoke != nil { - in, out := &in.HubSpoke, &out.HubSpoke - *out = new(HubSpokeConfig) - (*in).DeepCopyInto(*out) - } if in.ConnectivityMatrix != nil { in, out := &in.ConnectivityMatrix, &out.ConnectivityMatrix *out = make([]ConnectivityEntry, len(*in)) diff --git a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml index d7623c62c..055027235 100644 --- a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml +++ b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml @@ -283,35 +283,6 @@ spec: - targetClusters type: object type: array - hubSpoke: - properties: - allowSpokeToSpoke: - type: boolean - hubClusters: - items: - type: string - type: array - spokeClusters: - items: - type: string - type: array - spokeConnectivity: - items: - properties: - sourceCluster: - type: string - targetClusters: - items: - type: string - type: array - required: - - sourceCluster - - targetClusters - type: object - type: array - required: - - hubClusters - type: object policyNodes: items: type: string @@ -321,7 +292,6 @@ spec: enum: - auto - full-mesh - - hub-spoke - custom type: string type: object diff --git a/service/topology_service.go b/service/topology_service.go index a5589b59f..ca72eeb6c 100644 --- a/service/topology_service.go +++ b/service/topology_service.go @@ -29,8 +29,6 @@ func (s 
*DefaultTopologyService) ResolveTopology(sc *controllerv1alpha1.SliceCon switch sc.Spec.TopologyConfig.TopologyType { case controllerv1alpha1.TopologyFullMesh, "": return s.resolveFullMesh(sc.Spec.Clusters) - case controllerv1alpha1.TopologyHubSpoke: - return s.resolveHubSpoke(sc.Spec.Clusters, sc.Spec.TopologyConfig.HubSpoke) case controllerv1alpha1.TopologyCustom: return s.resolveCustom(sc.Spec.Clusters, sc.Spec.TopologyConfig.ConnectivityMatrix) case controllerv1alpha1.TopologyAuto: @@ -58,85 +56,9 @@ func (s *DefaultTopologyService) resolveFullMesh(clusters []string) ([]GatewayPa return pairs, nil } -func (s *DefaultTopologyService) resolveHubSpoke(clusters []string, cfg *controllerv1alpha1.HubSpokeConfig) ([]GatewayPair, error) { - if cfg == nil { - return nil, fmt.Errorf("hub-spoke config required") - } - - hubs := cfg.HubClusters - spokes := cfg.SpokeClusters - if len(spokes) == 0 { - spokes = s.getSpokes(clusters, hubs) - } - - if len(hubs) == 0 { - return nil, fmt.Errorf("at least one hub required") - } - - pairs := make([]GatewayPair, 0, len(hubs)*len(spokes)) - for _, hub := range hubs { - for _, spoke := range spokes { - pairs = append(pairs, GatewayPair{ - Source: hub, - Target: spoke, - Bidirectional: true, - }) - } - } - - if cfg.AllowSpokeToSpoke { - pairs = append(pairs, s.resolveSpokePairs(spokes, cfg.SpokeConnectivity)...) 
- } - - return pairs, nil -} - -func (s *DefaultTopologyService) getSpokes(clusters []string, hubs []string) []string { - hubSet := s.toSet(hubs) - spokes := make([]string, 0, len(clusters)) - for _, cluster := range clusters { - if !hubSet[cluster] { - spokes = append(spokes, cluster) - } - } - return spokes -} - -func (s *DefaultTopologyService) resolveSpokePairs(spokes []string, connectivity []controllerv1alpha1.ConnectivityEntry) []GatewayPair { - if len(connectivity) > 0 { - return s.resolveSelectiveSpokes(connectivity) - } - - pairs := make([]GatewayPair, 0, len(spokes)*(len(spokes)-1)/2) - for i := 0; i < len(spokes); i++ { - for j := i + 1; j < len(spokes); j++ { - pairs = append(pairs, GatewayPair{ - Source: spokes[i], - Target: spokes[j], - Bidirectional: true, - }) - } - } - return pairs -} - -func (s *DefaultTopologyService) resolveSelectiveSpokes(entries []controllerv1alpha1.ConnectivityEntry) []GatewayPair { - pairs := make([]GatewayPair, 0) - for _, entry := range entries { - for _, target := range entry.TargetClusters { - pairs = append(pairs, GatewayPair{ - Source: entry.SourceCluster, - Target: target, - Bidirectional: true, - }) - } - } - return pairs -} - func (s *DefaultTopologyService) resolveCustom(clusters []string, matrix []controllerv1alpha1.ConnectivityEntry) ([]GatewayPair, error) { if len(matrix) == 0 { - return nil, fmt.Errorf("custom config with connectivity matrix required") + return nil, fmt.Errorf("custom topology requires connectivity matrix") } clusterSet := s.toSet(clusters) @@ -169,7 +91,106 @@ func (s *DefaultTopologyService) resolveAuto(clusters []string, policyNodes []st } forbidden := s.buildForbiddenSet(clusters, policyNodes) - return s.filterPairs(allPairs, forbidden), nil + filtered := s.filterPairs(allPairs, forbidden) + + preservedPairs, err := s.ensureConnectivity(clusters, filtered, forbidden) + if err != nil { + return nil, err + } + + return preservedPairs, nil +} + +func (s *DefaultTopologyService) 
ensureConnectivity(clusters []string, pairs []GatewayPair, forbidden map[string]bool) ([]GatewayPair, error) { + graph := s.buildGraph(pairs) + components := s.findComponents(clusters, graph) + + if len(components) <= 1 { + return pairs, nil + } + + bridgeEdges := s.findBridges(clusters, components, forbidden) + if len(bridgeEdges) == 0 { + return nil, fmt.Errorf("policy nodes create partitioned topology with no safe bridge edges available") + } + + for _, bridge := range bridgeEdges { + pairs = append(pairs, bridge) + } + + return pairs, nil +} + +func (s *DefaultTopologyService) buildGraph(pairs []GatewayPair) map[string][]string { + graph := make(map[string][]string) + for _, p := range pairs { + graph[p.Source] = append(graph[p.Source], p.Target) + graph[p.Target] = append(graph[p.Target], p.Source) + } + return graph +} + +func (s *DefaultTopologyService) findComponents(clusters []string, graph map[string][]string) [][]string { + visited := make(map[string]bool) + components := make([][]string, 0) + + for _, cluster := range clusters { + if !visited[cluster] { + component := s.dfs(cluster, graph, visited) + components = append(components, component) + } + } + + return components +} + +func (s *DefaultTopologyService) dfs(node string, graph map[string][]string, visited map[string]bool) []string { + visited[node] = true + component := []string{node} + + for _, neighbor := range graph[node] { + if !visited[neighbor] { + component = append(component, s.dfs(neighbor, graph, visited)...) 
+ } + } + + return component +} + +func (s *DefaultTopologyService) findBridges(clusters []string, components [][]string, forbidden map[string]bool) []GatewayPair { + bridges := make([]GatewayPair, 0) + componentMap := make(map[string]int) + + for i, comp := range components { + for _, node := range comp { + componentMap[node] = i + } + } + + for i := 0; i < len(components); i++ { + for j := i + 1; j < len(components); j++ { + added := false + for _, ni := range components[i] { + if added { + break + } + for _, nj := range components[j] { + key := s.pairKey(ni, nj) + if !forbidden[key] { + bridges = append(bridges, GatewayPair{ + Source: ni, + Target: nj, + Bidirectional: true, + }) + added = true + break + } + } + } + } + } + + return bridges } func (s *DefaultTopologyService) buildForbiddenSet(clusters []string, policyNodes []string) map[string]bool { diff --git a/service/topology_service_test.go b/service/topology_service_test.go index e379f5099..faf3b8f57 100644 --- a/service/topology_service_test.go +++ b/service/topology_service_test.go @@ -8,307 +8,129 @@ import ( "github.com/stretchr/testify/require" ) -func TestResolveTopology_Legacy(t *testing.T) { +func TestResolveTopologyDefaultsToFullMesh(t *testing.T) { svc := NewTopologyService() sc := &controllerv1alpha1.SliceConfig{ Spec: controllerv1alpha1.SliceConfigSpec{ - Clusters: []string{"cluster-1", "cluster-2", "cluster-3"}, + Clusters: []string{"alpha", "beta", "gamma"}, }, } pairs, err := svc.ResolveTopology(sc) require.NoError(t, err) assert.Len(t, pairs, 3) - assertContainsPair(t, pairs, "cluster-1", "cluster-2", true) - assertContainsPair(t, pairs, "cluster-1", "cluster-3", true) - assertContainsPair(t, pairs, "cluster-2", "cluster-3", true) + assertContainsPair(t, pairs, "alpha", "beta") + assertContainsPair(t, pairs, "alpha", "gamma") + assertContainsPair(t, pairs, "beta", "gamma") } -func TestResolveFullMesh(t *testing.T) { - tests := []struct { - name string - clusters []string - expected int - }{ 
- {"2 clusters", []string{"c1", "c2"}, 1}, - {"3 clusters", []string{"c1", "c2", "c3"}, 3}, - {"4 clusters", []string{"c1", "c2", "c3", "c4"}, 6}, - {"1 cluster", []string{"c1"}, 0}, - {"0 clusters", []string{}, 0}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - svc := &DefaultTopologyService{} - pairs, err := svc.resolveFullMesh(tt.clusters) - require.NoError(t, err) - assert.Len(t, pairs, tt.expected) - for _, p := range pairs { - assert.True(t, p.Bidirectional) - } - }) - } -} - -func TestResolveHubSpoke_SingleHub(t *testing.T) { - svc := &DefaultTopologyService{} - clusters := []string{"hub1", "spoke1", "spoke2", "spoke3"} - cfg := &controllerv1alpha1.HubSpokeConfig{ - HubClusters: []string{"hub1"}, - } - - pairs, err := svc.resolveHubSpoke(clusters, cfg) - require.NoError(t, err) - assert.Len(t, pairs, 3) - assertContainsPair(t, pairs, "hub1", "spoke1", true) - assertContainsPair(t, pairs, "hub1", "spoke2", true) - assertContainsPair(t, pairs, "hub1", "spoke3", true) -} - -func TestResolveHubSpoke_MultipleHubs(t *testing.T) { - svc := &DefaultTopologyService{} - clusters := []string{"hub1", "hub2", "spoke1", "spoke2"} - cfg := &controllerv1alpha1.HubSpokeConfig{ - HubClusters: []string{"hub1", "hub2"}, - } - - pairs, err := svc.resolveHubSpoke(clusters, cfg) - require.NoError(t, err) - assert.Len(t, pairs, 4) - assertContainsPair(t, pairs, "hub1", "spoke1", true) - assertContainsPair(t, pairs, "hub1", "spoke2", true) - assertContainsPair(t, pairs, "hub2", "spoke1", true) - assertContainsPair(t, pairs, "hub2", "spoke2", true) -} - -func TestResolveHubSpoke_AllowSpokeToSpokeAll(t *testing.T) { - svc := &DefaultTopologyService{} - clusters := []string{"hub1", "spoke1", "spoke2", "spoke3"} - cfg := &controllerv1alpha1.HubSpokeConfig{ - HubClusters: []string{"hub1"}, - AllowSpokeToSpoke: true, - } - - pairs, err := svc.resolveHubSpoke(clusters, cfg) - require.NoError(t, err) - assert.Len(t, pairs, 6) - assertContainsPair(t, pairs, 
"spoke1", "spoke2", true) - assertContainsPair(t, pairs, "spoke1", "spoke3", true) - assertContainsPair(t, pairs, "spoke2", "spoke3", true) -} - -func TestResolveHubSpoke_SelectiveSpokeConnectivity(t *testing.T) { - svc := &DefaultTopologyService{} - clusters := []string{"hub1", "spoke1", "spoke2", "spoke3"} - cfg := &controllerv1alpha1.HubSpokeConfig{ - HubClusters: []string{"hub1"}, - AllowSpokeToSpoke: true, - SpokeConnectivity: []controllerv1alpha1.ConnectivityEntry{ - {SourceCluster: "spoke1", TargetClusters: []string{"spoke2"}}, - }, - } - - pairs, err := svc.resolveHubSpoke(clusters, cfg) - require.NoError(t, err) - assert.Len(t, pairs, 4) - assertContainsPair(t, pairs, "hub1", "spoke1", true) - assertContainsPair(t, pairs, "hub1", "spoke2", true) - assertContainsPair(t, pairs, "hub1", "spoke3", true) - assertContainsPair(t, pairs, "spoke1", "spoke2", true) -} - -func TestResolveHubSpoke_NoHubError(t *testing.T) { - svc := &DefaultTopologyService{} - clusters := []string{"spoke1", "spoke2"} - cfg := &controllerv1alpha1.HubSpokeConfig{ - HubClusters: []string{}, - } - - _, err := svc.resolveHubSpoke(clusters, cfg) - require.Error(t, err) - assert.Contains(t, err.Error(), "at least one hub required") -} - -func TestResolveCustom(t *testing.T) { - svc := &DefaultTopologyService{} - clusters := []string{"c1", "c2", "c3"} - matrix := []controllerv1alpha1.ConnectivityEntry{ - {SourceCluster: "c1", TargetClusters: []string{"c2"}}, - {SourceCluster: "c2", TargetClusters: []string{"c3"}}, - } - - pairs, err := svc.resolveCustom(clusters, matrix) - require.NoError(t, err) - assert.Len(t, pairs, 2) - assertContainsPair(t, pairs, "c1", "c2", true) - assertContainsPair(t, pairs, "c2", "c3", true) -} - -func TestResolveCustom_UnknownClusterError(t *testing.T) { - svc := &DefaultTopologyService{} - clusters := []string{"c1", "c2"} - matrix := []controllerv1alpha1.ConnectivityEntry{ - {SourceCluster: "c1", TargetClusters: []string{"c99"}}, - } - - _, err := 
svc.resolveCustom(clusters, matrix) - require.Error(t, err) - assert.Contains(t, err.Error(), "unknown") -} - -func TestResolveAuto_NoForbidden(t *testing.T) { - svc := &DefaultTopologyService{} - clusters := []string{"c1", "c2", "c3"} - policyNodes := []string{} - - pairs, err := svc.resolveAuto(clusters, policyNodes) - require.NoError(t, err) - assert.Len(t, pairs, 3) - assertContainsPair(t, pairs, "c1", "c2", true) - assertContainsPair(t, pairs, "c1", "c3", true) - assertContainsPair(t, pairs, "c2", "c3", true) -} - -func TestResolveAuto_WithForbiddenEdges(t *testing.T) { - svc := &DefaultTopologyService{} - clusters := []string{"c1", "c2", "c3", "c4"} - policyNodes := []string{"c1"} - - pairs, err := svc.resolveAuto(clusters, policyNodes) - require.NoError(t, err) - assert.Len(t, pairs, 3) - assertContainsPair(t, pairs, "c2", "c3", true) - assertContainsPair(t, pairs, "c2", "c4", true) - assertContainsPair(t, pairs, "c3", "c4", true) - assertNotContainsPair(t, pairs, "c1", "c2") - assertNotContainsPair(t, pairs, "c1", "c3") - assertNotContainsPair(t, pairs, "c1", "c4") -} - -func TestResolveAuto_MultipleForbiddenPolicies(t *testing.T) { - svc := &DefaultTopologyService{} - clusters := []string{"c1", "c2", "c3", "c4"} - policyNodes := []string{"c1", "c2"} - - pairs, err := svc.resolveAuto(clusters, policyNodes) - require.NoError(t, err) - assert.Len(t, pairs, 1) - assertContainsPair(t, pairs, "c3", "c4", true) - assertNotContainsPair(t, pairs, "c1", "c2") - assertNotContainsPair(t, pairs, "c1", "c3") - assertNotContainsPair(t, pairs, "c1", "c4") - assertNotContainsPair(t, pairs, "c2", "c3") - assertNotContainsPair(t, pairs, "c2", "c4") -} - -func TestPairKey(t *testing.T) { - svc := &DefaultTopologyService{} - assert.Equal(t, "a-b", svc.pairKey("a", "b")) - assert.Equal(t, "a-b", svc.pairKey("b", "a")) - assert.Equal(t, "cluster-1-cluster-2", svc.pairKey("cluster-1", "cluster-2")) - assert.Equal(t, "cluster-1-cluster-2", svc.pairKey("cluster-2", "cluster-1")) -} 
- -func assertContainsPair(t *testing.T, pairs []GatewayPair, source, target string, bidirectional bool) { - for _, p := range pairs { - if (p.Source == source && p.Target == target) || (p.Source == target && p.Target == source) { - if p.Bidirectional == bidirectional { - return - } - } - } - t.Errorf("Expected to find pair %s <-> %s (bidirectional=%v)", source, target, bidirectional) -} - -func assertNotContainsPair(t *testing.T, pairs []GatewayPair, source, target string) { - for _, p := range pairs { - if (p.Source == source && p.Target == target) || (p.Source == target && p.Target == source) { - t.Errorf("Expected NOT to find pair %s <-> %s", source, target) - } - } -} - -func TestResolveTopology_UnknownType(t *testing.T) { +func TestResolveTopologyFullMeshExplicit(t *testing.T) { svc := NewTopologyService() sc := &controllerv1alpha1.SliceConfig{ Spec: controllerv1alpha1.SliceConfigSpec{ - Clusters: []string{"c1", "c2"}, + Clusters: []string{"c1", "c2", "c3", "c4"}, TopologyConfig: &controllerv1alpha1.TopologyConfig{ - TopologyType: "invalid-topology-type", + TopologyType: controllerv1alpha1.TopologyFullMesh, }, }, } - _, err := svc.ResolveTopology(sc) - require.Error(t, err) - assert.Contains(t, err.Error(), "unknown topology type") + pairs, err := svc.ResolveTopology(sc) + require.NoError(t, err) + assert.Len(t, pairs, 6) + assertContainsPair(t, pairs, "c1", "c4") + assertContainsPair(t, pairs, "c2", "c3") } -func TestResolveTopology_EmptyClusters(t *testing.T) { +func TestResolveTopologyCustomMatrix(t *testing.T) { svc := NewTopologyService() sc := &controllerv1alpha1.SliceConfig{ Spec: controllerv1alpha1.SliceConfigSpec{ - Clusters: []string{}, + Clusters: []string{"dmz", "gateway", "internal"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyCustom, + ConnectivityMatrix: []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "dmz", TargetClusters: []string{"gateway"}}, + {SourceCluster: "gateway", 
TargetClusters: []string{"internal"}}, + }, + }, }, } pairs, err := svc.ResolveTopology(sc) require.NoError(t, err) - assert.Empty(t, pairs) + assert.Len(t, pairs, 2) + assertContainsPair(t, pairs, "dmz", "gateway") + assertContainsPair(t, pairs, "gateway", "internal") } -func TestResolveTopology_SingleCluster(t *testing.T) { +func TestResolveTopologyAutoPolicyNodesReturnsError(t *testing.T) { svc := NewTopologyService() sc := &controllerv1alpha1.SliceConfig{ Spec: controllerv1alpha1.SliceConfigSpec{ - Clusters: []string{"only-cluster"}, + Clusters: []string{"gateway", "dmz", "internal", "analytics"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyAuto, + PolicyNodes: []string{"dmz", "analytics"}, + }, }, } - pairs, err := svc.ResolveTopology(sc) - require.NoError(t, err) - assert.Empty(t, pairs) + _, err := svc.ResolveTopology(sc) + require.Error(t, err) + assert.Contains(t, err.Error(), "partitioned topology") } -func TestResolveCustom_EmptyMatrix(t *testing.T) { +func TestEnsureConnectivityAddsBridgeEdge(t *testing.T) { svc := &DefaultTopologyService{} - clusters := []string{"c1", "c2"} - matrix := []controllerv1alpha1.ConnectivityEntry{} + clusters := []string{"a", "b", "c"} + pairs := []GatewayPair{{Source: "a", Target: "b", Bidirectional: true}} - _, err := svc.resolveCustom(clusters, matrix) - require.Error(t, err) - assert.Contains(t, err.Error(), "connectivity matrix required") + bridged, err := svc.ensureConnectivity(clusters, pairs, map[string]bool{}) + require.NoError(t, err) + assert.Len(t, bridged, 2) + // ensure new edge connects the previously isolated node + assertContainsPair(t, bridged, "a", "b") + assert.Contains(t, endpointPartners(bridged, "c"), "a") } -func TestResolveHubSpoke_NilConfig(t *testing.T) { +func TestResolveTopologyAutoAllPolicyNodes(t *testing.T) { svc := &DefaultTopologyService{} - clusters := []string{"c1", "c2"} + clusters := []string{"one", "two"} + policyNodes := 
[]string{"one", "two"} - _, err := svc.resolveHubSpoke(clusters, nil) + _, err := svc.resolveAuto(clusters, policyNodes) require.Error(t, err) - assert.Contains(t, err.Error(), "hub-spoke config required") + assert.Contains(t, err.Error(), "partitioned topology") } -func TestResolveAuto_AllPolicyNodes(t *testing.T) { - svc := &DefaultTopologyService{} - clusters := []string{"c1", "c2", "c3"} - policyNodes := []string{"c1", "c2", "c3"} - - pairs, err := svc.resolveAuto(clusters, policyNodes) - require.NoError(t, err) - assert.Empty(t, pairs, "All edges should be forbidden when all clusters are policy nodes") +func endpointPartners(pairs []GatewayPair, node string) []string { + partners := make([]string, 0) + for _, p := range pairs { + if p.Source == node { + partners = append(partners, p.Target) + } else if p.Target == node { + partners = append(partners, p.Source) + } + } + return partners } -func TestResolveCustom_DuplicateConnections(t *testing.T) { - svc := &DefaultTopologyService{} - clusters := []string{"c1", "c2", "c3"} - matrix := []controllerv1alpha1.ConnectivityEntry{ - {SourceCluster: "c1", TargetClusters: []string{"c2"}}, - {SourceCluster: "c2", TargetClusters: []string{"c1"}}, +func assertContainsPair(t *testing.T, pairs []GatewayPair, source, target string) { + t.Helper() + for _, p := range pairs { + if (p.Source == source && p.Target == target) || (p.Source == target && p.Target == source) { + return + } } - - pairs, err := svc.resolveCustom(clusters, matrix) - require.NoError(t, err) - assert.Len(t, pairs, 2, "Should allow bidirectional explicit connections") + t.Fatalf("expected to find pair %s-%s", source, target) } +func assertNotContainsPair(t *testing.T, pairs []GatewayPair, source, target string) { + t.Helper() + for _, p := range pairs { + if (p.Source == source && p.Target == target) || (p.Source == target && p.Target == source) { + t.Fatalf("did not expect to find pair %s-%s", source, target) + } + } +} diff --git 
a/service/topology_status.go b/service/topology_status.go new file mode 100644 index 000000000..e69de29bb diff --git a/service/topology_validation.go b/service/topology_validation.go index b336b83c7..6d29527ec 100644 --- a/service/topology_validation.go +++ b/service/topology_validation.go @@ -19,10 +19,6 @@ func (v *TopologyValidator) ValidateTopologyConfig(topology *controllerv1alpha1. clusterSet := toSet(clusters) switch topology.TopologyType { - case controllerv1alpha1.TopologyHubSpoke: - if err := v.validateHubSpoke(topology.HubSpoke, clusterSet); err != nil { - return err - } case controllerv1alpha1.TopologyCustom: if err := v.validateCustom(topology.ConnectivityMatrix, clusterSet); err != nil { return err @@ -44,48 +40,6 @@ func (v *TopologyValidator) ValidateTopologyConfig(topology *controllerv1alpha1. return v.validatePolicyNodes(topology.PolicyNodes, clusterSet) } -func (v *TopologyValidator) validateHubSpoke(config *controllerv1alpha1.HubSpokeConfig, clusterSet map[string]struct{}) error { - if config == nil { - return fmt.Errorf("hubSpoke config required for hub-spoke topology") - } - - if len(config.HubClusters) == 0 { - return fmt.Errorf("at least one hub cluster required") - } - - for _, hub := range config.HubClusters { - if _, exists := clusterSet[hub]; !exists { - return fmt.Errorf("hub cluster %s not in spec.clusters", hub) - } - } - - hubSet := toSet(config.HubClusters) - for _, spoke := range config.SpokeClusters { - if _, exists := clusterSet[spoke]; !exists { - return fmt.Errorf("spoke cluster %s not in spec.clusters", spoke) - } - if _, isHub := hubSet[spoke]; isHub { - return fmt.Errorf("cluster %s cannot be both hub and spoke", spoke) - } - } - - if config.SpokeConnectivity != nil { - spokeSet := toSet(config.SpokeClusters) - for _, entry := range config.SpokeConnectivity { - if _, exists := spokeSet[entry.SourceCluster]; !exists { - return fmt.Errorf("spokeConnectivity source %s not a spoke cluster", entry.SourceCluster) - } - for _, 
target := range entry.TargetClusters { - if _, exists := spokeSet[target]; !exists { - return fmt.Errorf("spokeConnectivity target %s not a spoke cluster", target) - } - } - } - } - - return nil -} - func (v *TopologyValidator) validateCustom(matrix []controllerv1alpha1.ConnectivityEntry, clusterSet map[string]struct{}) error { if len(matrix) == 0 { return fmt.Errorf("connectivityMatrix required for custom topology") From af6c1eeb55a92d0bdc815c43a11ea4f01f493a78 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Mon, 27 Oct 2025 12:30:11 +0000 Subject: [PATCH 06/34] feat: Replace ClusterRole/PolicyNodes with ForbiddenEdges API Replace deprecated PolicyNodes/ClusterRoles API with ForbiddenEdges for explicit edge blacklisting in topology configuration. Changes: - Remove ClusterRole struct (ClusterName, VPNRole fields) - Add ForbiddenEdge struct (SourceCluster, TargetClusters fields) - Replace TopologyConfig.ClusterRoles with ForbiddenEdges - Replace TopologyConfig.PolicyNodes with ForbiddenEdges - Update CRD OpenAPI schema to reflect new field structure - Regenerate deepcopy code for ForbiddenEdge type Rationale: ForbiddenEdges provides simpler, more predictable topology control compared to PolicyNodes' implicit isolation behavior. 
Part-of: Week 1 API changes Related-to: #topology-refactor Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- apis/controller/v1alpha1/sliceconfig_types.go | 12 ++--- .../v1alpha1/zz_generated.deepcopy.go | 50 ++++++++++--------- .../controller.kubeslice.io_sliceconfigs.yaml | 24 ++++----- 3 files changed, 40 insertions(+), 46 deletions(-) diff --git a/apis/controller/v1alpha1/sliceconfig_types.go b/apis/controller/v1alpha1/sliceconfig_types.go index 898c89113..d841930ac 100644 --- a/apis/controller/v1alpha1/sliceconfig_types.go +++ b/apis/controller/v1alpha1/sliceconfig_types.go @@ -189,8 +189,7 @@ type TopologyConfig struct { //+kubebuilder:default:=auto TopologyType TopologyType `json:"topologyType,omitempty"` ConnectivityMatrix []ConnectivityEntry `json:"connectivityMatrix,omitempty"` - ClusterRoles []ClusterRole `json:"clusterRoles,omitempty"` - PolicyNodes []string `json:"policyNodes,omitempty"` + ForbiddenEdges []ForbiddenEdge `json:"forbiddenEdges,omitempty"` AutoOptions *AutoTopologyOptions `json:"autoOptions,omitempty"` } @@ -201,12 +200,11 @@ type ConnectivityEntry struct { TargetClusters []string `json:"targetClusters"` } -type ClusterRole struct { +type ForbiddenEdge struct { //+kubebuilder:validation:Required - ClusterName string `json:"clusterName"` - //+kubebuilder:default:=auto - //+kubebuilder:validation:Enum:=auto;server;client - VPNRole string `json:"vpnRole,omitempty"` + SourceCluster string `json:"sourceCluster"` + //+kubebuilder:validation:Required + TargetClusters []string `json:"targetClusters"` } type AutoTopologyOptions struct { diff --git a/apis/controller/v1alpha1/zz_generated.deepcopy.go b/apis/controller/v1alpha1/zz_generated.deepcopy.go index d75b0a229..641136622 100644 --- a/apis/controller/v1alpha1/zz_generated.deepcopy.go +++ b/apis/controller/v1alpha1/zz_generated.deepcopy.go @@ -137,21 +137,6 @@ func (in *ClusterProperty) DeepCopy() 
*ClusterProperty { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole. -func (in *ClusterRole) DeepCopy() *ClusterRole { - if in == nil { - return nil - } - out := new(ClusterRole) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) { *out = *in @@ -292,6 +277,26 @@ func (in *ExternalGatewayConfigOptions) DeepCopy() *ExternalGatewayConfigOptions return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForbiddenEdge) DeepCopyInto(out *ForbiddenEdge) { + *out = *in + if in.TargetClusters != nil { + in, out := &in.TargetClusters, &out.TargetClusters + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForbiddenEdge. +func (in *ForbiddenEdge) DeepCopy() *ForbiddenEdge { + if in == nil { + return nil + } + out := new(ForbiddenEdge) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GPURestriction) DeepCopyInto(out *GPURestriction) { *out = *in @@ -998,15 +1003,12 @@ func (in *TopologyConfig) DeepCopyInto(out *TopologyConfig) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.ClusterRoles != nil { - in, out := &in.ClusterRoles, &out.ClusterRoles - *out = make([]ClusterRole, len(*in)) - copy(*out, *in) - } - if in.PolicyNodes != nil { - in, out := &in.PolicyNodes, &out.PolicyNodes - *out = make([]string, len(*in)) - copy(*out, *in) + if in.ForbiddenEdges != nil { + in, out := &in.ForbiddenEdges, &out.ForbiddenEdges + *out = make([]ForbiddenEdge, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.AutoOptions != nil { in, out := &in.AutoOptions, &out.AutoOptions diff --git a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml index 055027235..02e61abc3 100644 --- a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml +++ b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml @@ -253,23 +253,21 @@ spec: default: 5m type: string type: object - clusterRoles: + connectivityMatrix: items: properties: - clusterName: - type: string - vpnRole: - default: auto - enum: - - auto - - server - - client + sourceCluster: type: string + targetClusters: + items: + type: string + type: array required: - - clusterName + - sourceCluster + - targetClusters type: object type: array - connectivityMatrix: + forbiddenEdges: items: properties: sourceCluster: @@ -283,10 +281,6 @@ spec: - targetClusters type: object type: array - policyNodes: - items: - type: string - type: array topologyType: default: auto enum: From 5a5e553812fce93dd9b8f4c2b3f5816d50cdfdc9 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Sun, 2 Nov 2025 12:32:11 +0000 Subject: [PATCH 07/34] refactor: remove topology service and related files Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> 
--- config/samples/topology-hub-spoke.yaml | 23 --- service/topology_service.go | 231 ------------------------- service/topology_service_test.go | 136 --------------- service/topology_validation.go | 101 ----------- 4 files changed, 491 deletions(-) delete mode 100644 config/samples/topology-hub-spoke.yaml delete mode 100644 service/topology_service.go delete mode 100644 service/topology_service_test.go delete mode 100644 service/topology_validation.go diff --git a/config/samples/topology-hub-spoke.yaml b/config/samples/topology-hub-spoke.yaml deleted file mode 100644 index b40394149..000000000 --- a/config/samples/topology-hub-spoke.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: controller.kubeslice.io/v1alpha1 -kind: SliceConfig -metadata: - name: demo-hub-spoke - namespace: kubeslice-avesha -spec: - sliceSubnet: "10.1.0.0/16" - clusters: ["hub", "spoke1", "spoke2", "spoke3"] - topologyConfig: - topologyType: hub-spoke - hubSpoke: - hubClusters: ["hub"] - spokeClusters: ["spoke1", "spoke2", "spoke3"] - allowSpokeToSpoke: false - clusterRoles: - - clusterName: hub - vpnRole: server - - clusterName: spoke1 - vpnRole: client - - clusterName: spoke2 - vpnRole: client - - clusterName: spoke3 - vpnRole: client \ No newline at end of file diff --git a/service/topology_service.go b/service/topology_service.go deleted file mode 100644 index ca72eeb6c..000000000 --- a/service/topology_service.go +++ /dev/null @@ -1,231 +0,0 @@ -package service - -import ( - "fmt" - controllerv1alpha1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1" -) - -type GatewayPair struct { - Source string - Target string - Bidirectional bool -} - -type TopologyService interface { - ResolveTopology(sliceConfig *controllerv1alpha1.SliceConfig) ([]GatewayPair, error) -} - -type DefaultTopologyService struct{} - -func NewTopologyService() TopologyService { - return &DefaultTopologyService{} -} - -func (s *DefaultTopologyService) ResolveTopology(sc *controllerv1alpha1.SliceConfig) 
([]GatewayPair, error) { - if sc.Spec.TopologyConfig == nil { - return s.resolveFullMesh(sc.Spec.Clusters) - } - - switch sc.Spec.TopologyConfig.TopologyType { - case controllerv1alpha1.TopologyFullMesh, "": - return s.resolveFullMesh(sc.Spec.Clusters) - case controllerv1alpha1.TopologyCustom: - return s.resolveCustom(sc.Spec.Clusters, sc.Spec.TopologyConfig.ConnectivityMatrix) - case controllerv1alpha1.TopologyAuto: - return s.resolveAuto(sc.Spec.Clusters, sc.Spec.TopologyConfig.PolicyNodes) - default: - return nil, fmt.Errorf("unknown topology type: %s", sc.Spec.TopologyConfig.TopologyType) - } -} - -func (s *DefaultTopologyService) resolveFullMesh(clusters []string) ([]GatewayPair, error) { - if len(clusters) < 2 { - return []GatewayPair{}, nil - } - - pairs := make([]GatewayPair, 0, len(clusters)*(len(clusters)-1)/2) - for i := 0; i < len(clusters); i++ { - for j := i + 1; j < len(clusters); j++ { - pairs = append(pairs, GatewayPair{ - Source: clusters[i], - Target: clusters[j], - Bidirectional: true, - }) - } - } - return pairs, nil -} - -func (s *DefaultTopologyService) resolveCustom(clusters []string, matrix []controllerv1alpha1.ConnectivityEntry) ([]GatewayPair, error) { - if len(matrix) == 0 { - return nil, fmt.Errorf("custom topology requires connectivity matrix") - } - - clusterSet := s.toSet(clusters) - pairs := make([]GatewayPair, 0) - - for _, entry := range matrix { - if !clusterSet[entry.SourceCluster] { - return nil, fmt.Errorf("connectivity entry references unknown source cluster: %s", entry.SourceCluster) - } - for _, target := range entry.TargetClusters { - if !clusterSet[target] { - return nil, fmt.Errorf("connectivity entry references unknown target cluster: %s", target) - } - pairs = append(pairs, GatewayPair{ - Source: entry.SourceCluster, - Target: target, - Bidirectional: true, - }) - } - } - - return pairs, nil -} - -func (s *DefaultTopologyService) resolveAuto(clusters []string, policyNodes []string) ([]GatewayPair, error) { - allPairs, 
_ := s.resolveFullMesh(clusters) - - if len(policyNodes) == 0 { - return allPairs, nil - } - - forbidden := s.buildForbiddenSet(clusters, policyNodes) - filtered := s.filterPairs(allPairs, forbidden) - - preservedPairs, err := s.ensureConnectivity(clusters, filtered, forbidden) - if err != nil { - return nil, err - } - - return preservedPairs, nil -} - -func (s *DefaultTopologyService) ensureConnectivity(clusters []string, pairs []GatewayPair, forbidden map[string]bool) ([]GatewayPair, error) { - graph := s.buildGraph(pairs) - components := s.findComponents(clusters, graph) - - if len(components) <= 1 { - return pairs, nil - } - - bridgeEdges := s.findBridges(clusters, components, forbidden) - if len(bridgeEdges) == 0 { - return nil, fmt.Errorf("policy nodes create partitioned topology with no safe bridge edges available") - } - - for _, bridge := range bridgeEdges { - pairs = append(pairs, bridge) - } - - return pairs, nil -} - -func (s *DefaultTopologyService) buildGraph(pairs []GatewayPair) map[string][]string { - graph := make(map[string][]string) - for _, p := range pairs { - graph[p.Source] = append(graph[p.Source], p.Target) - graph[p.Target] = append(graph[p.Target], p.Source) - } - return graph -} - -func (s *DefaultTopologyService) findComponents(clusters []string, graph map[string][]string) [][]string { - visited := make(map[string]bool) - components := make([][]string, 0) - - for _, cluster := range clusters { - if !visited[cluster] { - component := s.dfs(cluster, graph, visited) - components = append(components, component) - } - } - - return components -} - -func (s *DefaultTopologyService) dfs(node string, graph map[string][]string, visited map[string]bool) []string { - visited[node] = true - component := []string{node} - - for _, neighbor := range graph[node] { - if !visited[neighbor] { - component = append(component, s.dfs(neighbor, graph, visited)...) 
- } - } - - return component -} - -func (s *DefaultTopologyService) findBridges(clusters []string, components [][]string, forbidden map[string]bool) []GatewayPair { - bridges := make([]GatewayPair, 0) - componentMap := make(map[string]int) - - for i, comp := range components { - for _, node := range comp { - componentMap[node] = i - } - } - - for i := 0; i < len(components); i++ { - for j := i + 1; j < len(components); j++ { - added := false - for _, ni := range components[i] { - if added { - break - } - for _, nj := range components[j] { - key := s.pairKey(ni, nj) - if !forbidden[key] { - bridges = append(bridges, GatewayPair{ - Source: ni, - Target: nj, - Bidirectional: true, - }) - added = true - break - } - } - } - } - } - - return bridges -} - -func (s *DefaultTopologyService) buildForbiddenSet(clusters []string, policyNodes []string) map[string]bool { - forbidden := make(map[string]bool) - for _, node := range policyNodes { - for _, cluster := range clusters { - if cluster != node { - forbidden[s.pairKey(node, cluster)] = true - } - } - } - return forbidden -} - -func (s *DefaultTopologyService) filterPairs(pairs []GatewayPair, forbidden map[string]bool) []GatewayPair { - filtered := make([]GatewayPair, 0, len(pairs)) - for _, p := range pairs { - if !forbidden[s.pairKey(p.Source, p.Target)] { - filtered = append(filtered, p) - } - } - return filtered -} - -func (s *DefaultTopologyService) toSet(items []string) map[string]bool { - set := make(map[string]bool, len(items)) - for _, item := range items { - set[item] = true - } - return set -} - -func (s *DefaultTopologyService) pairKey(a, b string) string { - if a < b { - return a + "-" + b - } - return b + "-" + a -} diff --git a/service/topology_service_test.go b/service/topology_service_test.go deleted file mode 100644 index faf3b8f57..000000000 --- a/service/topology_service_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package service - -import ( - "testing" - - controllerv1alpha1 
"github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestResolveTopologyDefaultsToFullMesh(t *testing.T) { - svc := NewTopologyService() - sc := &controllerv1alpha1.SliceConfig{ - Spec: controllerv1alpha1.SliceConfigSpec{ - Clusters: []string{"alpha", "beta", "gamma"}, - }, - } - - pairs, err := svc.ResolveTopology(sc) - require.NoError(t, err) - assert.Len(t, pairs, 3) - assertContainsPair(t, pairs, "alpha", "beta") - assertContainsPair(t, pairs, "alpha", "gamma") - assertContainsPair(t, pairs, "beta", "gamma") -} - -func TestResolveTopologyFullMeshExplicit(t *testing.T) { - svc := NewTopologyService() - sc := &controllerv1alpha1.SliceConfig{ - Spec: controllerv1alpha1.SliceConfigSpec{ - Clusters: []string{"c1", "c2", "c3", "c4"}, - TopologyConfig: &controllerv1alpha1.TopologyConfig{ - TopologyType: controllerv1alpha1.TopologyFullMesh, - }, - }, - } - - pairs, err := svc.ResolveTopology(sc) - require.NoError(t, err) - assert.Len(t, pairs, 6) - assertContainsPair(t, pairs, "c1", "c4") - assertContainsPair(t, pairs, "c2", "c3") -} - -func TestResolveTopologyCustomMatrix(t *testing.T) { - svc := NewTopologyService() - sc := &controllerv1alpha1.SliceConfig{ - Spec: controllerv1alpha1.SliceConfigSpec{ - Clusters: []string{"dmz", "gateway", "internal"}, - TopologyConfig: &controllerv1alpha1.TopologyConfig{ - TopologyType: controllerv1alpha1.TopologyCustom, - ConnectivityMatrix: []controllerv1alpha1.ConnectivityEntry{ - {SourceCluster: "dmz", TargetClusters: []string{"gateway"}}, - {SourceCluster: "gateway", TargetClusters: []string{"internal"}}, - }, - }, - }, - } - - pairs, err := svc.ResolveTopology(sc) - require.NoError(t, err) - assert.Len(t, pairs, 2) - assertContainsPair(t, pairs, "dmz", "gateway") - assertContainsPair(t, pairs, "gateway", "internal") -} - -func TestResolveTopologyAutoPolicyNodesReturnsError(t *testing.T) { - svc := NewTopologyService() - sc 
:= &controllerv1alpha1.SliceConfig{ - Spec: controllerv1alpha1.SliceConfigSpec{ - Clusters: []string{"gateway", "dmz", "internal", "analytics"}, - TopologyConfig: &controllerv1alpha1.TopologyConfig{ - TopologyType: controllerv1alpha1.TopologyAuto, - PolicyNodes: []string{"dmz", "analytics"}, - }, - }, - } - - _, err := svc.ResolveTopology(sc) - require.Error(t, err) - assert.Contains(t, err.Error(), "partitioned topology") -} - -func TestEnsureConnectivityAddsBridgeEdge(t *testing.T) { - svc := &DefaultTopologyService{} - clusters := []string{"a", "b", "c"} - pairs := []GatewayPair{{Source: "a", Target: "b", Bidirectional: true}} - - bridged, err := svc.ensureConnectivity(clusters, pairs, map[string]bool{}) - require.NoError(t, err) - assert.Len(t, bridged, 2) - // ensure new edge connects the previously isolated node - assertContainsPair(t, bridged, "a", "b") - assert.Contains(t, endpointPartners(bridged, "c"), "a") -} - -func TestResolveTopologyAutoAllPolicyNodes(t *testing.T) { - svc := &DefaultTopologyService{} - clusters := []string{"one", "two"} - policyNodes := []string{"one", "two"} - - _, err := svc.resolveAuto(clusters, policyNodes) - require.Error(t, err) - assert.Contains(t, err.Error(), "partitioned topology") -} - -func endpointPartners(pairs []GatewayPair, node string) []string { - partners := make([]string, 0) - for _, p := range pairs { - if p.Source == node { - partners = append(partners, p.Target) - } else if p.Target == node { - partners = append(partners, p.Source) - } - } - return partners -} - -func assertContainsPair(t *testing.T, pairs []GatewayPair, source, target string) { - t.Helper() - for _, p := range pairs { - if (p.Source == source && p.Target == target) || (p.Source == target && p.Target == source) { - return - } - } - t.Fatalf("expected to find pair %s-%s", source, target) -} - -func assertNotContainsPair(t *testing.T, pairs []GatewayPair, source, target string) { - t.Helper() - for _, p := range pairs { - if (p.Source == source 
&& p.Target == target) || (p.Source == target && p.Target == source) { - t.Fatalf("did not expect to find pair %s-%s", source, target) - } - } -} diff --git a/service/topology_validation.go b/service/topology_validation.go deleted file mode 100644 index 6d29527ec..000000000 --- a/service/topology_validation.go +++ /dev/null @@ -1,101 +0,0 @@ -package service - -import ( - "fmt" - controllerv1alpha1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1" -) - -type TopologyValidator struct{} - -func NewTopologyValidator() *TopologyValidator { - return &TopologyValidator{} -} - -func (v *TopologyValidator) ValidateTopologyConfig(topology *controllerv1alpha1.TopologyConfig, clusters []string) error { - if topology == nil { - return nil - } - - clusterSet := toSet(clusters) - - switch topology.TopologyType { - case controllerv1alpha1.TopologyCustom: - if err := v.validateCustom(topology.ConnectivityMatrix, clusterSet); err != nil { - return err - } - case controllerv1alpha1.TopologyAuto: - if err := v.validateAuto(topology, clusterSet); err != nil { - return err - } - case controllerv1alpha1.TopologyFullMesh: - case "": - default: - return fmt.Errorf("invalid topology type: %s", topology.TopologyType) - } - - if err := v.validateClusterRoles(topology.ClusterRoles, clusterSet); err != nil { - return err - } - - return v.validatePolicyNodes(topology.PolicyNodes, clusterSet) -} - -func (v *TopologyValidator) validateCustom(matrix []controllerv1alpha1.ConnectivityEntry, clusterSet map[string]struct{}) error { - if len(matrix) == 0 { - return fmt.Errorf("connectivityMatrix required for custom topology") - } - - for _, entry := range matrix { - if _, exists := clusterSet[entry.SourceCluster]; !exists { - return fmt.Errorf("connectivityMatrix source %s not in spec.clusters", entry.SourceCluster) - } - for _, target := range entry.TargetClusters { - if _, exists := clusterSet[target]; !exists { - return fmt.Errorf("connectivityMatrix target %s not in 
spec.clusters", target) - } - } - } - - return nil -} - -func (v *TopologyValidator) validateAuto(topology *controllerv1alpha1.TopologyConfig, clusterSet map[string]struct{}) error { - if topology.AutoOptions != nil { - if topology.AutoOptions.RelativeThresholdPercent < 1 || topology.AutoOptions.RelativeThresholdPercent > 500 { - return fmt.Errorf("relativeThresholdPercent must be between 1 and 500 (represents 0.1%% to 50.0%%)") - } - if topology.AutoOptions.PersistenceWindows < 1 { - return fmt.Errorf("persistenceWindows must be at least 1") - } - if topology.AutoOptions.MaxShortcuts < 1 || topology.AutoOptions.MaxShortcuts > 50 { - return fmt.Errorf("maxShortcuts must be between 1 and 50") - } - } - return nil -} - -func (v *TopologyValidator) validateClusterRoles(roles []controllerv1alpha1.ClusterRole, clusterSet map[string]struct{}) error { - for _, role := range roles { - if _, exists := clusterSet[role.ClusterName]; !exists { - return fmt.Errorf("clusterRole %s not in spec.clusters", role.ClusterName) - } - } - return nil -} - -func (v *TopologyValidator) validatePolicyNodes(policyNodes []string, clusterSet map[string]struct{}) error { - for _, node := range policyNodes { - if _, exists := clusterSet[node]; !exists { - return fmt.Errorf("policyNode %s not in spec.clusters", node) - } - } - return nil -} - -func toSet(items []string) map[string]struct{} { - set := make(map[string]struct{}, len(items)) - for _, item := range items { - set[item] = struct{}{} - } - return set -} From f3a559196341443c9d3a70d74478c99427e058b6 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Sun, 2 Nov 2025 12:40:49 +0000 Subject: [PATCH 08/34] feat(topology): add sample topology configuration files Add sample topology configurations for auto-secure, full-mesh, and custom-matrix topology types. 
Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- config/samples/topology-auto-secure.yaml | 9 +++------ config/samples/topology-custom-matrix.yaml | 7 +------ config/samples/topology-full-mesh.yaml | 18 ++++++++++++++++++ 3 files changed, 22 insertions(+), 12 deletions(-) create mode 100644 config/samples/topology-full-mesh.yaml diff --git a/config/samples/topology-auto-secure.yaml b/config/samples/topology-auto-secure.yaml index 8e108b920..086b14fa6 100644 --- a/config/samples/topology-auto-secure.yaml +++ b/config/samples/topology-auto-secure.yaml @@ -8,9 +8,6 @@ spec: clusters: ["dmz", "gateway", "internal", "analytics"] topologyConfig: topologyType: auto - policyNodes: ["gateway"] - clusterRoles: - - clusterName: gateway - vpnRole: server - - clusterName: dmz - vpnRole: client \ No newline at end of file + forbiddenEdges: + - sourceCluster: gateway + targetClusters: ["dmz", "analytics"] \ No newline at end of file diff --git a/config/samples/topology-custom-matrix.yaml b/config/samples/topology-custom-matrix.yaml index 7574a0726..449174f3a 100644 --- a/config/samples/topology-custom-matrix.yaml +++ b/config/samples/topology-custom-matrix.yaml @@ -14,9 +14,4 @@ spec: - sourceCluster: gateway targetClusters: ["internal", "dmz"] - sourceCluster: internal - targetClusters: ["database", "gateway"] - clusterRoles: - - clusterName: gateway - vpnRole: server - - clusterName: internal - vpnRole: server \ No newline at end of file + targetClusters: ["database", "gateway"] \ No newline at end of file diff --git a/config/samples/topology-full-mesh.yaml b/config/samples/topology-full-mesh.yaml new file mode 100644 index 000000000..5f5f25986 --- /dev/null +++ b/config/samples/topology-full-mesh.yaml @@ -0,0 +1,18 @@ +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: demo-full-mesh + namespace: kubeslice-avesha +spec: + sliceSubnet: "10.1.0.0/16" + 
clusters: ["cluster-1", "cluster-2", "cluster-3", "cluster-4"] + topologyConfig: + topologyType: full-mesh + # Full-mesh creates all possible connections between clusters + # For 4 clusters: 6 gateway pairs (n*(n-1)/2) + # cluster-1 <-> cluster-2 + # cluster-1 <-> cluster-3 + # cluster-1 <-> cluster-4 + # cluster-2 <-> cluster-3 + # cluster-2 <-> cluster-4 + # cluster-3 <-> cluster-4 From 84b163ca470fbd94573e78e3cef77fe0ee645be2 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Sun, 2 Nov 2025 12:41:42 +0000 Subject: [PATCH 09/34] feat(topology): update slice config validation and service logic Update slice configuration validation and service logic related to topology implementation. Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- service/slice_config_service.go | 251 ++++++++++++++++++++- service/slice_config_webhook_validation.go | 85 +++++-- 2 files changed, 316 insertions(+), 20 deletions(-) diff --git a/service/slice_config_service.go b/service/slice_config_service.go index 539a82efa..c7d9463f8 100644 --- a/service/slice_config_service.go +++ b/service/slice_config_service.go @@ -203,26 +203,35 @@ func (s *SliceConfigService) ReconcileSliceConfig(ctx context.Context, req ctrl. 
return ctrl.Result{}, err } - // Step 5: Create gateways with minimum specification - _, err = s.sgs.CreateMinimumWorkerSliceGateways(ctx, sliceConfig.Name, sliceConfig.Spec.Clusters, req.Namespace, ownershipLabel, clusterMap, sliceConfig.Spec.SliceSubnet, clusterCidr, sliceGwSvcTypeMap) + // Step 5: Resolve topology to get gateway pairs + // Following pattern: calculate data in SliceConfigService before delegating to sub-services + gatewayPairs, err := s.resolveTopologyPairs(sliceConfig) + if err != nil { + logger.Errorf("Failed to resolve topology for slice %s: %v", sliceConfig.Name, err) + return ctrl.Result{}, err + } + logger.Infof("Resolved %d gateway pairs for slice %s", len(gatewayPairs), sliceConfig.Name) + + // Step 6: Create gateways with minimum specification + _, err = s.sgs.CreateMinimumWorkerSliceGateways(ctx, sliceConfig.Name, sliceConfig.Spec.Clusters, req.Namespace, ownershipLabel, clusterMap, sliceConfig.Spec.SliceSubnet, clusterCidr, sliceGwSvcTypeMap, gatewayPairs) if err != nil { return ctrl.Result{}, err } logger.Infof("sliceConfig %v reconciled", req.NamespacedName) - // Step 6: Create VPNKeyRotation CR + // Step 7: Create VPNKeyRotation CR // TODO(rahul): handle change in rotation interval if err := s.vpn.CreateMinimalVpnKeyRotationConfig(ctx, sliceConfig.Name, sliceConfig.Namespace, sliceConfig.Spec.RotationInterval); err != nil { // register an event util.RecordEvent(ctx, eventRecorder, sliceConfig, nil, events.EventVPNKeyRotationConfigCreationFailed) return ctrl.Result{}, err } - // Step 7: update cluster info into vpnkeyrotation Cconfig + // Step 8: update cluster info into vpnkeyrotation Config if _, err := s.vpn.ReconcileClusters(ctx, sliceConfig.Name, sliceConfig.Namespace, sliceConfig.Spec.Clusters); err != nil { return ctrl.Result{}, err } - // Step 8: Create ServiceImport Objects + // Step 9: Create ServiceImport Objects serviceExports := &v1alpha1.ServiceExportConfigList{} _, err = s.getServiceExportBySliceName(ctx, 
req.Namespace, sliceConfig.Name, serviceExports) if err != nil { @@ -431,3 +440,235 @@ func (s *SliceConfigService) handleDefaultSliceConfigAppns(ctx context.Context, } return ctrl.Result{}, nil } + +// GatewayPair represents a bidirectional connection between two clusters +type GatewayPair struct { + Source string + Target string + Bidirectional bool +} + +// resolveTopologyPairs calculates gateway pairs based on topology configuration +// Following pattern: direct usage from sliceConfig.Spec (like QOSProfile) +func (s *SliceConfigService) resolveTopologyPairs(sliceConfig *v1alpha1.SliceConfig) ([]GatewayPair, error) { + clusters := sliceConfig.Spec.Clusters + + // Default to full-mesh if no topology config + if sliceConfig.Spec.TopologyConfig == nil { + return s.resolveFullMeshTopology(clusters), nil + } + + switch sliceConfig.Spec.TopologyConfig.TopologyType { + case v1alpha1.TopologyFullMesh, "": + return s.resolveFullMeshTopology(clusters), nil + case v1alpha1.TopologyCustom: + return s.resolveCustomTopology(clusters, sliceConfig.Spec.TopologyConfig.ConnectivityMatrix) + case v1alpha1.TopologyAuto: + return s.resolveAutoTopology(clusters, sliceConfig.Spec.TopologyConfig.ForbiddenEdges) + default: + return nil, fmt.Errorf("unknown topology type: %s", sliceConfig.Spec.TopologyConfig.TopologyType) + } +} + +// resolveFullMeshTopology creates bidirectional pairs for all cluster combinations +func (s *SliceConfigService) resolveFullMeshTopology(clusters []string) []GatewayPair { + if len(clusters) < 2 { + return []GatewayPair{} + } + + pairs := make([]GatewayPair, 0, len(clusters)*(len(clusters)-1)/2) + for i := 0; i < len(clusters); i++ { + for j := i + 1; j < len(clusters); j++ { + pairs = append(pairs, GatewayPair{ + Source: clusters[i], + Target: clusters[j], + Bidirectional: true, + }) + } + } + return pairs +} + +// resolveCustomTopology creates pairs based on explicit connectivity matrix +func (s *SliceConfigService) resolveCustomTopology(clusters []string, 
matrix []v1alpha1.ConnectivityEntry) ([]GatewayPair, error) { + if len(matrix) == 0 { + return nil, fmt.Errorf("custom topology requires connectivity matrix") + } + + clusterSet := s.makeClusterSet(clusters) + pairs := make([]GatewayPair, 0) + + for _, entry := range matrix { + if !clusterSet[entry.SourceCluster] { + return nil, fmt.Errorf("connectivity entry references unknown source cluster: %s", entry.SourceCluster) + } + for _, target := range entry.TargetClusters { + if !clusterSet[target] { + return nil, fmt.Errorf("connectivity entry references unknown target cluster: %s", target) + } + pairs = append(pairs, GatewayPair{ + Source: entry.SourceCluster, + Target: target, + Bidirectional: true, + }) + } + } + + return pairs, nil +} + +// resolveAutoTopology creates full-mesh and removes forbidden edges +func (s *SliceConfigService) resolveAutoTopology(clusters []string, forbiddenEdges []v1alpha1.ForbiddenEdge) ([]GatewayPair, error) { + // Start with full mesh + allPairs := s.resolveFullMeshTopology(clusters) + + if len(forbiddenEdges) == 0 { + return allPairs, nil + } + + // Build forbidden set + forbidden := s.buildForbiddenSet(forbiddenEdges) + + // Filter out forbidden pairs + filtered := s.filterForbiddenPairs(allPairs, forbidden) + + // Ensure connectivity (add bridge edges if partitioned) + preservedPairs, err := s.ensureConnectivity(clusters, filtered, forbidden) + if err != nil { + return nil, err + } + + return preservedPairs, nil +} + +// buildForbiddenSet creates a map of forbidden edges +func (s *SliceConfigService) buildForbiddenSet(forbiddenEdges []v1alpha1.ForbiddenEdge) map[string]bool { + forbidden := make(map[string]bool) + for _, edge := range forbiddenEdges { + for _, target := range edge.TargetClusters { + forbidden[s.pairKey(edge.SourceCluster, target)] = true + } + } + return forbidden +} + +// filterForbiddenPairs removes pairs that are in the forbidden set +func (s *SliceConfigService) filterForbiddenPairs(pairs []GatewayPair, 
forbidden map[string]bool) []GatewayPair { + filtered := make([]GatewayPair, 0, len(pairs)) + for _, p := range pairs { + if !forbidden[s.pairKey(p.Source, p.Target)] { + filtered = append(filtered, p) + } + } + return filtered +} + +// ensureConnectivity adds bridge edges if forbidden edges create partitions +func (s *SliceConfigService) ensureConnectivity(clusters []string, pairs []GatewayPair, forbidden map[string]bool) ([]GatewayPair, error) { + graph := s.buildGraph(pairs) + components := s.findConnectedComponents(clusters, graph) + + if len(components) <= 1 { + return pairs, nil + } + + // Find bridge edges between components + bridgeEdges := s.findBridgeEdges(components, forbidden) + if len(bridgeEdges) == 0 { + return nil, fmt.Errorf("forbidden edges create partitioned topology with no safe bridge edges available") + } + + // Add bridge edges to reconnect components + for _, bridge := range bridgeEdges { + pairs = append(pairs, bridge) + } + + return pairs, nil +} + +// buildGraph creates adjacency list from gateway pairs +func (s *SliceConfigService) buildGraph(pairs []GatewayPair) map[string][]string { + graph := make(map[string][]string) + for _, p := range pairs { + graph[p.Source] = append(graph[p.Source], p.Target) + graph[p.Target] = append(graph[p.Target], p.Source) + } + return graph +} + +// findConnectedComponents uses DFS to find all connected components +func (s *SliceConfigService) findConnectedComponents(clusters []string, graph map[string][]string) [][]string { + visited := make(map[string]bool) + components := make([][]string, 0) + + for _, cluster := range clusters { + if !visited[cluster] { + component := s.dfsComponent(cluster, graph, visited) + components = append(components, component) + } + } + + return components +} + +// dfsComponent performs depth-first search to find a component +func (s *SliceConfigService) dfsComponent(node string, graph map[string][]string, visited map[string]bool) []string { + visited[node] = true + component 
:= []string{node} + + for _, neighbor := range graph[node] { + if !visited[neighbor] { + component = append(component, s.dfsComponent(neighbor, graph, visited)...) + } + } + + return component +} + +// findBridgeEdges finds edges to connect partitioned components +func (s *SliceConfigService) findBridgeEdges(components [][]string, forbidden map[string]bool) []GatewayPair { + bridges := make([]GatewayPair, 0) + + // Connect each component to the next + for i := 0; i < len(components); i++ { + for j := i + 1; j < len(components); j++ { + added := false + for _, ni := range components[i] { + if added { + break + } + for _, nj := range components[j] { + key := s.pairKey(ni, nj) + if !forbidden[key] { + bridges = append(bridges, GatewayPair{ + Source: ni, + Target: nj, + Bidirectional: true, + }) + added = true + break + } + } + } + } + } + + return bridges +} + +// makeClusterSet creates a set from cluster list +func (s *SliceConfigService) makeClusterSet(clusters []string) map[string]bool { + set := make(map[string]bool, len(clusters)) + for _, cluster := range clusters { + set[cluster] = true + } + return set +} + +// pairKey creates a normalized key for a cluster pair +func (s *SliceConfigService) pairKey(a, b string) string { + if a < b { + return a + "-" + b + } + return b + "-" + a +} diff --git a/service/slice_config_webhook_validation.go b/service/slice_config_webhook_validation.go index 37c2b79ea..1f29ebc3d 100644 --- a/service/slice_config_webhook_validation.go +++ b/service/slice_config_webhook_validation.go @@ -59,13 +59,8 @@ func ValidateSliceConfigCreate(ctx context.Context, sliceConfig *controllerv1alp if err := validateMaxClusterCount(sliceConfig); err != nil { return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{err}) } - if sliceConfig.Spec.TopologyConfig != nil { - validator := NewTopologyValidator() - if err := 
validator.ValidateTopologyConfig(sliceConfig.Spec.TopologyConfig, sliceConfig.Spec.Clusters); err != nil { - return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{ - field.Invalid(field.NewPath("spec").Child("topologyConfig"), sliceConfig.Spec.TopologyConfig, err.Error()), - }) - } + if err := validateTopologyConfig(sliceConfig.Spec.TopologyConfig, sliceConfig.Spec.Clusters); err != nil { + return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{err}) } if sliceConfig.Spec.OverlayNetworkDeploymentMode != controllerv1alpha1.NONET { if err := validateSliceSubnet(sliceConfig); err != nil { @@ -114,15 +109,9 @@ func ValidateSliceConfigUpdate(ctx context.Context, sliceConfig *controllerv1alp if err := validateNamespaceIsolationProfile(sliceConfig); err != nil { return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{err}) } - if sliceConfig.Spec.TopologyConfig != nil { - validator := NewTopologyValidator() - if err := validator.ValidateTopologyConfig(sliceConfig.Spec.TopologyConfig, sliceConfig.Spec.Clusters); err != nil { - return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{ - field.Invalid(field.NewPath("spec").Child("topologyConfig"), sliceConfig.Spec.TopologyConfig, err.Error()), - }) - } + if err := validateTopologyConfig(sliceConfig.Spec.TopologyConfig, sliceConfig.Spec.Clusters); err != nil { + return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{err}) } - // Validate single/multi overlay network deployment mode specific fields if sliceConfig.Spec.OverlayNetworkDeploymentMode != controllerv1alpha1.NONET { if err := 
validateSliceSubnet(sliceConfig); err != nil { return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{err}) @@ -692,3 +681,69 @@ func checkIfQoSConfigExists(ctx context.Context, namespace string, qosProfileNam } return found } + +func validateTopologyConfig(topology *controllerv1alpha1.TopologyConfig, clusters []string) *field.Error { + if topology == nil { + return nil + } + clusterSet := make(map[string]struct{}, len(clusters)) + for _, c := range clusters { + clusterSet[c] = struct{}{} + } + topologyPath := field.NewPath("spec", "topologyConfig") + switch topology.TopologyType { + case controllerv1alpha1.TopologyCustom: + if err := validateCustomTopology(topology.ConnectivityMatrix, clusterSet, topologyPath); err != nil { + return err + } + case controllerv1alpha1.TopologyAuto: + if err := validateAutoTopology(topology, clusterSet, topologyPath); err != nil { + return err + } + case controllerv1alpha1.TopologyFullMesh, "": + default: + return field.Invalid(topologyPath.Child("topologyType"), topology.TopologyType, "must be one of: auto, full-mesh, custom") + } + return validateForbiddenEdges(topology.ForbiddenEdges, clusterSet, topologyPath) +} + +func validateCustomTopology(matrix []controllerv1alpha1.ConnectivityEntry, clusterSet map[string]struct{}, basePath *field.Path) *field.Error { + matrixPath := basePath.Child("connectivityMatrix") + if len(matrix) == 0 { + return field.Required(matrixPath, "required for custom topology") + } + for i, entry := range matrix { + entryPath := matrixPath.Index(i) + if _, exists := clusterSet[entry.SourceCluster]; !exists { + return field.Invalid(entryPath.Child("sourceCluster"), entry.SourceCluster, "not in spec.clusters") + } + for j, target := range entry.TargetClusters { + if _, exists := clusterSet[target]; !exists { + return field.Invalid(entryPath.Child("targetClusters").Index(j), target, "not in spec.clusters") + } + } + } + 
return nil +} + +func validateAutoTopology(topology *controllerv1alpha1.TopologyConfig, clusterSet map[string]struct{}, basePath *field.Path) *field.Error { + // Soft removal: ignore autoOptions fields entirely at webhook layer + // Controller will not act on these fields and webhook will not validate them + return nil +} + +func validateForbiddenEdges(edges []controllerv1alpha1.ForbiddenEdge, clusterSet map[string]struct{}, basePath *field.Path) *field.Error { + edgesPath := basePath.Child("forbiddenEdges") + for i, edge := range edges { + entryPath := edgesPath.Index(i) + if _, exists := clusterSet[edge.SourceCluster]; !exists { + return field.Invalid(entryPath.Child("sourceCluster"), edge.SourceCluster, "not in spec.clusters") + } + for j, target := range edge.TargetClusters { + if _, exists := clusterSet[target]; !exists { + return field.Invalid(entryPath.Child("targetClusters").Index(j), target, "not in spec.clusters") + } + } + } + return nil +} From 0804950f24872a6249de829df2da8a58f7b67880 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Sun, 2 Nov 2025 12:42:15 +0000 Subject: [PATCH 10/34] test(topology): update slice config webhook validation tests Add and update tests for slice configuration webhook validation related to topology implementation. 
Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- .../slice_config_webhook_validation_test.go | 104 ++++++++++++++++++ 1 file changed, 104 insertions(+) diff --git a/service/slice_config_webhook_validation_test.go b/service/slice_config_webhook_validation_test.go index 303738117..4d721f815 100644 --- a/service/slice_config_webhook_validation_test.go +++ b/service/slice_config_webhook_validation_test.go @@ -2316,3 +2316,107 @@ func setupSliceConfigWebhookValidationTest(name string, namespace string) (*util ctx := util.PrepareKubeSliceControllersRequestContext(context.Background(), clientMock, nil, "SliceConfigWebhookValidationServiceTest", nil) return clientMock, sliceConfig, ctx } + +func TestValidateTopologyConfig_FullMesh(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyFullMesh, + } + clusters := []string{"c1", "c2", "c3"} + + err := validateTopologyConfig(topology, clusters) + require.Nil(t, err) +} + +func TestValidateTopologyConfig_CustomMatrix(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyCustom, + ConnectivityMatrix: []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "c3"}}, + }, + } + clusters := []string{"c1", "c2", "c3"} + + err := validateTopologyConfig(topology, clusters) + require.Nil(t, err) +} + +func TestValidateTopologyConfig_CustomEmptyMatrix(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyCustom, + ConnectivityMatrix: []controllerv1alpha1.ConnectivityEntry{}, + } + clusters := []string{"c1", "c2"} + + err := validateTopologyConfig(topology, clusters) + require.NotNil(t, err) + require.Contains(t, err.Error(), "required for custom topology") +} + +func TestValidateTopologyConfig_InvalidClusterInMatrix(t *testing.T) { + topology := 
&controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyCustom, + ConnectivityMatrix: []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "invalid", TargetClusters: []string{"c2"}}, + }, + } + clusters := []string{"c1", "c2"} + + err := validateTopologyConfig(topology, clusters) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") +} + +func TestValidateTopologyConfig_AutoWithOptions(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyAuto, + AutoOptions: &controllerv1alpha1.AutoTopologyOptions{ + EnableShortcuts: true, + RelativeThresholdPercent: 20, + PersistenceWindows: 3, + MaxShortcuts: 5, + }, + } + clusters := []string{"c1", "c2", "c3"} + + err := validateTopologyConfig(topology, clusters) + require.Nil(t, err) +} + +func TestValidateTopologyConfig_AutoInvalidThreshold(t *testing.T) { + // Soft removal: invalid values should no longer trigger webhook validation errors + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyAuto, + AutoOptions: &controllerv1alpha1.AutoTopologyOptions{ + RelativeThresholdPercent: 600, + }, + } + clusters := []string{"c1", "c2"} + + err := validateTopologyConfig(topology, clusters) + require.Nil(t, err) +} + +func TestValidateTopologyConfig_InvalidForbiddenEdge(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyAuto, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "invalid", TargetClusters: []string{"c1"}}, + }, + } + clusters := []string{"c1", "c2"} + + err := validateTopologyConfig(topology, clusters) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") +} + +func TestValidateTopologyConfig_InvalidType(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: "invalid-type", + } + clusters := []string{"c1", "c2"} + + err := 
validateTopologyConfig(topology, clusters) + require.NotNil(t, err) + require.Contains(t, err.Error(), "must be one of") +} From 15d2d46d07f9131834ae816ac2564a48de76fb0c Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Sun, 2 Nov 2025 12:43:43 +0000 Subject: [PATCH 11/34] feat(topology): update worker slice gateway service logic Update worker slice gateway service logic related to topology implementation. Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- service/worker_slice_gateway_service.go | 64 +++++++++++++++---------- 1 file changed, 40 insertions(+), 24 deletions(-) diff --git a/service/worker_slice_gateway_service.go b/service/worker_slice_gateway_service.go index 1da5079f1..cc1e2fce3 100644 --- a/service/worker_slice_gateway_service.go +++ b/service/worker_slice_gateway_service.go @@ -46,7 +46,8 @@ const gatewayName = "%s-%s-%s" type IWorkerSliceGatewayService interface { ReconcileWorkerSliceGateways(ctx context.Context, req ctrl.Request) (ctrl.Result, error) CreateMinimumWorkerSliceGateways(ctx context.Context, sliceName string, clusterNames []string, namespace string, - label map[string]string, clusterMap map[string]int, sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType) (ctrl.Result, error) + label map[string]string, clusterMap map[string]int, sliceSubnet string, clusterCidr string, + sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType, gatewayPairs []GatewayPair) (ctrl.Result, error) ListWorkerSliceGateways(ctx context.Context, ownerLabel map[string]string, namespace string) ([]v1alpha1.WorkerSliceGateway, error) DeleteWorkerSliceGatewaysByLabel(ctx context.Context, label map[string]string, namespace string) error NodeIpReconciliationOfWorkerSliceGateways(ctx context.Context, cluster *controllerv1alpha1.Cluster, namespace string) error @@ -348,7 +349,7 @@ type 
IndividualCertPairRequest struct { // CreateMinimumWorkerSliceGateways is a function to create gateways with minimum specification func (s *WorkerSliceGatewayService) CreateMinimumWorkerSliceGateways(ctx context.Context, sliceName string, clusterNames []string, namespace string, label map[string]string, clusterMap map[string]int, - sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType) (ctrl.Result, error) { + sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType, gatewayPairs []GatewayPair) (ctrl.Result, error) { err := s.cleanupObsoleteGateways(ctx, namespace, label, clusterNames, clusterMap) if err != nil { @@ -358,7 +359,7 @@ func (s *WorkerSliceGatewayService) CreateMinimumWorkerSliceGateways(ctx context return ctrl.Result{}, nil } - _, err = s.createMinimumGatewaysIfNotExists(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap) + _, err = s.createMinimumGatewaysIfNotExists(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap, gatewayPairs) if err != nil { return ctrl.Result{}, err } @@ -438,9 +439,10 @@ func (s *WorkerSliceGatewayService) cleanupObsoleteGateways(ctx context.Context, // createMinimumGatewaysIfNotExists is a helper function to create the gateways between worker clusters if not exists func (s *WorkerSliceGatewayService) createMinimumGatewaysIfNotExists(ctx context.Context, sliceName string, clusterNames []string, namespace string, ownerLabel map[string]string, clusterMap map[string]int, - sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType) (ctrl.Result, error) { - noClusters := len(clusterNames) + sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType, gatewayPairs []GatewayPair) (ctrl.Result, error) { logger 
:= util.CtxLogger(ctx) + + // Build cluster mapping clusterMapping := map[string]*controllerv1alpha1.Cluster{} for _, clusterName := range clusterNames { cluster := controllerv1alpha1.Cluster{} @@ -450,28 +452,42 @@ func (s *WorkerSliceGatewayService) createMinimumGatewaysIfNotExists(ctx context } clusterMapping[clusterName] = &cluster } - for i := 0; i < noClusters; i++ { - for j := i + 1; j < noClusters; j++ { - sourceCluster, destinationCluster := clusterMapping[clusterNames[i]], clusterMapping[clusterNames[j]] - gatewayNumber := s.calculateGatewayNumber(clusterMap[sourceCluster.Name], clusterMap[destinationCluster.Name]) - gatewayAddresses := s.BuildNetworkAddresses(sliceSubnet, sourceCluster.Name, destinationCluster.Name, clusterMap, clusterCidr) - // determine the gateway svc parameters - sliceGwSvcType := defaultSliceGatewayServiceType - gwSvcProtocol := defaultSliceGatewayServiceProtocol - if val, exists := sliceGwSvcTypeMap[sourceCluster.Name]; exists { - sliceGwSvcType = val.Type - gwSvcProtocol = val.Protocol - } - logger.Debugf("setting gwConType in create_minwsg %s", sliceGwSvcType) - logger.Debugf("setting gwProto in create_minwsg %s", gwSvcProtocol) - err := s.createMinimumGateWayPairIfNotExists(ctx, sourceCluster, destinationCluster, sliceName, namespace, sliceGwSvcType, gwSvcProtocol, ownerLabel, gatewayNumber, gatewayAddresses) - if err != nil { - return ctrl.Result{}, err - } + + logger.Infof("Creating %d gateway pairs for slice %s", len(gatewayPairs), sliceName) + + // Create gateways for each resolved pair + for _, pair := range gatewayPairs { + sourceCluster := clusterMapping[pair.Source] + destinationCluster := clusterMapping[pair.Target] + + if sourceCluster == nil || destinationCluster == nil { + logger.Errorf("Cluster not found in mapping: source=%s, dest=%s", pair.Source, pair.Target) + continue + } + + gatewayNumber := s.calculateGatewayNumber(clusterMap[sourceCluster.Name], clusterMap[destinationCluster.Name]) + gatewayAddresses := 
s.BuildNetworkAddresses(sliceSubnet, sourceCluster.Name, destinationCluster.Name, clusterMap, clusterCidr) + + // Determine the gateway svc parameters + sliceGwSvcType := defaultSliceGatewayServiceType + gwSvcProtocol := defaultSliceGatewayServiceProtocol + if val, exists := sliceGwSvcTypeMap[sourceCluster.Name]; exists { + sliceGwSvcType = val.Type + gwSvcProtocol = val.Protocol + } + + logger.Debugf("Creating gateway pair: %s <-> %s (type=%s, proto=%s)", + sourceCluster.Name, destinationCluster.Name, sliceGwSvcType, gwSvcProtocol) + + err := s.createMinimumGateWayPairIfNotExists(ctx, sourceCluster, destinationCluster, sliceName, namespace, + sliceGwSvcType, gwSvcProtocol, ownerLabel, gatewayNumber, gatewayAddresses) + if err != nil { + return ctrl.Result{}, err } } - return ctrl.Result{}, nil + logger.Infof("Successfully created %d gateway pairs for slice %s", len(gatewayPairs), sliceName) + return ctrl.Result{}, nil } // createMinimumGateWayPairIfNotExists is a function to create the pair of gatways between 2 clusters if not exists From ffd9bf38443f9fbc14166078e772be680121fcc5 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Sun, 2 Nov 2025 12:44:13 +0000 Subject: [PATCH 12/34] test(topology): update worker slice gateway service tests Add and update tests for worker slice gateway service related to topology implementation. 
Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- service/worker_slice_gateway_service_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/service/worker_slice_gateway_service_test.go b/service/worker_slice_gateway_service_test.go index f09906b8f..9b27dd97d 100644 --- a/service/worker_slice_gateway_service_test.go +++ b/service/worker_slice_gateway_service_test.go @@ -551,7 +551,6 @@ func testNodeIpReconciliationOfWorkerSliceGatewaysExists(t *testing.T) { Annotations: nil, OwnerReferences: nil, Finalizers: nil, - ClusterName: "", ManagedFields: nil, }, Spec: controllerv1alpha1.ClusterSpec{}, From 0ae91dfa385df3ab05dbf598d7d28a936c806e42 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Sun, 2 Nov 2025 13:42:22 +0000 Subject: [PATCH 13/34] Update topologyType default to full-mesh and clean up comments This change updates the default topology type from 'auto' to 'full-mesh' in both the Go types and CRD specification. It also removes some unnecessary comments in the slice config service. 
Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- apis/controller/v1alpha1/sliceconfig_types.go | 2 +- config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml | 2 +- service/slice_config_service.go | 2 -- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/apis/controller/v1alpha1/sliceconfig_types.go b/apis/controller/v1alpha1/sliceconfig_types.go index d841930ac..7ff49b63d 100644 --- a/apis/controller/v1alpha1/sliceconfig_types.go +++ b/apis/controller/v1alpha1/sliceconfig_types.go @@ -186,7 +186,7 @@ const ( ) type TopologyConfig struct { - //+kubebuilder:default:=auto + //+kubebuilder:default:=full-mesh TopologyType TopologyType `json:"topologyType,omitempty"` ConnectivityMatrix []ConnectivityEntry `json:"connectivityMatrix,omitempty"` ForbiddenEdges []ForbiddenEdge `json:"forbiddenEdges,omitempty"` diff --git a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml index 02e61abc3..06fd1200f 100644 --- a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml +++ b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml @@ -282,7 +282,7 @@ spec: type: object type: array topologyType: - default: auto + default: full-mesh enum: - auto - full-mesh diff --git a/service/slice_config_service.go b/service/slice_config_service.go index c7d9463f8..b336793e1 100644 --- a/service/slice_config_service.go +++ b/service/slice_config_service.go @@ -204,7 +204,6 @@ func (s *SliceConfigService) ReconcileSliceConfig(ctx context.Context, req ctrl. 
} // Step 5: Resolve topology to get gateway pairs - // Following pattern: calculate data in SliceConfigService before delegating to sub-services gatewayPairs, err := s.resolveTopologyPairs(sliceConfig) if err != nil { logger.Errorf("Failed to resolve topology for slice %s: %v", sliceConfig.Name, err) @@ -449,7 +448,6 @@ type GatewayPair struct { } // resolveTopologyPairs calculates gateway pairs based on topology configuration -// Following pattern: direct usage from sliceConfig.Spec (like QOSProfile) func (s *SliceConfigService) resolveTopologyPairs(sliceConfig *v1alpha1.SliceConfig) ([]GatewayPair, error) { clusters := sliceConfig.Spec.Clusters From 0ce3632f352aebf20f229223be6a154bd85b8ba4 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Sun, 2 Nov 2025 14:36:45 +0000 Subject: [PATCH 14/34] Add changes to slice config service test Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- service/slice_config_service_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/service/slice_config_service_test.go b/service/slice_config_service_test.go index 1f317b5a1..ad6cd85b0 100644 --- a/service/slice_config_service_test.go +++ b/service/slice_config_service_test.go @@ -737,3 +737,4 @@ func setupSliceConfigTest(name string, namespace string) (*mocks.IWorkerSliceGat ctx := util.PrepareKubeSliceControllersRequestContext(context.Background(), clientMock, scheme, "SliceConfigServiceTest", &eventRecorder) return workerSliceGatewayMock, workerSliceConfigMock, serviceExportConfigMock, workerServiceImportMock, workerSliceGatewayRecyclerMock, clientMock, sliceConfig, ctx, sliceConfigService, requestObj, mMock } + From 4ab93f053657fcc8e959b1277073d4ef9dd1a2fe Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Sun, 2 Nov 2025 14:47:42 +0000 Subject: [PATCH 15/34] Update service/slice_config_service_test.go with new changes Signed-off-by: Transcendental-Programmer 
Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- service/slice_config_service_test.go | 462 +++++++++++++++++++++++++++ 1 file changed, 462 insertions(+) diff --git a/service/slice_config_service_test.go b/service/slice_config_service_test.go index ad6cd85b0..9145632b5 100644 --- a/service/slice_config_service_test.go +++ b/service/slice_config_service_test.go @@ -738,3 +738,465 @@ func setupSliceConfigTest(name string, namespace string) (*mocks.IWorkerSliceGat return workerSliceGatewayMock, workerSliceConfigMock, serviceExportConfigMock, workerServiceImportMock, workerSliceGatewayRecyclerMock, clientMock, sliceConfig, ctx, sliceConfigService, requestObj, mMock } +func TestResolveTopologyPairs_DefaultFullMesh(t *testing.T) { + service := &SliceConfigService{} + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster1", "cluster2", "cluster3"}, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.NoError(t, err) + require.Len(t, pairs, 3) + + expectedPairs := []GatewayPair{ + {Source: "cluster1", Target: "cluster2", Bidirectional: true}, + {Source: "cluster1", Target: "cluster3", Bidirectional: true}, + {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + } + require.ElementsMatch(t, expectedPairs, pairs) +} + +func TestResolveTopologyPairs_ExplicitFullMesh(t *testing.T) { + service := &SliceConfigService{} + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster1", "cluster2"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyFullMesh, + }, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.NoError(t, err) + require.Len(t, pairs, 1) + require.Equal(t, "cluster1", pairs[0].Source) + require.Equal(t, "cluster2", pairs[0].Target) + require.True(t, pairs[0].Bidirectional) +} + 
+func TestResolveTopologyPairs_CustomTopology(t *testing.T) { + service := &SliceConfigService{} + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster1", "cluster2", "cluster3"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyCustom, + ConnectivityMatrix: []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "cluster1", TargetClusters: []string{"cluster2"}}, + {SourceCluster: "cluster2", TargetClusters: []string{"cluster3"}}, + }, + }, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.NoError(t, err) + require.Len(t, pairs, 2) + + expectedPairs := []GatewayPair{ + {Source: "cluster1", Target: "cluster2", Bidirectional: true}, + {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + } + require.ElementsMatch(t, expectedPairs, pairs) +} + +func TestResolveTopologyPairs_CustomTopologyEmptyMatrix(t *testing.T) { + service := &SliceConfigService{} + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster1", "cluster2"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyCustom, + ConnectivityMatrix: []controllerv1alpha1.ConnectivityEntry{}, + }, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.Error(t, err) + require.Contains(t, err.Error(), "custom topology requires connectivity matrix") + require.Nil(t, pairs) +} + +func TestResolveTopologyPairs_AutoTopology(t *testing.T) { + service := &SliceConfigService{} + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster1", "cluster2", "cluster3"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyAuto, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "cluster1", TargetClusters: 
[]string{"cluster3"}}, + }, + }, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.NoError(t, err) + require.Len(t, pairs, 2) + + expectedPairs := []GatewayPair{ + {Source: "cluster1", Target: "cluster2", Bidirectional: true}, + {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + } + require.ElementsMatch(t, expectedPairs, pairs) +} + +func TestResolveTopologyPairs_AutoTopologyNoForbidden(t *testing.T) { + service := &SliceConfigService{} + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster1", "cluster2"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyAuto, + }, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.NoError(t, err) + require.Len(t, pairs, 1) + require.Equal(t, "cluster1", pairs[0].Source) + require.Equal(t, "cluster2", pairs[0].Target) +} + +func TestResolveTopologyPairs_UnknownType(t *testing.T) { + service := &SliceConfigService{} + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster1", "cluster2"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: "unknown-type", + }, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.Error(t, err) + require.Contains(t, err.Error(), "unknown topology type: unknown-type") + require.Nil(t, pairs) +} + +func TestResolveFullMeshTopology(t *testing.T) { + service := &SliceConfigService{} + + t.Run("SingleCluster", func(t *testing.T) { + pairs := service.resolveFullMeshTopology([]string{"cluster1"}) + require.Empty(t, pairs) + }) + + t.Run("TwoClusters", func(t *testing.T) { + pairs := service.resolveFullMeshTopology([]string{"cluster1", "cluster2"}) + require.Len(t, pairs, 1) + require.Equal(t, "cluster1", pairs[0].Source) + require.Equal(t, "cluster2", pairs[0].Target) + require.True(t, pairs[0].Bidirectional) + }) 
+ + t.Run("ThreeClusters", func(t *testing.T) { + pairs := service.resolveFullMeshTopology([]string{"cluster1", "cluster2", "cluster3"}) + require.Len(t, pairs, 3) + + expectedPairs := []GatewayPair{ + {Source: "cluster1", Target: "cluster2", Bidirectional: true}, + {Source: "cluster1", Target: "cluster3", Bidirectional: true}, + {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + } + require.ElementsMatch(t, expectedPairs, pairs) + }) + + t.Run("FourClusters", func(t *testing.T) { + pairs := service.resolveFullMeshTopology([]string{"cluster1", "cluster2", "cluster3", "cluster4"}) + require.Len(t, pairs, 6) + }) +} + +func TestResolveCustomTopology(t *testing.T) { + service := &SliceConfigService{} + clusters := []string{"cluster1", "cluster2", "cluster3"} + + t.Run("ValidMatrix", func(t *testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "cluster1", TargetClusters: []string{"cluster2", "cluster3"}}, + } + + pairs, err := service.resolveCustomTopology(clusters, matrix) + require.NoError(t, err) + require.Len(t, pairs, 2) + + expectedPairs := []GatewayPair{ + {Source: "cluster1", Target: "cluster2", Bidirectional: true}, + {Source: "cluster1", Target: "cluster3", Bidirectional: true}, + } + require.ElementsMatch(t, expectedPairs, pairs) + }) + + t.Run("EmptyMatrix", func(t *testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{} + + pairs, err := service.resolveCustomTopology(clusters, matrix) + require.Error(t, err) + require.Contains(t, err.Error(), "custom topology requires connectivity matrix") + require.Nil(t, pairs) + }) + + t.Run("InvalidSourceCluster", func(t *testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "invalid", TargetClusters: []string{"cluster2"}}, + } + + pairs, err := service.resolveCustomTopology(clusters, matrix) + require.Error(t, err) + require.Contains(t, err.Error(), "unknown source cluster: invalid") + }) + + t.Run("InvalidTargetCluster", func(t 
*testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "cluster1", TargetClusters: []string{"invalid"}}, + } + + pairs, err := service.resolveCustomTopology(clusters, matrix) + require.Error(t, err) + require.Contains(t, err.Error(), "unknown target cluster: invalid") + }) + + t.Run("MultipleSourceClusters", func(t *testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "cluster1", TargetClusters: []string{"cluster2"}}, + {SourceCluster: "cluster2", TargetClusters: []string{"cluster3"}}, + {SourceCluster: "cluster3", TargetClusters: []string{"cluster1"}}, + } + + pairs, err := service.resolveCustomTopology(clusters, matrix) + require.NoError(t, err) + require.Len(t, pairs, 3) + }) +} + +func TestResolveAutoTopology(t *testing.T) { + service := &SliceConfigService{} + clusters := []string{"cluster1", "cluster2", "cluster3"} + + t.Run("NoForbiddenEdges", func(t *testing.T) { + pairs, err := service.resolveAutoTopology(clusters, []controllerv1alpha1.ForbiddenEdge{}) + require.NoError(t, err) + require.Len(t, pairs, 3) + + expectedPairs := []GatewayPair{ + {Source: "cluster1", Target: "cluster2", Bidirectional: true}, + {Source: "cluster1", Target: "cluster3", Bidirectional: true}, + {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + } + require.ElementsMatch(t, expectedPairs, pairs) + }) + + t.Run("WithForbiddenEdges", func(t *testing.T) { + forbiddenEdges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "cluster1", TargetClusters: []string{"cluster3"}}, + } + + pairs, err := service.resolveAutoTopology(clusters, forbiddenEdges) + require.NoError(t, err) + require.Len(t, pairs, 2) + + expectedPairs := []GatewayPair{ + {Source: "cluster1", Target: "cluster2", Bidirectional: true}, + {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + } + require.ElementsMatch(t, expectedPairs, pairs) + }) + + t.Run("PartitionedTopology", func(t *testing.T) { + forbiddenEdges := 
[]controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "cluster1", TargetClusters: []string{"cluster2", "cluster3"}}, + {SourceCluster: "cluster2", TargetClusters: []string{"cluster3"}}, + } + + pairs, err := service.resolveAutoTopology(clusters, forbiddenEdges) + require.Error(t, err) + require.Contains(t, err.Error(), "partitioned topology with no safe bridge edges available") + }) + + t.Run("FourClustersWithBridge", func(t *testing.T) { + clusters4 := []string{"cluster1", "cluster2", "cluster3", "cluster4"} + forbiddenEdges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "cluster1", TargetClusters: []string{"cluster3", "cluster4"}}, + {SourceCluster: "cluster2", TargetClusters: []string{"cluster4"}}, + } + + pairs, err := service.resolveAutoTopology(clusters4, forbiddenEdges) + require.NoError(t, err) + require.GreaterOrEqual(t, len(pairs), 3) + }) +} + +func TestBuildForbiddenSet(t *testing.T) { + service := &SliceConfigService{} + + t.Run("SingleEdge", func(t *testing.T) { + forbiddenEdges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "cluster1", TargetClusters: []string{"cluster2"}}, + } + + forbidden := service.buildForbiddenSet(forbiddenEdges) + require.Len(t, forbidden, 1) + require.True(t, forbidden["cluster1-cluster2"]) + }) + + t.Run("MultipleEdges", func(t *testing.T) { + forbiddenEdges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "cluster1", TargetClusters: []string{"cluster2", "cluster3"}}, + {SourceCluster: "cluster2", TargetClusters: []string{"cluster3"}}, + } + + forbidden := service.buildForbiddenSet(forbiddenEdges) + require.Len(t, forbidden, 3) + require.True(t, forbidden["cluster1-cluster2"]) + require.True(t, forbidden["cluster1-cluster3"]) + require.True(t, forbidden["cluster2-cluster3"]) + }) + + t.Run("EmptyEdges", func(t *testing.T) { + forbidden := service.buildForbiddenSet([]controllerv1alpha1.ForbiddenEdge{}) + require.Empty(t, forbidden) + }) +} + +func TestFilterForbiddenPairs(t *testing.T) { + service 
:= &SliceConfigService{} + + t.Run("FilterSomePairs", func(t *testing.T) { + pairs := []GatewayPair{ + {Source: "cluster1", Target: "cluster2", Bidirectional: true}, + {Source: "cluster1", Target: "cluster3", Bidirectional: true}, + {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + } + + forbidden := map[string]bool{ + "cluster1-cluster3": true, + } + + filtered := service.filterForbiddenPairs(pairs, forbidden) + require.Len(t, filtered, 2) + + expectedPairs := []GatewayPair{ + {Source: "cluster1", Target: "cluster2", Bidirectional: true}, + {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + } + require.ElementsMatch(t, expectedPairs, filtered) + }) + + t.Run("NoForbiddenPairs", func(t *testing.T) { + pairs := []GatewayPair{ + {Source: "cluster1", Target: "cluster2", Bidirectional: true}, + } + + forbidden := map[string]bool{} + + filtered := service.filterForbiddenPairs(pairs, forbidden) + require.Equal(t, pairs, filtered) + }) + + t.Run("AllPairsForbidden", func(t *testing.T) { + pairs := []GatewayPair{ + {Source: "cluster1", Target: "cluster2", Bidirectional: true}, + } + + forbidden := map[string]bool{ + "cluster1-cluster2": true, + } + + filtered := service.filterForbiddenPairs(pairs, forbidden) + require.Empty(t, filtered) + }) +} + +func TestPairKey(t *testing.T) { + service := &SliceConfigService{} + + t.Run("OrderIndependent", func(t *testing.T) { + require.Equal(t, "a-b", service.pairKey("a", "b")) + require.Equal(t, "a-b", service.pairKey("b", "a")) + }) + + t.Run("ClusterNames", func(t *testing.T) { + require.Equal(t, "cluster1-cluster2", service.pairKey("cluster1", "cluster2")) + require.Equal(t, "cluster1-cluster2", service.pairKey("cluster2", "cluster1")) + }) + + t.Run("SameName", func(t *testing.T) { + require.Equal(t, "cluster1-cluster1", service.pairKey("cluster1", "cluster1")) + }) +} + +func TestEnsureConnectivity(t *testing.T) { + service := &SliceConfigService{} + + t.Run("AlreadyConnected", func(t *testing.T) { + 
clusters := []string{"cluster1", "cluster2", "cluster3"} + pairs := []GatewayPair{ + {Source: "cluster1", Target: "cluster2", Bidirectional: true}, + {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + } + forbidden := map[string]bool{} + + result, err := service.ensureConnectivity(clusters, pairs, forbidden) + require.NoError(t, err) + require.Equal(t, pairs, result) + }) + + t.Run("DisconnectedWithBridge", func(t *testing.T) { + clusters := []string{"cluster1", "cluster2", "cluster3"} + pairs := []GatewayPair{ + {Source: "cluster1", Target: "cluster2", Bidirectional: true}, + } + forbidden := map[string]bool{ + "cluster1-cluster3": true, + } + + result, err := service.ensureConnectivity(clusters, pairs, forbidden) + require.NoError(t, err) + require.Len(t, result, 2) + require.Contains(t, result, GatewayPair{Source: "cluster2", Target: "cluster3", Bidirectional: true}) + }) + + t.Run("CompletelyDisconnected", func(t *testing.T) { + clusters := []string{"cluster1", "cluster2", "cluster3"} + pairs := []GatewayPair{} + forbidden := map[string]bool{ + "cluster1-cluster2": true, + "cluster1-cluster3": true, + "cluster2-cluster3": true, + } + + result, err := service.ensureConnectivity(clusters, pairs, forbidden) + require.Error(t, err) + require.Contains(t, err.Error(), "partitioned topology with no safe bridge edges available") + require.Nil(t, result) + }) +} + +func TestMakeClusterSet(t *testing.T) { + service := &SliceConfigService{} + + t.Run("ValidClusters", func(t *testing.T) { + clusters := []string{"cluster1", "cluster2", "cluster3"} + set := service.makeClusterSet(clusters) + + require.Len(t, set, 3) + require.True(t, set["cluster1"]) + require.True(t, set["cluster2"]) + require.True(t, set["cluster3"]) + require.False(t, set["nonexistent"]) + }) + + t.Run("EmptyClusters", func(t *testing.T) { + set := service.makeClusterSet([]string{}) + require.Empty(t, set) + }) +} + From 197c3e1d8c53ef7afdd8c8e190c0e43a7f29b681 Mon Sep 17 00:00:00 2001 From: 
Transcendental-Programmer Date: Mon, 3 Nov 2025 06:23:25 +0000 Subject: [PATCH 16/34] Remove autotopology telemetry options from API definitions - Remove AutoOptions struct containing telemetry fields - Remove telemetry-specific configuration options - Keep topology type enum with 'auto', 'full-mesh', and 'custom' values - Update TopologyConfig to only include connectivity-based options This removes all telemetry-based autotopology functionality while maintaining the same API interface Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- apis/controller/v1alpha1/sliceconfig_types.go | 24 +++---------------- .../v1alpha1/zz_generated.deepcopy.go | 20 ---------------- 2 files changed, 3 insertions(+), 41 deletions(-) diff --git a/apis/controller/v1alpha1/sliceconfig_types.go b/apis/controller/v1alpha1/sliceconfig_types.go index 7ff49b63d..a44ef9edf 100644 --- a/apis/controller/v1alpha1/sliceconfig_types.go +++ b/apis/controller/v1alpha1/sliceconfig_types.go @@ -187,10 +187,9 @@ const ( type TopologyConfig struct { //+kubebuilder:default:=full-mesh - TopologyType TopologyType `json:"topologyType,omitempty"` - ConnectivityMatrix []ConnectivityEntry `json:"connectivityMatrix,omitempty"` - ForbiddenEdges []ForbiddenEdge `json:"forbiddenEdges,omitempty"` - AutoOptions *AutoTopologyOptions `json:"autoOptions,omitempty"` + TopologyType TopologyType `json:"topologyType,omitempty"` + ConnectivityMatrix []ConnectivityEntry `json:"connectivityMatrix,omitempty"` + ForbiddenEdges []ForbiddenEdge `json:"forbiddenEdges,omitempty"` } type ConnectivityEntry struct { @@ -207,23 +206,6 @@ type ForbiddenEdge struct { TargetClusters []string `json:"targetClusters"` } -type AutoTopologyOptions struct { - //+kubebuilder:default:=false - EnableShortcuts bool `json:"enableShortcuts,omitempty"` - //+kubebuilder:default:=20 - //+kubebuilder:validation:Minimum=1 - //+kubebuilder:validation:Maximum=500 - 
RelativeThresholdPercent int `json:"relativeThresholdPercent,omitempty"` - //+kubebuilder:default:=3 - //+kubebuilder:validation:Minimum=1 - PersistenceWindows int `json:"persistenceWindows,omitempty"` - //+kubebuilder:default:=10 - //+kubebuilder:validation:Minimum=1 - //+kubebuilder:validation:Maximum=50 - MaxShortcuts int `json:"maxShortcuts,omitempty"` - //+kubebuilder:default:="5m" - TelemetryWindow string `json:"telemetryWindow,omitempty"` -} type KubesliceEvent struct { // Type of the event. Can be one of Error, Success or InProgress diff --git a/apis/controller/v1alpha1/zz_generated.deepcopy.go b/apis/controller/v1alpha1/zz_generated.deepcopy.go index 641136622..e1749a662 100644 --- a/apis/controller/v1alpha1/zz_generated.deepcopy.go +++ b/apis/controller/v1alpha1/zz_generated.deepcopy.go @@ -24,21 +24,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AutoTopologyOptions) DeepCopyInto(out *AutoTopologyOptions) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoTopologyOptions. -func (in *AutoTopologyOptions) DeepCopy() *AutoTopologyOptions { - if in == nil { - return nil - } - out := new(AutoTopologyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Cluster) DeepCopyInto(out *Cluster) { *out = *in @@ -1010,11 +995,6 @@ func (in *TopologyConfig) DeepCopyInto(out *TopologyConfig) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.AutoOptions != nil { - in, out := &in.AutoOptions, &out.AutoOptions - *out = new(AutoTopologyOptions) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologyConfig. 
From 2afe629e22b406ccab5535295bef60081abb05d4 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Mon, 3 Nov 2025 06:46:12 +0000 Subject: [PATCH 17/34] Remove autotopology telemetry options from API definitions - Remove AutoOptions struct containing telemetry fields - Remove telemetry-specific configuration options - Keep topology type enum with 'auto', 'full-mesh', and 'custom' values - Update TopologyConfig to only include connectivity-based options This removes all telemetry-based autotopology functionality while maintaining the same API interface Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- .../controller.kubeslice.io_sliceconfigs.yaml | 23 ------------------- .../slice_config_webhook_validation_test.go | 10 -------- service/topology_status.go | 0 3 files changed, 33 deletions(-) delete mode 100644 service/topology_status.go diff --git a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml index 06fd1200f..56b43932e 100644 --- a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml +++ b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml @@ -230,29 +230,6 @@ spec: topologyConfig: description: TopologyConfig defines cluster connectivity patterns properties: - autoOptions: - properties: - enableShortcuts: - default: false - type: boolean - maxShortcuts: - default: 10 - maximum: 50 - minimum: 1 - type: integer - persistenceWindows: - default: 3 - minimum: 1 - type: integer - relativeThresholdPercent: - default: 20 - maximum: 500 - minimum: 1 - type: integer - telemetryWindow: - default: 5m - type: string - type: object connectivityMatrix: items: properties: diff --git a/service/slice_config_webhook_validation_test.go b/service/slice_config_webhook_validation_test.go index 4d721f815..cc1d94aff 100644 --- a/service/slice_config_webhook_validation_test.go +++ 
b/service/slice_config_webhook_validation_test.go @@ -2369,12 +2369,6 @@ func TestValidateTopologyConfig_InvalidClusterInMatrix(t *testing.T) { func TestValidateTopologyConfig_AutoWithOptions(t *testing.T) { topology := &controllerv1alpha1.TopologyConfig{ TopologyType: controllerv1alpha1.TopologyAuto, - AutoOptions: &controllerv1alpha1.AutoTopologyOptions{ - EnableShortcuts: true, - RelativeThresholdPercent: 20, - PersistenceWindows: 3, - MaxShortcuts: 5, - }, } clusters := []string{"c1", "c2", "c3"} @@ -2383,12 +2377,8 @@ func TestValidateTopologyConfig_AutoWithOptions(t *testing.T) { } func TestValidateTopologyConfig_AutoInvalidThreshold(t *testing.T) { - // Soft removal: invalid values should no longer trigger webhook validation errors topology := &controllerv1alpha1.TopologyConfig{ TopologyType: controllerv1alpha1.TopologyAuto, - AutoOptions: &controllerv1alpha1.AutoTopologyOptions{ - RelativeThresholdPercent: 600, - }, } clusters := []string{"c1", "c2"} diff --git a/service/topology_status.go b/service/topology_status.go deleted file mode 100644 index e69de29bb..000000000 From c34be3e42afe2d64bc3a214fe4a05b2ce2dfb5ce Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Mon, 10 Nov 2025 06:06:03 +0000 Subject: [PATCH 18/34] fix: move GatewayPair to util package to resolve circular dependency - Move GatewayPair struct from slice_config_service.go to util/common.go - Update all service files to use util.GatewayPair - Regenerate mocks with updated interface signature - Fix test compilation by using util.GatewayPair in tests - This resolves circular dependency issue in mocks package Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- .gitignore | 6 ++- service/mocks/IWorkerSliceGatewayService.go | 57 ++++++++++++++------ service/slice_config_service.go | 40 ++++++-------- service/slice_config_service_test.go | 36 ++++++------- service/worker_slice_gateway_service.go | 6 
+-- service/worker_slice_gateway_service_test.go | 4 +- util/common.go | 6 +++ 7 files changed, 92 insertions(+), 63 deletions(-) diff --git a/.gitignore b/.gitignore index d0b26d3eb..b65eaf9e7 100644 --- a/.gitignore +++ b/.gitignore @@ -32,4 +32,8 @@ work cover.out coverage.out -.vscode \ No newline at end of file +.vscode +*.md +demo/ +scripts/ +tests/ diff --git a/service/mocks/IWorkerSliceGatewayService.go b/service/mocks/IWorkerSliceGatewayService.go index 602ed66b1..507f5b245 100644 --- a/service/mocks/IWorkerSliceGatewayService.go +++ b/service/mocks/IWorkerSliceGatewayService.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. package mocks @@ -24,6 +24,10 @@ type IWorkerSliceGatewayService struct { func (_m *IWorkerSliceGatewayService) BuildNetworkAddresses(sliceSubnet string, sourceClusterName string, destinationClusterName string, clusterMap map[string]int, clusterCidr string) util.WorkerSliceGatewayNetworkAddresses { ret := _m.Called(sliceSubnet, sourceClusterName, destinationClusterName, clusterMap, clusterCidr) + if len(ret) == 0 { + panic("no return value specified for BuildNetworkAddresses") + } + var r0 util.WorkerSliceGatewayNetworkAddresses if rf, ok := ret.Get(0).(func(string, string, string, map[string]int, string) util.WorkerSliceGatewayNetworkAddresses); ok { r0 = rf(sliceSubnet, sourceClusterName, destinationClusterName, clusterMap, clusterCidr) @@ -34,23 +38,27 @@ func (_m *IWorkerSliceGatewayService) BuildNetworkAddresses(sliceSubnet string, return r0 } -// CreateMinimumWorkerSliceGateways provides a mock function with given fields: ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap -func (_m *IWorkerSliceGatewayService) CreateMinimumWorkerSliceGateways(ctx context.Context, sliceName string, clusterNames []string, namespace string, label map[string]string, clusterMap map[string]int, sliceSubnet string, clusterCidr 
string, sliceGwSvcTypeMap map[string]*v1alpha1.SliceGatewayServiceType) (reconcile.Result, error) { - ret := _m.Called(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap) +// CreateMinimumWorkerSliceGateways provides a mock function with given fields: ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap, gatewayPairs +func (_m *IWorkerSliceGatewayService) CreateMinimumWorkerSliceGateways(ctx context.Context, sliceName string, clusterNames []string, namespace string, label map[string]string, clusterMap map[string]int, sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*v1alpha1.SliceGatewayServiceType, gatewayPairs []util.GatewayPair) (reconcile.Result, error) { + ret := _m.Called(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap, gatewayPairs) + + if len(ret) == 0 { + panic("no return value specified for CreateMinimumWorkerSliceGateways") + } var r0 reconcile.Result var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, []string, string, map[string]string, map[string]int, string, string, map[string]*v1alpha1.SliceGatewayServiceType) (reconcile.Result, error)); ok { - return rf(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap) + if rf, ok := ret.Get(0).(func(context.Context, string, []string, string, map[string]string, map[string]int, string, string, map[string]*v1alpha1.SliceGatewayServiceType, []util.GatewayPair) (reconcile.Result, error)); ok { + return rf(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap, gatewayPairs) } - if rf, ok := ret.Get(0).(func(context.Context, string, []string, string, map[string]string, map[string]int, string, string, map[string]*v1alpha1.SliceGatewayServiceType) reconcile.Result); ok { - r0 = rf(ctx, sliceName, clusterNames, namespace, 
label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap) + if rf, ok := ret.Get(0).(func(context.Context, string, []string, string, map[string]string, map[string]int, string, string, map[string]*v1alpha1.SliceGatewayServiceType, []util.GatewayPair) reconcile.Result); ok { + r0 = rf(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap, gatewayPairs) } else { r0 = ret.Get(0).(reconcile.Result) } - if rf, ok := ret.Get(1).(func(context.Context, string, []string, string, map[string]string, map[string]int, string, string, map[string]*v1alpha1.SliceGatewayServiceType) error); ok { - r1 = rf(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap) + if rf, ok := ret.Get(1).(func(context.Context, string, []string, string, map[string]string, map[string]int, string, string, map[string]*v1alpha1.SliceGatewayServiceType, []util.GatewayPair) error); ok { + r1 = rf(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap, gatewayPairs) } else { r1 = ret.Error(1) } @@ -62,6 +70,10 @@ func (_m *IWorkerSliceGatewayService) CreateMinimumWorkerSliceGateways(ctx conte func (_m *IWorkerSliceGatewayService) DeleteWorkerSliceGatewaysByLabel(ctx context.Context, label map[string]string, namespace string) error { ret := _m.Called(ctx, label, namespace) + if len(ret) == 0 { + panic("no return value specified for DeleteWorkerSliceGatewaysByLabel") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, map[string]string, string) error); ok { r0 = rf(ctx, label, namespace) @@ -76,6 +88,10 @@ func (_m *IWorkerSliceGatewayService) DeleteWorkerSliceGatewaysByLabel(ctx conte func (_m *IWorkerSliceGatewayService) GenerateCerts(ctx context.Context, sliceName string, namespace string, gatewayProtocol string, serverGateway *workerv1alpha1.WorkerSliceGateway, clientGateway *workerv1alpha1.WorkerSliceGateway, gatewayAddresses 
util.WorkerSliceGatewayNetworkAddresses) error { ret := _m.Called(ctx, sliceName, namespace, gatewayProtocol, serverGateway, clientGateway, gatewayAddresses) + if len(ret) == 0 { + panic("no return value specified for GenerateCerts") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, string, string, *workerv1alpha1.WorkerSliceGateway, *workerv1alpha1.WorkerSliceGateway, util.WorkerSliceGatewayNetworkAddresses) error); ok { r0 = rf(ctx, sliceName, namespace, gatewayProtocol, serverGateway, clientGateway, gatewayAddresses) @@ -90,6 +106,10 @@ func (_m *IWorkerSliceGatewayService) GenerateCerts(ctx context.Context, sliceNa func (_m *IWorkerSliceGatewayService) ListWorkerSliceGateways(ctx context.Context, ownerLabel map[string]string, namespace string) ([]workerv1alpha1.WorkerSliceGateway, error) { ret := _m.Called(ctx, ownerLabel, namespace) + if len(ret) == 0 { + panic("no return value specified for ListWorkerSliceGateways") + } + var r0 []workerv1alpha1.WorkerSliceGateway var r1 error if rf, ok := ret.Get(0).(func(context.Context, map[string]string, string) ([]workerv1alpha1.WorkerSliceGateway, error)); ok { @@ -116,6 +136,10 @@ func (_m *IWorkerSliceGatewayService) ListWorkerSliceGateways(ctx context.Contex func (_m *IWorkerSliceGatewayService) NodeIpReconciliationOfWorkerSliceGateways(ctx context.Context, cluster *v1alpha1.Cluster, namespace string) error { ret := _m.Called(ctx, cluster, namespace) + if len(ret) == 0 { + panic("no return value specified for NodeIpReconciliationOfWorkerSliceGateways") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *v1alpha1.Cluster, string) error); ok { r0 = rf(ctx, cluster, namespace) @@ -130,6 +154,10 @@ func (_m *IWorkerSliceGatewayService) NodeIpReconciliationOfWorkerSliceGateways( func (_m *IWorkerSliceGatewayService) ReconcileWorkerSliceGateways(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { ret := _m.Called(ctx, req) + if len(ret) == 0 { + panic("no return 
value specified for ReconcileWorkerSliceGateways") + } + var r0 reconcile.Result var r1 error if rf, ok := ret.Get(0).(func(context.Context, reconcile.Request) (reconcile.Result, error)); ok { @@ -150,13 +178,12 @@ func (_m *IWorkerSliceGatewayService) ReconcileWorkerSliceGateways(ctx context.C return r0, r1 } -type mockConstructorTestingTNewIWorkerSliceGatewayService interface { +// NewIWorkerSliceGatewayService creates a new instance of IWorkerSliceGatewayService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIWorkerSliceGatewayService(t interface { mock.TestingT Cleanup(func()) -} - -// NewIWorkerSliceGatewayService creates a new instance of IWorkerSliceGatewayService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewIWorkerSliceGatewayService(t mockConstructorTestingTNewIWorkerSliceGatewayService) *IWorkerSliceGatewayService { +}) *IWorkerSliceGatewayService { mock := &IWorkerSliceGatewayService{} mock.Mock.Test(t) diff --git a/service/slice_config_service.go b/service/slice_config_service.go index b336793e1..363465a7b 100644 --- a/service/slice_config_service.go +++ b/service/slice_config_service.go @@ -440,15 +440,7 @@ func (s *SliceConfigService) handleDefaultSliceConfigAppns(ctx context.Context, return ctrl.Result{}, nil } -// GatewayPair represents a bidirectional connection between two clusters -type GatewayPair struct { - Source string - Target string - Bidirectional bool -} - -// resolveTopologyPairs calculates gateway pairs based on topology configuration -func (s *SliceConfigService) resolveTopologyPairs(sliceConfig *v1alpha1.SliceConfig) ([]GatewayPair, error) { +func (s *SliceConfigService) resolveTopologyPairs(sliceConfig *v1alpha1.SliceConfig) ([]util.GatewayPair, error) { clusters := sliceConfig.Spec.Clusters // Default to full-mesh if no topology config @@ 
-469,15 +461,15 @@ func (s *SliceConfigService) resolveTopologyPairs(sliceConfig *v1alpha1.SliceCon } // resolveFullMeshTopology creates bidirectional pairs for all cluster combinations -func (s *SliceConfigService) resolveFullMeshTopology(clusters []string) []GatewayPair { +func (s *SliceConfigService) resolveFullMeshTopology(clusters []string) []util.GatewayPair { if len(clusters) < 2 { - return []GatewayPair{} + return []util.GatewayPair{} } - pairs := make([]GatewayPair, 0, len(clusters)*(len(clusters)-1)/2) + pairs := make([]util.GatewayPair, 0, len(clusters)*(len(clusters)-1)/2) for i := 0; i < len(clusters); i++ { for j := i + 1; j < len(clusters); j++ { - pairs = append(pairs, GatewayPair{ + pairs = append(pairs, util.GatewayPair{ Source: clusters[i], Target: clusters[j], Bidirectional: true, @@ -488,13 +480,13 @@ func (s *SliceConfigService) resolveFullMeshTopology(clusters []string) []Gatewa } // resolveCustomTopology creates pairs based on explicit connectivity matrix -func (s *SliceConfigService) resolveCustomTopology(clusters []string, matrix []v1alpha1.ConnectivityEntry) ([]GatewayPair, error) { +func (s *SliceConfigService) resolveCustomTopology(clusters []string, matrix []v1alpha1.ConnectivityEntry) ([]util.GatewayPair, error) { if len(matrix) == 0 { return nil, fmt.Errorf("custom topology requires connectivity matrix") } clusterSet := s.makeClusterSet(clusters) - pairs := make([]GatewayPair, 0) + pairs := make([]util.GatewayPair, 0) for _, entry := range matrix { if !clusterSet[entry.SourceCluster] { @@ -504,7 +496,7 @@ func (s *SliceConfigService) resolveCustomTopology(clusters []string, matrix []v if !clusterSet[target] { return nil, fmt.Errorf("connectivity entry references unknown target cluster: %s", target) } - pairs = append(pairs, GatewayPair{ + pairs = append(pairs, util.GatewayPair{ Source: entry.SourceCluster, Target: target, Bidirectional: true, @@ -516,7 +508,7 @@ func (s *SliceConfigService) resolveCustomTopology(clusters []string, 
matrix []v } // resolveAutoTopology creates full-mesh and removes forbidden edges -func (s *SliceConfigService) resolveAutoTopology(clusters []string, forbiddenEdges []v1alpha1.ForbiddenEdge) ([]GatewayPair, error) { +func (s *SliceConfigService) resolveAutoTopology(clusters []string, forbiddenEdges []v1alpha1.ForbiddenEdge) ([]util.GatewayPair, error) { // Start with full mesh allPairs := s.resolveFullMeshTopology(clusters) @@ -551,8 +543,8 @@ func (s *SliceConfigService) buildForbiddenSet(forbiddenEdges []v1alpha1.Forbidd } // filterForbiddenPairs removes pairs that are in the forbidden set -func (s *SliceConfigService) filterForbiddenPairs(pairs []GatewayPair, forbidden map[string]bool) []GatewayPair { - filtered := make([]GatewayPair, 0, len(pairs)) +func (s *SliceConfigService) filterForbiddenPairs(pairs []util.GatewayPair, forbidden map[string]bool) []util.GatewayPair { + filtered := make([]util.GatewayPair, 0, len(pairs)) for _, p := range pairs { if !forbidden[s.pairKey(p.Source, p.Target)] { filtered = append(filtered, p) @@ -562,7 +554,7 @@ func (s *SliceConfigService) filterForbiddenPairs(pairs []GatewayPair, forbidden } // ensureConnectivity adds bridge edges if forbidden edges create partitions -func (s *SliceConfigService) ensureConnectivity(clusters []string, pairs []GatewayPair, forbidden map[string]bool) ([]GatewayPair, error) { +func (s *SliceConfigService) ensureConnectivity(clusters []string, pairs []util.GatewayPair, forbidden map[string]bool) ([]util.GatewayPair, error) { graph := s.buildGraph(pairs) components := s.findConnectedComponents(clusters, graph) @@ -585,7 +577,7 @@ func (s *SliceConfigService) ensureConnectivity(clusters []string, pairs []Gatew } // buildGraph creates adjacency list from gateway pairs -func (s *SliceConfigService) buildGraph(pairs []GatewayPair) map[string][]string { +func (s *SliceConfigService) buildGraph(pairs []util.GatewayPair) map[string][]string { graph := make(map[string][]string) for _, p := range pairs { 
graph[p.Source] = append(graph[p.Source], p.Target) @@ -624,8 +616,8 @@ func (s *SliceConfigService) dfsComponent(node string, graph map[string][]string } // findBridgeEdges finds edges to connect partitioned components -func (s *SliceConfigService) findBridgeEdges(components [][]string, forbidden map[string]bool) []GatewayPair { - bridges := make([]GatewayPair, 0) +func (s *SliceConfigService) findBridgeEdges(components [][]string, forbidden map[string]bool) []util.GatewayPair { + bridges := make([]util.GatewayPair, 0) // Connect each component to the next for i := 0; i < len(components); i++ { @@ -638,7 +630,7 @@ func (s *SliceConfigService) findBridgeEdges(components [][]string, forbidden ma for _, nj := range components[j] { key := s.pairKey(ni, nj) if !forbidden[key] { - bridges = append(bridges, GatewayPair{ + bridges = append(bridges, util.GatewayPair{ Source: ni, Target: nj, Bidirectional: true, diff --git a/service/slice_config_service_test.go b/service/slice_config_service_test.go index 9145632b5..0a2e08522 100644 --- a/service/slice_config_service_test.go +++ b/service/slice_config_service_test.go @@ -750,7 +750,7 @@ func TestResolveTopologyPairs_DefaultFullMesh(t *testing.T) { require.NoError(t, err) require.Len(t, pairs, 3) - expectedPairs := []GatewayPair{ + expectedPairs := []util.GatewayPair{ {Source: "cluster1", Target: "cluster2", Bidirectional: true}, {Source: "cluster1", Target: "cluster3", Bidirectional: true}, {Source: "cluster2", Target: "cluster3", Bidirectional: true}, @@ -796,7 +796,7 @@ func TestResolveTopologyPairs_CustomTopology(t *testing.T) { require.NoError(t, err) require.Len(t, pairs, 2) - expectedPairs := []GatewayPair{ + expectedPairs := []util.GatewayPair{ {Source: "cluster1", Target: "cluster2", Bidirectional: true}, {Source: "cluster2", Target: "cluster3", Bidirectional: true}, } @@ -839,7 +839,7 @@ func TestResolveTopologyPairs_AutoTopology(t *testing.T) { require.NoError(t, err) require.Len(t, pairs, 2) - expectedPairs := 
[]GatewayPair{ + expectedPairs := []util.GatewayPair{ {Source: "cluster1", Target: "cluster2", Bidirectional: true}, {Source: "cluster2", Target: "cluster3", Bidirectional: true}, } @@ -901,7 +901,7 @@ func TestResolveFullMeshTopology(t *testing.T) { pairs := service.resolveFullMeshTopology([]string{"cluster1", "cluster2", "cluster3"}) require.Len(t, pairs, 3) - expectedPairs := []GatewayPair{ + expectedPairs := []util.GatewayPair{ {Source: "cluster1", Target: "cluster2", Bidirectional: true}, {Source: "cluster1", Target: "cluster3", Bidirectional: true}, {Source: "cluster2", Target: "cluster3", Bidirectional: true}, @@ -928,7 +928,7 @@ func TestResolveCustomTopology(t *testing.T) { require.NoError(t, err) require.Len(t, pairs, 2) - expectedPairs := []GatewayPair{ + expectedPairs := []util.GatewayPair{ {Source: "cluster1", Target: "cluster2", Bidirectional: true}, {Source: "cluster1", Target: "cluster3", Bidirectional: true}, } @@ -949,7 +949,7 @@ func TestResolveCustomTopology(t *testing.T) { {SourceCluster: "invalid", TargetClusters: []string{"cluster2"}}, } - pairs, err := service.resolveCustomTopology(clusters, matrix) + _, err := service.resolveCustomTopology(clusters, matrix) require.Error(t, err) require.Contains(t, err.Error(), "unknown source cluster: invalid") }) @@ -959,7 +959,7 @@ func TestResolveCustomTopology(t *testing.T) { {SourceCluster: "cluster1", TargetClusters: []string{"invalid"}}, } - pairs, err := service.resolveCustomTopology(clusters, matrix) + _, err := service.resolveCustomTopology(clusters, matrix) require.Error(t, err) require.Contains(t, err.Error(), "unknown target cluster: invalid") }) @@ -986,7 +986,7 @@ func TestResolveAutoTopology(t *testing.T) { require.NoError(t, err) require.Len(t, pairs, 3) - expectedPairs := []GatewayPair{ + expectedPairs := []util.GatewayPair{ {Source: "cluster1", Target: "cluster2", Bidirectional: true}, {Source: "cluster1", Target: "cluster3", Bidirectional: true}, {Source: "cluster2", Target: "cluster3", 
Bidirectional: true}, @@ -1003,7 +1003,7 @@ func TestResolveAutoTopology(t *testing.T) { require.NoError(t, err) require.Len(t, pairs, 2) - expectedPairs := []GatewayPair{ + expectedPairs := []util.GatewayPair{ {Source: "cluster1", Target: "cluster2", Bidirectional: true}, {Source: "cluster2", Target: "cluster3", Bidirectional: true}, } @@ -1016,7 +1016,7 @@ func TestResolveAutoTopology(t *testing.T) { {SourceCluster: "cluster2", TargetClusters: []string{"cluster3"}}, } - pairs, err := service.resolveAutoTopology(clusters, forbiddenEdges) + _, err := service.resolveAutoTopology(clusters, forbiddenEdges) require.Error(t, err) require.Contains(t, err.Error(), "partitioned topology with no safe bridge edges available") }) @@ -1070,7 +1070,7 @@ func TestFilterForbiddenPairs(t *testing.T) { service := &SliceConfigService{} t.Run("FilterSomePairs", func(t *testing.T) { - pairs := []GatewayPair{ + pairs := []util.GatewayPair{ {Source: "cluster1", Target: "cluster2", Bidirectional: true}, {Source: "cluster1", Target: "cluster3", Bidirectional: true}, {Source: "cluster2", Target: "cluster3", Bidirectional: true}, @@ -1083,7 +1083,7 @@ func TestFilterForbiddenPairs(t *testing.T) { filtered := service.filterForbiddenPairs(pairs, forbidden) require.Len(t, filtered, 2) - expectedPairs := []GatewayPair{ + expectedPairs := []util.GatewayPair{ {Source: "cluster1", Target: "cluster2", Bidirectional: true}, {Source: "cluster2", Target: "cluster3", Bidirectional: true}, } @@ -1091,7 +1091,7 @@ func TestFilterForbiddenPairs(t *testing.T) { }) t.Run("NoForbiddenPairs", func(t *testing.T) { - pairs := []GatewayPair{ + pairs := []util.GatewayPair{ {Source: "cluster1", Target: "cluster2", Bidirectional: true}, } @@ -1102,7 +1102,7 @@ func TestFilterForbiddenPairs(t *testing.T) { }) t.Run("AllPairsForbidden", func(t *testing.T) { - pairs := []GatewayPair{ + pairs := []util.GatewayPair{ {Source: "cluster1", Target: "cluster2", Bidirectional: true}, } @@ -1138,7 +1138,7 @@ func 
TestEnsureConnectivity(t *testing.T) { t.Run("AlreadyConnected", func(t *testing.T) { clusters := []string{"cluster1", "cluster2", "cluster3"} - pairs := []GatewayPair{ + pairs := []util.GatewayPair{ {Source: "cluster1", Target: "cluster2", Bidirectional: true}, {Source: "cluster2", Target: "cluster3", Bidirectional: true}, } @@ -1151,7 +1151,7 @@ func TestEnsureConnectivity(t *testing.T) { t.Run("DisconnectedWithBridge", func(t *testing.T) { clusters := []string{"cluster1", "cluster2", "cluster3"} - pairs := []GatewayPair{ + pairs := []util.GatewayPair{ {Source: "cluster1", Target: "cluster2", Bidirectional: true}, } forbidden := map[string]bool{ @@ -1161,12 +1161,12 @@ func TestEnsureConnectivity(t *testing.T) { result, err := service.ensureConnectivity(clusters, pairs, forbidden) require.NoError(t, err) require.Len(t, result, 2) - require.Contains(t, result, GatewayPair{Source: "cluster2", Target: "cluster3", Bidirectional: true}) + require.Contains(t, result, util.GatewayPair{Source: "cluster2", Target: "cluster3", Bidirectional: true}) }) t.Run("CompletelyDisconnected", func(t *testing.T) { clusters := []string{"cluster1", "cluster2", "cluster3"} - pairs := []GatewayPair{} + pairs := []util.GatewayPair{} forbidden := map[string]bool{ "cluster1-cluster2": true, "cluster1-cluster3": true, diff --git a/service/worker_slice_gateway_service.go b/service/worker_slice_gateway_service.go index cc1e2fce3..b1392a10c 100644 --- a/service/worker_slice_gateway_service.go +++ b/service/worker_slice_gateway_service.go @@ -47,7 +47,7 @@ type IWorkerSliceGatewayService interface { ReconcileWorkerSliceGateways(ctx context.Context, req ctrl.Request) (ctrl.Result, error) CreateMinimumWorkerSliceGateways(ctx context.Context, sliceName string, clusterNames []string, namespace string, label map[string]string, clusterMap map[string]int, sliceSubnet string, clusterCidr string, - sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType, gatewayPairs []GatewayPair) 
(ctrl.Result, error) + sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType, gatewayPairs []util.GatewayPair) (ctrl.Result, error) ListWorkerSliceGateways(ctx context.Context, ownerLabel map[string]string, namespace string) ([]v1alpha1.WorkerSliceGateway, error) DeleteWorkerSliceGatewaysByLabel(ctx context.Context, label map[string]string, namespace string) error NodeIpReconciliationOfWorkerSliceGateways(ctx context.Context, cluster *controllerv1alpha1.Cluster, namespace string) error @@ -349,7 +349,7 @@ type IndividualCertPairRequest struct { // CreateMinimumWorkerSliceGateways is a function to create gateways with minimum specification func (s *WorkerSliceGatewayService) CreateMinimumWorkerSliceGateways(ctx context.Context, sliceName string, clusterNames []string, namespace string, label map[string]string, clusterMap map[string]int, - sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType, gatewayPairs []GatewayPair) (ctrl.Result, error) { + sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType, gatewayPairs []util.GatewayPair) (ctrl.Result, error) { err := s.cleanupObsoleteGateways(ctx, namespace, label, clusterNames, clusterMap) if err != nil { @@ -439,7 +439,7 @@ func (s *WorkerSliceGatewayService) cleanupObsoleteGateways(ctx context.Context, // createMinimumGatewaysIfNotExists is a helper function to create the gateways between worker clusters if not exists func (s *WorkerSliceGatewayService) createMinimumGatewaysIfNotExists(ctx context.Context, sliceName string, clusterNames []string, namespace string, ownerLabel map[string]string, clusterMap map[string]int, - sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType, gatewayPairs []GatewayPair) (ctrl.Result, error) { + sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap 
map[string]*controllerv1alpha1.SliceGatewayServiceType, gatewayPairs []util.GatewayPair) (ctrl.Result, error) { logger := util.CtxLogger(ctx) // Build cluster mapping diff --git a/service/worker_slice_gateway_service_test.go b/service/worker_slice_gateway_service_test.go index 9b27dd97d..af024b103 100644 --- a/service/worker_slice_gateway_service_test.go +++ b/service/worker_slice_gateway_service_test.go @@ -320,7 +320,7 @@ func testCreateMinimumWorkerSliceGatewaysAlreadyExists(t *testing.T) { //environment := make(map[string]string, 5) //jobMock.On("CreateJob", ctx, requestObj.Namespace, "image", environment).Return(ctrl.Result{}, nil).Once() - result, err := workerSliceGatewayService.CreateMinimumWorkerSliceGateways(ctx, "red", clusterNames, requestObj.Namespace, label, clusterMap, "10.10.10.10/16", "/16", nil) + result, err := workerSliceGatewayService.CreateMinimumWorkerSliceGateways(ctx, "red", clusterNames, requestObj.Namespace, label, clusterMap, "10.10.10.10/16", "/16", nil, nil) expectedResult := ctrl.Result{} require.NoError(t, nil) require.Equal(t, result, expectedResult) @@ -407,7 +407,7 @@ func testCreateMinimumWorkerSliceGatewaysNotExists(t *testing.T) { clientMock.On("Update", ctx, mock.AnythingOfType("*v1.Event")).Return(nil).Once() clientMock.On("Get", ctx, mock.Anything, mock.Anything).Return(nil).Once() mMock.On("RecordCounterMetric", mock.Anything, mock.Anything).Return().Once() - result, err := workerSliceGatewayService.CreateMinimumWorkerSliceGateways(ctx, "red", clusterNames, requestObj.Namespace, label, clusterMap, "10.10.10.10/16", "/16", nil) + result, err := workerSliceGatewayService.CreateMinimumWorkerSliceGateways(ctx, "red", clusterNames, requestObj.Namespace, label, clusterMap, "10.10.10.10/16", "/16", nil, nil) expectedResult := ctrl.Result{} require.NoError(t, nil) require.Equal(t, result, expectedResult) diff --git a/util/common.go b/util/common.go index 72c2f72b2..7596db805 100644 --- a/util/common.go +++ b/util/common.go @@ -38,6 
+38,12 @@ type WorkerSliceGatewayNetworkAddresses struct { ClientVpnAddress string } +type GatewayPair struct { + Source string + Target string + Bidirectional bool +} + // AppendHyphenToString is a function add hyphen at the end of string func AppendHyphenToString(stringToAppend string) string { if strings.HasSuffix(stringToAppend, "-") { From 114578894d84ec3e7c2745ca954c4197868620a1 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Mon, 10 Nov 2025 07:18:54 +0000 Subject: [PATCH 19/34] refactor: rename topology from auto to restricted Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- apis/controller/v1alpha1/sliceconfig_types.go | 8 ++++---- ...o-secure.yaml => topology-restricted-secure.yaml} | 4 ++-- service/slice_config_service.go | 8 ++++---- service/slice_config_service_test.go | 12 ++++++------ service/slice_config_webhook_validation.go | 8 ++++---- service/slice_config_webhook_validation_test.go | 6 +++--- 6 files changed, 23 insertions(+), 23 deletions(-) rename config/samples/{topology-auto-secure.yaml => topology-restricted-secure.yaml} (83%) diff --git a/apis/controller/v1alpha1/sliceconfig_types.go b/apis/controller/v1alpha1/sliceconfig_types.go index a44ef9edf..a88a2ad4a 100644 --- a/apis/controller/v1alpha1/sliceconfig_types.go +++ b/apis/controller/v1alpha1/sliceconfig_types.go @@ -176,13 +176,13 @@ type VPNConfiguration struct { Cipher string `json:"cipher"` } -// +kubebuilder:validation:Enum:=auto;full-mesh;custom +// +kubebuilder:validation:Enum:=restricted;full-mesh;custom type TopologyType string const ( - TopologyAuto TopologyType = "auto" - TopologyFullMesh TopologyType = "full-mesh" - TopologyCustom TopologyType = "custom" + TopologyRestricted TopologyType = "restricted" + TopologyFullMesh TopologyType = "full-mesh" + TopologyCustom TopologyType = "custom" ) type TopologyConfig struct { diff --git a/config/samples/topology-auto-secure.yaml 
b/config/samples/topology-restricted-secure.yaml similarity index 83% rename from config/samples/topology-auto-secure.yaml rename to config/samples/topology-restricted-secure.yaml index 086b14fa6..77c0d44fc 100644 --- a/config/samples/topology-auto-secure.yaml +++ b/config/samples/topology-restricted-secure.yaml @@ -1,13 +1,13 @@ apiVersion: controller.kubeslice.io/v1alpha1 kind: SliceConfig metadata: - name: demo-auto-secure + name: demo-restricted-secure namespace: kubeslice-avesha spec: sliceSubnet: "10.2.0.0/16" clusters: ["dmz", "gateway", "internal", "analytics"] topologyConfig: - topologyType: auto + topologyType: restricted forbiddenEdges: - sourceCluster: gateway targetClusters: ["dmz", "analytics"] \ No newline at end of file diff --git a/service/slice_config_service.go b/service/slice_config_service.go index 363465a7b..6bf813475 100644 --- a/service/slice_config_service.go +++ b/service/slice_config_service.go @@ -453,8 +453,8 @@ func (s *SliceConfigService) resolveTopologyPairs(sliceConfig *v1alpha1.SliceCon return s.resolveFullMeshTopology(clusters), nil case v1alpha1.TopologyCustom: return s.resolveCustomTopology(clusters, sliceConfig.Spec.TopologyConfig.ConnectivityMatrix) - case v1alpha1.TopologyAuto: - return s.resolveAutoTopology(clusters, sliceConfig.Spec.TopologyConfig.ForbiddenEdges) + case v1alpha1.TopologyRestricted: + return s.resolveRestrictedTopology(clusters, sliceConfig.Spec.TopologyConfig.ForbiddenEdges) default: return nil, fmt.Errorf("unknown topology type: %s", sliceConfig.Spec.TopologyConfig.TopologyType) } @@ -507,8 +507,8 @@ func (s *SliceConfigService) resolveCustomTopology(clusters []string, matrix []v return pairs, nil } -// resolveAutoTopology creates full-mesh and removes forbidden edges -func (s *SliceConfigService) resolveAutoTopology(clusters []string, forbiddenEdges []v1alpha1.ForbiddenEdge) ([]util.GatewayPair, error) { +// resolveRestrictedTopology creates full-mesh and removes forbidden edges +func (s 
*SliceConfigService) resolveRestrictedTopology(clusters []string, forbiddenEdges []v1alpha1.ForbiddenEdge) ([]util.GatewayPair, error) { // Start with full mesh allPairs := s.resolveFullMeshTopology(clusters) diff --git a/service/slice_config_service_test.go b/service/slice_config_service_test.go index 0a2e08522..606a07bbc 100644 --- a/service/slice_config_service_test.go +++ b/service/slice_config_service_test.go @@ -827,7 +827,7 @@ func TestResolveTopologyPairs_AutoTopology(t *testing.T) { Spec: controllerv1alpha1.SliceConfigSpec{ Clusters: []string{"cluster1", "cluster2", "cluster3"}, TopologyConfig: &controllerv1alpha1.TopologyConfig{ - TopologyType: controllerv1alpha1.TopologyAuto, + TopologyType: controllerv1alpha1.TopologyRestricted, ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{ {SourceCluster: "cluster1", TargetClusters: []string{"cluster3"}}, }, @@ -852,7 +852,7 @@ func TestResolveTopologyPairs_AutoTopologyNoForbidden(t *testing.T) { Spec: controllerv1alpha1.SliceConfigSpec{ Clusters: []string{"cluster1", "cluster2"}, TopologyConfig: &controllerv1alpha1.TopologyConfig{ - TopologyType: controllerv1alpha1.TopologyAuto, + TopologyType: controllerv1alpha1.TopologyRestricted, }, }, } @@ -982,7 +982,7 @@ func TestResolveAutoTopology(t *testing.T) { clusters := []string{"cluster1", "cluster2", "cluster3"} t.Run("NoForbiddenEdges", func(t *testing.T) { - pairs, err := service.resolveAutoTopology(clusters, []controllerv1alpha1.ForbiddenEdge{}) + pairs, err := service.resolveRestrictedTopology(clusters, []controllerv1alpha1.ForbiddenEdge{}) require.NoError(t, err) require.Len(t, pairs, 3) @@ -999,7 +999,7 @@ func TestResolveAutoTopology(t *testing.T) { {SourceCluster: "cluster1", TargetClusters: []string{"cluster3"}}, } - pairs, err := service.resolveAutoTopology(clusters, forbiddenEdges) + pairs, err := service.resolveRestrictedTopology(clusters, forbiddenEdges) require.NoError(t, err) require.Len(t, pairs, 2) @@ -1016,7 +1016,7 @@ func 
TestResolveAutoTopology(t *testing.T) { {SourceCluster: "cluster2", TargetClusters: []string{"cluster3"}}, } - _, err := service.resolveAutoTopology(clusters, forbiddenEdges) + _, err := service.resolveRestrictedTopology(clusters, forbiddenEdges) require.Error(t, err) require.Contains(t, err.Error(), "partitioned topology with no safe bridge edges available") }) @@ -1028,7 +1028,7 @@ func TestResolveAutoTopology(t *testing.T) { {SourceCluster: "cluster2", TargetClusters: []string{"cluster4"}}, } - pairs, err := service.resolveAutoTopology(clusters4, forbiddenEdges) + pairs, err := service.resolveRestrictedTopology(clusters4, forbiddenEdges) require.NoError(t, err) require.GreaterOrEqual(t, len(pairs), 3) }) diff --git a/service/slice_config_webhook_validation.go b/service/slice_config_webhook_validation.go index 1f29ebc3d..a2fa91208 100644 --- a/service/slice_config_webhook_validation.go +++ b/service/slice_config_webhook_validation.go @@ -696,13 +696,13 @@ func validateTopologyConfig(topology *controllerv1alpha1.TopologyConfig, cluster if err := validateCustomTopology(topology.ConnectivityMatrix, clusterSet, topologyPath); err != nil { return err } - case controllerv1alpha1.TopologyAuto: - if err := validateAutoTopology(topology, clusterSet, topologyPath); err != nil { + case controllerv1alpha1.TopologyRestricted: + if err := validateRestrictedTopology(topology, clusterSet, topologyPath); err != nil { return err } case controllerv1alpha1.TopologyFullMesh, "": default: - return field.Invalid(topologyPath.Child("topologyType"), topology.TopologyType, "must be one of: auto, full-mesh, custom") + return field.Invalid(topologyPath.Child("topologyType"), topology.TopologyType, "must be one of: restricted, full-mesh, custom") } return validateForbiddenEdges(topology.ForbiddenEdges, clusterSet, topologyPath) } @@ -726,7 +726,7 @@ func validateCustomTopology(matrix []controllerv1alpha1.ConnectivityEntry, clust return nil } -func validateAutoTopology(topology 
*controllerv1alpha1.TopologyConfig, clusterSet map[string]struct{}, basePath *field.Path) *field.Error { +func validateRestrictedTopology(topology *controllerv1alpha1.TopologyConfig, clusterSet map[string]struct{}, basePath *field.Path) *field.Error { // Soft removal: ignore autoOptions fields entirely at webhook layer // Controller will not act on these fields and webhook will not validate them return nil diff --git a/service/slice_config_webhook_validation_test.go b/service/slice_config_webhook_validation_test.go index cc1d94aff..45c0357eb 100644 --- a/service/slice_config_webhook_validation_test.go +++ b/service/slice_config_webhook_validation_test.go @@ -2368,7 +2368,7 @@ func TestValidateTopologyConfig_InvalidClusterInMatrix(t *testing.T) { func TestValidateTopologyConfig_AutoWithOptions(t *testing.T) { topology := &controllerv1alpha1.TopologyConfig{ - TopologyType: controllerv1alpha1.TopologyAuto, + TopologyType: controllerv1alpha1.TopologyRestricted, } clusters := []string{"c1", "c2", "c3"} @@ -2378,7 +2378,7 @@ func TestValidateTopologyConfig_AutoWithOptions(t *testing.T) { func TestValidateTopologyConfig_AutoInvalidThreshold(t *testing.T) { topology := &controllerv1alpha1.TopologyConfig{ - TopologyType: controllerv1alpha1.TopologyAuto, + TopologyType: controllerv1alpha1.TopologyRestricted, } clusters := []string{"c1", "c2"} @@ -2388,7 +2388,7 @@ func TestValidateTopologyConfig_AutoInvalidThreshold(t *testing.T) { func TestValidateTopologyConfig_InvalidForbiddenEdge(t *testing.T) { topology := &controllerv1alpha1.TopologyConfig{ - TopologyType: controllerv1alpha1.TopologyAuto, + TopologyType: controllerv1alpha1.TopologyRestricted, ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{ {SourceCluster: "invalid", TargetClusters: []string{"c1"}}, }, From f03da5686495b0b51c1a38d8d07537d53f311b1c Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Mon, 10 Nov 2025 07:19:19 +0000 Subject: [PATCH 20/34] fix: resolve util.Client compilation error 
Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- service/access_control_service_test.go | 6 +++--- service/namespace_service_test.go | 6 +++--- service/project_service_test.go | 7 ++++--- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/service/access_control_service_test.go b/service/access_control_service_test.go index 996350357..801c93f7e 100644 --- a/service/access_control_service_test.go +++ b/service/access_control_service_test.go @@ -1270,19 +1270,19 @@ func ACS_ReconcileWorkerClusterServiceAccountAndRoleBindings(t *testing.T) { mMock.AssertExpectations(t) } -func prepareACSTestContext(ctx context.Context, client util.Client, +func prepareACSTestContext(ctx context.Context, c client.Client, scheme *runtime.Scheme) context.Context { if scheme == nil { scheme = runtime.NewScheme() } controllerv1alpha1.AddToScheme(scheme) rbacv1.AddToScheme(scheme) - eventRecorder := events.NewEventRecorder(client, scheme, ossEvents.EventsMap, events.EventRecorderOptions{ + eventRecorder := events.NewEventRecorder(c, scheme, ossEvents.EventsMap, events.EventRecorderOptions{ Version: "v1alpha1", Cluster: util.ClusterController, Component: util.ComponentController, Slice: util.NotApplicable, }) - preparedCtx := util.PrepareKubeSliceControllersRequestContext(ctx, client, scheme, "ProjectTestController", &eventRecorder) + preparedCtx := util.PrepareKubeSliceControllersRequestContext(ctx, c, scheme, "ProjectTestController", &eventRecorder) return preparedCtx } diff --git a/service/namespace_service_test.go b/service/namespace_service_test.go index 42a15a513..7368d06c0 100644 --- a/service/namespace_service_test.go +++ b/service/namespace_service_test.go @@ -177,13 +177,13 @@ func TestDeleteNamespace_DoesNothingIfNamespaceDoNotExist(t *testing.T) { mMock.AssertExpectations(t) } -func prepareNamespaceTestContext(ctx context.Context, client util.Client, scheme *runtime.Scheme) 
context.Context { - eventRecorder := events.NewEventRecorder(client, scheme, ossEvents.EventsMap, events.EventRecorderOptions{ +func prepareNamespaceTestContext(ctx context.Context, c client.Client, scheme *runtime.Scheme) context.Context { + eventRecorder := events.NewEventRecorder(c, scheme, ossEvents.EventsMap, events.EventRecorderOptions{ Version: "v1alpha1", Cluster: util.ClusterController, Component: util.ComponentController, Slice: util.NotApplicable, }) - preparedCtx := util.PrepareKubeSliceControllersRequestContext(ctx, client, scheme, "NamespaceTestController", &eventRecorder) + preparedCtx := util.PrepareKubeSliceControllersRequestContext(ctx, c, scheme, "NamespaceTestController", &eventRecorder) return preparedCtx } diff --git a/service/project_service_test.go b/service/project_service_test.go index 21398c6cb..15322c7d7 100644 --- a/service/project_service_test.go +++ b/service/project_service_test.go @@ -38,6 +38,7 @@ import ( k8sError "k8s.io/apimachinery/pkg/api/errors" k8sapimachinery "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" @@ -276,14 +277,14 @@ func setupProjectTest(name string, namespace string) (*mocks.INamespaceService, return nsServiceMock, acsServicemOCK, projectService, requestObj, clientMock, project, ctx, clusterServiceMock, sliceConfigServiceMock, serviceExportConfigServiceMock, sliceQoSConfigServiceMock, mMock } -func prepareProjectTestContext(ctx context.Context, client util.Client, +func prepareProjectTestContext(ctx context.Context, c client.Client, scheme *runtime.Scheme) context.Context { - eventRecorder := events.NewEventRecorder(client, scheme, ossEvents.EventsMap, events.EventRecorderOptions{ + eventRecorder := events.NewEventRecorder(c, scheme, ossEvents.EventsMap, events.EventRecorderOptions{ Version: "v1alpha1", Cluster: 
util.ClusterController, Component: util.ComponentController, Slice: util.NotApplicable, }) - preparedCtx := util.PrepareKubeSliceControllersRequestContext(ctx, client, scheme, "ProjectTestController", &eventRecorder) + preparedCtx := util.PrepareKubeSliceControllersRequestContext(ctx, c, scheme, "ProjectTestController", &eventRecorder) return preparedCtx } From 5fa703e2a7b3572a513287b8f8a2b9b9cf3a6c46 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Wed, 12 Nov 2025 12:04:39 +0000 Subject: [PATCH 21/34] Update sliceconfigs CRD Co-authored-by: Qwen-Coder Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml index 56b43932e..388914f0f 100644 --- a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml +++ b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml @@ -261,11 +261,12 @@ spec: topologyType: default: full-mesh enum: - - auto + - restricted - full-mesh - custom type: string type: object + vpnConfig: description: VPNConfiguration defines the additional (optional) VPN Configuration to customise From b042c172ec8375f3934f0f1d2604a5d415005b6a Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Thu, 13 Nov 2025 04:59:59 +0000 Subject: [PATCH 22/34] gitignore fixed Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- .gitignore | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.gitignore b/.gitignore index b65eaf9e7..b725d7add 100644 --- a/.gitignore +++ b/.gitignore @@ -33,7 +33,3 @@ work cover.out coverage.out .vscode -*.md -demo/ -scripts/ -tests/ From 27c2121a2ad21f1487ffc7c91b9bbdfcf27d8674 Mon Sep 17 00:00:00 
2001 From: Transcendental-Programmer Date: Thu, 13 Nov 2025 15:14:32 +0000 Subject: [PATCH 23/34] test(topology): rename auto tests to restricted, remove partition healing tests - Rename TestResolveTopologyPairs_AutoTopology to TestResolveTopologyPairs_RestrictedTopology - Rename TestResolveTopologyPairs_AutoTopologyNoForbidden to TestResolveTopologyPairs_RestrictedTopologyNoForbidden - Rename TestResolveAutoTopology to TestResolveRestrictedTopology - Remove PartitionedTopology and FourClustersWithBridge subtests that relied on removed partition healing logic These renames align test names with the TopologyRestricted type and remove tests that validated the partition healing feature which was intentionally removed to simplify the topology resolution logic. Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- service/slice_config_service_test.go | 76 ++-------------------------- 1 file changed, 3 insertions(+), 73 deletions(-) diff --git a/service/slice_config_service_test.go b/service/slice_config_service_test.go index 606a07bbc..7a6905156 100644 --- a/service/slice_config_service_test.go +++ b/service/slice_config_service_test.go @@ -821,7 +821,7 @@ func TestResolveTopologyPairs_CustomTopologyEmptyMatrix(t *testing.T) { require.Nil(t, pairs) } -func TestResolveTopologyPairs_AutoTopology(t *testing.T) { +func TestResolveTopologyPairs_RestrictedTopology(t *testing.T) { service := &SliceConfigService{} sliceConfig := &controllerv1alpha1.SliceConfig{ Spec: controllerv1alpha1.SliceConfigSpec{ @@ -846,7 +846,7 @@ func TestResolveTopologyPairs_AutoTopology(t *testing.T) { require.ElementsMatch(t, expectedPairs, pairs) } -func TestResolveTopologyPairs_AutoTopologyNoForbidden(t *testing.T) { +func TestResolveTopologyPairs_RestrictedTopologyNoForbidden(t *testing.T) { service := &SliceConfigService{} sliceConfig := &controllerv1alpha1.SliceConfig{ Spec: controllerv1alpha1.SliceConfigSpec{ 
@@ -977,7 +977,7 @@ func TestResolveCustomTopology(t *testing.T) { }) } -func TestResolveAutoTopology(t *testing.T) { +func TestResolveRestrictedTopology(t *testing.T) { service := &SliceConfigService{} clusters := []string{"cluster1", "cluster2", "cluster3"} @@ -1009,29 +1009,6 @@ func TestResolveAutoTopology(t *testing.T) { } require.ElementsMatch(t, expectedPairs, pairs) }) - - t.Run("PartitionedTopology", func(t *testing.T) { - forbiddenEdges := []controllerv1alpha1.ForbiddenEdge{ - {SourceCluster: "cluster1", TargetClusters: []string{"cluster2", "cluster3"}}, - {SourceCluster: "cluster2", TargetClusters: []string{"cluster3"}}, - } - - _, err := service.resolveRestrictedTopology(clusters, forbiddenEdges) - require.Error(t, err) - require.Contains(t, err.Error(), "partitioned topology with no safe bridge edges available") - }) - - t.Run("FourClustersWithBridge", func(t *testing.T) { - clusters4 := []string{"cluster1", "cluster2", "cluster3", "cluster4"} - forbiddenEdges := []controllerv1alpha1.ForbiddenEdge{ - {SourceCluster: "cluster1", TargetClusters: []string{"cluster3", "cluster4"}}, - {SourceCluster: "cluster2", TargetClusters: []string{"cluster4"}}, - } - - pairs, err := service.resolveRestrictedTopology(clusters4, forbiddenEdges) - require.NoError(t, err) - require.GreaterOrEqual(t, len(pairs), 3) - }) } func TestBuildForbiddenSet(t *testing.T) { @@ -1133,53 +1110,6 @@ func TestPairKey(t *testing.T) { }) } -func TestEnsureConnectivity(t *testing.T) { - service := &SliceConfigService{} - - t.Run("AlreadyConnected", func(t *testing.T) { - clusters := []string{"cluster1", "cluster2", "cluster3"} - pairs := []util.GatewayPair{ - {Source: "cluster1", Target: "cluster2", Bidirectional: true}, - {Source: "cluster2", Target: "cluster3", Bidirectional: true}, - } - forbidden := map[string]bool{} - - result, err := service.ensureConnectivity(clusters, pairs, forbidden) - require.NoError(t, err) - require.Equal(t, pairs, result) - }) - - 
t.Run("DisconnectedWithBridge", func(t *testing.T) { - clusters := []string{"cluster1", "cluster2", "cluster3"} - pairs := []util.GatewayPair{ - {Source: "cluster1", Target: "cluster2", Bidirectional: true}, - } - forbidden := map[string]bool{ - "cluster1-cluster3": true, - } - - result, err := service.ensureConnectivity(clusters, pairs, forbidden) - require.NoError(t, err) - require.Len(t, result, 2) - require.Contains(t, result, util.GatewayPair{Source: "cluster2", Target: "cluster3", Bidirectional: true}) - }) - - t.Run("CompletelyDisconnected", func(t *testing.T) { - clusters := []string{"cluster1", "cluster2", "cluster3"} - pairs := []util.GatewayPair{} - forbidden := map[string]bool{ - "cluster1-cluster2": true, - "cluster1-cluster3": true, - "cluster2-cluster3": true, - } - - result, err := service.ensureConnectivity(clusters, pairs, forbidden) - require.Error(t, err) - require.Contains(t, err.Error(), "partitioned topology with no safe bridge edges available") - require.Nil(t, result) - }) -} - func TestMakeClusterSet(t *testing.T) { service := &SliceConfigService{} From b8fed54e7c51f14e80a90ca8ade40621068adb18 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Thu, 13 Nov 2025 15:21:56 +0000 Subject: [PATCH 24/34] remove partition-healing code Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- service/slice_config_service.go | 101 +------------------------------- 1 file changed, 1 insertion(+), 100 deletions(-) diff --git a/service/slice_config_service.go b/service/slice_config_service.go index 6bf813475..55e05cf82 100644 --- a/service/slice_config_service.go +++ b/service/slice_config_service.go @@ -522,13 +522,7 @@ func (s *SliceConfigService) resolveRestrictedTopology(clusters []string, forbid // Filter out forbidden pairs filtered := s.filterForbiddenPairs(allPairs, forbidden) - // Ensure connectivity (add bridge edges if partitioned) - preservedPairs, 
err := s.ensureConnectivity(clusters, filtered, forbidden) - if err != nil { - return nil, err - } - - return preservedPairs, nil + return filtered, nil } // buildForbiddenSet creates a map of forbidden edges @@ -553,99 +547,6 @@ func (s *SliceConfigService) filterForbiddenPairs(pairs []util.GatewayPair, forb return filtered } -// ensureConnectivity adds bridge edges if forbidden edges create partitions -func (s *SliceConfigService) ensureConnectivity(clusters []string, pairs []util.GatewayPair, forbidden map[string]bool) ([]util.GatewayPair, error) { - graph := s.buildGraph(pairs) - components := s.findConnectedComponents(clusters, graph) - - if len(components) <= 1 { - return pairs, nil - } - - // Find bridge edges between components - bridgeEdges := s.findBridgeEdges(components, forbidden) - if len(bridgeEdges) == 0 { - return nil, fmt.Errorf("forbidden edges create partitioned topology with no safe bridge edges available") - } - - // Add bridge edges to reconnect components - for _, bridge := range bridgeEdges { - pairs = append(pairs, bridge) - } - - return pairs, nil -} - -// buildGraph creates adjacency list from gateway pairs -func (s *SliceConfigService) buildGraph(pairs []util.GatewayPair) map[string][]string { - graph := make(map[string][]string) - for _, p := range pairs { - graph[p.Source] = append(graph[p.Source], p.Target) - graph[p.Target] = append(graph[p.Target], p.Source) - } - return graph -} - -// findConnectedComponents uses DFS to find all connected components -func (s *SliceConfigService) findConnectedComponents(clusters []string, graph map[string][]string) [][]string { - visited := make(map[string]bool) - components := make([][]string, 0) - - for _, cluster := range clusters { - if !visited[cluster] { - component := s.dfsComponent(cluster, graph, visited) - components = append(components, component) - } - } - - return components -} - -// dfsComponent performs depth-first search to find a component -func (s *SliceConfigService) 
dfsComponent(node string, graph map[string][]string, visited map[string]bool) []string { - visited[node] = true - component := []string{node} - - for _, neighbor := range graph[node] { - if !visited[neighbor] { - component = append(component, s.dfsComponent(neighbor, graph, visited)...) - } - } - - return component -} - -// findBridgeEdges finds edges to connect partitioned components -func (s *SliceConfigService) findBridgeEdges(components [][]string, forbidden map[string]bool) []util.GatewayPair { - bridges := make([]util.GatewayPair, 0) - - // Connect each component to the next - for i := 0; i < len(components); i++ { - for j := i + 1; j < len(components); j++ { - added := false - for _, ni := range components[i] { - if added { - break - } - for _, nj := range components[j] { - key := s.pairKey(ni, nj) - if !forbidden[key] { - bridges = append(bridges, util.GatewayPair{ - Source: ni, - Target: nj, - Bidirectional: true, - }) - added = true - break - } - } - } - } - } - - return bridges -} - // makeClusterSet creates a set from cluster list func (s *SliceConfigService) makeClusterSet(clusters []string) map[string]bool { set := make(map[string]bool, len(clusters)) From d3aa18ebf51140461a27c5dac38efc72d2f7c814 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Thu, 13 Nov 2025 16:08:24 +0000 Subject: [PATCH 25/34] feat(topology): remove bidirectional field and implement isolated cluster detection MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changes: - Remove Bidirectional field from util.GatewayPair struct - Update resolveFullMeshTopology to create all directional pairs (n*(n-1) pairs for n clusters) - Update resolveCustomTopology to remove Bidirectional initialization - Update buildForbiddenSet and filterForbiddenPairs to use directional keys - Implement validateRestrictedTopology with O(n²) BFS connectivity check - Detect isolated clusters and reject topologies with unreachable clusters - Add 
buildForbiddenSetStatic helper for webhook validation (O(n) complexity) - Add tests for isolated cluster validation scenarios - Update all service tests to expect directional pairs instead of bidirectional Gateway pairs now explicitly represent directional connections. For full-mesh with 3 clusters, creates 6 pairs (c1->c2, c1->c3, c2->c1, c2->c3, c3->c1, c3->c2) instead of 3 bidirectional pairs. Webhook validation ensures forbidden edges do not partition the cluster graph by performing reachability analysis using BFS algorithm on the complement graph. Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- .gitignore | 5 + coverage.html | 8027 +++++++++++++++++ service/slice_config_service.go | 28 +- service/slice_config_service_test.go | 89 +- service/slice_config_webhook_validation.go | 51 +- .../slice_config_webhook_validation_test.go | 30 + util/common.go | 5 +- 7 files changed, 8181 insertions(+), 54 deletions(-) create mode 100644 coverage.html diff --git a/.gitignore b/.gitignore index b725d7add..523f6ba7a 100644 --- a/.gitignore +++ b/.gitignore @@ -33,3 +33,8 @@ work cover.out coverage.out .vscode +demo/ +tests/ +scripts/ +docs/ +copilot* diff --git a/coverage.html b/coverage.html new file mode 100644 index 000000000..7d5a2c2f5 --- /dev/null +++ b/coverage.html @@ -0,0 +1,8027 @@ + + + + + + service: Go Coverage Report + + + +
+ +
+ not tracked + + not covered + covered + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + diff --git a/service/slice_config_service.go b/service/slice_config_service.go index 55e05cf82..11e0fab85 100644 --- a/service/slice_config_service.go +++ b/service/slice_config_service.go @@ -460,26 +460,25 @@ func (s *SliceConfigService) resolveTopologyPairs(sliceConfig *v1alpha1.SliceCon } } -// resolveFullMeshTopology creates bidirectional pairs for all cluster combinations func (s *SliceConfigService) resolveFullMeshTopology(clusters []string) []util.GatewayPair { if len(clusters) < 2 { return []util.GatewayPair{} } - pairs := make([]util.GatewayPair, 0, len(clusters)*(len(clusters)-1)/2) + pairs := make([]util.GatewayPair, 0, len(clusters)*(len(clusters)-1)) for i := 0; i < len(clusters); i++ { - for j := i + 1; j < len(clusters); j++ { - pairs = append(pairs, util.GatewayPair{ - Source: clusters[i], - Target: clusters[j], - Bidirectional: true, - }) + for j := 0; j < len(clusters); j++ { + if i != j { + pairs = append(pairs, util.GatewayPair{ + Source: clusters[i], + Target: clusters[j], + }) + } } } return pairs } -// resolveCustomTopology creates pairs based on explicit connectivity matrix func (s *SliceConfigService) resolveCustomTopology(clusters []string, matrix []v1alpha1.ConnectivityEntry) ([]util.GatewayPair, error) { if len(matrix) == 0 { return nil, fmt.Errorf("custom topology requires connectivity matrix") @@ -497,9 +496,8 @@ func (s *SliceConfigService) resolveCustomTopology(clusters []string, matrix []v return nil, fmt.Errorf("connectivity entry references unknown target cluster: %s", target) } pairs = append(pairs, util.GatewayPair{ - Source: entry.SourceCluster, - Target: target, - Bidirectional: true, + Source: entry.SourceCluster, + Target: target, }) } } @@ -530,7 +528,8 @@ func (s *SliceConfigService) buildForbiddenSet(forbiddenEdges []v1alpha1.Forbidd forbidden := make(map[string]bool) for _, edge := range forbiddenEdges { for _, target := range edge.TargetClusters { - forbidden[s.pairKey(edge.SourceCluster, target)] = true + key := 
edge.SourceCluster + "-" + target + forbidden[key] = true } } return forbidden @@ -540,7 +539,8 @@ func (s *SliceConfigService) buildForbiddenSet(forbiddenEdges []v1alpha1.Forbidd func (s *SliceConfigService) filterForbiddenPairs(pairs []util.GatewayPair, forbidden map[string]bool) []util.GatewayPair { filtered := make([]util.GatewayPair, 0, len(pairs)) for _, p := range pairs { - if !forbidden[s.pairKey(p.Source, p.Target)] { + key := p.Source + "-" + p.Target + if !forbidden[key] { filtered = append(filtered, p) } } diff --git a/service/slice_config_service_test.go b/service/slice_config_service_test.go index 7a6905156..f7c3433cd 100644 --- a/service/slice_config_service_test.go +++ b/service/slice_config_service_test.go @@ -748,12 +748,15 @@ func TestResolveTopologyPairs_DefaultFullMesh(t *testing.T) { pairs, err := service.resolveTopologyPairs(sliceConfig) require.NoError(t, err) - require.Len(t, pairs, 3) + require.Len(t, pairs, 6) expectedPairs := []util.GatewayPair{ - {Source: "cluster1", Target: "cluster2", Bidirectional: true}, - {Source: "cluster1", Target: "cluster3", Bidirectional: true}, - {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster1", Target: "cluster3"}, + {Source: "cluster2", Target: "cluster1"}, + {Source: "cluster2", Target: "cluster3"}, + {Source: "cluster3", Target: "cluster1"}, + {Source: "cluster3", Target: "cluster2"}, } require.ElementsMatch(t, expectedPairs, pairs) } @@ -771,10 +774,11 @@ func TestResolveTopologyPairs_ExplicitFullMesh(t *testing.T) { pairs, err := service.resolveTopologyPairs(sliceConfig) require.NoError(t, err) - require.Len(t, pairs, 1) + require.Len(t, pairs, 2) require.Equal(t, "cluster1", pairs[0].Source) require.Equal(t, "cluster2", pairs[0].Target) - require.True(t, pairs[0].Bidirectional) + require.Equal(t, "cluster2", pairs[1].Source) + require.Equal(t, "cluster1", pairs[1].Target) } func TestResolveTopologyPairs_CustomTopology(t 
*testing.T) { @@ -797,8 +801,8 @@ func TestResolveTopologyPairs_CustomTopology(t *testing.T) { require.Len(t, pairs, 2) expectedPairs := []util.GatewayPair{ - {Source: "cluster1", Target: "cluster2", Bidirectional: true}, - {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster2", Target: "cluster3"}, } require.ElementsMatch(t, expectedPairs, pairs) } @@ -837,11 +841,14 @@ func TestResolveTopologyPairs_RestrictedTopology(t *testing.T) { pairs, err := service.resolveTopologyPairs(sliceConfig) require.NoError(t, err) - require.Len(t, pairs, 2) + require.Len(t, pairs, 5) expectedPairs := []util.GatewayPair{ - {Source: "cluster1", Target: "cluster2", Bidirectional: true}, - {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster2", Target: "cluster1"}, + {Source: "cluster2", Target: "cluster3"}, + {Source: "cluster3", Target: "cluster1"}, + {Source: "cluster3", Target: "cluster2"}, } require.ElementsMatch(t, expectedPairs, pairs) } @@ -859,9 +866,11 @@ func TestResolveTopologyPairs_RestrictedTopologyNoForbidden(t *testing.T) { pairs, err := service.resolveTopologyPairs(sliceConfig) require.NoError(t, err) - require.Len(t, pairs, 1) + require.Len(t, pairs, 2) require.Equal(t, "cluster1", pairs[0].Source) require.Equal(t, "cluster2", pairs[0].Target) + require.Equal(t, "cluster2", pairs[1].Source) + require.Equal(t, "cluster1", pairs[1].Target) } func TestResolveTopologyPairs_UnknownType(t *testing.T) { @@ -891,27 +900,31 @@ func TestResolveFullMeshTopology(t *testing.T) { t.Run("TwoClusters", func(t *testing.T) { pairs := service.resolveFullMeshTopology([]string{"cluster1", "cluster2"}) - require.Len(t, pairs, 1) + require.Len(t, pairs, 2) require.Equal(t, "cluster1", pairs[0].Source) require.Equal(t, "cluster2", pairs[0].Target) - require.True(t, pairs[0].Bidirectional) + require.Equal(t, "cluster2", pairs[1].Source) + 
require.Equal(t, "cluster1", pairs[1].Target) }) t.Run("ThreeClusters", func(t *testing.T) { pairs := service.resolveFullMeshTopology([]string{"cluster1", "cluster2", "cluster3"}) - require.Len(t, pairs, 3) + require.Len(t, pairs, 6) expectedPairs := []util.GatewayPair{ - {Source: "cluster1", Target: "cluster2", Bidirectional: true}, - {Source: "cluster1", Target: "cluster3", Bidirectional: true}, - {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster1", Target: "cluster3"}, + {Source: "cluster2", Target: "cluster1"}, + {Source: "cluster2", Target: "cluster3"}, + {Source: "cluster3", Target: "cluster1"}, + {Source: "cluster3", Target: "cluster2"}, } require.ElementsMatch(t, expectedPairs, pairs) }) t.Run("FourClusters", func(t *testing.T) { pairs := service.resolveFullMeshTopology([]string{"cluster1", "cluster2", "cluster3", "cluster4"}) - require.Len(t, pairs, 6) + require.Len(t, pairs, 12) }) } @@ -929,8 +942,8 @@ func TestResolveCustomTopology(t *testing.T) { require.Len(t, pairs, 2) expectedPairs := []util.GatewayPair{ - {Source: "cluster1", Target: "cluster2", Bidirectional: true}, - {Source: "cluster1", Target: "cluster3", Bidirectional: true}, + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster1", Target: "cluster3"}, } require.ElementsMatch(t, expectedPairs, pairs) }) @@ -984,12 +997,15 @@ func TestResolveRestrictedTopology(t *testing.T) { t.Run("NoForbiddenEdges", func(t *testing.T) { pairs, err := service.resolveRestrictedTopology(clusters, []controllerv1alpha1.ForbiddenEdge{}) require.NoError(t, err) - require.Len(t, pairs, 3) + require.Len(t, pairs, 6) expectedPairs := []util.GatewayPair{ - {Source: "cluster1", Target: "cluster2", Bidirectional: true}, - {Source: "cluster1", Target: "cluster3", Bidirectional: true}, - {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster1", Target: "cluster3"}, + 
{Source: "cluster2", Target: "cluster1"}, + {Source: "cluster2", Target: "cluster3"}, + {Source: "cluster3", Target: "cluster1"}, + {Source: "cluster3", Target: "cluster2"}, } require.ElementsMatch(t, expectedPairs, pairs) }) @@ -1001,11 +1017,14 @@ func TestResolveRestrictedTopology(t *testing.T) { pairs, err := service.resolveRestrictedTopology(clusters, forbiddenEdges) require.NoError(t, err) - require.Len(t, pairs, 2) + require.Len(t, pairs, 5) expectedPairs := []util.GatewayPair{ - {Source: "cluster1", Target: "cluster2", Bidirectional: true}, - {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster2", Target: "cluster1"}, + {Source: "cluster2", Target: "cluster3"}, + {Source: "cluster3", Target: "cluster1"}, + {Source: "cluster3", Target: "cluster2"}, } require.ElementsMatch(t, expectedPairs, pairs) }) @@ -1048,9 +1067,9 @@ func TestFilterForbiddenPairs(t *testing.T) { t.Run("FilterSomePairs", func(t *testing.T) { pairs := []util.GatewayPair{ - {Source: "cluster1", Target: "cluster2", Bidirectional: true}, - {Source: "cluster1", Target: "cluster3", Bidirectional: true}, - {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster1", Target: "cluster3"}, + {Source: "cluster2", Target: "cluster3"}, } forbidden := map[string]bool{ @@ -1061,15 +1080,15 @@ func TestFilterForbiddenPairs(t *testing.T) { require.Len(t, filtered, 2) expectedPairs := []util.GatewayPair{ - {Source: "cluster1", Target: "cluster2", Bidirectional: true}, - {Source: "cluster2", Target: "cluster3", Bidirectional: true}, + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster2", Target: "cluster3"}, } require.ElementsMatch(t, expectedPairs, filtered) }) t.Run("NoForbiddenPairs", func(t *testing.T) { pairs := []util.GatewayPair{ - {Source: "cluster1", Target: "cluster2", Bidirectional: true}, + {Source: "cluster1", Target: "cluster2"}, } forbidden 
:= map[string]bool{} @@ -1080,7 +1099,7 @@ func TestFilterForbiddenPairs(t *testing.T) { t.Run("AllPairsForbidden", func(t *testing.T) { pairs := []util.GatewayPair{ - {Source: "cluster1", Target: "cluster2", Bidirectional: true}, + {Source: "cluster1", Target: "cluster2"}, } forbidden := map[string]bool{ diff --git a/service/slice_config_webhook_validation.go b/service/slice_config_webhook_validation.go index a2fa91208..b2e33dc06 100644 --- a/service/slice_config_webhook_validation.go +++ b/service/slice_config_webhook_validation.go @@ -727,11 +727,58 @@ func validateCustomTopology(matrix []controllerv1alpha1.ConnectivityEntry, clust } func validateRestrictedTopology(topology *controllerv1alpha1.TopologyConfig, clusterSet map[string]struct{}, basePath *field.Path) *field.Error { - // Soft removal: ignore autoOptions fields entirely at webhook layer - // Controller will not act on these fields and webhook will not validate them + if len(topology.ForbiddenEdges) == 0 { + return nil + } + + clusters := make([]string, 0, len(clusterSet)) + for c := range clusterSet { + clusters = append(clusters, c) + } + + forbidden := buildForbiddenSetStatic(topology.ForbiddenEdges) + + reachable := make(map[string]struct{}) + if len(clusters) > 0 { + queue := []string{clusters[0]} + reachable[clusters[0]] = struct{}{} + for len(queue) > 0 { + current := queue[0] + queue = queue[1:] + + for _, next := range clusters { + if next == current { + continue + } + if _, exists := reachable[next]; exists { + continue + } + key := current + "-" + next + if !forbidden[key] { + reachable[next] = struct{}{} + queue = append(queue, next) + } + } + } + } + + if len(reachable) != len(clusterSet) { + return field.Invalid(basePath, topology, "forbidden edges create isolated clusters") + } + return nil } +func buildForbiddenSetStatic(forbiddenEdges []controllerv1alpha1.ForbiddenEdge) map[string]bool { + forbidden := make(map[string]bool) + for _, edge := range forbiddenEdges { + for _, target := 
range edge.TargetClusters { + forbidden[edge.SourceCluster+"-"+target] = true + } + } + return forbidden +} + func validateForbiddenEdges(edges []controllerv1alpha1.ForbiddenEdge, clusterSet map[string]struct{}, basePath *field.Path) *field.Error { edgesPath := basePath.Child("forbiddenEdges") for i, edge := range edges { diff --git a/service/slice_config_webhook_validation_test.go b/service/slice_config_webhook_validation_test.go index 45c0357eb..5c3f639f5 100644 --- a/service/slice_config_webhook_validation_test.go +++ b/service/slice_config_webhook_validation_test.go @@ -2410,3 +2410,33 @@ func TestValidateTopologyConfig_InvalidType(t *testing.T) { require.NotNil(t, err) require.Contains(t, err.Error(), "must be one of") } + +func TestValidateTopologyConfig_RestrictedIsolatedClusters(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyRestricted, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "c3"}}, + {SourceCluster: "c2", TargetClusters: []string{"c1", "c3"}}, + {SourceCluster: "c3", TargetClusters: []string{"c1", "c2"}}, + }, + } + clusters := []string{"c1", "c2", "c3"} + + err := validateTopologyConfig(topology, clusters) + require.NotNil(t, err) + require.Contains(t, err.Error(), "isolated clusters") +} + +func TestValidateTopologyConfig_RestrictedPartiallyConnected(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyRestricted, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c3"}}, + }, + } + clusters := []string{"c1", "c2", "c3"} + + err := validateTopologyConfig(topology, clusters) + require.Nil(t, err) +} + diff --git a/util/common.go b/util/common.go index 7596db805..311737750 100644 --- a/util/common.go +++ b/util/common.go @@ -39,9 +39,8 @@ type WorkerSliceGatewayNetworkAddresses struct { } type GatewayPair struct { - 
Source string - Target string - Bidirectional bool + Source string + Target string } // AppendHyphenToString is a function add hyphen at the end of string From 02ca23d13104e9ea78f9fac13d99fabd85f0a286 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Thu, 13 Nov 2025 16:11:07 +0000 Subject: [PATCH 26/34] chore: remove coverage Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- coverage.html | 8027 ------------------------------------------------- 1 file changed, 8027 deletions(-) delete mode 100644 coverage.html diff --git a/coverage.html b/coverage.html deleted file mode 100644 index 7d5a2c2f5..000000000 --- a/coverage.html +++ /dev/null @@ -1,8027 +0,0 @@ - - - - - - service: Go Coverage Report - - - -
- -
- not tracked - - not covered - covered - -
-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - From d6ee3e03035397f5d683ae5fe8238f45dafffda9 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Thu, 13 Nov 2025 16:12:34 +0000 Subject: [PATCH 27/34] chore: cleaned gitignore Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- .gitignore | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index 523f6ba7a..ff017f161 100644 --- a/.gitignore +++ b/.gitignore @@ -33,8 +33,4 @@ work cover.out coverage.out .vscode -demo/ -tests/ -scripts/ -docs/ -copilot* + From 0f1b678c2169f68395c7d9eca7b7c648d50ac3a2 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Fri, 14 Nov 2025 12:14:03 +0000 Subject: [PATCH 28/34] feat: comprehensive topology tests and fixes Co-authored-by: Qwen-Coder This commit includes: - Added comprehensive integration tests for topology features - Fixed bidirectional filtering in restricted topology - Updated unit tests for topology validation and service logic - Removed obsolete auto topology tests Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- .../controller/sliceconfig_controller_test.go | 395 ++++++++++++++++++ service/slice_config_service.go | 17 +- service/slice_config_service_test.go | 175 +++++++- .../slice_config_webhook_validation_test.go | 202 ++++++++- 4 files changed, 737 insertions(+), 52 deletions(-) diff --git a/controllers/controller/sliceconfig_controller_test.go b/controllers/controller/sliceconfig_controller_test.go index a8bf60773..99435e832 100644 --- a/controllers/controller/sliceconfig_controller_test.go +++ b/controllers/controller/sliceconfig_controller_test.go @@ -87,6 +87,7 @@ var _ = Describe("Slice Config controller Tests", Ordered, func() { }, timeout, interval).Should(BeTrue()) Cluster1.Status.CniSubnet = []string{"192.168.0.0/24"} Cluster1.Status.RegistrationStatus = 
v1alpha1.RegistrationStatusRegistered + Cluster1.Status.NetworkPresent = true Eventually(func() bool { err := k8sClient.Status().Update(ctx, Cluster1) @@ -117,6 +118,7 @@ var _ = Describe("Slice Config controller Tests", Ordered, func() { }, timeout, interval).Should(BeTrue()) Cluster2.Status.CniSubnet = []string{"192.168.1.0/24"} Cluster2.Status.RegistrationStatus = v1alpha1.RegistrationStatusRegistered + Cluster2.Status.NetworkPresent = true Eventually(func() bool { err := k8sClient.Status().Update(ctx, Cluster2) @@ -427,3 +429,396 @@ var _ = Describe("Slice Config controller Tests", Ordered, func() { }) }) }) + +var _ = Describe("Slice Config controller - Topology Tests", Ordered, func() { + var slice *v1alpha1.SliceConfig + var topologyCluster1 *v1alpha1.Cluster + var topologyCluster2 *v1alpha1.Cluster + var topologyCluster3 *v1alpha1.Cluster + const topologySliceName = "test-topology-slice" + const topoProjectName = "topology-project" + const topoSliceNamespace = "kubeslice-topology-project" + + BeforeAll(func() { + ctx := context.Background() + + // Create project for topology tests + topoProject := &v1alpha1.Project{ + ObjectMeta: metav1.ObjectMeta{ + Name: topoProjectName, + Namespace: controlPlaneNamespace, + }, + } + + Eventually(func() bool { + err := k8sClient.Create(ctx, topoProject) + return err == nil + }, timeout, interval).Should(BeTrue()) + + // Check namespace is created + ns := v1.Namespace{} + Eventually(func() bool { + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: topoSliceNamespace, + }, &ns) + return err == nil + }, timeout, interval).Should(BeTrue()) + + // Create topology test clusters + topologyCluster1 = &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "topo-worker-1", + Namespace: topoSliceNamespace, + }, + Spec: v1alpha1.ClusterSpec{ + NodeIPs: []string{"11.11.11.20"}, + }, + } + + topologyCluster2 = &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "topo-worker-2", + Namespace: topoSliceNamespace, + }, + 
Spec: v1alpha1.ClusterSpec{ + NodeIPs: []string{"11.11.11.21"}, + }, + } + + topologyCluster3 = &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "topo-worker-3", + Namespace: topoSliceNamespace, + }, + Spec: v1alpha1.ClusterSpec{ + NodeIPs: []string{"11.11.11.22"}, + }, + } + + // Create and register first cluster + Eventually(func() bool { + err := k8sClient.Create(ctx, topologyCluster1) + return err == nil + }, timeout, interval).Should(BeTrue()) + + getKey := types.NamespacedName{ + Namespace: topologyCluster1.Namespace, + Name: topologyCluster1.Name, + } + Eventually(func() bool { + err := k8sClient.Get(ctx, getKey, topologyCluster1) + return err == nil + }, timeout, interval).Should(BeTrue()) + + topologyCluster1.Status.CniSubnet = []string{"192.168.2.0/24"} + topologyCluster1.Status.RegistrationStatus = v1alpha1.RegistrationStatusRegistered + topologyCluster1.Status.ClusterHealth = &v1alpha1.ClusterHealth{ClusterHealthStatus: v1alpha1.ClusterHealthStatusNormal} + topologyCluster1.Status.NetworkPresent = true + + Eventually(func() bool { + err := k8sClient.Status().Update(ctx, topologyCluster1) + return err == nil + }, timeout, interval).Should(BeTrue()) + + // Create and register second cluster + Eventually(func() bool { + err := k8sClient.Create(ctx, topologyCluster2) + return err == nil + }, timeout, interval).Should(BeTrue()) + + getKey = types.NamespacedName{ + Namespace: topologyCluster2.Namespace, + Name: topologyCluster2.Name, + } + Eventually(func() bool { + err := k8sClient.Get(ctx, getKey, topologyCluster2) + return err == nil + }, timeout, interval).Should(BeTrue()) + + topologyCluster2.Status.CniSubnet = []string{"192.168.3.0/24"} + topologyCluster2.Status.RegistrationStatus = v1alpha1.RegistrationStatusRegistered + topologyCluster2.Status.ClusterHealth = &v1alpha1.ClusterHealth{ClusterHealthStatus: v1alpha1.ClusterHealthStatusNormal} + topologyCluster2.Status.NetworkPresent = true + + Eventually(func() bool { + err := 
k8sClient.Status().Update(ctx, topologyCluster2) + return err == nil + }, timeout, interval).Should(BeTrue()) + + // Create and register third cluster + Eventually(func() bool { + err := k8sClient.Create(ctx, topologyCluster3) + return err == nil + }, timeout, interval).Should(BeTrue()) + + getKey = types.NamespacedName{ + Namespace: topologyCluster3.Namespace, + Name: topologyCluster3.Name, + } + Eventually(func() bool { + err := k8sClient.Get(ctx, getKey, topologyCluster3) + return err == nil + }, timeout, interval).Should(BeTrue()) + + topologyCluster3.Status.CniSubnet = []string{"192.168.4.0/24"} + topologyCluster3.Status.RegistrationStatus = v1alpha1.RegistrationStatusRegistered + topologyCluster3.Status.ClusterHealth = &v1alpha1.ClusterHealth{ClusterHealthStatus: v1alpha1.ClusterHealthStatusNormal} + topologyCluster3.Status.NetworkPresent = true + + Eventually(func() bool { + err := k8sClient.Status().Update(ctx, topologyCluster3) + return err == nil + }, timeout, interval).Should(BeTrue()) + }) + + BeforeEach(func() { + slice = &v1alpha1.SliceConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: topologySliceName, + Namespace: topoSliceNamespace, + }, + Spec: v1alpha1.SliceConfigSpec{ + Clusters: []string{"topo-worker-1", "topo-worker-2"}, + MaxClusters: 10, + SliceSubnet: "10.2.0.0/16", + SliceGatewayProvider: &v1alpha1.WorkerSliceGatewayProvider{ + SliceGatewayType: "OpenVPN", + SliceCaType: "Local", + }, + SliceIpamType: "Local", + SliceType: "Application", + QosProfileDetails: &v1alpha1.QOSProfile{ + BandwidthCeilingKbps: 5120, + DscpClass: "AF11", + }, + }, + } + }) + + AfterEach(func() { + ls := map[string]string{ + "original-slice-name": topologySliceName, + } + listOpts := []client.ListOption{ + client.MatchingLabels(ls), + } + + getKey := types.NamespacedName{ + Name: topologySliceName, + Namespace: topoSliceNamespace, + } + + existingSlice := &v1alpha1.SliceConfig{} + err := k8sClient.Get(ctx, getKey, existingSlice) + if err != nil { + 
Expect(errors.IsNotFound(err)).To(BeTrue()) + return + } + + Expect(k8sClient.Delete(ctx, existingSlice)).Should(Succeed()) + + Eventually(func() bool { + workerSliceConfigList := workerv1alpha1.WorkerSliceConfigList{} + err := k8sClient.List(ctx, &workerSliceConfigList, listOpts...) + if err != nil { + return false + } + if len(workerSliceConfigList.Items) == 0 { + return true + } + for i := range workerSliceConfigList.Items { + if delErr := k8sClient.Delete(ctx, &workerSliceConfigList.Items[i]); delErr != nil && !errors.IsNotFound(delErr) { + GinkgoWriter.Printf("failed deleting WorkerSliceConfig %s/%s: %v\n", workerSliceConfigList.Items[i].Namespace, workerSliceConfigList.Items[i].Name, delErr) + } + } + return false + }, timeout, interval).Should(BeTrue()) + + Eventually(func() bool { + workerSliceGatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &workerSliceGatewayList, listOpts...) + if err != nil { + return false + } + if len(workerSliceGatewayList.Items) == 0 { + return true + } + for i := range workerSliceGatewayList.Items { + if delErr := k8sClient.Delete(ctx, &workerSliceGatewayList.Items[i]); delErr != nil && !errors.IsNotFound(delErr) { + GinkgoWriter.Printf("failed deleting WorkerSliceGateway %s/%s: %v\n", workerSliceGatewayList.Items[i].Namespace, workerSliceGatewayList.Items[i].Name, delErr) + } + } + return false + }, timeout, interval).Should(BeTrue()) + + Eventually(func() bool { + fresh := &v1alpha1.SliceConfig{} + err := k8sClient.Get(ctx, getKey, fresh) + return errors.IsNotFound(err) + }, timeout, interval).Should(BeTrue()) + }) + + It("Should create 1 gateway pair for full-mesh topology with 2 clusters", func() { + By("Creating a SliceConfig with full-mesh topology and 2 clusters") + slice.Spec.TopologyConfig = &v1alpha1.TopologyConfig{ + TopologyType: v1alpha1.TopologyFullMesh, + } + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying 2 gateway objects are created for 1 bidirectional 
pair (n*(n-1)/2 = 1 pair for n=2, but 2 gateway objects)") + sliceKey := types.NamespacedName{ + Name: topologySliceName, + Namespace: topoSliceNamespace, + } + Eventually(func() bool { + createdSlice := &v1alpha1.SliceConfig{} + err := k8sClient.Get(ctx, sliceKey, createdSlice) + if err != nil { + GinkgoWriter.Println("Error getting slice:", err) + return false + } + + // Check that gateway pairs were created (2 gateways for 1 bidirectional pair) + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err = k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Gateway count:", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 2 + }, timeout, interval).Should(BeTrue()) + }) + + It("Should create 3 gateway pairs for full-mesh topology with 3 clusters", func() { + By("Creating SliceConfig with full-mesh topology and 3 clusters") + slice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2", "topo-worker-3"} + slice.Spec.TopologyConfig = &v1alpha1.TopologyConfig{ + TopologyType: v1alpha1.TopologyFullMesh, + } + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying 6 gateway objects are created for 3 pairs (n*(n-1)/2 = 3 pairs for n=3, but 6 gateway objects)") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Gateway count for 3 clusters:", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 6 + }, timeout, interval).Should(BeTrue()) + }) + + It("Should default to full-mesh when topology config is nil", func() { + By("Creating SliceConfig without topology config") + slice.Spec.TopologyConfig = nil // No topology specified + slice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2"} + + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying it defaults 
to full-mesh (2 gateway objects for 1 bidirectional pair with 2 clusters)") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Gateway count for nil topology:", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 2 + }, timeout, interval).Should(BeTrue()) + }) + + It("Should exclude forbidden edges from restricted topology", func() { + By("Creating SliceConfig with restricted topology and forbidden edges") + slice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2", "topo-worker-3"} + slice.Spec.TopologyConfig = &v1alpha1.TopologyConfig{ + TopologyType: v1alpha1.TopologyRestricted, + ForbiddenEdges: []v1alpha1.ForbiddenEdge{ + { + SourceCluster: "topo-worker-1", + TargetClusters: []string{"topo-worker-3"}, // block 1->3 + }, + }, + } + + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying 4 gateway objects are created (forbidding 1↔3 removes both directions due to bidirectional tunnels, leaving 2 pairs * 2 objects = 4 gateways)") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Gateway count (restricted):", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 4 + }, timeout, interval).Should(BeTrue()) + }) + + It("Should create gateway pairs from custom connectivity matrix", func() { + By("Creating SliceConfig with custom topology matrix") + slice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2", "topo-worker-3"} + slice.Spec.TopologyConfig = &v1alpha1.TopologyConfig{ + TopologyType: v1alpha1.TopologyCustom, + ConnectivityMatrix: []v1alpha1.ConnectivityEntry{ + { + SourceCluster: "topo-worker-1", + TargetClusters: []string{"topo-worker-2"}, // only 1->2 + }, + }, + } + 
+ Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying 2 gateway objects are created from custom matrix (1 pair specified becomes bidirectional → 2 objects)") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Gateway count (custom matrix):", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 2 + }, timeout, interval).Should(BeTrue()) + }) + + It("Should update gateways when topology config changes", func() { + By("Creating SliceConfig with 2 clusters initially (default full-mesh)") + slice.Spec.TopologyConfig = nil + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying 2 gateway objects for 2 clusters (full-mesh defaults to 1 pair → 2 objects)") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Initial gateway count:", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 2 + }, timeout, interval).Should(BeTrue()) + + By("Updating to 3 clusters with full-mesh") + sliceKey := types.NamespacedName{ + Name: topologySliceName, + Namespace: topoSliceNamespace, + } + updatedSlice := &v1alpha1.SliceConfig{} + Eventually(func() bool { + err := k8sClient.Get(ctx, sliceKey, updatedSlice) + if err != nil { + return false + } + updatedSlice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2", "topo-worker-3"} + updatedSlice.Spec.TopologyConfig = &v1alpha1.TopologyConfig{ + TopologyType: v1alpha1.TopologyFullMesh, + } + err = k8sClient.Update(ctx, updatedSlice) + return err == nil + }, timeout, interval).Should(BeTrue()) + + By("Verifying 6 gateway objects are now created for 3 bidirectional pairs") + Eventually(func() bool { + gatewayList := 
workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Updated gateway count:", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 6 + }, timeout, interval).Should(BeTrue()) + }) +}) diff --git a/service/slice_config_service.go b/service/slice_config_service.go index 11e0fab85..3a6622d78 100644 --- a/service/slice_config_service.go +++ b/service/slice_config_service.go @@ -536,11 +536,16 @@ func (s *SliceConfigService) buildForbiddenSet(forbiddenEdges []v1alpha1.Forbidd } // filterForbiddenPairs removes pairs that are in the forbidden set +// Since gateway creation is bidirectional (creates both server and client), +// we must filter out BOTH directions if either is forbidden func (s *SliceConfigService) filterForbiddenPairs(pairs []util.GatewayPair, forbidden map[string]bool) []util.GatewayPair { filtered := make([]util.GatewayPair, 0, len(pairs)) for _, p := range pairs { - key := p.Source + "-" + p.Target - if !forbidden[key] { + forwardKey := p.Source + "-" + p.Target + reverseKey := p.Target + "-" + p.Source + + // Skip if EITHER direction is forbidden (because gateway is bidirectional) + if !forbidden[forwardKey] && !forbidden[reverseKey] { filtered = append(filtered, p) } } @@ -555,11 +560,3 @@ func (s *SliceConfigService) makeClusterSet(clusters []string) map[string]bool { } return set } - -// pairKey creates a normalized key for a cluster pair -func (s *SliceConfigService) pairKey(a, b string) string { - if a < b { - return a + "-" + b - } - return b + "-" + a -} diff --git a/service/slice_config_service_test.go b/service/slice_config_service_test.go index f7c3433cd..dbf668b4b 100644 --- a/service/slice_config_service_test.go +++ b/service/slice_config_service_test.go @@ -841,13 +841,13 @@ func TestResolveTopologyPairs_RestrictedTopology(t *testing.T) { pairs, err := service.resolveTopologyPairs(sliceConfig) 
require.NoError(t, err) - require.Len(t, pairs, 5) + // Forbidding 1→3 also removes 3→1 (bidirectional gateway) + require.Len(t, pairs, 4) expectedPairs := []util.GatewayPair{ {Source: "cluster1", Target: "cluster2"}, {Source: "cluster2", Target: "cluster1"}, {Source: "cluster2", Target: "cluster3"}, - {Source: "cluster3", Target: "cluster1"}, {Source: "cluster3", Target: "cluster2"}, } require.ElementsMatch(t, expectedPairs, pairs) @@ -1017,13 +1017,13 @@ func TestResolveRestrictedTopology(t *testing.T) { pairs, err := service.resolveRestrictedTopology(clusters, forbiddenEdges) require.NoError(t, err) - require.Len(t, pairs, 5) + // Forbidding 1→3 also removes 3→1 (bidirectional gateway) + require.Len(t, pairs, 4) expectedPairs := []util.GatewayPair{ {Source: "cluster1", Target: "cluster2"}, {Source: "cluster2", Target: "cluster1"}, {Source: "cluster2", Target: "cluster3"}, - {Source: "cluster3", Target: "cluster1"}, {Source: "cluster3", Target: "cluster2"}, } require.ElementsMatch(t, expectedPairs, pairs) @@ -1111,24 +1111,6 @@ func TestFilterForbiddenPairs(t *testing.T) { }) } -func TestPairKey(t *testing.T) { - service := &SliceConfigService{} - - t.Run("OrderIndependent", func(t *testing.T) { - require.Equal(t, "a-b", service.pairKey("a", "b")) - require.Equal(t, "a-b", service.pairKey("b", "a")) - }) - - t.Run("ClusterNames", func(t *testing.T) { - require.Equal(t, "cluster1-cluster2", service.pairKey("cluster1", "cluster2")) - require.Equal(t, "cluster1-cluster2", service.pairKey("cluster2", "cluster1")) - }) - - t.Run("SameName", func(t *testing.T) { - require.Equal(t, "cluster1-cluster1", service.pairKey("cluster1", "cluster1")) - }) -} - func TestMakeClusterSet(t *testing.T) { service := &SliceConfigService{} @@ -1147,5 +1129,154 @@ func TestMakeClusterSet(t *testing.T) { set := service.makeClusterSet([]string{}) require.Empty(t, set) }) + + t.Run("SingleCluster", func(t *testing.T) { + set := service.makeClusterSet([]string{"only"}) + require.Len(t, set, 
1) + require.True(t, set["only"]) + }) + + t.Run("DuplicateClusters", func(t *testing.T) { + clusters := []string{"c1", "c2", "c1"} + set := service.makeClusterSet(clusters) + require.Len(t, set, 2) + require.True(t, set["c1"]) + require.True(t, set["c2"]) + }) +} + +func TestResolveTopologyPairs_NilTopology(t *testing.T) { + service := &SliceConfigService{} + + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"c1", "c2"}, + TopologyConfig: nil, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.NoError(t, err) + require.Len(t, pairs, 2) +} + +func TestResolveTopologyPairs_EmptyTopologyType(t *testing.T) { + service := &SliceConfigService{} + + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"c1", "c2"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: "", + }, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.NoError(t, err) + require.Len(t, pairs, 2) +} + +func TestResolveFullMeshTopology_ZeroClusters(t *testing.T) { + service := &SliceConfigService{} + pairs := service.resolveFullMeshTopology([]string{}) + require.Empty(t, pairs) } +func TestResolveFullMeshTopology_FiveClusters(t *testing.T) { + service := &SliceConfigService{} + clusters := []string{"c1", "c2", "c3", "c4", "c5"} + pairs := service.resolveFullMeshTopology(clusters) + require.Len(t, pairs, 20) +} + +func TestResolveCustomTopology_MultipleTargets(t *testing.T) { + service := &SliceConfigService{} + + clusters := []string{"c1", "c2", "c3", "c4"} + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "c3", "c4"}}, + } + + pairs, err := service.resolveCustomTopology(clusters, matrix) + require.NoError(t, err) + require.Len(t, pairs, 3) +} + +func TestResolveCustomTopology_AllPossiblePairs(t *testing.T) { + service := &SliceConfigService{} + + 
clusters := []string{"c1", "c2"} + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "c1", TargetClusters: []string{"c2"}}, + {SourceCluster: "c2", TargetClusters: []string{"c1"}}, + } + + pairs, err := service.resolveCustomTopology(clusters, matrix) + require.NoError(t, err) + require.Len(t, pairs, 2) +} + +func TestResolveRestrictedTopology_MultipleForbiddenEdges(t *testing.T) { + service := &SliceConfigService{} + + clusters := []string{"c1", "c2", "c3"} + forbiddenEdges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c2"}}, + {SourceCluster: "c2", TargetClusters: []string{"c3"}}, + } + + pairs, err := service.resolveRestrictedTopology(clusters, forbiddenEdges) + require.NoError(t, err) + require.Len(t, pairs, 2) +} + +func TestBuildForbiddenSet_MultipleTargets(t *testing.T) { + service := &SliceConfigService{} + + forbiddenEdges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "c3"}}, + } + + forbidden := service.buildForbiddenSet(forbiddenEdges) + require.Len(t, forbidden, 2) + require.True(t, forbidden["c1-c2"]) + require.True(t, forbidden["c1-c3"]) +} + +func TestFilterForbiddenPairs_BidirectionalFiltering(t *testing.T) { + service := &SliceConfigService{} + + pairs := []util.GatewayPair{ + {Source: "c1", Target: "c2"}, + {Source: "c2", Target: "c1"}, + {Source: "c1", Target: "c3"}, + {Source: "c3", Target: "c1"}, + } + + forbidden := map[string]bool{ + "c1-c3": true, + } + + filtered := service.filterForbiddenPairs(pairs, forbidden) + require.Len(t, filtered, 2) + + for _, p := range filtered { + require.False(t, (p.Source == "c1" && p.Target == "c3") || (p.Source == "c3" && p.Target == "c1")) + } +} + +func TestFilterForbiddenPairs_ReverseForbidden(t *testing.T) { + service := &SliceConfigService{} + + pairs := []util.GatewayPair{ + {Source: "c1", Target: "c2"}, + } + + forbidden := map[string]bool{ + "c2-c1": true, + } + + filtered := 
service.filterForbiddenPairs(pairs, forbidden) + require.Empty(t, filtered) +} diff --git a/service/slice_config_webhook_validation_test.go b/service/slice_config_webhook_validation_test.go index 5c3f639f5..367af4aa7 100644 --- a/service/slice_config_webhook_validation_test.go +++ b/service/slice_config_webhook_validation_test.go @@ -2366,26 +2366,6 @@ func TestValidateTopologyConfig_InvalidClusterInMatrix(t *testing.T) { require.Contains(t, err.Error(), "not in spec.clusters") } -func TestValidateTopologyConfig_AutoWithOptions(t *testing.T) { - topology := &controllerv1alpha1.TopologyConfig{ - TopologyType: controllerv1alpha1.TopologyRestricted, - } - clusters := []string{"c1", "c2", "c3"} - - err := validateTopologyConfig(topology, clusters) - require.Nil(t, err) -} - -func TestValidateTopologyConfig_AutoInvalidThreshold(t *testing.T) { - topology := &controllerv1alpha1.TopologyConfig{ - TopologyType: controllerv1alpha1.TopologyRestricted, - } - clusters := []string{"c1", "c2"} - - err := validateTopologyConfig(topology, clusters) - require.Nil(t, err) -} - func TestValidateTopologyConfig_InvalidForbiddenEdge(t *testing.T) { topology := &controllerv1alpha1.TopologyConfig{ TopologyType: controllerv1alpha1.TopologyRestricted, @@ -2440,3 +2420,185 @@ func TestValidateTopologyConfig_RestrictedPartiallyConnected(t *testing.T) { require.Nil(t, err) } +func TestValidateTopologyConfig_NilTopology(t *testing.T) { + err := validateTopologyConfig(nil, []string{"c1", "c2"}) + require.Nil(t, err) +} + +func TestValidateTopologyConfig_EmptyTopologyType(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: "", + } + err := validateTopologyConfig(topology, []string{"c1", "c2"}) + require.Nil(t, err) +} + +func TestValidateCustomTopology_InvalidTargetCluster(t *testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "c1", TargetClusters: []string{"invalid"}}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + 
basePath := field.NewPath("spec", "topologyConfig") + + err := validateCustomTopology(matrix, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") +} + +func TestValidateCustomTopology_InvalidSourceCluster(t *testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "invalid", TargetClusters: []string{"c2"}}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateCustomTopology(matrix, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") +} + +func TestValidateCustomTopology_MultipleInvalidTargets(t *testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "invalid1", "invalid2"}}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateCustomTopology(matrix, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") +} + +func TestValidateRestrictedTopology_EmptyForbiddenEdges(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyRestricted, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateRestrictedTopology(topology, clusterSet, basePath) + require.Nil(t, err) +} + +func TestValidateRestrictedTopology_SingleCluster(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyRestricted, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{}, + } + clusterSet := map[string]struct{}{"c1": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateRestrictedTopology(topology, clusterSet, basePath) + require.Nil(t, err) +} 
+ +func TestValidateRestrictedTopology_FullyDisconnected(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyRestricted, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c2"}}, + {SourceCluster: "c2", TargetClusters: []string{"c1"}}, + }, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateRestrictedTopology(topology, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "isolated clusters") +} + +func TestValidateRestrictedTopology_FourClustersOneIsolated(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyRestricted, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c4"}}, + {SourceCluster: "c2", TargetClusters: []string{"c4"}}, + {SourceCluster: "c3", TargetClusters: []string{"c4"}}, + {SourceCluster: "c4", TargetClusters: []string{"c1", "c2", "c3"}}, + }, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}, "c3": {}, "c4": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateRestrictedTopology(topology, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "isolated clusters") +} + +func TestValidateForbiddenEdges_EmptyEdges(t *testing.T) { + edges := []controllerv1alpha1.ForbiddenEdge{} + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateForbiddenEdges(edges, clusterSet, basePath) + require.Nil(t, err) +} + +func TestValidateForbiddenEdges_MultipleTargets(t *testing.T) { + edges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "c3"}}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}, "c3": {}} + basePath := field.NewPath("spec", "topologyConfig") 
+ + err := validateForbiddenEdges(edges, clusterSet, basePath) + require.Nil(t, err) +} + +func TestValidateForbiddenEdges_InvalidSourceCluster(t *testing.T) { + edges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "invalid", TargetClusters: []string{"c2"}}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateForbiddenEdges(edges, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") +} + +func TestValidateForbiddenEdges_InvalidTargetCluster(t *testing.T) { + edges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"invalid"}}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateForbiddenEdges(edges, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") +} + +func TestValidateForbiddenEdges_InvalidTargetInList(t *testing.T) { + edges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "invalid", "c3"}}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}, "c3": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateForbiddenEdges(edges, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") + require.Contains(t, err.Error(), "targetClusters[1]") +} + +func TestBuildForbiddenSetStatic_MultipleEdgesMultipleTargets(t *testing.T) { + edges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "c3"}}, + {SourceCluster: "c2", TargetClusters: []string{"c4"}}, + } + + forbidden := buildForbiddenSetStatic(edges) + require.Len(t, forbidden, 3) + require.True(t, forbidden["c1-c2"]) + require.True(t, forbidden["c1-c3"]) + require.True(t, forbidden["c2-c4"]) +} + +func TestBuildForbiddenSetStatic_Empty(t 
*testing.T) { + forbidden := buildForbiddenSetStatic([]controllerv1alpha1.ForbiddenEdge{}) + require.Empty(t, forbidden) +} + From e960ad698358ea38268d8eb9fe66228cad27c5f6 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Thu, 13 Nov 2025 16:53:35 +0000 Subject: [PATCH 29/34] test(topology): restore comprehensive integration tests and remove obsolete auto tests Changes: - Restore 7 topology integration test cases in sliceconfig_controller_test.go * Full-mesh topology tests * Custom topology tests * Restricted topology tests * Multiple topology types validation - Add NetworkPresent and ClusterHealth status fields to clusters in integration tests - Remove obsolete TestValidateTopologyConfig_AutoWithOptions and TestValidateTopologyConfig_AutoInvalidThreshold - These Auto tests were redundant with Restricted topology tests since topology type was renamed Integration tests verify: - Full-mesh topology creates correct gateway pairs - Custom connectivity matrix is respected - Restricted topology with forbidden edges works correctly - Topology changes during slice lifecycle Signed-off-by: Transcendental-Programmer <priyena.programming@gmail.com> Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- .../controller/sliceconfig_controller_test.go | 394 ++++++++++++++++++ 1 file changed, 394 insertions(+) diff --git a/controllers/controller/sliceconfig_controller_test.go b/controllers/controller/sliceconfig_controller_test.go index 99435e832..7157b6204 100644 --- a/controllers/controller/sliceconfig_controller_test.go +++ b/controllers/controller/sliceconfig_controller_test.go @@ -88,6 +88,7 @@ var _ = Describe("Slice Config controller Tests", Ordered, func() { Cluster1.Status.CniSubnet = []string{"192.168.0.0/24"} Cluster1.Status.RegistrationStatus = v1alpha1.RegistrationStatusRegistered Cluster1.Status.NetworkPresent = true + Cluster1.Status.NetworkPresent = true Eventually(func() bool { err := k8sClient.Status().Update(ctx, 
Cluster1) @@ -725,6 +726,399 @@ var _ = Describe("Slice Config controller - Topology Tests", Ordered, func() { }, timeout, interval).Should(BeTrue()) }) + It("Should exclude forbidden edges from restricted topology", func() { + By("Creating SliceConfig with restricted topology and forbidden edges") + slice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2", "topo-worker-3"} + slice.Spec.TopologyConfig = &v1alpha1.TopologyConfig{ + TopologyType: v1alpha1.TopologyRestricted, + ForbiddenEdges: []v1alpha1.ForbiddenEdge{ + { + SourceCluster: "topo-worker-1", + TargetClusters: []string{"topo-worker-3"}, // block 1->3 + }, + }, + } + + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying 4 gateway objects are created (2 pairs remaining after forbidding 1→3, 4 gateway objects)") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Gateway count (restricted):", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 4 + }, timeout, interval).Should(BeTrue()) + }) + + It("Should create gateway pairs from custom connectivity matrix", func() { + By("Creating SliceConfig with custom topology matrix") + slice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2", "topo-worker-3"} + slice.Spec.TopologyConfig = &v1alpha1.TopologyConfig{ + TopologyType: v1alpha1.TopologyCustom, + ConnectivityMatrix: []v1alpha1.ConnectivityEntry{ + { + SourceCluster: "topo-worker-1", + TargetClusters: []string{"topo-worker-2"}, // only 1->2 + }, + }, + } + + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying 2 gateway objects are created from custom matrix (1 pair specified becomes bidirectional → 2 objects)") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + 
client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Gateway count (custom matrix):", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 2 + }, timeout, interval).Should(BeTrue()) + }) + + It("Should update gateways when topology config changes", func() { + By("Creating SliceConfig with 2 clusters initially (default full-mesh)") + slice.Spec.TopologyConfig = nil + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying 2 gateway objects for 2 clusters (full-mesh defaults to 1 pair → 2 objects)") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Initial gateway count:", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 2 + }, timeout, interval).Should(BeTrue()) + + By("Updating to 3 clusters with full-mesh") + sliceKey := types.NamespacedName{ + Name: topologySliceName, + Namespace: topoSliceNamespace, + } + updatedSlice := &v1alpha1.SliceConfig{} + Eventually(func() bool { + err := k8sClient.Get(ctx, sliceKey, updatedSlice) + if err != nil { + return false + } + updatedSlice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2", "topo-worker-3"} + updatedSlice.Spec.TopologyConfig = &v1alpha1.TopologyConfig{ + TopologyType: v1alpha1.TopologyFullMesh, + } + err = k8sClient.Update(ctx, updatedSlice) + return err == nil + }, timeout, interval).Should(BeTrue()) + + By("Verifying 6 gateway objects are now created for 3 bidirectional pairs") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Updated gateway count:", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 6 + }, timeout, interval).Should(BeTrue()) + }) +}) + +var _ = 
Describe("Slice Config controller - Topology Tests", Ordered, func() { + var slice *v1alpha1.SliceConfig + var topologyCluster1 *v1alpha1.Cluster + var topologyCluster2 *v1alpha1.Cluster + var topologyCluster3 *v1alpha1.Cluster + const topologySliceName = "test-topology-slice" + const topoProjectName = "topology-project" + const topoSliceNamespace = "kubeslice-topology-project" + + BeforeAll(func() { + ctx := context.Background() + + // Create project for topology tests + topoProject := &v1alpha1.Project{ + ObjectMeta: metav1.ObjectMeta{ + Name: topoProjectName, + Namespace: controlPlaneNamespace, + }, + } + + Eventually(func() bool { + err := k8sClient.Create(ctx, topoProject) + return err == nil + }, timeout, interval).Should(BeTrue()) + + // Check namespace is created + ns := v1.Namespace{} + Eventually(func() bool { + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: topoSliceNamespace, + }, &ns) + return err == nil + }, timeout, interval).Should(BeTrue()) + + // Create topology test clusters + topologyCluster1 = &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "topo-worker-1", + Namespace: topoSliceNamespace, + }, + Spec: v1alpha1.ClusterSpec{ + NodeIPs: []string{"11.11.11.20"}, + }, + } + + topologyCluster2 = &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "topo-worker-2", + Namespace: topoSliceNamespace, + }, + Spec: v1alpha1.ClusterSpec{ + NodeIPs: []string{"11.11.11.21"}, + }, + } + + topologyCluster3 = &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "topo-worker-3", + Namespace: topoSliceNamespace, + }, + Spec: v1alpha1.ClusterSpec{ + NodeIPs: []string{"11.11.11.22"}, + }, + } + + // Create and register first cluster + Eventually(func() bool { + err := k8sClient.Create(ctx, topologyCluster1) + return err == nil + }, timeout, interval).Should(BeTrue()) + + getKey := types.NamespacedName{ + Namespace: topologyCluster1.Namespace, + Name: topologyCluster1.Name, + } + Eventually(func() bool { + err := k8sClient.Get(ctx, 
getKey, topologyCluster1) + return err == nil + }, timeout, interval).Should(BeTrue()) + + topologyCluster1.Status.CniSubnet = []string{"192.168.2.0/24"} + topologyCluster1.Status.RegistrationStatus = v1alpha1.RegistrationStatusRegistered + topologyCluster1.Status.ClusterHealth = &v1alpha1.ClusterHealth{ClusterHealthStatus: v1alpha1.ClusterHealthStatusNormal} + topologyCluster1.Status.NetworkPresent = true + + Eventually(func() bool { + err := k8sClient.Status().Update(ctx, topologyCluster1) + return err == nil + }, timeout, interval).Should(BeTrue()) + + // Create and register second cluster + Eventually(func() bool { + err := k8sClient.Create(ctx, topologyCluster2) + return err == nil + }, timeout, interval).Should(BeTrue()) + + getKey = types.NamespacedName{ + Namespace: topologyCluster2.Namespace, + Name: topologyCluster2.Name, + } + Eventually(func() bool { + err := k8sClient.Get(ctx, getKey, topologyCluster2) + return err == nil + }, timeout, interval).Should(BeTrue()) + + topologyCluster2.Status.CniSubnet = []string{"192.168.3.0/24"} + topologyCluster2.Status.RegistrationStatus = v1alpha1.RegistrationStatusRegistered + topologyCluster2.Status.ClusterHealth = &v1alpha1.ClusterHealth{ClusterHealthStatus: v1alpha1.ClusterHealthStatusNormal} + topologyCluster2.Status.NetworkPresent = true + + Eventually(func() bool { + err := k8sClient.Status().Update(ctx, topologyCluster2) + return err == nil + }, timeout, interval).Should(BeTrue()) + + // Create and register third cluster + Eventually(func() bool { + err := k8sClient.Create(ctx, topologyCluster3) + return err == nil + }, timeout, interval).Should(BeTrue()) + + getKey = types.NamespacedName{ + Namespace: topologyCluster3.Namespace, + Name: topologyCluster3.Name, + } + Eventually(func() bool { + err := k8sClient.Get(ctx, getKey, topologyCluster3) + return err == nil + }, timeout, interval).Should(BeTrue()) + + topologyCluster3.Status.CniSubnet = []string{"192.168.4.0/24"} + 
topologyCluster3.Status.RegistrationStatus = v1alpha1.RegistrationStatusRegistered + topologyCluster3.Status.ClusterHealth = &v1alpha1.ClusterHealth{ClusterHealthStatus: v1alpha1.ClusterHealthStatusNormal} + topologyCluster3.Status.NetworkPresent = true + + Eventually(func() bool { + err := k8sClient.Status().Update(ctx, topologyCluster3) + return err == nil + }, timeout, interval).Should(BeTrue()) + }) + + BeforeEach(func() { + slice = &v1alpha1.SliceConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: topologySliceName, + Namespace: topoSliceNamespace, + }, + Spec: v1alpha1.SliceConfigSpec{ + Clusters: []string{"topo-worker-1", "topo-worker-2"}, + MaxClusters: 10, + SliceSubnet: "10.2.0.0/16", + SliceGatewayProvider: &v1alpha1.WorkerSliceGatewayProvider{ + SliceGatewayType: "OpenVPN", + SliceCaType: "Local", + }, + SliceIpamType: "Local", + SliceType: "Application", + QosProfileDetails: &v1alpha1.QOSProfile{ + BandwidthCeilingKbps: 5120, + DscpClass: "AF11", + }, + }, + } + }) + + AfterEach(func() { + ls := map[string]string{ + "original-slice-name": topologySliceName, + } + listOpts := []client.ListOption{ + client.MatchingLabels(ls), + } + + getKey := types.NamespacedName{ + Name: topologySliceName, + Namespace: topoSliceNamespace, + } + + existingSlice := &v1alpha1.SliceConfig{} + err := k8sClient.Get(ctx, getKey, existingSlice) + if err != nil { + Expect(errors.IsNotFound(err)).To(BeTrue()) + return + } + + Expect(k8sClient.Delete(ctx, existingSlice)).Should(Succeed()) + + Eventually(func() bool { + workerSliceConfigList := workerv1alpha1.WorkerSliceConfigList{} + err := k8sClient.List(ctx, &workerSliceConfigList, listOpts...) 
+ if err != nil { + return false + } + if len(workerSliceConfigList.Items) == 0 { + return true + } + for i := range workerSliceConfigList.Items { + if delErr := k8sClient.Delete(ctx, &workerSliceConfigList.Items[i]); delErr != nil && !errors.IsNotFound(delErr) { + GinkgoWriter.Printf("failed deleting WorkerSliceConfig %s/%s: %v\n", workerSliceConfigList.Items[i].Namespace, workerSliceConfigList.Items[i].Name, delErr) + } + } + return false + }, timeout, interval).Should(BeTrue()) + + Eventually(func() bool { + workerSliceGatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &workerSliceGatewayList, listOpts...) + if err != nil { + return false + } + if len(workerSliceGatewayList.Items) == 0 { + return true + } + for i := range workerSliceGatewayList.Items { + if delErr := k8sClient.Delete(ctx, &workerSliceGatewayList.Items[i]); delErr != nil && !errors.IsNotFound(delErr) { + GinkgoWriter.Printf("failed deleting WorkerSliceGateway %s/%s: %v\n", workerSliceGatewayList.Items[i].Namespace, workerSliceGatewayList.Items[i].Name, delErr) + } + } + return false + }, timeout, interval).Should(BeTrue()) + + Eventually(func() bool { + fresh := &v1alpha1.SliceConfig{} + err := k8sClient.Get(ctx, getKey, fresh) + return errors.IsNotFound(err) + }, timeout, interval).Should(BeTrue()) + }) + + It("Should create 1 gateway pair for full-mesh topology with 2 clusters", func() { + By("Creating a SliceConfig with full-mesh topology and 2 clusters") + slice.Spec.TopologyConfig = &v1alpha1.TopologyConfig{ + TopologyType: v1alpha1.TopologyFullMesh, + } + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying 2 gateway objects are created for 1 bidirectional pair (n*(n-1)/2 = 1 pair for n=2, but 2 gateway objects)") + sliceKey := types.NamespacedName{ + Name: topologySliceName, + Namespace: topoSliceNamespace, + } + Eventually(func() bool { + createdSlice := &v1alpha1.SliceConfig{} + err := k8sClient.Get(ctx, sliceKey, createdSlice) + if 
err != nil { + GinkgoWriter.Println("Error getting slice:", err) + return false + } + + // Check that gateway pairs were created (2 gateways for 1 bidirectional pair) + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err = k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Gateway count:", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 2 + }, timeout, interval).Should(BeTrue()) + }) + + It("Should create 3 gateway pairs for full-mesh topology with 3 clusters", func() { + By("Creating SliceConfig with full-mesh topology and 3 clusters") + slice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2", "topo-worker-3"} + slice.Spec.TopologyConfig = &v1alpha1.TopologyConfig{ + TopologyType: v1alpha1.TopologyFullMesh, + } + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying 6 gateway objects are created for 3 pairs (n*(n-1)/2 = 3 pairs for n=3, but 6 gateway objects)") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Gateway count for 3 clusters:", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 6 + }, timeout, interval).Should(BeTrue()) + }) + + It("Should default to full-mesh when topology config is nil", func() { + By("Creating SliceConfig without topology config") + slice.Spec.TopologyConfig = nil // No topology specified + slice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2"} + + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying it defaults to full-mesh (2 gateway objects for 1 bidirectional pair with 2 clusters)") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + 
GinkgoWriter.Println("Gateway count for nil topology:", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 2 + }, timeout, interval).Should(BeTrue()) + }) + It("Should exclude forbidden edges from restricted topology", func() { By("Creating SliceConfig with restricted topology and forbidden edges") slice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2", "topo-worker-3"} From 3a3411c82fbb8811a7cb1dbe33a3492c1cf1464f Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Thu, 13 Nov 2025 21:18:50 +0000 Subject: [PATCH 30/34] fix: controller tests Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- controllers/controller/sliceconfig_controller_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/controller/sliceconfig_controller_test.go b/controllers/controller/sliceconfig_controller_test.go index 7157b6204..91565c70d 100644 --- a/controllers/controller/sliceconfig_controller_test.go +++ b/controllers/controller/sliceconfig_controller_test.go @@ -741,7 +741,7 @@ var _ = Describe("Slice Config controller - Topology Tests", Ordered, func() { Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) - By("Verifying 4 gateway objects are created (2 pairs remaining after forbidding 1→3, 4 gateway objects)") + By("Verifying 4 gateway objects are created (forbidding 1↔3 removes both directions due to bidirectional tunnels, leaving 2 pairs * 2 objects = 4 gateways)") Eventually(func() bool { gatewayList := workerv1alpha1.WorkerSliceGatewayList{} err := k8sClient.List(ctx, &gatewayList, From 669effb1393b92be961c0bb19c453da5185a181f Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Fri, 14 Nov 2025 11:18:26 +0000 Subject: [PATCH 31/34] feat: added all ut till 100% completion for topology Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena 
<130545865+Transcendental-Programmer@users.noreply.github.com> From 9bcb6eebffe1b8656a203fd9ea09e85bd64b77a9 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Mon, 17 Nov 2025 13:42:29 +0000 Subject: [PATCH 32/34] test(iperf): add iperf manifests, slice templates, and run script Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- docs/iperf/README.md | 26 +++++++ docs/iperf/iperf-server.yaml | 53 +++++++++++++ docs/iperf/iperf-sleep.yaml | 31 ++++++++ docs/iperf/slice-templates/custom.yaml | 39 ++++++++++ docs/iperf/slice-templates/fullmesh.yaml | 32 ++++++++ docs/iperf/slice-templates/restricted.yaml | 36 +++++++++ scripts/iperf-run.sh | 87 ++++++++++++++++++++++ 7 files changed, 304 insertions(+) create mode 100644 docs/iperf/README.md create mode 100644 docs/iperf/iperf-server.yaml create mode 100644 docs/iperf/iperf-sleep.yaml create mode 100644 docs/iperf/slice-templates/custom.yaml create mode 100644 docs/iperf/slice-templates/fullmesh.yaml create mode 100644 docs/iperf/slice-templates/restricted.yaml create mode 100755 scripts/iperf-run.sh diff --git a/docs/iperf/README.md b/docs/iperf/README.md new file mode 100644 index 000000000..ba168e5e2 --- /dev/null +++ b/docs/iperf/README.md @@ -0,0 +1,26 @@ +# iPerf inter-cluster test for KubeSlice + +This folder contains manifests and a small helper script to run iPerf tests between two worker clusters in a KubeSlice-enabled environment. 
+
+What is included
+- `iperf-sleep.yaml` - client deployment (sleep + netshoot sidecar)
+- `iperf-server.yaml` - server deployment and `ServiceExport` for the iperf server
+- `slice-templates/` - three SliceConfig templates: `fullmesh`, `restricted`, `custom`
+- `../../scripts/iperf-run.sh` - helper script to deploy and run tests (see scripts path)
+
+Scenarios to test
+- Full-mesh (default) — verify baseline connectivity and bandwidth
+- Restricted — remove a forbidden edge and verify iperf is blocked
+- Custom — only specific source→target connectivity enabled
+
+Quick notes
+- You need two or more registered worker clusters (contexts configured with `kubectx` or `kubectl --context`).
+- Create the `iperf` namespace in each participating cluster before applying deployments:
+
+```bash
+kubectl --context <cluster-context> create ns iperf
+```
+
+- The script does not modify controller code — it deploys SliceConfig/sample resources and the iperf workloads and runs `iperf` from the client pod.
+
+See `../../scripts/iperf-run.sh --help` for usage.
diff --git a/docs/iperf/iperf-server.yaml b/docs/iperf/iperf-server.yaml new file mode 100644 index 000000000..13ef9d693 --- /dev/null +++ b/docs/iperf/iperf-server.yaml @@ -0,0 +1,53 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: iperf-server + namespace: iperf + labels: + app: iperf-server +spec: + replicas: 1 + selector: + matchLabels: + app: iperf-server + template: + metadata: + labels: + app: iperf-server + spec: + containers: + - name: iperf + image: mlabbe/iperf + imagePullPolicy: IfNotPresent + args: + - '-s' + - '-p' + - '5201' + ports: + - containerPort: 5201 + name: server + - name: sidecar + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true +--- +apiVersion: networking.kubeslice.io/v1beta1 +kind: ServiceExport +metadata: + name: iperf-server + namespace: iperf +spec: + slice: + selector: + matchLabels: + app: iperf-server + ingressEnabled: false + ports: + - name: tcp + containerPort: 5201 + protocol: TCP diff --git a/docs/iperf/iperf-sleep.yaml b/docs/iperf/iperf-sleep.yaml new file mode 100644 index 000000000..002a21396 --- /dev/null +++ b/docs/iperf/iperf-sleep.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: iperf-sleep + namespace: iperf + labels: + app: iperf-sleep +spec: + replicas: 1 + selector: + matchLabels: + app: iperf-sleep + template: + metadata: + labels: + app: iperf-sleep + spec: + containers: + - name: iperf + image: mlabbe/iperf + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + - name: sidecar + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true diff --git a/docs/iperf/slice-templates/custom.yaml b/docs/iperf/slice-templates/custom.yaml new file mode 100644 index 
000000000..78b953639
--- /dev/null
+++ b/docs/iperf/slice-templates/custom.yaml
@@ -0,0 +1,39 @@
+apiVersion: controller.kubeslice.io/v1alpha1
+kind: SliceConfig
+metadata:
+  name: iperf-custom
+spec:
+  sliceSubnet: 10.34.0.0/16
+  sliceType: Application
+  sliceGatewayProvider:
+    sliceGatewayType: OpenVPN
+    sliceCaType: Local
+  sliceIpamType: Local
+  clusters:
+    - <worker-cluster-1>
+    - <worker-cluster-2>
+    - <worker-cluster-3>
+  topologyConfig:
+    topologyType: custom
+    connectivityMatrix:
+      - sourceCluster: <worker-cluster-1>
+        targetClusters:
+          - <worker-cluster-2>
+      - sourceCluster: <worker-cluster-2>
+        targetClusters:
+          - <worker-cluster-3>
+  qosProfileDetails:
+    queueType: HTB
+    priority: 1
+    tcType: BANDWIDTH_CONTROL
+    bandwidthCeilingKbps: 5120
+    bandwidthGuaranteedKbps: 2560
+    dscpClass: AF11
+  namespaceIsolationProfile:
+    applicationNamespaces:
+      - namespace: iperf
+        clusters:
+          - <worker-cluster-1>
+          - <worker-cluster-2>
+          - <worker-cluster-3>
+    isolationEnabled: false
diff --git a/docs/iperf/slice-templates/fullmesh.yaml b/docs/iperf/slice-templates/fullmesh.yaml
new file mode 100644
index 000000000..54fff4095
--- /dev/null
+++ b/docs/iperf/slice-templates/fullmesh.yaml
@@ -0,0 +1,32 @@
+apiVersion: controller.kubeslice.io/v1alpha1
+kind: SliceConfig
+metadata:
+  name: iperf-fullmesh
+spec:
+  sliceSubnet: 10.32.0.0/16
+  sliceType: Application
+  sliceGatewayProvider:
+    sliceGatewayType: OpenVPN
+    sliceCaType: Local
+  sliceIpamType: Local
+  clusters:
+    - <worker-cluster-1>
+    - <worker-cluster-2>
+  qosProfileDetails:
+    queueType: HTB
+    priority: 1
+    tcType: BANDWIDTH_CONTROL
+    bandwidthCeilingKbps: 5120
+    bandwidthGuaranteedKbps: 2560
+    dscpClass: AF11
+  namespaceIsolationProfile:
+    applicationNamespaces:
+      - namespace: iperf
+        clusters:
+          - <worker-cluster-1>
+          - <worker-cluster-2>
+    isolationEnabled: false
+    allowedNamespaces:
+      - namespace: kube-system
+        clusters:
+          - <worker-cluster-1>
diff --git a/docs/iperf/slice-templates/restricted.yaml b/docs/iperf/slice-templates/restricted.yaml
new file mode 100644
index 000000000..97e4dfa36
--- /dev/null
+++ b/docs/iperf/slice-templates/restricted.yaml
@@ -0,0 +1,36 @@
+apiVersion: controller.kubeslice.io/v1alpha1
+kind: SliceConfig
+metadata:
+  name: iperf-restricted
+spec:
+  sliceSubnet: 10.33.0.0/16
+  sliceType: Application
+  sliceGatewayProvider:
+    sliceGatewayType: OpenVPN
+    sliceCaType: Local
+  sliceIpamType: Local
+  clusters:
+    - <worker-cluster-1>
+    - <worker-cluster-2>
+    - <worker-cluster-3>
+  topologyConfig:
+    topologyType: restricted
+    forbiddenEdges:
+      - sourceCluster: <worker-cluster-1>
+        targetClusters:
+          - <worker-cluster-3>
+  qosProfileDetails:
+    queueType: HTB
+    priority: 1
+    tcType: BANDWIDTH_CONTROL
+    bandwidthCeilingKbps: 5120
+    bandwidthGuaranteedKbps: 2560
+    dscpClass: AF11
+  namespaceIsolationProfile:
+    applicationNamespaces:
+      - namespace: iperf
+        clusters:
+          - <worker-cluster-1>
+          - <worker-cluster-2>
+          - <worker-cluster-3>
+    isolationEnabled: false
diff --git a/scripts/iperf-run.sh b/scripts/iperf-run.sh
new file mode 100755
index 000000000..b0bac9210
--- /dev/null
+++ b/scripts/iperf-run.sh
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+# Helper script to deploy iperf server/client and run tests across two clusters
+# Usage:
+#   scripts/iperf-run.sh --server-context <server-ctx> --sleep-context <sleep-ctx> --project-namespace <ns> --slice-file <file> --slice-name <name>
+
+set -euo pipefail
+
+usage(){
+  cat </$SLICE_NAME/g" docs/iperf/iperf-server.yaml | kubectl --context "$SERVER_CTX" apply -f - -n iperf || true
+
+# 3) Deploy iperf-sleep in sleep cluster
+kubectl --context "$SLEEP_CTX" create ns iperf --dry-run=client -o yaml | kubectl --context "$SLEEP_CTX" apply -f -
+kubectl --context "$SLEEP_CTX" apply -f docs/iperf/iperf-sleep.yaml -n iperf
+
+# 4) Wait for pods to become ready (server)
+echo "Waiting for iperf-server pod..."
+kubectl --context "$SERVER_CTX" -n iperf wait --for=condition=ready pod -l app=iperf-server --timeout=120s
+
+echo "Waiting for iperf-sleep pod..."
+kubectl --context "$SLEEP_CTX" -n iperf wait --for=condition=ready pod -l app=iperf-sleep --timeout=120s + +# 5) Derive DNS name (short name should be available) +SHORT_DNS="iperf-server.iperf.svc.slice.local" + +echo "Using short DNS: $SHORT_DNS" + +# 6) Exec into sleep pod and run iperf +SLEEP_POD=$(kubectl --context "$SLEEP_CTX" -n iperf get pods -l app=iperf-sleep -o jsonpath='{.items[0].metadata.name}') +echo "Using sleep pod: $SLEEP_POD" + +OUTFILE="iperf-${SLICE_NAME}-$(date +%Y%m%dT%H%M%S).log" +kubectl --context "$SLEEP_CTX" -n iperf exec -c iperf "$SLEEP_POD" -- iperf -c "$SHORT_DNS" -p 5201 -i 1 -t 10 > "$OUTFILE" 2>&1 || true + +echo "iperf output saved to $OUTFILE" +echo +echo "--- output ---" +cat "$OUTFILE" + +echo "Done" From 2aba737333971a9d3a68369cb9380132549d11d0 Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Wed, 19 Nov 2025 14:14:28 +0000 Subject: [PATCH 33/34] feat: Add TopologyConfig to WorkerSliceConfig API - Add TopologyConfig field to WorkerSliceConfigSpec to propagate topology info to workers - Regenerate deepcopy methods and CRDs - This enables the hub to send topology type to worker clusters Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- .../v1alpha1/workersliceconfig_types.go | 2 + apis/worker/v1alpha1/zz_generated.deepcopy.go | 6 +++ ...orker.kubeslice.io_workersliceconfigs.yaml | 39 +++++++++++++++++++ 3 files changed, 47 insertions(+) diff --git a/apis/worker/v1alpha1/workersliceconfig_types.go b/apis/worker/v1alpha1/workersliceconfig_types.go index 10043c692..da8d3e6f9 100644 --- a/apis/worker/v1alpha1/workersliceconfig_types.go +++ b/apis/worker/v1alpha1/workersliceconfig_types.go @@ -53,6 +53,8 @@ type WorkerSliceConfigSpec struct { ExternalGatewayConfig ExternalGatewayConfig `json:"externalGatewayConfig,omitempty"` //+kubebuilder:default:=single-network OverlayNetworkDeploymentMode controllerv1alpha1.NetworkType 
`json:"overlayNetworkDeploymentMode,omitempty"` + // Topology configuration for flexible topology support + TopologyConfig *controllerv1alpha1.TopologyConfig `json:"topologyConfig,omitempty"` } // WorkerSliceGatewayProvider defines the configuration for slicegateway diff --git a/apis/worker/v1alpha1/zz_generated.deepcopy.go b/apis/worker/v1alpha1/zz_generated.deepcopy.go index 578ce988b..4730b1fa5 100644 --- a/apis/worker/v1alpha1/zz_generated.deepcopy.go +++ b/apis/worker/v1alpha1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. package v1alpha1 import ( + controllerv1alpha1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1" "k8s.io/apimachinery/pkg/runtime" ) @@ -449,6 +450,11 @@ func (in *WorkerSliceConfigSpec) DeepCopyInto(out *WorkerSliceConfigSpec) { **out = **in } out.ExternalGatewayConfig = in.ExternalGatewayConfig + if in.TopologyConfig != nil { + in, out := &in.TopologyConfig, &out.TopologyConfig + *out = new(controllerv1alpha1.TopologyConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerSliceConfigSpec. 
diff --git a/config/crd/bases/worker.kubeslice.io_workersliceconfigs.yaml b/config/crd/bases/worker.kubeslice.io_workersliceconfigs.yaml index 80599b7bd..95973a4bd 100644 --- a/config/crd/bases/worker.kubeslice.io_workersliceconfigs.yaml +++ b/config/crd/bases/worker.kubeslice.io_workersliceconfigs.yaml @@ -168,6 +168,45 @@ spec: sliceType: default: Application type: string + topologyConfig: + description: Topology configuration for flexible topology support + properties: + connectivityMatrix: + items: + properties: + sourceCluster: + type: string + targetClusters: + items: + type: string + type: array + required: + - sourceCluster + - targetClusters + type: object + type: array + forbiddenEdges: + items: + properties: + sourceCluster: + type: string + targetClusters: + items: + type: string + type: array + required: + - sourceCluster + - targetClusters + type: object + type: array + topologyType: + default: full-mesh + enum: + - restricted + - full-mesh + - custom + type: string + type: object type: object status: description: WorkerSliceConfigStatus defines the observed state of Slice From 2ec29fb1c9bd88f99ac7d18e88aa6832070f71af Mon Sep 17 00:00:00 2001 From: Transcendental-Programmer Date: Sun, 23 Nov 2025 15:16:26 +0000 Subject: [PATCH 34/34] worker slicegateway service updated Signed-off-by: Transcendental-Programmer Signed-off-by: Priyansh Saxena <130545865+Transcendental-Programmer@users.noreply.github.com> --- service/worker_slice_gateway_service.go | 16 +++++++++++++--- service/worker_slice_gateway_service_test.go | 4 ++-- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/service/worker_slice_gateway_service.go b/service/worker_slice_gateway_service.go index b1392a10c..d068cb47c 100644 --- a/service/worker_slice_gateway_service.go +++ b/service/worker_slice_gateway_service.go @@ -351,7 +351,7 @@ func (s *WorkerSliceGatewayService) CreateMinimumWorkerSliceGateways(ctx context clusterNames []string, namespace string, label map[string]string, 
clusterMap map[string]int, sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType, gatewayPairs []util.GatewayPair) (ctrl.Result, error) { - err := s.cleanupObsoleteGateways(ctx, namespace, label, clusterNames, clusterMap) + err := s.cleanupObsoleteGateways(ctx, namespace, label, clusterNames, clusterMap, gatewayPairs) if err != nil { return ctrl.Result{}, err } @@ -379,7 +379,7 @@ func (s *WorkerSliceGatewayService) ListWorkerSliceGateways(ctx context.Context, // cleanupObsoleteGateways is a function delete outdated gateways func (s *WorkerSliceGatewayService) cleanupObsoleteGateways(ctx context.Context, namespace string, ownerLabel map[string]string, - clusters []string, clusterMap map[string]int) error { + clusters []string, clusterMap map[string]int, gatewayPairs []util.GatewayPair) error { gateways, err := s.ListWorkerSliceGateways(ctx, ownerLabel, namespace) if err != nil { @@ -402,11 +402,21 @@ func (s *WorkerSliceGatewayService) cleanupObsoleteGateways(ctx context.Context, clusterExistMap[cluster] = true } + // Create a map of valid gateway pairs + validPairs := make(map[string]bool) + for _, pair := range gatewayPairs { + validPairs[pair.Source+"-"+pair.Target] = true + } + for _, gateway := range gateways { clusterSource := gateway.Spec.LocalGatewayConfig.ClusterName clusterDestination := gateway.Spec.RemoteGatewayConfig.ClusterName gatewayExpectedNumber := s.calculateGatewayNumber(clusterMap[clusterSource], clusterMap[clusterDestination]) - if !clusterExistMap[clusterSource] || !clusterExistMap[clusterDestination] || gatewayExpectedNumber != gateway.Spec.GatewayNumber { + + // Check if the pair is valid in the current topology + isValidPair := validPairs[clusterSource+"-"+clusterDestination] + + if !clusterExistMap[clusterSource] || !clusterExistMap[clusterDestination] || gatewayExpectedNumber != gateway.Spec.GatewayNumber || !isValidPair { err = util.DeleteResource(ctx, &gateway) if err != nil { 
//Register an event for worker slice gateway deletion failure diff --git a/service/worker_slice_gateway_service_test.go b/service/worker_slice_gateway_service_test.go index af024b103..f42907b1d 100644 --- a/service/worker_slice_gateway_service_test.go +++ b/service/worker_slice_gateway_service_test.go @@ -320,7 +320,7 @@ func testCreateMinimumWorkerSliceGatewaysAlreadyExists(t *testing.T) { //environment := make(map[string]string, 5) //jobMock.On("CreateJob", ctx, requestObj.Namespace, "image", environment).Return(ctrl.Result{}, nil).Once() - result, err := workerSliceGatewayService.CreateMinimumWorkerSliceGateways(ctx, "red", clusterNames, requestObj.Namespace, label, clusterMap, "10.10.10.10/16", "/16", nil, nil) + result, err := workerSliceGatewayService.CreateMinimumWorkerSliceGateways(ctx, "red", clusterNames, requestObj.Namespace, label, clusterMap, "10.10.10.10/16", "/16", nil, []util.GatewayPair{{Source: "cluster-1", Target: "cluster-2"}}) expectedResult := ctrl.Result{} require.NoError(t, nil) require.Equal(t, result, expectedResult) @@ -407,7 +407,7 @@ func testCreateMinimumWorkerSliceGatewaysNotExists(t *testing.T) { clientMock.On("Update", ctx, mock.AnythingOfType("*v1.Event")).Return(nil).Once() clientMock.On("Get", ctx, mock.Anything, mock.Anything).Return(nil).Once() mMock.On("RecordCounterMetric", mock.Anything, mock.Anything).Return().Once() - result, err := workerSliceGatewayService.CreateMinimumWorkerSliceGateways(ctx, "red", clusterNames, requestObj.Namespace, label, clusterMap, "10.10.10.10/16", "/16", nil, nil) + result, err := workerSliceGatewayService.CreateMinimumWorkerSliceGateways(ctx, "red", clusterNames, requestObj.Namespace, label, clusterMap, "10.10.10.10/16", "/16", nil, []util.GatewayPair{{Source: "cluster-1", Target: "cluster-2"}}) expectedResult := ctrl.Result{} require.NoError(t, nil) require.Equal(t, result, expectedResult)