diff --git a/.gitignore b/.gitignore index d0b26d3eb..ff017f161 100644 --- a/.gitignore +++ b/.gitignore @@ -32,4 +32,5 @@ work cover.out coverage.out -.vscode \ No newline at end of file +.vscode + diff --git a/apis/controller/v1alpha1/sliceconfig_types.go b/apis/controller/v1alpha1/sliceconfig_types.go index 38b7bbded..a88a2ad4a 100644 --- a/apis/controller/v1alpha1/sliceconfig_types.go +++ b/apis/controller/v1alpha1/sliceconfig_types.go @@ -74,6 +74,8 @@ type SliceConfigSpec struct { // RenewBefore is used for renew now! RenewBefore *metav1.Time `json:"renewBefore,omitempty"` VPNConfig *VPNConfiguration `json:"vpnConfig,omitempty"` + // TopologyConfig defines cluster connectivity patterns + TopologyConfig *TopologyConfig `json:"topologyConfig,omitempty"` } // ExternalGatewayConfig is the configuration for external gateways like 'istio', etc/ @@ -174,6 +176,37 @@ type VPNConfiguration struct { Cipher string `json:"cipher"` } +// +kubebuilder:validation:Enum:=restricted;full-mesh;custom +type TopologyType string + +const ( + TopologyRestricted TopologyType = "restricted" + TopologyFullMesh TopologyType = "full-mesh" + TopologyCustom TopologyType = "custom" +) + +type TopologyConfig struct { + //+kubebuilder:default:=full-mesh + TopologyType TopologyType `json:"topologyType,omitempty"` + ConnectivityMatrix []ConnectivityEntry `json:"connectivityMatrix,omitempty"` + ForbiddenEdges []ForbiddenEdge `json:"forbiddenEdges,omitempty"` +} + +type ConnectivityEntry struct { + //+kubebuilder:validation:Required + SourceCluster string `json:"sourceCluster"` + //+kubebuilder:validation:Required + TargetClusters []string `json:"targetClusters"` +} + +type ForbiddenEdge struct { + //+kubebuilder:validation:Required + SourceCluster string `json:"sourceCluster"` + //+kubebuilder:validation:Required + TargetClusters []string `json:"targetClusters"` +} + + type KubesliceEvent struct { // Type of the event. Can be one of Error, Success or InProgress Type string `json:"type,omitempty"` diff --git a/apis/controller/v1alpha1/zz_generated.deepcopy.go b/apis/controller/v1alpha1/zz_generated.deepcopy.go index e7d11c185..e1749a662 100644 --- a/apis/controller/v1alpha1/zz_generated.deepcopy.go +++ b/apis/controller/v1alpha1/zz_generated.deepcopy.go @@ -203,6 +203,26 @@ func (in *ComponentStatus) DeepCopy() *ComponentStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectivityEntry) DeepCopyInto(out *ConnectivityEntry) { + *out = *in + if in.TargetClusters != nil { + in, out := &in.TargetClusters, &out.TargetClusters + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectivityEntry. +func (in *ConnectivityEntry) DeepCopy() *ConnectivityEntry { + if in == nil { + return nil + } + out := new(ConnectivityEntry) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalGatewayConfig) DeepCopyInto(out *ExternalGatewayConfig) { *out = *in @@ -242,6 +262,26 @@ func (in *ExternalGatewayConfigOptions) DeepCopy() *ExternalGatewayConfigOptions return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ForbiddenEdge) DeepCopyInto(out *ForbiddenEdge) { + *out = *in + if in.TargetClusters != nil { + in, out := &in.TargetClusters, &out.TargetClusters + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForbiddenEdge. +func (in *ForbiddenEdge) DeepCopy() *ForbiddenEdge { + if in == nil { + return nil + } + out := new(ForbiddenEdge) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GPURestriction) DeepCopyInto(out *GPURestriction) { *out = *in @@ -744,6 +784,11 @@ func (in *SliceConfigSpec) DeepCopyInto(out *SliceConfigSpec) { *out = new(VPNConfiguration) **out = **in } + if in.TopologyConfig != nil { + in, out := &in.TopologyConfig, &out.TopologyConfig + *out = new(TopologyConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SliceConfigSpec. @@ -933,6 +978,35 @@ func (in *Telemetry) DeepCopy() *Telemetry { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologyConfig) DeepCopyInto(out *TopologyConfig) { + *out = *in + if in.ConnectivityMatrix != nil { + in, out := &in.ConnectivityMatrix, &out.ConnectivityMatrix + *out = make([]ConnectivityEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ForbiddenEdges != nil { + in, out := &in.ForbiddenEdges, &out.ForbiddenEdges + *out = make([]ForbiddenEdge, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologyConfig. +func (in *TopologyConfig) DeepCopy() *TopologyConfig { + if in == nil { + return nil + } + out := new(TopologyConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VCPURestriction) DeepCopyInto(out *VCPURestriction) { *out = *in diff --git a/apis/worker/v1alpha1/workersliceconfig_types.go b/apis/worker/v1alpha1/workersliceconfig_types.go index 10043c692..da8d3e6f9 100644 --- a/apis/worker/v1alpha1/workersliceconfig_types.go +++ b/apis/worker/v1alpha1/workersliceconfig_types.go @@ -53,6 +53,8 @@ type WorkerSliceConfigSpec struct { ExternalGatewayConfig ExternalGatewayConfig `json:"externalGatewayConfig,omitempty"` //+kubebuilder:default:=single-network OverlayNetworkDeploymentMode controllerv1alpha1.NetworkType `json:"overlayNetworkDeploymentMode,omitempty"` + // Topology configuration for flexible topology support + TopologyConfig *controllerv1alpha1.TopologyConfig `json:"topologyConfig,omitempty"` } // WorkerSliceGatewayProvider defines the configuration for slicegateway diff --git a/apis/worker/v1alpha1/zz_generated.deepcopy.go b/apis/worker/v1alpha1/zz_generated.deepcopy.go index 578ce988b..4730b1fa5 100644 --- a/apis/worker/v1alpha1/zz_generated.deepcopy.go +++ b/apis/worker/v1alpha1/zz_generated.deepcopy.go @@ -21,6 +21,7 @@ limitations under the License. 
package v1alpha1 import ( + controllerv1alpha1 "github.com/kubeslice/kubeslice-controller/apis/controller/v1alpha1" "k8s.io/apimachinery/pkg/runtime" ) @@ -449,6 +450,11 @@ func (in *WorkerSliceConfigSpec) DeepCopyInto(out *WorkerSliceConfigSpec) { **out = **in } out.ExternalGatewayConfig = in.ExternalGatewayConfig + if in.TopologyConfig != nil { + in, out := &in.TopologyConfig, &out.TopologyConfig + *out = new(controllerv1alpha1.TopologyConfig) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerSliceConfigSpec. diff --git a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml index 70063d3b8..388914f0f 100644 --- a/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml +++ b/config/crd/bases/controller.kubeslice.io_sliceconfigs.yaml @@ -227,6 +227,46 @@ spec: type: string standardQosProfileName: type: string + topologyConfig: + description: TopologyConfig defines cluster connectivity patterns + properties: + connectivityMatrix: + items: + properties: + sourceCluster: + type: string + targetClusters: + items: + type: string + type: array + required: + - sourceCluster + - targetClusters + type: object + type: array + forbiddenEdges: + items: + properties: + sourceCluster: + type: string + targetClusters: + items: + type: string + type: array + required: + - sourceCluster + - targetClusters + type: object + type: array + topologyType: + default: full-mesh + enum: + - restricted + - full-mesh + - custom + type: string + type: object + vpnConfig: description: VPNConfiguration defines the additional (optional) VPN Configuration to customise diff --git a/config/crd/bases/worker.kubeslice.io_workersliceconfigs.yaml b/config/crd/bases/worker.kubeslice.io_workersliceconfigs.yaml index 80599b7bd..95973a4bd 100644 --- a/config/crd/bases/worker.kubeslice.io_workersliceconfigs.yaml +++ b/config/crd/bases/worker.kubeslice.io_workersliceconfigs.yaml @@ -168,6 +168,45 @@ spec: sliceType: default: Application type: string + topologyConfig: + description: Topology configuration for flexible topology support + properties: + connectivityMatrix: + items: + properties: + sourceCluster: + type: string + targetClusters: + items: + type: string + type: array + required: + - sourceCluster + - targetClusters + type: object + type: array + forbiddenEdges: + items: + properties: + sourceCluster: + type: string + targetClusters: + items: + type: string + type: array + required: + - sourceCluster + - targetClusters + type: object + type: array + topologyType: + default: full-mesh + enum: + - restricted + - full-mesh + - custom + type: string + type: object type: object status: description: WorkerSliceConfigStatus defines the observed state of Slice diff --git a/config/samples/topology-custom-matrix.yaml b/config/samples/topology-custom-matrix.yaml new file mode 100644 index 000000000..449174f3a --- /dev/null +++ b/config/samples/topology-custom-matrix.yaml @@ -0,0 +1,17 @@ +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: demo-custom-matrix + namespace: kubeslice-avesha +spec: + sliceSubnet: "10.3.0.0/16" + clusters: ["dmz", "gateway", "internal", "database"] + topologyConfig: + topologyType: custom + connectivityMatrix: + - sourceCluster: dmz + targetClusters: ["gateway"] + - sourceCluster: gateway + targetClusters: ["internal", "dmz"] + - sourceCluster: internal + targetClusters: ["database", "gateway"] \ No newline at end of file diff 
--git a/config/samples/topology-full-mesh.yaml b/config/samples/topology-full-mesh.yaml new file mode 100644 index 000000000..5f5f25986 --- /dev/null +++ b/config/samples/topology-full-mesh.yaml @@ -0,0 +1,18 @@ +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: demo-full-mesh + namespace: kubeslice-avesha +spec: + sliceSubnet: "10.1.0.0/16" + clusters: ["cluster-1", "cluster-2", "cluster-3", "cluster-4"] + topologyConfig: + topologyType: full-mesh + # Full-mesh creates all possible connections between clusters + # For 4 clusters: 6 gateway pairs (n*(n-1)/2) + # cluster-1 <-> cluster-2 + # cluster-1 <-> cluster-3 + # cluster-1 <-> cluster-4 + # cluster-2 <-> cluster-3 + # cluster-2 <-> cluster-4 + # cluster-3 <-> cluster-4 diff --git a/config/samples/topology-restricted-secure.yaml b/config/samples/topology-restricted-secure.yaml new file mode 100644 index 000000000..77c0d44fc --- /dev/null +++ b/config/samples/topology-restricted-secure.yaml @@ -0,0 +1,13 @@ +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: demo-restricted-secure + namespace: kubeslice-avesha +spec: + sliceSubnet: "10.2.0.0/16" + clusters: ["dmz", "gateway", "internal", "analytics"] + topologyConfig: + topologyType: restricted + forbiddenEdges: + - sourceCluster: gateway + targetClusters: ["dmz", "analytics"] \ No newline at end of file diff --git a/controllers/controller/sliceconfig_controller_test.go b/controllers/controller/sliceconfig_controller_test.go index a8bf60773..91565c70d 100644 --- a/controllers/controller/sliceconfig_controller_test.go +++ b/controllers/controller/sliceconfig_controller_test.go @@ -87,6 +87,7 @@ var _ = Describe("Slice Config controller Tests", Ordered, func() { }, timeout, interval).Should(BeTrue()) Cluster1.Status.CniSubnet = []string{"192.168.0.0/24"} Cluster1.Status.RegistrationStatus = v1alpha1.RegistrationStatusRegistered + Cluster1.Status.NetworkPresent = true Eventually(func() bool { err := k8sClient.Status().Update(ctx, Cluster1) }, timeout, interval).Should(BeTrue()) @@ -117,6 +118,7 @@ var _ = Describe("Slice Config controller Tests", Ordered, func() { }, timeout, interval).Should(BeTrue()) Cluster2.Status.CniSubnet = []string{"192.168.1.0/24"} Cluster2.Status.RegistrationStatus = v1alpha1.RegistrationStatusRegistered + Cluster2.Status.NetworkPresent = true Eventually(func() bool { err := k8sClient.Status().Update(ctx, Cluster2) }, timeout, interval).Should(BeTrue()) @@ -427,3 +429,396 @@ var _ = Describe("Slice Config controller Tests", Ordered, func() { }) }) }) + +var _ = Describe("Slice Config controller - Topology Tests", Ordered, func() { + var slice *v1alpha1.SliceConfig + var topologyCluster1 *v1alpha1.Cluster + var topologyCluster2 *v1alpha1.Cluster + var topologyCluster3 *v1alpha1.Cluster + const topologySliceName = "test-topology-slice" + const topoProjectName = "topology-project" + const topoSliceNamespace = "kubeslice-topology-project" + + BeforeAll(func() { + ctx := context.Background() + + // Create project for topology tests + topoProject := &v1alpha1.Project{ + ObjectMeta: metav1.ObjectMeta{ + Name: topoProjectName, + Namespace: controlPlaneNamespace, + }, + } + + Eventually(func() bool { + err := k8sClient.Create(ctx, topoProject) + return err == nil + }, timeout, interval).Should(BeTrue()) + + // Check namespace is created + ns := v1.Namespace{} + Eventually(func() bool { + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: topoSliceNamespace, + }, &ns) + return err == nil + }, timeout, interval).Should(BeTrue()) + + // Create
topology test clusters + topologyCluster1 = &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "topo-worker-1", + Namespace: topoSliceNamespace, + }, + Spec: v1alpha1.ClusterSpec{ + NodeIPs: []string{"11.11.11.20"}, + }, + } + + topologyCluster2 = &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "topo-worker-2", + Namespace: topoSliceNamespace, + }, + Spec: v1alpha1.ClusterSpec{ + NodeIPs: []string{"11.11.11.21"}, + }, + } + + topologyCluster3 = &v1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "topo-worker-3", + Namespace: topoSliceNamespace, + }, + Spec: v1alpha1.ClusterSpec{ + NodeIPs: []string{"11.11.11.22"}, + }, + } + + // Create and register first cluster + Eventually(func() bool { + err := k8sClient.Create(ctx, topologyCluster1) + return err == nil + }, timeout, interval).Should(BeTrue()) + + getKey := types.NamespacedName{ + Namespace: topologyCluster1.Namespace, + Name: topologyCluster1.Name, + } + Eventually(func() bool { + err := k8sClient.Get(ctx, getKey, topologyCluster1) + return err == nil + }, timeout, interval).Should(BeTrue()) + + topologyCluster1.Status.CniSubnet = []string{"192.168.2.0/24"} + topologyCluster1.Status.RegistrationStatus = v1alpha1.RegistrationStatusRegistered + topologyCluster1.Status.ClusterHealth = &v1alpha1.ClusterHealth{ClusterHealthStatus: v1alpha1.ClusterHealthStatusNormal} + topologyCluster1.Status.NetworkPresent = true + + Eventually(func() bool { + err := k8sClient.Status().Update(ctx, topologyCluster1) + return err == nil + }, timeout, interval).Should(BeTrue()) + + // Create and register second cluster + Eventually(func() bool { + err := k8sClient.Create(ctx, topologyCluster2) + return err == nil + }, timeout, interval).Should(BeTrue()) + + getKey = types.NamespacedName{ + Namespace: topologyCluster2.Namespace, + Name: topologyCluster2.Name, + } + Eventually(func() bool { + err := k8sClient.Get(ctx, getKey, topologyCluster2) + return err == nil + }, timeout, interval).Should(BeTrue()) + + topologyCluster2.Status.CniSubnet = []string{"192.168.3.0/24"} + topologyCluster2.Status.RegistrationStatus = v1alpha1.RegistrationStatusRegistered + topologyCluster2.Status.ClusterHealth = &v1alpha1.ClusterHealth{ClusterHealthStatus: v1alpha1.ClusterHealthStatusNormal} + topologyCluster2.Status.NetworkPresent = true + + Eventually(func() bool { + err := k8sClient.Status().Update(ctx, topologyCluster2) + return err == nil + }, timeout, interval).Should(BeTrue()) + + // Create and register third cluster + Eventually(func() bool { + err := k8sClient.Create(ctx, topologyCluster3) + return err == nil + }, timeout, interval).Should(BeTrue()) + + getKey = types.NamespacedName{ + Namespace: topologyCluster3.Namespace, + Name: topologyCluster3.Name, + } + Eventually(func() bool { + err := k8sClient.Get(ctx, getKey, topologyCluster3) + return err == nil + }, timeout, interval).Should(BeTrue()) + + topologyCluster3.Status.CniSubnet = []string{"192.168.4.0/24"} + topologyCluster3.Status.RegistrationStatus = v1alpha1.RegistrationStatusRegistered + topologyCluster3.Status.ClusterHealth = &v1alpha1.ClusterHealth{ClusterHealthStatus: v1alpha1.ClusterHealthStatusNormal} + topologyCluster3.Status.NetworkPresent = true + + Eventually(func() bool { + err := k8sClient.Status().Update(ctx, topologyCluster3) + return err == nil + }, timeout, interval).Should(BeTrue()) + }) + + BeforeEach(func() { + slice = &v1alpha1.SliceConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: topologySliceName, + Namespace: topoSliceNamespace, + }, + Spec: 
v1alpha1.SliceConfigSpec{ + Clusters: []string{"topo-worker-1", "topo-worker-2"}, + MaxClusters: 10, + SliceSubnet: "10.2.0.0/16", + SliceGatewayProvider: &v1alpha1.WorkerSliceGatewayProvider{ + SliceGatewayType: "OpenVPN", + SliceCaType: "Local", + }, + SliceIpamType: "Local", + SliceType: "Application", + QosProfileDetails: &v1alpha1.QOSProfile{ + BandwidthCeilingKbps: 5120, + DscpClass: "AF11", + }, + }, + } + }) + + AfterEach(func() { + ls := map[string]string{ + "original-slice-name": topologySliceName, + } + listOpts := []client.ListOption{ + client.MatchingLabels(ls), + } + + getKey := types.NamespacedName{ + Name: topologySliceName, + Namespace: topoSliceNamespace, + } + + existingSlice := &v1alpha1.SliceConfig{} + err := k8sClient.Get(ctx, getKey, existingSlice) + if err != nil { + Expect(errors.IsNotFound(err)).To(BeTrue()) + return + } + + Expect(k8sClient.Delete(ctx, existingSlice)).Should(Succeed()) + + Eventually(func() bool { + workerSliceConfigList := workerv1alpha1.WorkerSliceConfigList{} + err := k8sClient.List(ctx, &workerSliceConfigList, listOpts...) + if err != nil { + return false + } + if len(workerSliceConfigList.Items) == 0 { + return true + } + for i := range workerSliceConfigList.Items { + if delErr := k8sClient.Delete(ctx, &workerSliceConfigList.Items[i]); delErr != nil && !errors.IsNotFound(delErr) { + GinkgoWriter.Printf("failed deleting WorkerSliceConfig %s/%s: %v\n", workerSliceConfigList.Items[i].Namespace, workerSliceConfigList.Items[i].Name, delErr) + } + } + return false + }, timeout, interval).Should(BeTrue()) + + Eventually(func() bool { + workerSliceGatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &workerSliceGatewayList, listOpts...) + if err != nil { + return false + } + if len(workerSliceGatewayList.Items) == 0 { + return true + } + for i := range workerSliceGatewayList.Items { + if delErr := k8sClient.Delete(ctx, &workerSliceGatewayList.Items[i]); delErr != nil && !errors.IsNotFound(delErr) { + GinkgoWriter.Printf("failed deleting WorkerSliceGateway %s/%s: %v\n", workerSliceGatewayList.Items[i].Namespace, workerSliceGatewayList.Items[i].Name, delErr) + } + } + return false + }, timeout, interval).Should(BeTrue()) + + Eventually(func() bool { + fresh := &v1alpha1.SliceConfig{} + err := k8sClient.Get(ctx, getKey, fresh) + return errors.IsNotFound(err) + }, timeout, interval).Should(BeTrue()) + }) + + It("Should create 1 gateway pair for full-mesh topology with 2 clusters", func() { + By("Creating a SliceConfig with full-mesh topology and 2 clusters") + slice.Spec.TopologyConfig = &v1alpha1.TopologyConfig{ + TopologyType: v1alpha1.TopologyFullMesh, + } + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying 2 gateway objects are created for 1 bidirectional pair (n*(n-1)/2 = 1 pair for n=2, but 2 gateway objects)") + sliceKey := types.NamespacedName{ + Name: topologySliceName, + Namespace: topoSliceNamespace, + } + Eventually(func() bool { + createdSlice := &v1alpha1.SliceConfig{} + err := k8sClient.Get(ctx, sliceKey, createdSlice) + if err != nil { + GinkgoWriter.Println("Error getting slice:", err) + return false + } + + // Check that gateway pairs were created (2 gateways for 1 bidirectional pair) + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err = k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Gateway count:", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 2 + }, timeout, 
interval).Should(BeTrue()) + }) + + It("Should create 3 gateway pairs for full-mesh topology with 3 clusters", func() { + By("Creating SliceConfig with full-mesh topology and 3 clusters") + slice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2", "topo-worker-3"} + slice.Spec.TopologyConfig = &v1alpha1.TopologyConfig{ + TopologyType: v1alpha1.TopologyFullMesh, + } + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying 6 gateway objects are created for 3 pairs (n*(n-1)/2 = 3 pairs for n=3, but 6 gateway objects)") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Gateway count for 3 clusters:", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 6 + }, timeout, interval).Should(BeTrue()) + }) + + It("Should default to full-mesh when topology config is nil", func() { + By("Creating SliceConfig without topology config") + slice.Spec.TopologyConfig = nil // No topology specified + slice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2"} + + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying it defaults to full-mesh (2 gateway objects for 1 bidirectional pair with 2 clusters)") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Gateway count for nil topology:", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 2 + }, timeout, interval).Should(BeTrue()) + }) + + It("Should exclude forbidden edges from restricted topology", func() { + By("Creating SliceConfig with restricted topology and forbidden edges") + slice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2", "topo-worker-3"} + slice.Spec.TopologyConfig = &v1alpha1.TopologyConfig{ + TopologyType: v1alpha1.TopologyRestricted, + ForbiddenEdges: []v1alpha1.ForbiddenEdge{ + { + SourceCluster: "topo-worker-1", + TargetClusters: []string{"topo-worker-3"}, // block 1->3 + }, + }, + } + + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying 4 gateway objects are created (forbidding 1↔3 removes both directions due to bidirectional tunnels, leaving 2 pairs * 2 objects = 4 gateways)") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Gateway count (restricted):", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 4 + }, timeout, interval).Should(BeTrue()) + }) + + It("Should create gateway pairs from custom connectivity matrix", func() { + By("Creating SliceConfig with custom topology matrix") + slice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2", "topo-worker-3"} + slice.Spec.TopologyConfig = &v1alpha1.TopologyConfig{ + TopologyType: v1alpha1.TopologyCustom, + ConnectivityMatrix: []v1alpha1.ConnectivityEntry{ + { + SourceCluster: "topo-worker-1", + TargetClusters: []string{"topo-worker-2"}, // only 1->2 + }, + }, + } + + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying 2 gateway objects are created from custom matrix (1 pair specified becomes bidirectional → 2 objects)") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, 
&gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Gateway count (custom matrix):", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 2 + }, timeout, interval).Should(BeTrue()) + }) + + It("Should update gateways when topology config changes", func() { + By("Creating SliceConfig with 2 clusters initially (default full-mesh)") + slice.Spec.TopologyConfig = nil + Expect(k8sClient.Create(ctx, slice)).Should(Succeed()) + + By("Verifying 2 gateway objects for 2 clusters (full-mesh defaults to 1 pair → 2 objects)") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Initial gateway count:", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 2 + }, timeout, interval).Should(BeTrue()) + + By("Updating to 3 clusters with full-mesh") + sliceKey := types.NamespacedName{ + Name: topologySliceName, + Namespace: topoSliceNamespace, + } + updatedSlice := &v1alpha1.SliceConfig{} + Eventually(func() bool { + err := k8sClient.Get(ctx, sliceKey, updatedSlice) + if err != nil { + return false + } + updatedSlice.Spec.Clusters = []string{"topo-worker-1", "topo-worker-2", "topo-worker-3"} + updatedSlice.Spec.TopologyConfig = &v1alpha1.TopologyConfig{ + TopologyType: v1alpha1.TopologyFullMesh, + } + err = k8sClient.Update(ctx, updatedSlice) + return err == nil + }, timeout, interval).Should(BeTrue()) + + By("Verifying 6 gateway objects are now created for 3 bidirectional pairs") + Eventually(func() bool { + gatewayList := workerv1alpha1.WorkerSliceGatewayList{} + err := k8sClient.List(ctx, &gatewayList, + client.MatchingLabels{"original-slice-name": slice.Name}) + + GinkgoWriter.Println("Updated gateway count:", len(gatewayList.Items)) + return err == nil && len(gatewayList.Items) == 6 + }, timeout, interval).Should(BeTrue()) + }) +}) diff --git a/docs/iperf/README.md b/docs/iperf/README.md new file mode 100644 index 000000000..ba168e5e2 --- /dev/null +++ b/docs/iperf/README.md @@ -0,0 +1,26 @@ +# iPerf inter-cluster test for KubeSlice + +This folder contains manifests and a small helper script to run iPerf tests between two worker clusters in a KubeSlice-enabled environment. + +What is included +- `iperf-sleep.yaml` - client deployment (sleep + netshoot sidecar) +- `iperf-server.yaml` - server deployment and `ServiceExport` for the iperf server +- `slice-templates/` - three SliceConfig templates: `fullmesh`, `restricted`, `custom` +- `../../scripts/iperf-run.sh` - helper script to deploy and run tests (see scripts path) + +Scenarios to test +- Full-mesh (default) — verify baseline connectivity and bandwidth +- Restricted — remove a forbidden edge and verify iperf is blocked +- Custom — only specific source→target connectivity enabled + +Quick notes +- You need two or more registered worker clusters (contexts configured with `kubectx` or `kubectl --context`). +- Create the `iperf` namespace in each participating cluster before applying deployments: + +```bash +kubectl --context <cluster-context> create ns iperf +``` + +- The script does not modify controller code — it deploys SliceConfig/sample resources and the iperf workloads and runs `iperf` from the client pod. + +See `../../scripts/iperf-run.sh --help` for usage.
diff --git a/docs/iperf/iperf-server.yaml b/docs/iperf/iperf-server.yaml new file mode 100644 index 000000000..13ef9d693 --- /dev/null +++ b/docs/iperf/iperf-server.yaml @@ -0,0 +1,53 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: iperf-server + namespace: iperf + labels: + app: iperf-server +spec: + replicas: 1 + selector: + matchLabels: + app: iperf-server + template: + metadata: + labels: + app: iperf-server + spec: + containers: + - name: iperf + image: mlabbe/iperf + imagePullPolicy: IfNotPresent + args: + - '-s' + - '-p' + - '5201' + ports: + - containerPort: 5201 + name: server + - name: sidecar + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true +--- +apiVersion: networking.kubeslice.io/v1beta1 +kind: ServiceExport +metadata: + name: iperf-server + namespace: iperf +spec: + slice: <slice-name> + selector: + matchLabels: + app: iperf-server + ingressEnabled: false + ports: + - name: tcp + containerPort: 5201 + protocol: TCP diff --git a/docs/iperf/iperf-sleep.yaml b/docs/iperf/iperf-sleep.yaml new file mode 100644 index 000000000..002a21396 --- /dev/null +++ b/docs/iperf/iperf-sleep.yaml @@ -0,0 +1,31 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: iperf-sleep + namespace: iperf + labels: + app: iperf-sleep +spec: + replicas: 1 + selector: + matchLabels: + app: iperf-sleep + template: + metadata: + labels: + app: iperf-sleep + spec: + containers: + - name: iperf + image: mlabbe/iperf + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + - name: sidecar + image: nicolaka/netshoot + imagePullPolicy: IfNotPresent + command: ["/bin/sleep", "3650d"] + securityContext: + capabilities: + add: ["NET_ADMIN"] + allowPrivilegeEscalation: true + privileged: true diff --git a/docs/iperf/slice-templates/custom.yaml b/docs/iperf/slice-templates/custom.yaml new file mode 100644 index 000000000..78b953639 --- /dev/null +++ b/docs/iperf/slice-templates/custom.yaml @@ -0,0 +1,39 @@ +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: iperf-custom +spec: + sliceSubnet: 10.34.0.0/16 + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + clusters: + - <worker-1> + - <worker-2> + - <worker-3> + topologyConfig: + topologyType: custom + connectivityMatrix: + - sourceCluster: <worker-1> + targetClusters: + - <worker-2> + - sourceCluster: <worker-2> + targetClusters: + - <worker-3> + qosProfileDetails: + queueType: HTB + priority: 1 + tcType: BANDWIDTH_CONTROL + bandwidthCeilingKbps: 5120 + bandwidthGuaranteedKbps: 2560 + dscpClass: AF11 + namespaceIsolationProfile: + applicationNamespaces: + - namespace: iperf + clusters: + - <worker-1> + - <worker-2> + - <worker-3> + isolationEnabled: false diff --git a/docs/iperf/slice-templates/fullmesh.yaml b/docs/iperf/slice-templates/fullmesh.yaml new file mode 100644 index 000000000..54fff4095 --- /dev/null +++ b/docs/iperf/slice-templates/fullmesh.yaml @@ -0,0 +1,32 @@ +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: iperf-fullmesh +spec: + sliceSubnet: 10.32.0.0/16 + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + clusters: + - <worker-1> + - <worker-2> + qosProfileDetails: + queueType: HTB + priority: 1 + tcType: BANDWIDTH_CONTROL + bandwidthCeilingKbps: 5120 + bandwidthGuaranteedKbps: 2560 + dscpClass: AF11 + namespaceIsolationProfile: + applicationNamespaces: + - namespace: iperf + clusters: + - <worker-1> + - <worker-2> + isolationEnabled: false + allowedNamespaces: + - namespace: kube-system + clusters: + - <worker-1> diff --git a/docs/iperf/slice-templates/restricted.yaml b/docs/iperf/slice-templates/restricted.yaml new file mode 100644 index 000000000..97e4dfa36 --- /dev/null +++ b/docs/iperf/slice-templates/restricted.yaml @@ -0,0 +1,36 @@ +apiVersion: controller.kubeslice.io/v1alpha1 +kind: SliceConfig +metadata: + name: iperf-restricted +spec: + sliceSubnet: 10.33.0.0/16 + sliceType: Application + sliceGatewayProvider: + sliceGatewayType: OpenVPN + sliceCaType: Local + sliceIpamType: Local + clusters: + - <worker-1> + - <worker-2> + - <worker-3> + topologyConfig: + topologyType: restricted + forbiddenEdges: + - sourceCluster: <worker-1> + targetClusters: + - <worker-3> + qosProfileDetails: + queueType: HTB + priority: 1 + tcType: BANDWIDTH_CONTROL + bandwidthCeilingKbps: 5120 + bandwidthGuaranteedKbps: 2560 + dscpClass: AF11 + namespaceIsolationProfile: + applicationNamespaces: + - namespace: iperf + clusters: + - <worker-1> + - <worker-2> + - <worker-3> + isolationEnabled: false diff --git a/scripts/iperf-run.sh b/scripts/iperf-run.sh new file mode 100755 index 000000000..b0bac9210 --- /dev/null +++ b/scripts/iperf-run.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash +# Helper script to deploy iperf server/client and run tests across two clusters +# Usage: +# scripts/iperf-run.sh --server-context <ctx> --sleep-context <ctx> --project-namespace <ns> --slice-file <file> --slice-name <name> + +set -euo pipefail + +usage(){ + cat <<EOF +Usage: scripts/iperf-run.sh --server-context <ctx> --sleep-context <ctx> --project-namespace <ns> --slice-file <file> --slice-name <name> +EOF +} + +SERVER_CTX="" SLEEP_CTX="" PROJECT_NS="" SLICE_FILE="" SLICE_NAME="" +while [[ $# -gt 0 ]]; do + case "$1" in + --server-context) SERVER_CTX="$2"; shift 2;; + --sleep-context) SLEEP_CTX="$2"; shift 2;; + --project-namespace) PROJECT_NS="$2"; shift 2;; + --slice-file) SLICE_FILE="$2"; shift 2;; + --slice-name) SLICE_NAME="$2"; shift 2;; + -h|--help) usage; exit 0;; + *) usage; exit 1;; + esac +done + +# 1) Apply the SliceConfig in the project namespace (controller cluster context assumed current) +kubectl apply -f "$SLICE_FILE" -n "$PROJECT_NS" + +# 2) Deploy iperf-server in server cluster, substituting the slice name into the ServiceExport +kubectl --context "$SERVER_CTX" create ns iperf --dry-run=client -o yaml | kubectl --context "$SERVER_CTX" apply -f - +sed "s/<slice-name>/$SLICE_NAME/g" docs/iperf/iperf-server.yaml | kubectl --context "$SERVER_CTX" apply -f - -n iperf || true + +# 3) Deploy iperf-sleep in sleep cluster +kubectl --context "$SLEEP_CTX" create ns iperf --dry-run=client -o yaml | kubectl --context "$SLEEP_CTX" apply -f - +kubectl --context "$SLEEP_CTX" apply -f docs/iperf/iperf-sleep.yaml -n iperf + +# 4) Wait for pods to become ready (server) +echo "Waiting for iperf-server pod..." +kubectl --context "$SERVER_CTX" -n iperf wait --for=condition=ready pod -l app=iperf-server --timeout=120s + +echo "Waiting for iperf-sleep pod..."
+kubectl --context "$SLEEP_CTX" -n iperf wait --for=condition=ready pod -l app=iperf-sleep --timeout=120s + +# 5) Derive DNS name (short name should be available) +SHORT_DNS="iperf-server.iperf.svc.slice.local" + +echo "Using short DNS: $SHORT_DNS" + +# 6) Exec into sleep pod and run iperf +SLEEP_POD=$(kubectl --context "$SLEEP_CTX" -n iperf get pods -l app=iperf-sleep -o jsonpath='{.items[0].metadata.name}') +echo "Using sleep pod: $SLEEP_POD" + +OUTFILE="iperf-${SLICE_NAME}-$(date +%Y%m%dT%H%M%S).log" +kubectl --context "$SLEEP_CTX" -n iperf exec -c iperf "$SLEEP_POD" -- iperf -c "$SHORT_DNS" -p 5201 -i 1 -t 10 > "$OUTFILE" 2>&1 || true + +echo "iperf output saved to $OUTFILE" +echo +echo "--- output ---" +cat "$OUTFILE" + +echo "Done" diff --git a/service/access_control_service_test.go b/service/access_control_service_test.go index 996350357..801c93f7e 100644 --- a/service/access_control_service_test.go +++ b/service/access_control_service_test.go @@ -1270,19 +1270,19 @@ func ACS_ReconcileWorkerClusterServiceAccountAndRoleBindings(t *testing.T) { mMock.AssertExpectations(t) } -func prepareACSTestContext(ctx context.Context, client util.Client, +func prepareACSTestContext(ctx context.Context, c client.Client, scheme *runtime.Scheme) context.Context { if scheme == nil { scheme = runtime.NewScheme() } controllerv1alpha1.AddToScheme(scheme) rbacv1.AddToScheme(scheme) - eventRecorder := events.NewEventRecorder(client, scheme, ossEvents.EventsMap, events.EventRecorderOptions{ + eventRecorder := events.NewEventRecorder(c, scheme, ossEvents.EventsMap, events.EventRecorderOptions{ Version: "v1alpha1", Cluster: util.ClusterController, Component: util.ComponentController, Slice: util.NotApplicable, }) - preparedCtx := util.PrepareKubeSliceControllersRequestContext(ctx, client, scheme, "ProjectTestController", &eventRecorder) + preparedCtx := util.PrepareKubeSliceControllersRequestContext(ctx, c, scheme, "ProjectTestController", &eventRecorder) return preparedCtx } diff --git a/service/mocks/IWorkerSliceGatewayService.go b/service/mocks/IWorkerSliceGatewayService.go index 602ed66b1..507f5b245 100644 --- a/service/mocks/IWorkerSliceGatewayService.go +++ b/service/mocks/IWorkerSliceGatewayService.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.28.1. DO NOT EDIT. +// Code generated by mockery v2.53.5. DO NOT EDIT. 
package mocks @@ -24,6 +24,10 @@ type IWorkerSliceGatewayService struct { func (_m *IWorkerSliceGatewayService) BuildNetworkAddresses(sliceSubnet string, sourceClusterName string, destinationClusterName string, clusterMap map[string]int, clusterCidr string) util.WorkerSliceGatewayNetworkAddresses { ret := _m.Called(sliceSubnet, sourceClusterName, destinationClusterName, clusterMap, clusterCidr) + if len(ret) == 0 { + panic("no return value specified for BuildNetworkAddresses") + } + var r0 util.WorkerSliceGatewayNetworkAddresses if rf, ok := ret.Get(0).(func(string, string, string, map[string]int, string) util.WorkerSliceGatewayNetworkAddresses); ok { r0 = rf(sliceSubnet, sourceClusterName, destinationClusterName, clusterMap, clusterCidr) @@ -34,23 +38,27 @@ func (_m *IWorkerSliceGatewayService) BuildNetworkAddresses(sliceSubnet string, return r0 } -// CreateMinimumWorkerSliceGateways provides a mock function with given fields: ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap -func (_m *IWorkerSliceGatewayService) CreateMinimumWorkerSliceGateways(ctx context.Context, sliceName string, clusterNames []string, namespace string, label map[string]string, clusterMap map[string]int, sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*v1alpha1.SliceGatewayServiceType) (reconcile.Result, error) { - ret := _m.Called(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap) +// CreateMinimumWorkerSliceGateways provides a mock function with given fields: ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap, gatewayPairs +func (_m *IWorkerSliceGatewayService) CreateMinimumWorkerSliceGateways(ctx context.Context, sliceName string, clusterNames []string, namespace string, label map[string]string, clusterMap map[string]int, sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*v1alpha1.SliceGatewayServiceType, gatewayPairs []util.GatewayPair) (reconcile.Result, error) { + ret := _m.Called(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap, gatewayPairs) + + if len(ret) == 0 { + panic("no return value specified for CreateMinimumWorkerSliceGateways") + } var r0 reconcile.Result var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, []string, string, map[string]string, map[string]int, string, string, map[string]*v1alpha1.SliceGatewayServiceType) (reconcile.Result, error)); ok { - return rf(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap) + if rf, ok := ret.Get(0).(func(context.Context, string, []string, string, map[string]string, map[string]int, string, string, map[string]*v1alpha1.SliceGatewayServiceType, []util.GatewayPair) (reconcile.Result, error)); ok { + return rf(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap, gatewayPairs) } - if rf, ok := ret.Get(0).(func(context.Context, string, []string, string, map[string]string, map[string]int, string, string, map[string]*v1alpha1.SliceGatewayServiceType) reconcile.Result); ok { - r0 = rf(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap) + if rf, ok := ret.Get(0).(func(context.Context, string, []string, string, map[string]string, map[string]int, string, string, map[string]*v1alpha1.SliceGatewayServiceType, []util.GatewayPair) reconcile.Result); ok 
{ + r0 = rf(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap, gatewayPairs) } else { r0 = ret.Get(0).(reconcile.Result) } - if rf, ok := ret.Get(1).(func(context.Context, string, []string, string, map[string]string, map[string]int, string, string, map[string]*v1alpha1.SliceGatewayServiceType) error); ok { - r1 = rf(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap) + if rf, ok := ret.Get(1).(func(context.Context, string, []string, string, map[string]string, map[string]int, string, string, map[string]*v1alpha1.SliceGatewayServiceType, []util.GatewayPair) error); ok { + r1 = rf(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap, gatewayPairs) } else { r1 = ret.Error(1) } @@ -62,6 +70,10 @@ func (_m *IWorkerSliceGatewayService) CreateMinimumWorkerSliceGateways(ctx conte func (_m *IWorkerSliceGatewayService) DeleteWorkerSliceGatewaysByLabel(ctx context.Context, label map[string]string, namespace string) error { ret := _m.Called(ctx, label, namespace) + if len(ret) == 0 { + panic("no return value specified for DeleteWorkerSliceGatewaysByLabel") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, map[string]string, string) error); ok { r0 = rf(ctx, label, namespace) @@ -76,6 +88,10 @@ func (_m *IWorkerSliceGatewayService) DeleteWorkerSliceGatewaysByLabel(ctx conte func (_m *IWorkerSliceGatewayService) GenerateCerts(ctx context.Context, sliceName string, namespace string, gatewayProtocol string, serverGateway *workerv1alpha1.WorkerSliceGateway, clientGateway *workerv1alpha1.WorkerSliceGateway, gatewayAddresses util.WorkerSliceGatewayNetworkAddresses) error { ret := _m.Called(ctx, sliceName, namespace, gatewayProtocol, serverGateway, clientGateway, gatewayAddresses) + if len(ret) == 0 { + panic("no return value specified for GenerateCerts") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, string, string, *workerv1alpha1.WorkerSliceGateway, *workerv1alpha1.WorkerSliceGateway, util.WorkerSliceGatewayNetworkAddresses) error); ok { r0 = rf(ctx, sliceName, namespace, gatewayProtocol, serverGateway, clientGateway, gatewayAddresses) @@ -90,6 +106,10 @@ func (_m *IWorkerSliceGatewayService) GenerateCerts(ctx context.Context, sliceNa func (_m *IWorkerSliceGatewayService) ListWorkerSliceGateways(ctx context.Context, ownerLabel map[string]string, namespace string) ([]workerv1alpha1.WorkerSliceGateway, error) { ret := _m.Called(ctx, ownerLabel, namespace) + if len(ret) == 0 { + panic("no return value specified for ListWorkerSliceGateways") + } + var r0 []workerv1alpha1.WorkerSliceGateway var r1 error if rf, ok := ret.Get(0).(func(context.Context, map[string]string, string) ([]workerv1alpha1.WorkerSliceGateway, error)); ok { @@ -116,6 +136,10 @@ func (_m *IWorkerSliceGatewayService) ListWorkerSliceGateways(ctx context.Contex func (_m *IWorkerSliceGatewayService) NodeIpReconciliationOfWorkerSliceGateways(ctx context.Context, cluster *v1alpha1.Cluster, namespace string) error { ret := _m.Called(ctx, cluster, namespace) + if len(ret) == 0 { + panic("no return value specified for NodeIpReconciliationOfWorkerSliceGateways") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *v1alpha1.Cluster, string) error); ok { r0 = rf(ctx, cluster, namespace) @@ -130,6 +154,10 @@ func (_m *IWorkerSliceGatewayService) NodeIpReconciliationOfWorkerSliceGateways( func (_m *IWorkerSliceGatewayService) 
ReconcileWorkerSliceGateways(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { ret := _m.Called(ctx, req) + if len(ret) == 0 { + panic("no return value specified for ReconcileWorkerSliceGateways") + } + var r0 reconcile.Result var r1 error if rf, ok := ret.Get(0).(func(context.Context, reconcile.Request) (reconcile.Result, error)); ok { @@ -150,13 +178,12 @@ func (_m *IWorkerSliceGatewayService) ReconcileWorkerSliceGateways(ctx context.C return r0, r1 } -type mockConstructorTestingTNewIWorkerSliceGatewayService interface { +// NewIWorkerSliceGatewayService creates a new instance of IWorkerSliceGatewayService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIWorkerSliceGatewayService(t interface { mock.TestingT Cleanup(func()) -} - -// NewIWorkerSliceGatewayService creates a new instance of IWorkerSliceGatewayService. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewIWorkerSliceGatewayService(t mockConstructorTestingTNewIWorkerSliceGatewayService) *IWorkerSliceGatewayService { +}) *IWorkerSliceGatewayService { mock := &IWorkerSliceGatewayService{} mock.Mock.Test(t) diff --git a/service/namespace_service_test.go b/service/namespace_service_test.go index 42a15a513..7368d06c0 100644 --- a/service/namespace_service_test.go +++ b/service/namespace_service_test.go @@ -177,13 +177,13 @@ func TestDeleteNamespace_DoesNothingIfNamespaceDoNotExist(t *testing.T) { mMock.AssertExpectations(t) } -func prepareNamespaceTestContext(ctx context.Context, client util.Client, scheme *runtime.Scheme) context.Context { - eventRecorder := events.NewEventRecorder(client, scheme, ossEvents.EventsMap, events.EventRecorderOptions{ +func prepareNamespaceTestContext(ctx context.Context, c client.Client, scheme *runtime.Scheme) context.Context { + eventRecorder := events.NewEventRecorder(c, scheme, ossEvents.EventsMap, events.EventRecorderOptions{ Version: "v1alpha1", Cluster: util.ClusterController, Component: util.ComponentController, Slice: util.NotApplicable, }) - preparedCtx := util.PrepareKubeSliceControllersRequestContext(ctx, client, scheme, "NamespaceTestController", &eventRecorder) + preparedCtx := util.PrepareKubeSliceControllersRequestContext(ctx, c, scheme, "NamespaceTestController", &eventRecorder) return preparedCtx } diff --git a/service/project_service_test.go b/service/project_service_test.go index 21398c6cb..15322c7d7 100644 --- a/service/project_service_test.go +++ b/service/project_service_test.go @@ -38,6 +38,7 @@ import ( k8sError "k8s.io/apimachinery/pkg/api/errors" k8sapimachinery "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" @@ -276,14 +277,14 @@ func setupProjectTest(name string, namespace string) (*mocks.INamespaceService, return nsServiceMock, acsServicemOCK, projectService, requestObj, clientMock, project, ctx, clusterServiceMock, sliceConfigServiceMock, serviceExportConfigServiceMock, sliceQoSConfigServiceMock, mMock } -func prepareProjectTestContext(ctx context.Context, client util.Client, +func prepareProjectTestContext(ctx context.Context, c client.Client, scheme *runtime.Scheme) context.Context { - eventRecorder := events.NewEventRecorder(client, scheme, ossEvents.EventsMap, 
events.EventRecorderOptions{ + eventRecorder := events.NewEventRecorder(c, scheme, ossEvents.EventsMap, events.EventRecorderOptions{ Version: "v1alpha1", Cluster: util.ClusterController, Component: util.ComponentController, Slice: util.NotApplicable, }) - preparedCtx := util.PrepareKubeSliceControllersRequestContext(ctx, client, scheme, "ProjectTestController", &eventRecorder) + preparedCtx := util.PrepareKubeSliceControllersRequestContext(ctx, c, scheme, "ProjectTestController", &eventRecorder) return preparedCtx } diff --git a/service/slice_config_service.go b/service/slice_config_service.go index 539a82efa..3a6622d78 100644 --- a/service/slice_config_service.go +++ b/service/slice_config_service.go @@ -203,26 +203,34 @@ func (s *SliceConfigService) ReconcileSliceConfig(ctx context.Context, req ctrl. return ctrl.Result{}, err } - // Step 5: Create gateways with minimum specification - _, err = s.sgs.CreateMinimumWorkerSliceGateways(ctx, sliceConfig.Name, sliceConfig.Spec.Clusters, req.Namespace, ownershipLabel, clusterMap, sliceConfig.Spec.SliceSubnet, clusterCidr, sliceGwSvcTypeMap) + // Step 5: Resolve topology to get gateway pairs + gatewayPairs, err := s.resolveTopologyPairs(sliceConfig) + if err != nil { + logger.Errorf("Failed to resolve topology for slice %s: %v", sliceConfig.Name, err) + return ctrl.Result{}, err + } + logger.Infof("Resolved %d gateway pairs for slice %s", len(gatewayPairs), sliceConfig.Name) + + // Step 6: Create gateways with minimum specification + _, err = s.sgs.CreateMinimumWorkerSliceGateways(ctx, sliceConfig.Name, sliceConfig.Spec.Clusters, req.Namespace, ownershipLabel, clusterMap, sliceConfig.Spec.SliceSubnet, clusterCidr, sliceGwSvcTypeMap, gatewayPairs) if err != nil { return ctrl.Result{}, err } logger.Infof("sliceConfig %v reconciled", req.NamespacedName) - // Step 6: Create VPNKeyRotation CR + // Step 7: Create VPNKeyRotation CR // TODO(rahul): handle change in rotation interval if err := s.vpn.CreateMinimalVpnKeyRotationConfig(ctx, sliceConfig.Name, sliceConfig.Namespace, sliceConfig.Spec.RotationInterval); err != nil { // register an event util.RecordEvent(ctx, eventRecorder, sliceConfig, nil, events.EventVPNKeyRotationConfigCreationFailed) return ctrl.Result{}, err } - // Step 7: update cluster info into vpnkeyrotation Cconfig + // Step 8: update cluster info into vpnkeyrotation Config if _, err := s.vpn.ReconcileClusters(ctx, sliceConfig.Name, sliceConfig.Namespace, sliceConfig.Spec.Clusters); err != nil { return ctrl.Result{}, err } - // Step 8: Create ServiceImport Objects + // Step 9: Create ServiceImport Objects serviceExports := &v1alpha1.ServiceExportConfigList{} _, err = s.getServiceExportBySliceName(ctx, req.Namespace, sliceConfig.Name, serviceExports) if err != nil { @@ -431,3 +439,124 @@ func (s *SliceConfigService) handleDefaultSliceConfigAppns(ctx context.Context, } return ctrl.Result{}, nil } + +func (s *SliceConfigService) resolveTopologyPairs(sliceConfig *v1alpha1.SliceConfig) ([]util.GatewayPair, error) { + clusters := sliceConfig.Spec.Clusters + + // Default to full-mesh if no topology config + if sliceConfig.Spec.TopologyConfig == nil { + return s.resolveFullMeshTopology(clusters), nil + } + + switch sliceConfig.Spec.TopologyConfig.TopologyType { + case v1alpha1.TopologyFullMesh, "": + return s.resolveFullMeshTopology(clusters), nil + case v1alpha1.TopologyCustom: + return s.resolveCustomTopology(clusters, sliceConfig.Spec.TopologyConfig.ConnectivityMatrix) + case v1alpha1.TopologyRestricted: + return 
s.resolveRestrictedTopology(clusters, sliceConfig.Spec.TopologyConfig.ForbiddenEdges) + default: + return nil, fmt.Errorf("unknown topology type: %s", sliceConfig.Spec.TopologyConfig.TopologyType) + } +} + +func (s *SliceConfigService) resolveFullMeshTopology(clusters []string) []util.GatewayPair { + if len(clusters) < 2 { + return []util.GatewayPair{} + } + + pairs := make([]util.GatewayPair, 0, len(clusters)*(len(clusters)-1)) + for i := 0; i < len(clusters); i++ { + for j := 0; j < len(clusters); j++ { + if i != j { + pairs = append(pairs, util.GatewayPair{ + Source: clusters[i], + Target: clusters[j], + }) + } + } + } + return pairs +} + +func (s *SliceConfigService) resolveCustomTopology(clusters []string, matrix []v1alpha1.ConnectivityEntry) ([]util.GatewayPair, error) { + if len(matrix) == 0 { + return nil, fmt.Errorf("custom topology requires connectivity matrix") + } + + clusterSet := s.makeClusterSet(clusters) + pairs := make([]util.GatewayPair, 0) + + for _, entry := range matrix { + if !clusterSet[entry.SourceCluster] { + return nil, fmt.Errorf("connectivity entry references unknown source cluster: %s", entry.SourceCluster) + } + for _, target := range entry.TargetClusters { + if !clusterSet[target] { + return nil, fmt.Errorf("connectivity entry references unknown target cluster: %s", target) + } + pairs = append(pairs, util.GatewayPair{ + Source: entry.SourceCluster, + Target: target, + }) + } + } + + return pairs, nil +} + +// resolveRestrictedTopology creates full-mesh and removes forbidden edges +func (s *SliceConfigService) resolveRestrictedTopology(clusters []string, forbiddenEdges []v1alpha1.ForbiddenEdge) ([]util.GatewayPair, error) { + // Start with full mesh + allPairs := s.resolveFullMeshTopology(clusters) + + if len(forbiddenEdges) == 0 { + return allPairs, nil + } + + // Build forbidden set + forbidden := s.buildForbiddenSet(forbiddenEdges) + + // Filter out forbidden pairs + filtered := s.filterForbiddenPairs(allPairs, forbidden) + + return filtered, nil +} + +// buildForbiddenSet creates a map of forbidden edges +func (s *SliceConfigService) buildForbiddenSet(forbiddenEdges []v1alpha1.ForbiddenEdge) map[string]bool { + forbidden := make(map[string]bool) + for _, edge := range forbiddenEdges { + for _, target := range edge.TargetClusters { + key := edge.SourceCluster + "-" + target + forbidden[key] = true + } + } + return forbidden +} + +// filterForbiddenPairs removes pairs that are in the forbidden set +// Since gateway creation is bidirectional (creates both server and client), +// we must filter out BOTH directions if either is forbidden +func (s *SliceConfigService) filterForbiddenPairs(pairs []util.GatewayPair, forbidden map[string]bool) []util.GatewayPair { + filtered := make([]util.GatewayPair, 0, len(pairs)) + for _, p := range pairs { + forwardKey := p.Source + "-" + p.Target + reverseKey := p.Target + "-" + p.Source + + // Skip if EITHER direction is forbidden (because gateway is bidirectional) + if !forbidden[forwardKey] && !forbidden[reverseKey] { + filtered = append(filtered, p) + } + } + return filtered +} + +// makeClusterSet creates a set from cluster list +func (s *SliceConfigService) makeClusterSet(clusters []string) map[string]bool { + set := make(map[string]bool, len(clusters)) + for _, cluster := range clusters { + set[cluster] = true + } + return set +} diff --git a/service/slice_config_service_test.go b/service/slice_config_service_test.go index 1f317b5a1..dbf668b4b 100644 --- a/service/slice_config_service_test.go +++ 
b/service/slice_config_service_test.go @@ -737,3 +737,546 @@ func setupSliceConfigTest(name string, namespace string) (*mocks.IWorkerSliceGat ctx := util.PrepareKubeSliceControllersRequestContext(context.Background(), clientMock, scheme, "SliceConfigServiceTest", &eventRecorder) return workerSliceGatewayMock, workerSliceConfigMock, serviceExportConfigMock, workerServiceImportMock, workerSliceGatewayRecyclerMock, clientMock, sliceConfig, ctx, sliceConfigService, requestObj, mMock } + +func TestResolveTopologyPairs_DefaultFullMesh(t *testing.T) { + service := &SliceConfigService{} + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster1", "cluster2", "cluster3"}, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.NoError(t, err) + require.Len(t, pairs, 6) + + expectedPairs := []util.GatewayPair{ + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster1", Target: "cluster3"}, + {Source: "cluster2", Target: "cluster1"}, + {Source: "cluster2", Target: "cluster3"}, + {Source: "cluster3", Target: "cluster1"}, + {Source: "cluster3", Target: "cluster2"}, + } + require.ElementsMatch(t, expectedPairs, pairs) +} + +func TestResolveTopologyPairs_ExplicitFullMesh(t *testing.T) { + service := &SliceConfigService{} + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster1", "cluster2"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyFullMesh, + }, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.NoError(t, err) + require.Len(t, pairs, 2) + require.Equal(t, "cluster1", pairs[0].Source) + require.Equal(t, "cluster2", pairs[0].Target) + require.Equal(t, "cluster2", pairs[1].Source) + require.Equal(t, "cluster1", pairs[1].Target) +} + +func TestResolveTopologyPairs_CustomTopology(t *testing.T) { + service := &SliceConfigService{} + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster1", "cluster2", "cluster3"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyCustom, + ConnectivityMatrix: []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "cluster1", TargetClusters: []string{"cluster2"}}, + {SourceCluster: "cluster2", TargetClusters: []string{"cluster3"}}, + }, + }, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.NoError(t, err) + require.Len(t, pairs, 2) + + expectedPairs := []util.GatewayPair{ + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster2", Target: "cluster3"}, + } + require.ElementsMatch(t, expectedPairs, pairs) +} + +func TestResolveTopologyPairs_CustomTopologyEmptyMatrix(t *testing.T) { + service := &SliceConfigService{} + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster1", "cluster2"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyCustom, + ConnectivityMatrix: []controllerv1alpha1.ConnectivityEntry{}, + }, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.Error(t, err) + require.Contains(t, err.Error(), "custom topology requires connectivity matrix") + require.Nil(t, pairs) +} + +func TestResolveTopologyPairs_RestrictedTopology(t *testing.T) { + service := &SliceConfigService{} + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: 
controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster1", "cluster2", "cluster3"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyRestricted, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "cluster1", TargetClusters: []string{"cluster3"}}, + }, + }, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.NoError(t, err) + // Forbidding 1→3 also removes 3→1 (bidirectional gateway) + require.Len(t, pairs, 4) + + expectedPairs := []util.GatewayPair{ + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster2", Target: "cluster1"}, + {Source: "cluster2", Target: "cluster3"}, + {Source: "cluster3", Target: "cluster2"}, + } + require.ElementsMatch(t, expectedPairs, pairs) +} + +func TestResolveTopologyPairs_RestrictedTopologyNoForbidden(t *testing.T) { + service := &SliceConfigService{} + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster1", "cluster2"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyRestricted, + }, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.NoError(t, err) + require.Len(t, pairs, 2) + require.Equal(t, "cluster1", pairs[0].Source) + require.Equal(t, "cluster2", pairs[0].Target) + require.Equal(t, "cluster2", pairs[1].Source) + require.Equal(t, "cluster1", pairs[1].Target) +} + +func TestResolveTopologyPairs_UnknownType(t *testing.T) { + service := &SliceConfigService{} + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"cluster1", "cluster2"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: "unknown-type", + }, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.Error(t, err) + require.Contains(t, err.Error(), "unknown topology type: unknown-type") + require.Nil(t, pairs) +} + +func TestResolveFullMeshTopology(t *testing.T) { + service := &SliceConfigService{} + + t.Run("SingleCluster", func(t *testing.T) { + pairs := service.resolveFullMeshTopology([]string{"cluster1"}) + require.Empty(t, pairs) + }) + + t.Run("TwoClusters", func(t *testing.T) { + pairs := service.resolveFullMeshTopology([]string{"cluster1", "cluster2"}) + require.Len(t, pairs, 2) + require.Equal(t, "cluster1", pairs[0].Source) + require.Equal(t, "cluster2", pairs[0].Target) + require.Equal(t, "cluster2", pairs[1].Source) + require.Equal(t, "cluster1", pairs[1].Target) + }) + + t.Run("ThreeClusters", func(t *testing.T) { + pairs := service.resolveFullMeshTopology([]string{"cluster1", "cluster2", "cluster3"}) + require.Len(t, pairs, 6) + + expectedPairs := []util.GatewayPair{ + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster1", Target: "cluster3"}, + {Source: "cluster2", Target: "cluster1"}, + {Source: "cluster2", Target: "cluster3"}, + {Source: "cluster3", Target: "cluster1"}, + {Source: "cluster3", Target: "cluster2"}, + } + require.ElementsMatch(t, expectedPairs, pairs) + }) + + t.Run("FourClusters", func(t *testing.T) { + pairs := service.resolveFullMeshTopology([]string{"cluster1", "cluster2", "cluster3", "cluster4"}) + require.Len(t, pairs, 12) + }) +} + +func TestResolveCustomTopology(t *testing.T) { + service := &SliceConfigService{} + clusters := []string{"cluster1", "cluster2", "cluster3"} + + t.Run("ValidMatrix", func(t *testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: 
"cluster1", TargetClusters: []string{"cluster2", "cluster3"}}, + } + + pairs, err := service.resolveCustomTopology(clusters, matrix) + require.NoError(t, err) + require.Len(t, pairs, 2) + + expectedPairs := []util.GatewayPair{ + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster1", Target: "cluster3"}, + } + require.ElementsMatch(t, expectedPairs, pairs) + }) + + t.Run("EmptyMatrix", func(t *testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{} + + pairs, err := service.resolveCustomTopology(clusters, matrix) + require.Error(t, err) + require.Contains(t, err.Error(), "custom topology requires connectivity matrix") + require.Nil(t, pairs) + }) + + t.Run("InvalidSourceCluster", func(t *testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "invalid", TargetClusters: []string{"cluster2"}}, + } + + _, err := service.resolveCustomTopology(clusters, matrix) + require.Error(t, err) + require.Contains(t, err.Error(), "unknown source cluster: invalid") + }) + + t.Run("InvalidTargetCluster", func(t *testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "cluster1", TargetClusters: []string{"invalid"}}, + } + + _, err := service.resolveCustomTopology(clusters, matrix) + require.Error(t, err) + require.Contains(t, err.Error(), "unknown target cluster: invalid") + }) + + t.Run("MultipleSourceClusters", func(t *testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "cluster1", TargetClusters: []string{"cluster2"}}, + {SourceCluster: "cluster2", TargetClusters: []string{"cluster3"}}, + {SourceCluster: "cluster3", TargetClusters: []string{"cluster1"}}, + } + + pairs, err := service.resolveCustomTopology(clusters, matrix) + require.NoError(t, err) + require.Len(t, pairs, 3) + }) +} + +func TestResolveRestrictedTopology(t *testing.T) { + service := &SliceConfigService{} + clusters := []string{"cluster1", "cluster2", "cluster3"} + + t.Run("NoForbiddenEdges", func(t *testing.T) { + pairs, err := service.resolveRestrictedTopology(clusters, []controllerv1alpha1.ForbiddenEdge{}) + require.NoError(t, err) + require.Len(t, pairs, 6) + + expectedPairs := []util.GatewayPair{ + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster1", Target: "cluster3"}, + {Source: "cluster2", Target: "cluster1"}, + {Source: "cluster2", Target: "cluster3"}, + {Source: "cluster3", Target: "cluster1"}, + {Source: "cluster3", Target: "cluster2"}, + } + require.ElementsMatch(t, expectedPairs, pairs) + }) + + t.Run("WithForbiddenEdges", func(t *testing.T) { + forbiddenEdges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "cluster1", TargetClusters: []string{"cluster3"}}, + } + + pairs, err := service.resolveRestrictedTopology(clusters, forbiddenEdges) + require.NoError(t, err) + // Forbidding 1→3 also removes 3→1 (bidirectional gateway) + require.Len(t, pairs, 4) + + expectedPairs := []util.GatewayPair{ + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster2", Target: "cluster1"}, + {Source: "cluster2", Target: "cluster3"}, + {Source: "cluster3", Target: "cluster2"}, + } + require.ElementsMatch(t, expectedPairs, pairs) + }) +} + +func TestBuildForbiddenSet(t *testing.T) { + service := &SliceConfigService{} + + t.Run("SingleEdge", func(t *testing.T) { + forbiddenEdges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "cluster1", TargetClusters: []string{"cluster2"}}, + } + + forbidden := service.buildForbiddenSet(forbiddenEdges) + require.Len(t, forbidden, 1) + require.True(t, 
forbidden["cluster1-cluster2"]) + }) + + t.Run("MultipleEdges", func(t *testing.T) { + forbiddenEdges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "cluster1", TargetClusters: []string{"cluster2", "cluster3"}}, + {SourceCluster: "cluster2", TargetClusters: []string{"cluster3"}}, + } + + forbidden := service.buildForbiddenSet(forbiddenEdges) + require.Len(t, forbidden, 3) + require.True(t, forbidden["cluster1-cluster2"]) + require.True(t, forbidden["cluster1-cluster3"]) + require.True(t, forbidden["cluster2-cluster3"]) + }) + + t.Run("EmptyEdges", func(t *testing.T) { + forbidden := service.buildForbiddenSet([]controllerv1alpha1.ForbiddenEdge{}) + require.Empty(t, forbidden) + }) +} + +func TestFilterForbiddenPairs(t *testing.T) { + service := &SliceConfigService{} + + t.Run("FilterSomePairs", func(t *testing.T) { + pairs := []util.GatewayPair{ + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster1", Target: "cluster3"}, + {Source: "cluster2", Target: "cluster3"}, + } + + forbidden := map[string]bool{ + "cluster1-cluster3": true, + } + + filtered := service.filterForbiddenPairs(pairs, forbidden) + require.Len(t, filtered, 2) + + expectedPairs := []util.GatewayPair{ + {Source: "cluster1", Target: "cluster2"}, + {Source: "cluster2", Target: "cluster3"}, + } + require.ElementsMatch(t, expectedPairs, filtered) + }) + + t.Run("NoForbiddenPairs", func(t *testing.T) { + pairs := []util.GatewayPair{ + {Source: "cluster1", Target: "cluster2"}, + } + + forbidden := map[string]bool{} + + filtered := service.filterForbiddenPairs(pairs, forbidden) + require.Equal(t, pairs, filtered) + }) + + t.Run("AllPairsForbidden", func(t *testing.T) { + pairs := []util.GatewayPair{ + {Source: "cluster1", Target: "cluster2"}, + } + + forbidden := map[string]bool{ + "cluster1-cluster2": true, + } + + filtered := service.filterForbiddenPairs(pairs, forbidden) + require.Empty(t, filtered) + }) +} + +func TestMakeClusterSet(t *testing.T) { + service := &SliceConfigService{} + + t.Run("ValidClusters", func(t *testing.T) { + clusters := []string{"cluster1", "cluster2", "cluster3"} + set := service.makeClusterSet(clusters) + + require.Len(t, set, 3) + require.True(t, set["cluster1"]) + require.True(t, set["cluster2"]) + require.True(t, set["cluster3"]) + require.False(t, set["nonexistent"]) + }) + + t.Run("EmptyClusters", func(t *testing.T) { + set := service.makeClusterSet([]string{}) + require.Empty(t, set) + }) + + t.Run("SingleCluster", func(t *testing.T) { + set := service.makeClusterSet([]string{"only"}) + require.Len(t, set, 1) + require.True(t, set["only"]) + }) + + t.Run("DuplicateClusters", func(t *testing.T) { + clusters := []string{"c1", "c2", "c1"} + set := service.makeClusterSet(clusters) + require.Len(t, set, 2) + require.True(t, set["c1"]) + require.True(t, set["c2"]) + }) +} + +func TestResolveTopologyPairs_NilTopology(t *testing.T) { + service := &SliceConfigService{} + + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"c1", "c2"}, + TopologyConfig: nil, + }, + } + + pairs, err := service.resolveTopologyPairs(sliceConfig) + require.NoError(t, err) + require.Len(t, pairs, 2) +} + +func TestResolveTopologyPairs_EmptyTopologyType(t *testing.T) { + service := &SliceConfigService{} + + sliceConfig := &controllerv1alpha1.SliceConfig{ + Spec: controllerv1alpha1.SliceConfigSpec{ + Clusters: []string{"c1", "c2"}, + TopologyConfig: &controllerv1alpha1.TopologyConfig{ + TopologyType: "", + }, + }, + } + + pairs, err := 
service.resolveTopologyPairs(sliceConfig) + require.NoError(t, err) + require.Len(t, pairs, 2) +} + +func TestResolveFullMeshTopology_ZeroClusters(t *testing.T) { + service := &SliceConfigService{} + pairs := service.resolveFullMeshTopology([]string{}) + require.Empty(t, pairs) +} + +func TestResolveFullMeshTopology_FiveClusters(t *testing.T) { + service := &SliceConfigService{} + clusters := []string{"c1", "c2", "c3", "c4", "c5"} + pairs := service.resolveFullMeshTopology(clusters) + require.Len(t, pairs, 20) +} + +func TestResolveCustomTopology_MultipleTargets(t *testing.T) { + service := &SliceConfigService{} + + clusters := []string{"c1", "c2", "c3", "c4"} + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "c3", "c4"}}, + } + + pairs, err := service.resolveCustomTopology(clusters, matrix) + require.NoError(t, err) + require.Len(t, pairs, 3) +} + +func TestResolveCustomTopology_AllPossiblePairs(t *testing.T) { + service := &SliceConfigService{} + + clusters := []string{"c1", "c2"} + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "c1", TargetClusters: []string{"c2"}}, + {SourceCluster: "c2", TargetClusters: []string{"c1"}}, + } + + pairs, err := service.resolveCustomTopology(clusters, matrix) + require.NoError(t, err) + require.Len(t, pairs, 2) +} + +func TestResolveRestrictedTopology_MultipleForbiddenEdges(t *testing.T) { + service := &SliceConfigService{} + + clusters := []string{"c1", "c2", "c3"} + forbiddenEdges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c2"}}, + {SourceCluster: "c2", TargetClusters: []string{"c3"}}, + } + + pairs, err := service.resolveRestrictedTopology(clusters, forbiddenEdges) + require.NoError(t, err) + require.Len(t, pairs, 2) +} + +func TestBuildForbiddenSet_MultipleTargets(t *testing.T) { + service := &SliceConfigService{} + + forbiddenEdges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "c3"}}, + } + + forbidden := service.buildForbiddenSet(forbiddenEdges) + require.Len(t, forbidden, 2) + require.True(t, forbidden["c1-c2"]) + require.True(t, forbidden["c1-c3"]) +} + +func TestFilterForbiddenPairs_BidirectionalFiltering(t *testing.T) { + service := &SliceConfigService{} + + pairs := []util.GatewayPair{ + {Source: "c1", Target: "c2"}, + {Source: "c2", Target: "c1"}, + {Source: "c1", Target: "c3"}, + {Source: "c3", Target: "c1"}, + } + + forbidden := map[string]bool{ + "c1-c3": true, + } + + filtered := service.filterForbiddenPairs(pairs, forbidden) + require.Len(t, filtered, 2) + + for _, p := range filtered { + require.False(t, (p.Source == "c1" && p.Target == "c3") || (p.Source == "c3" && p.Target == "c1")) + } +} + +func TestFilterForbiddenPairs_ReverseForbidden(t *testing.T) { + service := &SliceConfigService{} + + pairs := []util.GatewayPair{ + {Source: "c1", Target: "c2"}, + } + + forbidden := map[string]bool{ + "c2-c1": true, + } + + filtered := service.filterForbiddenPairs(pairs, forbidden) + require.Empty(t, filtered) +} diff --git a/service/slice_config_webhook_validation.go b/service/slice_config_webhook_validation.go index 99ba653ae..b2e33dc06 100644 --- a/service/slice_config_webhook_validation.go +++ b/service/slice_config_webhook_validation.go @@ -59,6 +59,9 @@ func ValidateSliceConfigCreate(ctx context.Context, sliceConfig *controllerv1alp if err := validateMaxClusterCount(sliceConfig); err != nil { return nil, apierrors.NewInvalid(schema.GroupKind{Group: 
apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{err}) } + if err := validateTopologyConfig(sliceConfig.Spec.TopologyConfig, sliceConfig.Spec.Clusters); err != nil { + return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{err}) + } if sliceConfig.Spec.OverlayNetworkDeploymentMode != controllerv1alpha1.NONET { if err := validateSliceSubnet(sliceConfig); err != nil { return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{err}) @@ -106,7 +109,9 @@ func ValidateSliceConfigUpdate(ctx context.Context, sliceConfig *controllerv1alp if err := validateNamespaceIsolationProfile(sliceConfig); err != nil { return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{err}) } - // Validate single/multi overlay network deployment mode specific fields + if err := validateTopologyConfig(sliceConfig.Spec.TopologyConfig, sliceConfig.Spec.Clusters); err != nil { + return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{err}) + } if sliceConfig.Spec.OverlayNetworkDeploymentMode != controllerv1alpha1.NONET { if err := validateSliceSubnet(sliceConfig); err != nil { return nil, apierrors.NewInvalid(schema.GroupKind{Group: apiGroupKubeSliceControllers, Kind: "SliceConfig"}, sliceConfig.Name, field.ErrorList{err}) @@ -676,3 +681,116 @@ func checkIfQoSConfigExists(ctx context.Context, namespace string, qosProfileNam } return found } + +func validateTopologyConfig(topology *controllerv1alpha1.TopologyConfig, clusters []string) *field.Error { + if topology == nil { + return nil + } + clusterSet := make(map[string]struct{}, len(clusters)) + for _, c := range clusters { + clusterSet[c] = struct{}{} + } + topologyPath := field.NewPath("spec", "topologyConfig") + switch topology.TopologyType { + case controllerv1alpha1.TopologyCustom: + if err := validateCustomTopology(topology.ConnectivityMatrix, clusterSet, topologyPath); err != nil { + return err + } + case controllerv1alpha1.TopologyRestricted: + if err := validateRestrictedTopology(topology, clusterSet, topologyPath); err != nil { + return err + } + case controllerv1alpha1.TopologyFullMesh, "": + default: + return field.Invalid(topologyPath.Child("topologyType"), topology.TopologyType, "must be one of: restricted, full-mesh, custom") + } + return validateForbiddenEdges(topology.ForbiddenEdges, clusterSet, topologyPath) +} + +func validateCustomTopology(matrix []controllerv1alpha1.ConnectivityEntry, clusterSet map[string]struct{}, basePath *field.Path) *field.Error { + matrixPath := basePath.Child("connectivityMatrix") + if len(matrix) == 0 { + return field.Required(matrixPath, "required for custom topology") + } + for i, entry := range matrix { + entryPath := matrixPath.Index(i) + if _, exists := clusterSet[entry.SourceCluster]; !exists { + return field.Invalid(entryPath.Child("sourceCluster"), entry.SourceCluster, "not in spec.clusters") + } + for j, target := range entry.TargetClusters { + if _, exists := clusterSet[target]; !exists { + return field.Invalid(entryPath.Child("targetClusters").Index(j), target, "not in spec.clusters") + } + } + } + return nil +} + +func validateRestrictedTopology(topology *controllerv1alpha1.TopologyConfig, clusterSet map[string]struct{}, basePath *field.Path) 
*field.Error { + if len(topology.ForbiddenEdges) == 0 { + return nil + } + + clusters := make([]string, 0, len(clusterSet)) + for c := range clusterSet { + clusters = append(clusters, c) + } + + forbidden := buildForbiddenSetStatic(topology.ForbiddenEdges) + + reachable := make(map[string]struct{}) + if len(clusters) > 0 { + queue := []string{clusters[0]} + reachable[clusters[0]] = struct{}{} + for len(queue) > 0 { + current := queue[0] + queue = queue[1:] + + for _, next := range clusters { + if next == current { + continue + } + if _, exists := reachable[next]; exists { + continue + } + // Gateways are bidirectional, so an edge is unusable if either direction is forbidden; checking both directions also keeps the BFS result independent of the (map-ordered) start cluster. + if !forbidden[current+"-"+next] && !forbidden[next+"-"+current] { + reachable[next] = struct{}{} + queue = append(queue, next) + } + } + } + } + + if len(reachable) != len(clusterSet) { + return field.Invalid(basePath, topology, "forbidden edges create isolated clusters") + } + + return nil +} + +func buildForbiddenSetStatic(forbiddenEdges []controllerv1alpha1.ForbiddenEdge) map[string]bool { + forbidden := make(map[string]bool) + for _, edge := range forbiddenEdges { + for _, target := range edge.TargetClusters { + forbidden[edge.SourceCluster+"-"+target] = true + } + } + return forbidden +} + +func validateForbiddenEdges(edges []controllerv1alpha1.ForbiddenEdge, clusterSet map[string]struct{}, basePath *field.Path) *field.Error { + edgesPath := basePath.Child("forbiddenEdges") + for i, edge := range edges { + entryPath := edgesPath.Index(i) + if _, exists := clusterSet[edge.SourceCluster]; !exists { + return field.Invalid(entryPath.Child("sourceCluster"), edge.SourceCluster, "not in spec.clusters") + } + for j, target := range edge.TargetClusters { + if _, exists := clusterSet[target]; !exists { + return field.Invalid(entryPath.Child("targetClusters").Index(j), target, "not in spec.clusters") + } + } + } + return nil +} diff --git a/service/slice_config_webhook_validation_test.go b/service/slice_config_webhook_validation_test.go index 303738117..367af4aa7 100644 --- a/service/slice_config_webhook_validation_test.go +++ b/service/slice_config_webhook_validation_test.go @@ -2316,3 +2316,289 @@ func setupSliceConfigWebhookValidationTest(name string, namespace string) (*util ctx := util.PrepareKubeSliceControllersRequestContext(context.Background(), clientMock, nil, "SliceConfigWebhookValidationServiceTest", nil) return clientMock, sliceConfig, ctx } + +func TestValidateTopologyConfig_FullMesh(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyFullMesh, + } + clusters := []string{"c1", "c2", "c3"} + + err := validateTopologyConfig(topology, clusters) + require.Nil(t, err) +} + +func TestValidateTopologyConfig_CustomMatrix(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyCustom, + ConnectivityMatrix: []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "c3"}}, + }, + } + clusters := []string{"c1", "c2", "c3"} + + err := validateTopologyConfig(topology, clusters) + require.Nil(t, err) +} + +func TestValidateTopologyConfig_CustomEmptyMatrix(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyCustom, + ConnectivityMatrix: []controllerv1alpha1.ConnectivityEntry{}, + } + clusters := []string{"c1", "c2"} + + err := validateTopologyConfig(topology, clusters) + require.NotNil(t, err) + require.Contains(t, err.Error(), "required for custom topology") +} + +func TestValidateTopologyConfig_InvalidClusterInMatrix(t
*testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyCustom, + ConnectivityMatrix: []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "invalid", TargetClusters: []string{"c2"}}, + }, + } + clusters := []string{"c1", "c2"} + + err := validateTopologyConfig(topology, clusters) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") +} + +func TestValidateTopologyConfig_InvalidForbiddenEdge(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyRestricted, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "invalid", TargetClusters: []string{"c1"}}, + }, + } + clusters := []string{"c1", "c2"} + + err := validateTopologyConfig(topology, clusters) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") +} + +func TestValidateTopologyConfig_InvalidType(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: "invalid-type", + } + clusters := []string{"c1", "c2"} + + err := validateTopologyConfig(topology, clusters) + require.NotNil(t, err) + require.Contains(t, err.Error(), "must be one of") +} + +func TestValidateTopologyConfig_RestrictedIsolatedClusters(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyRestricted, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "c3"}}, + {SourceCluster: "c2", TargetClusters: []string{"c1", "c3"}}, + {SourceCluster: "c3", TargetClusters: []string{"c1", "c2"}}, + }, + } + clusters := []string{"c1", "c2", "c3"} + + err := validateTopologyConfig(topology, clusters) + require.NotNil(t, err) + require.Contains(t, err.Error(), "isolated clusters") +} + +func TestValidateTopologyConfig_RestrictedPartiallyConnected(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyRestricted, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c3"}}, + }, + } + clusters := []string{"c1", "c2", "c3"} + + err := validateTopologyConfig(topology, clusters) + require.Nil(t, err) +} + +func TestValidateTopologyConfig_NilTopology(t *testing.T) { + err := validateTopologyConfig(nil, []string{"c1", "c2"}) + require.Nil(t, err) +} + +func TestValidateTopologyConfig_EmptyTopologyType(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: "", + } + err := validateTopologyConfig(topology, []string{"c1", "c2"}) + require.Nil(t, err) +} + +func TestValidateCustomTopology_InvalidTargetCluster(t *testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "c1", TargetClusters: []string{"invalid"}}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateCustomTopology(matrix, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") +} + +func TestValidateCustomTopology_InvalidSourceCluster(t *testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "invalid", TargetClusters: []string{"c2"}}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateCustomTopology(matrix, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") +} + +func 
TestValidateCustomTopology_MultipleInvalidTargets(t *testing.T) { + matrix := []controllerv1alpha1.ConnectivityEntry{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "invalid1", "invalid2"}}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateCustomTopology(matrix, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") +} + +func TestValidateRestrictedTopology_EmptyForbiddenEdges(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyRestricted, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateRestrictedTopology(topology, clusterSet, basePath) + require.Nil(t, err) +} + +func TestValidateRestrictedTopology_SingleCluster(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyRestricted, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{}, + } + clusterSet := map[string]struct{}{"c1": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateRestrictedTopology(topology, clusterSet, basePath) + require.Nil(t, err) +} + +func TestValidateRestrictedTopology_FullyDisconnected(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyRestricted, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c2"}}, + {SourceCluster: "c2", TargetClusters: []string{"c1"}}, + }, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateRestrictedTopology(topology, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "isolated clusters") +} + +func TestValidateRestrictedTopology_FourClustersOneIsolated(t *testing.T) { + topology := &controllerv1alpha1.TopologyConfig{ + TopologyType: controllerv1alpha1.TopologyRestricted, + ForbiddenEdges: []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c4"}}, + {SourceCluster: "c2", TargetClusters: []string{"c4"}}, + {SourceCluster: "c3", TargetClusters: []string{"c4"}}, + {SourceCluster: "c4", TargetClusters: []string{"c1", "c2", "c3"}}, + }, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}, "c3": {}, "c4": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateRestrictedTopology(topology, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "isolated clusters") +} + +func TestValidateForbiddenEdges_EmptyEdges(t *testing.T) { + edges := []controllerv1alpha1.ForbiddenEdge{} + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateForbiddenEdges(edges, clusterSet, basePath) + require.Nil(t, err) +} + +func TestValidateForbiddenEdges_MultipleTargets(t *testing.T) { + edges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "c3"}}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}, "c3": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateForbiddenEdges(edges, clusterSet, basePath) + require.Nil(t, err) +} + +func TestValidateForbiddenEdges_InvalidSourceCluster(t *testing.T) { + edges := []controllerv1alpha1.ForbiddenEdge{ + 
{SourceCluster: "invalid", TargetClusters: []string{"c2"}}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateForbiddenEdges(edges, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") +} + +func TestValidateForbiddenEdges_InvalidTargetCluster(t *testing.T) { + edges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"invalid"}}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateForbiddenEdges(edges, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") +} + +func TestValidateForbiddenEdges_InvalidTargetInList(t *testing.T) { + edges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "invalid", "c3"}}, + } + clusterSet := map[string]struct{}{"c1": {}, "c2": {}, "c3": {}} + basePath := field.NewPath("spec", "topologyConfig") + + err := validateForbiddenEdges(edges, clusterSet, basePath) + require.NotNil(t, err) + require.Contains(t, err.Error(), "not in spec.clusters") + require.Contains(t, err.Error(), "targetClusters[1]") +} + +func TestBuildForbiddenSetStatic_MultipleEdgesMultipleTargets(t *testing.T) { + edges := []controllerv1alpha1.ForbiddenEdge{ + {SourceCluster: "c1", TargetClusters: []string{"c2", "c3"}}, + {SourceCluster: "c2", TargetClusters: []string{"c4"}}, + } + + forbidden := buildForbiddenSetStatic(edges) + require.Len(t, forbidden, 3) + require.True(t, forbidden["c1-c2"]) + require.True(t, forbidden["c1-c3"]) + require.True(t, forbidden["c2-c4"]) +} + +func TestBuildForbiddenSetStatic_Empty(t *testing.T) { + forbidden := buildForbiddenSetStatic([]controllerv1alpha1.ForbiddenEdge{}) + require.Empty(t, forbidden) +} + diff --git a/service/worker_slice_gateway_service.go b/service/worker_slice_gateway_service.go index 1da5079f1..d068cb47c 100644 --- a/service/worker_slice_gateway_service.go +++ b/service/worker_slice_gateway_service.go @@ -46,7 +46,8 @@ const gatewayName = "%s-%s-%s" type IWorkerSliceGatewayService interface { ReconcileWorkerSliceGateways(ctx context.Context, req ctrl.Request) (ctrl.Result, error) CreateMinimumWorkerSliceGateways(ctx context.Context, sliceName string, clusterNames []string, namespace string, - label map[string]string, clusterMap map[string]int, sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType) (ctrl.Result, error) + label map[string]string, clusterMap map[string]int, sliceSubnet string, clusterCidr string, + sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType, gatewayPairs []util.GatewayPair) (ctrl.Result, error) ListWorkerSliceGateways(ctx context.Context, ownerLabel map[string]string, namespace string) ([]v1alpha1.WorkerSliceGateway, error) DeleteWorkerSliceGatewaysByLabel(ctx context.Context, label map[string]string, namespace string) error NodeIpReconciliationOfWorkerSliceGateways(ctx context.Context, cluster *controllerv1alpha1.Cluster, namespace string) error @@ -348,9 +349,9 @@ type IndividualCertPairRequest struct { // CreateMinimumWorkerSliceGateways is a function to create gateways with minimum specification func (s *WorkerSliceGatewayService) CreateMinimumWorkerSliceGateways(ctx context.Context, sliceName string, clusterNames []string, namespace string, label map[string]string, clusterMap map[string]int, 
- sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType) (ctrl.Result, error) { + sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType, gatewayPairs []util.GatewayPair) (ctrl.Result, error) { - err := s.cleanupObsoleteGateways(ctx, namespace, label, clusterNames, clusterMap) + err := s.cleanupObsoleteGateways(ctx, namespace, label, clusterNames, clusterMap, gatewayPairs) if err != nil { return ctrl.Result{}, err } @@ -358,7 +359,7 @@ func (s *WorkerSliceGatewayService) CreateMinimumWorkerSliceGateways(ctx context return ctrl.Result{}, nil } - _, err = s.createMinimumGatewaysIfNotExists(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap) + _, err = s.createMinimumGatewaysIfNotExists(ctx, sliceName, clusterNames, namespace, label, clusterMap, sliceSubnet, clusterCidr, sliceGwSvcTypeMap, gatewayPairs) if err != nil { return ctrl.Result{}, err } @@ -378,7 +379,7 @@ func (s *WorkerSliceGatewayService) ListWorkerSliceGateways(ctx context.Context, // cleanupObsoleteGateways is a function to delete outdated gateways func (s *WorkerSliceGatewayService) cleanupObsoleteGateways(ctx context.Context, namespace string, ownerLabel map[string]string, - clusters []string, clusterMap map[string]int) error { + clusters []string, clusterMap map[string]int, gatewayPairs []util.GatewayPair) error { gateways, err := s.ListWorkerSliceGateways(ctx, ownerLabel, namespace) if err != nil { @@ -401,11 +402,21 @@ func (s *WorkerSliceGatewayService) cleanupObsoleteGateways(ctx context.Context, clusterExistMap[cluster] = true } + // Create a map of valid gateway pairs + validPairs := make(map[string]bool) + for _, pair := range gatewayPairs { + validPairs[pair.Source+"-"+pair.Target] = true + } + for _, gateway := range gateways { clusterSource := gateway.Spec.LocalGatewayConfig.ClusterName clusterDestination := gateway.Spec.RemoteGatewayConfig.ClusterName gatewayExpectedNumber := s.calculateGatewayNumber(clusterMap[clusterSource], clusterMap[clusterDestination]) - if !clusterExistMap[clusterSource] || !clusterExistMap[clusterDestination] || gatewayExpectedNumber != gateway.Spec.GatewayNumber { + + // Check if the pair is valid in the current topology; gateways are created bidirectionally, so a gateway is kept if either direction is a resolved pair + isValidPair := validPairs[clusterSource+"-"+clusterDestination] || validPairs[clusterDestination+"-"+clusterSource] + + if !clusterExistMap[clusterSource] || !clusterExistMap[clusterDestination] || gatewayExpectedNumber != gateway.Spec.GatewayNumber || !isValidPair { err = util.DeleteResource(ctx, &gateway) if err != nil { //Register an event for worker slice gateway deletion failure @@ -438,9 +449,10 @@ func (s *WorkerSliceGatewayService) cleanupObsoleteGateways(ctx context.Context, // createMinimumGatewaysIfNotExists is a helper function to create the gateways between worker clusters if not exists func (s *WorkerSliceGatewayService) createMinimumGatewaysIfNotExists(ctx context.Context, sliceName string, clusterNames []string, namespace string, ownerLabel map[string]string, clusterMap map[string]int, - sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType) (ctrl.Result, error) { - noClusters := len(clusterNames) + sliceSubnet string, clusterCidr string, sliceGwSvcTypeMap map[string]*controllerv1alpha1.SliceGatewayServiceType, gatewayPairs []util.GatewayPair) (ctrl.Result, error) { logger := util.CtxLogger(ctx) + + // Build cluster mapping clusterMapping := map[string]*controllerv1alpha1.Cluster{}
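+ // Each Cluster CR referenced by the slice is fetched once up front; pairs whose clusters are missing from this map are skipped with an error log in the pair loop below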
for _, clusterName := range clusterNames { cluster := controllerv1alpha1.Cluster{} @@ -450,28 +462,42 @@ } clusterMapping[clusterName] = &cluster } - for i := 0; i < noClusters; i++ { - for j := i + 1; j < noClusters; j++ { - sourceCluster, destinationCluster := clusterMapping[clusterNames[i]], clusterMapping[clusterNames[j]] - gatewayNumber := s.calculateGatewayNumber(clusterMap[sourceCluster.Name], clusterMap[destinationCluster.Name]) - gatewayAddresses := s.BuildNetworkAddresses(sliceSubnet, sourceCluster.Name, destinationCluster.Name, clusterMap, clusterCidr) - // determine the gateway svc parameters - sliceGwSvcType := defaultSliceGatewayServiceType - gwSvcProtocol := defaultSliceGatewayServiceProtocol - if val, exists := sliceGwSvcTypeMap[sourceCluster.Name]; exists { - sliceGwSvcType = val.Type - gwSvcProtocol = val.Protocol - } - logger.Debugf("setting gwConType in create_minwsg %s", sliceGwSvcType) - logger.Debugf("setting gwProto in create_minwsg %s", gwSvcProtocol) - err := s.createMinimumGateWayPairIfNotExists(ctx, sourceCluster, destinationCluster, sliceName, namespace, sliceGwSvcType, gwSvcProtocol, ownerLabel, gatewayNumber, gatewayAddresses) - if err != nil { - return ctrl.Result{}, err - } + + logger.Infof("Creating %d gateway pairs for slice %s", len(gatewayPairs), sliceName) + + // Create gateways for each resolved pair + for _, pair := range gatewayPairs { + sourceCluster := clusterMapping[pair.Source] + destinationCluster := clusterMapping[pair.Target] + + if sourceCluster == nil || destinationCluster == nil { + logger.Errorf("Cluster not found in mapping: source=%s, dest=%s", pair.Source, pair.Target) + continue + } + + gatewayNumber := s.calculateGatewayNumber(clusterMap[sourceCluster.Name], clusterMap[destinationCluster.Name]) + gatewayAddresses := s.BuildNetworkAddresses(sliceSubnet, sourceCluster.Name, destinationCluster.Name, clusterMap, clusterCidr) + + // Determine the gateway svc parameters + sliceGwSvcType := defaultSliceGatewayServiceType + gwSvcProtocol := defaultSliceGatewayServiceProtocol + if val, exists := sliceGwSvcTypeMap[sourceCluster.Name]; exists { + sliceGwSvcType = val.Type + gwSvcProtocol = val.Protocol + } + + logger.Debugf("Creating gateway pair: %s <-> %s (type=%s, proto=%s)", + sourceCluster.Name, destinationCluster.Name, sliceGwSvcType, gwSvcProtocol) + + err := s.createMinimumGateWayPairIfNotExists(ctx, sourceCluster, destinationCluster, sliceName, namespace, + sliceGwSvcType, gwSvcProtocol, ownerLabel, gatewayNumber, gatewayAddresses) + if err != nil { + return ctrl.Result{}, err } } - return ctrl.Result{}, nil + logger.Infof("Successfully created %d gateway pairs for slice %s", len(gatewayPairs), sliceName) + return ctrl.Result{}, nil } // createMinimumGateWayPairIfNotExists is a function to create the pair of gateways between 2 clusters if not exists diff --git a/service/worker_slice_gateway_service_test.go b/service/worker_slice_gateway_service_test.go index f09906b8f..f42907b1d 100644 --- a/service/worker_slice_gateway_service_test.go +++ b/service/worker_slice_gateway_service_test.go @@ -320,7 +320,7 @@ func testCreateMinimumWorkerSliceGatewaysAlreadyExists(t *testing.T) { //environment := make(map[string]string, 5) //jobMock.On("CreateJob", ctx, requestObj.Namespace, "image", environment).Return(ctrl.Result{}, nil).Once() - result, err := workerSliceGatewayService.CreateMinimumWorkerSliceGateways(ctx, "red", clusterNames, requestObj.Namespace, label,
clusterMap, "10.10.10.10/16", "/16", nil) + result, err := workerSliceGatewayService.CreateMinimumWorkerSliceGateways(ctx, "red", clusterNames, requestObj.Namespace, label, clusterMap, "10.10.10.10/16", "/16", nil, []util.GatewayPair{{Source: "cluster-1", Target: "cluster-2"}}) expectedResult := ctrl.Result{} require.NoError(t, nil) require.Equal(t, result, expectedResult) @@ -407,7 +407,7 @@ func testCreateMinimumWorkerSliceGatewaysNotExists(t *testing.T) { clientMock.On("Update", ctx, mock.AnythingOfType("*v1.Event")).Return(nil).Once() clientMock.On("Get", ctx, mock.Anything, mock.Anything).Return(nil).Once() mMock.On("RecordCounterMetric", mock.Anything, mock.Anything).Return().Once() - result, err := workerSliceGatewayService.CreateMinimumWorkerSliceGateways(ctx, "red", clusterNames, requestObj.Namespace, label, clusterMap, "10.10.10.10/16", "/16", nil) + result, err := workerSliceGatewayService.CreateMinimumWorkerSliceGateways(ctx, "red", clusterNames, requestObj.Namespace, label, clusterMap, "10.10.10.10/16", "/16", nil, []util.GatewayPair{{Source: "cluster-1", Target: "cluster-2"}}) expectedResult := ctrl.Result{} require.NoError(t, nil) require.Equal(t, result, expectedResult) @@ -551,7 +551,6 @@ func testNodeIpReconciliationOfWorkerSliceGatewaysExists(t *testing.T) { Annotations: nil, OwnerReferences: nil, Finalizers: nil, - ClusterName: "", ManagedFields: nil, }, Spec: controllerv1alpha1.ClusterSpec{}, diff --git a/util/common.go b/util/common.go index 72c2f72b2..311737750 100644 --- a/util/common.go +++ b/util/common.go @@ -38,6 +38,11 @@ type WorkerSliceGatewayNetworkAddresses struct { ClientVpnAddress string } +type GatewayPair struct { + Source string + Target string +} + // AppendHyphenToString is a function add hyphen at the end of string func AppendHyphenToString(stringToAppend string) string { if strings.HasSuffix(stringToAppend, "-") {