diff --git a/Makefile b/Makefile index 4a60e3f2a8..89b54ae83d 100644 --- a/Makefile +++ b/Makefile @@ -12,17 +12,17 @@ IMAGE_ORG ?= $(USER) REPO ?= quay.io/$(IMAGE_ORG) # Component versions to use in bundle / release (do not use $VERSION for that) -PREVIOUS_VERSION ?= v1.6.0-community +PREVIOUS_VERSION ?= v1.6.1-community -BUNDLE_VERSION ?= 1.6.1-community +BUNDLE_VERSION ?= 1.8.0-community #File based catalog -FBC_VERSION ?= 1.6.1-community +FBC_VERSION ?= 1.8.0-community # console plugin -export PLG_VERSION ?= v1.6.1-community +export PLG_VERSION ?= v1.8.0-community # flowlogs-pipeline -export FLP_VERSION ?= v1.6.1-community +export FLP_VERSION ?= v1.8.0-community # eBPF agent -export BPF_VERSION ?= v1.6.1-community +export BPF_VERSION ?= v1.8.0-community # Allows building bundles in Mac replacing BSD 'sed' command by GNU-compatible 'gsed' ifeq (,$(shell which gsed 2>/dev/null)) diff --git a/RELEASE.md b/RELEASE.md index 07d02f0d9c..508f1e4bc7 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -11,7 +11,7 @@ To release them, a tag in the format "v1.6.0-community" or "v1.6.0-crc0" must be E.g: ```bash -version="v1.6.1-community" +version="v1.8.0-community" git tag -a "$version" -m "$version" git push upstream --tags ``` @@ -36,7 +36,7 @@ Edit the [Makefile](./Makefile) to update `PREVIOUS_VERSION`, `BUNDLE_VERSION`, make update-bundle # Set desired operator version - CAREFUL, no leading "v" here -version="1.6.1-community" +version="1.8.0-community" vv=v$version test_branch=test-$vv @@ -84,7 +84,7 @@ Click on "Publish release". Before publishing, we should check that upgrading the operator from a previous version isn't broken. 
We can use `operator-sdk` for that: ```bash -previous=v1.6.0-community +previous=v1.6.1-community bin/operator-sdk run bundle quay.io/netobserv/network-observability-operator-bundle:$previous --timeout 5m PORT_FWD=false make deploy-loki deploy-sample-cr bin/operator-sdk run bundle-upgrade quay.io/netobserv/network-observability-operator-bundle:$vv --timeout 5m diff --git a/apis/flowcollector/v1beta1/flowcollector_types.go b/apis/flowcollector/v1beta1/flowcollector_types.go index 78e45fb195..12624823ec 100644 --- a/apis/flowcollector/v1beta1/flowcollector_types.go +++ b/apis/flowcollector/v1beta1/flowcollector_types.go @@ -154,7 +154,7 @@ type FlowCollectorIPFIX struct { // - `NetworkEvents`, to track Network events.
// - `PacketTranslation`, to enrich flows with packets translation information.
// - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
-// - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+// - `UDNMapping`, to enable interfaces mapping to udn [Developer Preview].
// +kubebuilder:validation:Enum:="PacketDrop";"DNSTracking";"FlowRTT";"NetworkEvents";"PacketTranslation";"EbpfManager";"UDNMapping" type AgentFeature string @@ -272,13 +272,15 @@ type EBPFFlowFilter struct { // Set `enable` to `true` to enable the eBPF flow filtering feature. Enable *bool `json:"enable,omitempty"` - // [deprecated (*)] this setting is not used anymore. + // [deprecated (*)] this setting is not used anymore. It is replaced with the `rules` list. EBPFFlowFilterRule `json:",inline"` - // `flowFilterRules` defines a list of ebpf agent flow filtering rules + // `rules` defines a list of filtering rules on the eBPF Agents. + // When filtering is enabled, by default, flows that don't match any rule are rejected. + // To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. // +kubebuilder:validation:MinItems:=1 // +kubebuilder:validation:MaxItems:=16 - FlowFilterRules []EBPFFlowFilterRule `json:"rules,omitempty"` + Rules []EBPFFlowFilterRule `json:"rules,omitempty"` } // `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information @@ -364,7 +366,7 @@ type FlowCollectorEBPF struct { // the kernel debug filesystem, so the eBPF pod has to run as privileged. // - `PacketTranslation`: enable enriching flows with packet's translation information.
// - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- // - `UDNMapping`, to enable interfaces mappind to udn.
+ // - `UDNMapping`, to enable interfaces mapping to udn.
// +optional Features []AgentFeature `json:"features,omitempty"` diff --git a/apis/flowcollector/v1beta1/zz_generated.conversion.go b/apis/flowcollector/v1beta1/zz_generated.conversion.go index 96f30bc6ba..aa0ccb4a14 100644 --- a/apis/flowcollector/v1beta1/zz_generated.conversion.go +++ b/apis/flowcollector/v1beta1/zz_generated.conversion.go @@ -530,7 +530,7 @@ func autoConvert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in *EBPFFlowFi if err := Convert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule(&in.EBPFFlowFilterRule, &out.EBPFFlowFilterRule, s); err != nil { return err } - out.FlowFilterRules = *(*[]v1beta2.EBPFFlowFilterRule)(unsafe.Pointer(&in.FlowFilterRules)) + out.Rules = *(*[]v1beta2.EBPFFlowFilterRule)(unsafe.Pointer(&in.Rules)) return nil } @@ -544,7 +544,7 @@ func autoConvert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in *v1beta2.EB if err := Convert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule(&in.EBPFFlowFilterRule, &out.EBPFFlowFilterRule, s); err != nil { return err } - out.FlowFilterRules = *(*[]EBPFFlowFilterRule)(unsafe.Pointer(&in.FlowFilterRules)) + out.Rules = *(*[]EBPFFlowFilterRule)(unsafe.Pointer(&in.Rules)) return nil } diff --git a/apis/flowcollector/v1beta1/zz_generated.deepcopy.go b/apis/flowcollector/v1beta1/zz_generated.deepcopy.go index e79aca3d57..50bbbdbc53 100644 --- a/apis/flowcollector/v1beta1/zz_generated.deepcopy.go +++ b/apis/flowcollector/v1beta1/zz_generated.deepcopy.go @@ -131,8 +131,8 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) { **out = **in } in.EBPFFlowFilterRule.DeepCopyInto(&out.EBPFFlowFilterRule) - if in.FlowFilterRules != nil { - in, out := &in.FlowFilterRules, &out.FlowFilterRules + if in.Rules != nil { + in, out := &in.Rules, &out.Rules *out = make([]EBPFFlowFilterRule, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) diff --git a/apis/flowcollector/v1beta2/flowcollector_types.go b/apis/flowcollector/v1beta2/flowcollector_types.go index 
16a386539f..885170817d 100644 --- a/apis/flowcollector/v1beta2/flowcollector_types.go +++ b/apis/flowcollector/v1beta2/flowcollector_types.go @@ -80,7 +80,7 @@ type FlowCollectorSpec struct { // +optional Kafka FlowCollectorKafka `json:"kafka,omitempty"` - // `exporters` define additional optional exporters for custom consumption or storage. + // `exporters` defines additional optional exporters for custom consumption or storage. // +optional // +k8s:conversion-gen=false Exporters []*FlowCollectorExporter `json:"exporters"` @@ -174,10 +174,10 @@ type FlowCollectorIPFIX struct { // - `PacketDrop`, to track packet drops.
// - `DNSTracking`, to track specific information on DNS traffic.
// - `FlowRTT`, to track TCP latency.
-// - `NetworkEvents`, to track Network events [Developer Preview].
-// - `PacketTranslation`, to enrich flows with packets translation information.
-// - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
-// - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+// - `NetworkEvents`, to track network events [Technology Preview].
+// - `PacketTranslation`, to enrich flows with packet translation information, such as Service NAT.
+// - `EbpfManager`, to enable using eBPF Manager to manage NetObserv eBPF programs. [Unsupported (*)].
+// - `UDNMapping`, to enable interfaces mapping to UDN. [Unsupported (*)].
// +kubebuilder:validation:Enum:="PacketDrop";"DNSTracking";"FlowRTT";"NetworkEvents";"PacketTranslation";"EbpfManager";"UDNMapping" type AgentFeature string @@ -285,7 +285,7 @@ type EBPFFlowFilterRule struct { // +optional PktDrops *bool `json:"pktDrops,omitempty"` - // `sampling` sampling rate for the matched flow + // `sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`. // +optional Sampling *uint32 `json:"sampling,omitempty"` } @@ -295,13 +295,16 @@ type EBPFFlowFilter struct { // Set `enable` to `true` to enable the eBPF flow filtering feature. Enable *bool `json:"enable,omitempty"` - // [deprecated (*)] this setting is not used anymore. + // [Deprecated (*)]. This setting is not used anymore. It is replaced with the `rules` list. EBPFFlowFilterRule `json:",inline"` - // `flowFilterRules` defines a list of ebpf agent flow filtering rules + // `rules` defines a list of filtering rules on the eBPF Agents. + // When filtering is enabled, by default, flows that don't match any rule are rejected. + // To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. + // [Unsupported (*)]. // +kubebuilder:validation:MinItems:=1 // +kubebuilder:validation:MaxItems:=16 - FlowFilterRules []EBPFFlowFilterRule `json:"rules,omitempty"` + Rules []EBPFFlowFilterRule `json:"rules,omitempty"` } // `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information @@ -378,22 +381,20 @@ type FlowCollectorEBPF struct { Advanced *AdvancedAgentConfig `json:"advanced,omitempty"` // List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- // - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting - // the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + // - `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting + // the kernel debug filesystem, so the eBPF agent pods must run as privileged. // If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- // - `DNSTracking`: enable the DNS tracking feature.
- // - `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
- // - `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies. - // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + // - `DNSTracking`: Enable the DNS tracking feature.
+ // - `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
+ // - `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies. + // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. // It requires using the OVN-Kubernetes network plugin with the Observability feature. - // IMPORTANT: This feature is available as a Developer Preview.
- // - `PacketTranslation`: enable enriching flows with packet's translation information.
- // - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- // IMPORTANT: This feature is available as a Developer Preview.
- // - `UDNMapping`, to enable interfaces mappind to udn.
- // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + // IMPORTANT: This feature is available as a Technology Preview.
+ // - `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT.
+	// - `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage NetObserv eBPF programs. Prerequisite: the eBPF Manager operator (or upstream bpfman operator) must be installed.
+ // - `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN).
+ // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. // It requires using the OVN-Kubernetes network plugin with the Observability feature. - // IMPORTANT: This feature is available as a Developer Preview.
// +optional Features []AgentFeature `json:"features,omitempty"` @@ -647,7 +648,7 @@ type FlowCollectorFLP struct { KafkaConsumerBatchSize int `json:"kafkaConsumerBatchSize"` // `logTypes` defines the desired record types to generate. Possible values are:
- // - `Flows` (default) to export regular network flows.
+ // - `Flows` to export regular network flows. This is the default.
// - `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates.
// - `EndedConversations` to generate only ended conversations events.
// - `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint.
@@ -676,13 +677,15 @@ type FlowCollectorFLP struct { SubnetLabels SubnetLabels `json:"subnetLabels,omitempty"` //+optional - // `deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage. - // IMPORTANT: This feature is available as a Developer Preview. + // `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage. + // [Unsupported (*)]. Deduper *FLPDeduper `json:"deduper,omitempty"` // +optional - // `filters` let you define custom filters to limit the amount of generated flows. - // IMPORTANT: This feature is available as a Developer Preview. + // `filters` lets you define custom filters to limit the amount of generated flows. + // These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing to filter by Kubernetes namespace, + // but with a lesser improvement in performance. + // [Unsupported (*)]. Filters []FLPFilterSet `json:"filters"` // `advanced` allows setting some aspects of the internal configuration of the flow processor. @@ -700,11 +703,11 @@ const ( FLPDeduperSample FLPDeduperMode = "Sample" ) -// `FLPDeduper` defines the desired configuration for FLP-based deduper +// `FLPDeduper` defines the desired configuration for FLP-based deduper. type FLPDeduper struct { // Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes.
- // - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events.
- // - Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500.
+ // - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events.
+	// - Use `Sample` to randomly keep only one flow out of 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate and keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500.
// - Use `Disabled` to turn off Processor-based de-duplication.
// +kubebuilder:validation:Enum:="Disabled";"Drop";"Sample" // +kubebuilder:default:=Disabled @@ -732,13 +735,13 @@ const ( FLPFilterTargetExporters FLPFilterTarget = "Exporters" ) -// `FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions +// `FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions. type FLPFilterSet struct { // `filters` is a list of matches that must be all satisfied in order to remove a flow. // +optional AllOf []FLPSingleFilter `json:"allOf"` - // If specified, this filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted. + // If specified, these filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted. // +optional // +kubebuilder:validation:Enum:="";"Loki";"Metrics";"Exporters" OutputTarget FLPFilterTarget `json:"outputTarget,omitempty"` @@ -749,15 +752,15 @@ type FLPFilterSet struct { Sampling int32 `json:"sampling,omitempty"` } -// `FLPSingleFilter` defines the desired configuration for a single FLP-based filter +// `FLPSingleFilter` defines the desired configuration for a single FLP-based filter. type FLPSingleFilter struct { - // Type of matching to apply + // Type of matching to apply. // +kubebuilder:validation:Enum:="Equal";"NotEqual";"Presence";"Absence";"MatchRegex";"NotMatchRegex" // +kubebuilder:default:="Equal" MatchType FLPFilterMatch `json:"matchType"` - // Name of the field to filter on - // Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html. + // Name of the field to filter on. + // Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc. 
// +required Field string `json:"field"` @@ -1327,7 +1330,7 @@ type AdvancedProcessorConfig struct { // +optional Scheduling *SchedulingConfig `json:"scheduling,omitempty"` - // Define secondary networks to be checked for resources identification. + // Defines secondary networks to be checked for resources identification. // To guarantee a correct identification, indexed values must form an unique identifier across the cluster. // If the same index is used by several resources, those resources might be incorrectly labeled. // +optional @@ -1417,7 +1420,7 @@ type AdvancedPluginConfig struct { Scheduling *SchedulingConfig `json:"scheduling,omitempty"` } -// `SubnetLabels` allows to define custom labels on subnets and IPs or to enable automatic labelling of recognized subnets in OpenShift. +// `SubnetLabels` allows you to define custom labels on subnets and IPs or to enable automatic labelling of recognized subnets in OpenShift. type SubnetLabels struct { // `openShiftAutoDetect` allows, when set to `true`, to detect automatically the machines, pods and services subnets based on the // OpenShift install configuration and the Cluster Network Operator configuration. Indirectly, this is a way to accurately detect @@ -1475,7 +1478,7 @@ type FlowCollectorExporter struct { type FlowCollectorStatus struct { // Important: Run "make" to regenerate code after modifying this file - // `conditions` represent the latest available observations of an object's state + // `conditions` represents the latest available observations of an object's state Conditions []metav1.Condition `json:"conditions"` // Namespace where console plugin and flowlogs-pipeline have been deployed. 
diff --git a/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go b/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go index 3d9e66c828..480e0fc4a4 100644 --- a/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go +++ b/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go @@ -119,8 +119,8 @@ func (r *FlowCollector) validateAgent(_ context.Context, fc *FlowCollectorSpec) var errs []error if fc.Agent.EBPF.FlowFilter != nil && fc.Agent.EBPF.FlowFilter.Enable != nil && *fc.Agent.EBPF.FlowFilter.Enable { m := make(map[string]bool) - for i := range fc.Agent.EBPF.FlowFilter.FlowFilterRules { - rule := fc.Agent.EBPF.FlowFilter.FlowFilterRules[i] + for i := range fc.Agent.EBPF.FlowFilter.Rules { + rule := fc.Agent.EBPF.FlowFilter.Rules[i] key := rule.CIDR + "-" + rule.PeerCIDR if found := m[key]; found { errs = append(errs, fmt.Errorf("flow filter rule CIDR and PeerCIDR %s already exists", diff --git a/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go b/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go index 0ff2dac08a..02c1d3993b 100644 --- a/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go +++ b/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go @@ -44,7 +44,7 @@ func TestValidateAgent(t *testing.T) { Sampling: ptr.To(int32(100)), FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Action: "Accept", CIDR: "0.0.0.0/0", @@ -73,7 +73,7 @@ func TestValidateAgent(t *testing.T) { Sampling: ptr.To(int32(100)), FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Action: "Accept", CIDR: "0.0.0.0/0", @@ -181,7 +181,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Action: "Accept", 
CIDR: "0.0.0.0/0", @@ -208,7 +208,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Ports: intstr.FromString("abcd"), }, @@ -232,7 +232,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Ports: intstr.FromString("80-255"), }, @@ -255,7 +255,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Ports: intstr.FromString("255-80"), }, @@ -279,7 +279,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Ports: intstr.FromString("80-?"), }, @@ -303,7 +303,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Ports: intstr.FromString("255,80"), }, @@ -326,7 +326,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { Ports: intstr.FromString("80,100,250"), }, @@ -350,7 +350,7 @@ func TestValidateAgent(t *testing.T) { EBPF: FlowCollectorEBPF{ FlowFilter: &EBPFFlowFilter{ Enable: ptr.To(true), - FlowFilterRules: []EBPFFlowFilterRule{ + Rules: []EBPFFlowFilterRule{ { CIDR: "1.1.1.1", }, diff --git a/apis/flowcollector/v1beta2/zz_generated.deepcopy.go b/apis/flowcollector/v1beta2/zz_generated.deepcopy.go index 6461b82b21..3960ab41f9 100644 --- a/apis/flowcollector/v1beta2/zz_generated.deepcopy.go +++ 
b/apis/flowcollector/v1beta2/zz_generated.deepcopy.go @@ -290,8 +290,8 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) { **out = **in } in.EBPFFlowFilterRule.DeepCopyInto(&out.EBPFFlowFilterRule) - if in.FlowFilterRules != nil { - in, out := &in.FlowFilterRules, &out.FlowFilterRules + if in.Rules != nil { + in, out := &in.Rules, &out.Rules *out = make([]EBPFFlowFilterRule, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) diff --git a/apis/flowmetrics/v1alpha1/flowmetric_types.go b/apis/flowmetrics/v1alpha1/flowmetric_types.go index 6b8ce9a675..dee58a025a 100644 --- a/apis/flowmetrics/v1alpha1/flowmetric_types.go +++ b/apis/flowmetrics/v1alpha1/flowmetric_types.go @@ -93,7 +93,7 @@ type FlowMetricSpec struct { // +optional Labels []string `json:"labels"` - // `flatten` is a list of list-type fields that must be flattened, such as Interfaces and NetworkEvents. Flattened fields generate one metric per item in that field. + // `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field. // For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`. // +optional Flatten []string `json:"flatten"` diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml index 1d0d19fd19..46e2164bae 100644 --- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml +++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml @@ -137,7 +137,7 @@ spec: the kernel debug filesystem, so the eBPF pod has to run as privileged. - `PacketTranslation`: enable enriching flows with packet's translation information.
- `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- - `UDNMapping`, to enable interfaces mappind to udn.
+ - `UDNMapping`, to enable interfaces mapping to udn.
items: description: |- Agent feature, can be one of:
@@ -147,7 +147,7 @@ spec: - `NetworkEvents`, to track Network events.
- `PacketTranslation`, to enrich flows with packets translation information.
- `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `UDNMapping`, to enable interfaces mapping to udn [Developer Preview].
enum: - PacketDrop - DNSTracking @@ -242,8 +242,10 @@ spec: - SCTP type: string rules: - description: '`flowFilterRules` defines a list of ebpf - agent flow filtering rules' + description: |- + `rules` defines a list of filtering rules on the eBPF Agents. + When filtering is enabled, by default, flows that don't match any rule are rejected. + To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. items: description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering @@ -3950,32 +3952,30 @@ spec: features: description: |- List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting - the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + - `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting + the kernel debug filesystem, so the eBPF agent pods must run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- - `DNSTracking`: enable the DNS tracking feature.
- - `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
- - `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies. - This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + - `DNSTracking`: Enable the DNS tracking feature.
+ - `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
+ - `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies. + This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. It requires using the OVN-Kubernetes network plugin with the Observability feature. - IMPORTANT: This feature is available as a Developer Preview.
- - `PacketTranslation`: enable enriching flows with packet's translation information.
- - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- IMPORTANT: This feature is available as a Developer Preview.
- - `UDNMapping`, to enable interfaces mappind to udn.
- This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + IMPORTANT: This feature is available as a Technology Preview.
+ - `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT.
+ - `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage NetObserv eBPF programs. Prerequisite: the eBPF Manager operator (or upstream bpfman operator) must be installed.
+ - `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN).
+ This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. It requires using the OVN-Kubernetes network plugin with the Observability feature. - IMPORTANT: This feature is available as a Developer Preview.
items: description: |- Agent feature, can be one of:
- `PacketDrop`, to track packet drops.
- `DNSTracking`, to track specific information on DNS traffic.
- `FlowRTT`, to track TCP latency.
- - `NetworkEvents`, to track Network events [Developer Preview].
- - `PacketTranslation`, to enrich flows with packets translation information.
- - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `NetworkEvents`, to track network events [Technology Preview].
+ - `PacketTranslation`, to enrich flows with packet translation information, such as Service NAT.
+ - `EbpfManager`, to enable using eBPF Manager to manage NetObserv eBPF programs. [Unsupported (*)].
+ - `UDNMapping`, to enable interfaces mapping to UDN. [Unsupported (*)].
enum: - PacketDrop - DNSTracking @@ -4070,8 +4070,11 @@ spec: - SCTP type: string rules: - description: '`flowFilterRules` defines a list of ebpf - agent flow filtering rules' + description: |- + `rules` defines a list of filtering rules on the eBPF Agents. + When filtering is enabled, by default, flows that don't match any rule are rejected. + To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. + [Unsupported (*)]. items: description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering @@ -4155,7 +4158,8 @@ spec: type: string sampling: description: '`sampling` sampling rate for the matched - flow' + flows, overriding the global sampling defined + at `spec.agent.ebpf.sampling`.' format: int32 type: integer sourcePorts: @@ -4190,7 +4194,7 @@ spec: type: array sampling: description: '`sampling` sampling rate for the matched - flow' + flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.' format: int32 type: integer sourcePorts: @@ -6052,7 +6056,7 @@ spec: - Kafka type: string exporters: - description: '`exporters` define additional optional exporters for + description: '`exporters` defines additional optional exporters for custom consumption or storage.' items: description: '`FlowCollectorExporter` defines an additional exporter @@ -8155,7 +8159,7 @@ spec: type: object secondaryNetworks: description: |- - Define secondary networks to be checked for resources identification. + Defines secondary networks to be checked for resources identification. To guarantee a correct identification, indexed values must form an unique identifier across the cluster. If the same index is used by several resources, those resources might be incorrectly labeled. 
items: @@ -8193,15 +8197,15 @@ spec: type: string deduper: description: |- - `deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage. - IMPORTANT: This feature is available as a Developer Preview. + `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage. + [Unsupported (*)]. properties: mode: default: Disabled description: |- Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes.
- - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events.
- - Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500.
+ - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events.
+ - Use `Sample` to randomly keep only one flow on 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate or keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500.
- Use `Disabled` to turn off Processor-based de-duplication.
enum: - Disabled @@ -8218,27 +8222,29 @@ spec: type: object filters: description: |- - `filters` let you define custom filters to limit the amount of generated flows. - IMPORTANT: This feature is available as a Developer Preview. + `filters` lets you define custom filters to limit the amount of generated flows. + These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing to filter by Kubernetes namespace, + but with a lesser improvement in performance. + [Unsupported (*)]. items: description: '`FLPFilterSet` defines the desired configuration - for FLP-based filtering satisfying all conditions' + for FLP-based filtering satisfying all conditions.' properties: allOf: description: '`filters` is a list of matches that must be all satisfied in order to remove a flow.' items: description: '`FLPSingleFilter` defines the desired configuration - for a single FLP-based filter' + for a single FLP-based filter.' properties: field: description: |- - Name of the field to filter on - Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html. + Name of the field to filter on. + Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc. type: string matchType: default: Equal - description: Type of matching to apply + description: Type of matching to apply. enum: - Equal - NotEqual @@ -8259,8 +8265,8 @@ spec: type: object type: array outputTarget: - description: 'If specified, this filters only target a single - output: `Loki`, `Metrics` or `Exporters`. By default, + description: 'If specified, these filters only target a + single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.' 
enum: - "" @@ -8623,7 +8629,7 @@ spec: default: Flows description: |- `logTypes` defines the desired record types to generate. Possible values are:
- - `Flows` (default) to export regular network flows.
+ - `Flows` to export regular network flows. This is the default.
- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates.
- `EndedConversations` to generate only ended conversations events.
- `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint.
@@ -9041,7 +9047,7 @@ spec: description: '`FlowCollectorStatus` defines the observed state of FlowCollector' properties: conditions: - description: '`conditions` represent the latest available observations + description: '`conditions` represents the latest available observations of an object''s state' items: description: Condition contains details for one aspect of the current diff --git a/bundle/manifests/flows.netobserv.io_flowmetrics.yaml b/bundle/manifests/flows.netobserv.io_flowmetrics.yaml index ad3b13f7d6..818da1b670 100644 --- a/bundle/manifests/flows.netobserv.io_flowmetrics.yaml +++ b/bundle/manifests/flows.netobserv.io_flowmetrics.yaml @@ -198,7 +198,7 @@ spec: type: array flatten: description: |- - `flatten` is a list of list-type fields that must be flattened, such as Interfaces and NetworkEvents. Flattened fields generate one metric per item in that field. + `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field. For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`. 
items: type: string diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml index c49e5cea10..cf471b1632 100644 --- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml +++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml @@ -434,7 +434,7 @@ metadata: capabilities: Seamless Upgrades categories: Monitoring, Networking console.openshift.io/plugins: '["netobserv-plugin"]' - containerImage: quay.io/netobserv/network-observability-operator:1.6.1-community + containerImage: quay.io/netobserv/network-observability-operator:1.8.0-community createdAt: ':created-at:' description: Network flows collector and monitoring solution operatorframework.io/initialization-resource: '{"apiVersion":"flows.netobserv.io/v1beta2", @@ -450,7 +450,7 @@ metadata: operatorframework.io/arch.ppc64le: supported operatorframework.io/arch.s390x: supported operatorframework.io/os.linux: supported - name: netobserv-operator.v1.6.1-community + name: netobserv-operator.v1.8.0-community namespace: placeholder spec: apiservicedefinitions: {} @@ -890,7 +890,7 @@ spec: ## Configuration - The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/config/samples/flows_v1beta2_flowcollector.yaml). + The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/config/samples/flows_v1beta2_flowcollector.yaml). 
To edit configuration in cluster, run: @@ -906,7 +906,7 @@ spec: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you might have to configure differently if you used another installation method. Make sure to disable it (`spec.loki.enable`) if you don't want to use Loki. - - Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/QuickFilters.md). + - Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. 
@@ -921,7 +921,7 @@ spec: This documentation includes: - An [overview](https://github.com/netobserv/network-observability-operator#openshift-console) of the features, with screenshots - - More information on [configuring metrics](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/Metrics.md). + - More information on [configuring metrics](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/Metrics.md). - A [performance](https://github.com/netobserv/network-observability-operator#performance-fine-tuning) section, for fine-tuning - A [security](https://github.com/netobserv/network-observability-operator#securing-data-and-communications) section - An [F.A.Q.](https://github.com/netobserv/network-observability-operator#faq--troubleshooting) section @@ -1213,15 +1213,15 @@ spec: - /manager env: - name: RELATED_IMAGE_EBPF_AGENT - value: quay.io/netobserv/netobserv-ebpf-agent:v1.6.1-community + value: quay.io/netobserv/netobserv-ebpf-agent:v1.8.0-community - name: RELATED_IMAGE_FLOWLOGS_PIPELINE - value: quay.io/netobserv/flowlogs-pipeline:v1.6.1-community + value: quay.io/netobserv/flowlogs-pipeline:v1.8.0-community - name: RELATED_IMAGE_CONSOLE_PLUGIN - value: quay.io/netobserv/network-observability-console-plugin:v1.6.1-community + value: quay.io/netobserv/network-observability-console-plugin:v1.8.0-community - name: DOWNSTREAM_DEPLOYMENT value: "false" - name: PROFILING_BIND_ADDRESS - image: quay.io/netobserv/network-observability-operator:1.6.1-community + image: quay.io/netobserv/network-observability-operator:1.8.0-community imagePullPolicy: Always livenessProbe: httpGet: @@ -1353,14 +1353,14 @@ spec: name: Red Hat url: https://www.redhat.com relatedImages: - - image: quay.io/netobserv/netobserv-ebpf-agent:v1.6.1-community + - image: quay.io/netobserv/netobserv-ebpf-agent:v1.8.0-community name: ebpf-agent - - image: quay.io/netobserv/flowlogs-pipeline:v1.6.1-community + - image: 
quay.io/netobserv/flowlogs-pipeline:v1.8.0-community name: flowlogs-pipeline - - image: quay.io/netobserv/network-observability-console-plugin:v1.6.1-community + - image: quay.io/netobserv/network-observability-console-plugin:v1.8.0-community name: console-plugin - replaces: netobserv-operator.v1.6.0-community - version: 1.6.1-community + replaces: netobserv-operator.v1.6.1-community + version: 1.8.0-community webhookdefinitions: - admissionReviewVersions: - v1 diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml index 495a3a5e2d..223ea27cb3 100644 --- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml +++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml @@ -122,7 +122,7 @@ spec: the kernel debug filesystem, so the eBPF pod has to run as privileged. - `PacketTranslation`: enable enriching flows with packet's translation information.
- `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- - `UDNMapping`, to enable interfaces mappind to udn.
+ - `UDNMapping`, to enable interfaces mapping to udn.
items: description: |- Agent feature, can be one of:
@@ -132,7 +132,7 @@ spec: - `NetworkEvents`, to track Network events.
- `PacketTranslation`, to enrich flows with packets translation information.
- `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `UDNMapping`, to enable interfaces mapping to udn [Developer Preview].
enum: - PacketDrop - DNSTracking @@ -215,7 +215,10 @@ spec: - SCTP type: string rules: - description: '`flowFilterRules` defines a list of ebpf agent flow filtering rules' + description: |- + `rules` defines a list of filtering rules on the eBPF Agents. + When filtering is enabled, by default, flows that don't match any rule are rejected. + To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. items: description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule.' properties: @@ -3632,32 +3635,30 @@ spec: features: description: |- List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting - the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + - `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting + the kernel debug filesystem, so the eBPF agent pods must run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- - `DNSTracking`: enable the DNS tracking feature.
- - `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
- - `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies. - This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + - `DNSTracking`: Enable the DNS tracking feature.
+ - `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
+ - `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies. + This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. It requires using the OVN-Kubernetes network plugin with the Observability feature. - IMPORTANT: This feature is available as a Developer Preview.
- - `PacketTranslation`: enable enriching flows with packet's translation information.
- - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- IMPORTANT: This feature is available as a Developer Preview.
- - `UDNMapping`, to enable interfaces mappind to udn.
- This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. + IMPORTANT: This feature is available as a Technology Preview.
+ - `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT.
+ - `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage NetObserv eBPF programs. Prerequisite: the eBPF Manager operator (or upstream bpfman operator) must be installed.
+ - `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN).
+ This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. It requires using the OVN-Kubernetes network plugin with the Observability feature. - IMPORTANT: This feature is available as a Developer Preview.
items: description: |- Agent feature, can be one of:
- `PacketDrop`, to track packet drops.
- `DNSTracking`, to track specific information on DNS traffic.
- `FlowRTT`, to track TCP latency.
- - `NetworkEvents`, to track Network events [Developer Preview].
- - `PacketTranslation`, to enrich flows with packets translation information.
- - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `NetworkEvents`, to track network events [Technology Preview].
+ - `PacketTranslation`, to enrich flows with packet translation information, such as Service NAT.
+ - `EbpfManager`, to enable using eBPF Manager to manage NetObserv eBPF programs. [Unsupported (*)].
+ - `UDNMapping`, to enable interfaces mapping to UDN. [Unsupported (*)].
enum: - PacketDrop - DNSTracking @@ -3740,7 +3741,11 @@ spec: - SCTP type: string rules: - description: '`flowFilterRules` defines a list of ebpf agent flow filtering rules' + description: |- + `rules` defines a list of filtering rules on the eBPF Agents. + When filtering is enabled, by default, flows that don't match any rule are rejected. + To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. + [Unsupported (*)]. items: description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule.' properties: @@ -3810,7 +3815,7 @@ spec: - SCTP type: string sampling: - description: '`sampling` sampling rate for the matched flow' + description: '`sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.' format: int32 type: integer sourcePorts: @@ -3844,7 +3849,7 @@ spec: minItems: 1 type: array sampling: - description: '`sampling` sampling rate for the matched flow' + description: '`sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.' format: int32 type: integer sourcePorts: @@ -5604,7 +5609,7 @@ spec: - Kafka type: string exporters: - description: '`exporters` define additional optional exporters for custom consumption or storage.' + description: '`exporters` defines additional optional exporters for custom consumption or storage.' items: description: '`FlowCollectorExporter` defines an additional exporter to send enriched flows to.' properties: @@ -7499,7 +7504,7 @@ spec: type: object secondaryNetworks: description: |- - Define secondary networks to be checked for resources identification. + Defines secondary networks to be checked for resources identification. To guarantee a correct identification, indexed values must form an unique identifier across the cluster. 
If the same index is used by several resources, those resources might be incorrectly labeled. items: @@ -7532,15 +7537,15 @@ spec: type: string deduper: description: |- - `deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage. - IMPORTANT: This feature is available as a Developer Preview. + `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage. + [Unsupported (*)]. properties: mode: default: Disabled description: |- Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes.
- - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events.
- - Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500.
+ - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events.
+ - Use `Sample` to randomly keep only one flow out of 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate and keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500.
- Use `Disabled` to turn off Processor-based de-duplication.
enum: - Disabled @@ -7556,24 +7561,26 @@ spec: type: object filters: description: |- - `filters` let you define custom filters to limit the amount of generated flows. - IMPORTANT: This feature is available as a Developer Preview. + `filters` lets you define custom filters to limit the amount of generated flows. + These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing to filter by Kubernetes namespace, + but with a lesser improvement in performance. + [Unsupported (*)]. items: - description: '`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions' + description: '`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions.' properties: allOf: description: '`filters` is a list of matches that must be all satisfied in order to remove a flow.' items: - description: '`FLPSingleFilter` defines the desired configuration for a single FLP-based filter' + description: '`FLPSingleFilter` defines the desired configuration for a single FLP-based filter.' properties: field: description: |- - Name of the field to filter on - Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html. + Name of the field to filter on. + Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc. type: string matchType: default: Equal - description: Type of matching to apply + description: Type of matching to apply. enum: - Equal - NotEqual @@ -7591,7 +7598,7 @@ spec: type: object type: array outputTarget: - description: 'If specified, this filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.' 
+ description: 'If specified, these filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.' enum: - "" - Loki @@ -7944,7 +7951,7 @@ spec: default: Flows description: |- `logTypes` defines the desired record types to generate. Possible values are:
- - `Flows` (default) to export regular network flows.
+ - `Flows` to export regular network flows. This is the default.
- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates.
- `EndedConversations` to generate only ended conversations events.
- `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint.
@@ -8323,7 +8330,7 @@ spec: description: '`FlowCollectorStatus` defines the observed state of FlowCollector' properties: conditions: - description: '`conditions` represent the latest available observations of an object''s state' + description: '`conditions` represents the latest available observations of an object''s state' items: description: Condition contains details for one aspect of the current state of this API Resource. properties: diff --git a/config/crd/bases/flows.netobserv.io_flowmetrics.yaml b/config/crd/bases/flows.netobserv.io_flowmetrics.yaml index f8d868b2af..5adc45fa18 100644 --- a/config/crd/bases/flows.netobserv.io_flowmetrics.yaml +++ b/config/crd/bases/flows.netobserv.io_flowmetrics.yaml @@ -188,7 +188,7 @@ spec: type: array flatten: description: |- - `flatten` is a list of list-type fields that must be flattened, such as Interfaces and NetworkEvents. Flattened fields generate one metric per item in that field. + `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field. For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`. 
items: type: string diff --git a/config/csv/bases/netobserv-operator.clusterserviceversion.yaml b/config/csv/bases/netobserv-operator.clusterserviceversion.yaml index 775919a310..74c5b763e5 100644 --- a/config/csv/bases/netobserv-operator.clusterserviceversion.yaml +++ b/config/csv/bases/netobserv-operator.clusterserviceversion.yaml @@ -353,5 +353,5 @@ spec: provider: name: Red Hat url: https://www.redhat.com - replaces: netobserv-operator.v1.6.0-community + replaces: netobserv-operator.v1.6.1-community version: 0.0.0 diff --git a/config/descriptions/ocp.md b/config/descriptions/ocp.md index 25fe797f2b..8f4aa7c8e1 100644 --- a/config/descriptions/ocp.md +++ b/config/descriptions/ocp.md @@ -38,7 +38,7 @@ In that case, you can still get the Prometheus metrics or export raw flows to a ## Configuration -The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/config/samples/flows_v1beta2_flowcollector.yaml). +The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/config/samples/flows_v1beta2_flowcollector.yaml). To edit configuration in cluster, run: @@ -54,7 +54,7 @@ A couple of settings deserve special attention: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you might have to configure differently if you used another installation method. 
Make sure to disable it (`spec.loki.enable`) if you don't want to use Loki. -- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/QuickFilters.md). +- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. @@ -69,7 +69,7 @@ Please refer to the documentation on GitHub for more information. This documentation includes: - An [overview](https://github.com/netobserv/network-observability-operator#openshift-console) of the features, with screenshots -- More information on [configuring metrics](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/Metrics.md). 
+- More information on [configuring metrics](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/Metrics.md). - A [performance](https://github.com/netobserv/network-observability-operator#performance-fine-tuning) section, for fine-tuning - A [security](https://github.com/netobserv/network-observability-operator#securing-data-and-communications) section - An [F.A.Q.](https://github.com/netobserv/network-observability-operator#faq--troubleshooting) section diff --git a/config/descriptions/upstream.md b/config/descriptions/upstream.md index 2f4045899d..fe297341fd 100644 --- a/config/descriptions/upstream.md +++ b/config/descriptions/upstream.md @@ -42,7 +42,7 @@ In that case, you can still get the Prometheus metrics or export raw flows to a ## Configuration -The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/config/samples/flows_v1beta2_flowcollector.yaml). +The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/config/samples/flows_v1beta2_flowcollector.yaml). To edit configuration in cluster, run: @@ -58,7 +58,7 @@ A couple of settings deserve special attention: - Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you might have to configure differently if you used another installation method. 
Make sure to disable it (`spec.loki.enable`) if you don't want to use Loki. -- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/QuickFilters.md). +- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/QuickFilters.md). - Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created. @@ -73,7 +73,7 @@ Please refer to the documentation on GitHub for more information. This documentation includes: - An [overview](https://github.com/netobserv/network-observability-operator#openshift-console) of the features, with screenshots -- More information on [configuring metrics](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/Metrics.md). 
+- More information on [configuring metrics](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/Metrics.md). - A [performance](https://github.com/netobserv/network-observability-operator#performance-fine-tuning) section, for fine-tuning - A [security](https://github.com/netobserv/network-observability-operator#securing-data-and-communications) section - An [F.A.Q.](https://github.com/netobserv/network-observability-operator#faq--troubleshooting) section diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index e3e208eab5..749bcaebba 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -16,6 +16,6 @@ kind: Kustomization images: - name: controller newName: quay.io/netobserv/network-observability-operator - newTag: 1.6.1-community + newTag: 1.8.0-community commonLabels: app: netobserv-operator diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 31fc8a49cb..621357481e 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -31,11 +31,11 @@ spec: - --profiling-bind-address=$(PROFILING_BIND_ADDRESS) env: - name: RELATED_IMAGE_EBPF_AGENT - value: quay.io/netobserv/netobserv-ebpf-agent:v1.6.1-community + value: quay.io/netobserv/netobserv-ebpf-agent:v1.8.0-community - name: RELATED_IMAGE_FLOWLOGS_PIPELINE - value: quay.io/netobserv/flowlogs-pipeline:v1.6.1-community + value: quay.io/netobserv/flowlogs-pipeline:v1.8.0-community - name: RELATED_IMAGE_CONSOLE_PLUGIN - value: quay.io/netobserv/network-observability-console-plugin:v1.6.1-community + value: quay.io/netobserv/network-observability-console-plugin:v1.8.0-community - name: DOWNSTREAM_DEPLOYMENT value: "false" - name: PROFILING_BIND_ADDRESS diff --git a/controllers/consoleplugin/config/static-frontend-config.yaml b/controllers/consoleplugin/config/static-frontend-config.yaml index b9339b5716..7bd8064b35 100644 --- 
a/controllers/consoleplugin/config/static-frontend-config.yaml +++ b/controllers/consoleplugin/config/static-frontend-config.yaml @@ -1038,22 +1038,22 @@ filters: name: Xlat Zone Id component: number - id: xlat_src_address - name: Xlat src address + name: Xlat source address component: text category: source hint: Specify a single IP or range. - id: xlat_dst_address - name: Xlat dst address + name: Xlat destination address component: text category: destination hint: Specify a single IP or range. - id: xlat_src_port - name: Xlat src port + name: Xlat source port component: autocomplete category: source hint: Specify a single port number or name. - id: xlat_dst_port - name: Xlat dst port + name: Xlat destination port component: autocomplete category: destination hint: Specify a single port number or name. @@ -1402,22 +1402,22 @@ fields: description: packet translation zone id - name: XlatSrcPort type: number - description: packet translation src port + description: packet translation source port - name: XlatDstPort type: number - description: packet translation dst port + description: packet translation destination port - name: XlatSrcAddr type: string - description: packet translation src address + description: packet translation source address - name: XlatDstAddr type: string - description: packet translation dst address + description: packet translation destination address - name: K8S_ClusterName type: string description: Cluster name or identifier - name: _RecordType type: string - description: "Type of record: 'flowLog' for regular flow logs, or 'newConnection', 'heartbeat', 'endConnection' for conversation tracking" + description: "Type of record: `flowLog` for regular flow logs, or `newConnection`, `heartbeat`, `endConnection` for conversation tracking" - name: _HashId type: string description: In conversation tracking, the conversation identifier diff --git a/controllers/ebpf/agent_controller.go b/controllers/ebpf/agent_controller.go index 
e4cea0f27b..1d9412ff9e 100644 --- a/controllers/ebpf/agent_controller.go +++ b/controllers/ebpf/agent_controller.go @@ -504,8 +504,8 @@ func (c *AgentController) envConfig(ctx context.Context, coll *flowslatest.FlowC if helper.IsEBFPFlowFilterEnabled(&coll.Spec.Agent.EBPF) { config = append(config, corev1.EnvVar{Name: envEnableFlowFilter, Value: "true"}) - if len(coll.Spec.Agent.EBPF.FlowFilter.FlowFilterRules) != 0 { - if filterRules := c.configureFlowFiltersRules(coll.Spec.Agent.EBPF.FlowFilter.FlowFilterRules); filterRules != nil { + if len(coll.Spec.Agent.EBPF.FlowFilter.Rules) != 0 { + if filterRules := c.configureFlowFiltersRules(coll.Spec.Agent.EBPF.FlowFilter.Rules); filterRules != nil { config = append(config, filterRules...) } } else { diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md index 030fecae85..df65197081 100644 --- a/docs/FlowCollector.md +++ b/docs/FlowCollector.md @@ -294,7 +294,7 @@ If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.< the kernel debug filesystem, so the eBPF pod has to run as privileged. - `PacketTranslation`: enable enriching flows with packet's translation information.
- `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
-- `UDNMapping`, to enable interfaces mappind to udn.

+- `UDNMapping`, to enable interfaces mapping to udn.

false @@ -537,7 +537,9 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports: rules []object - `flowFilterRules` defines a list of ebpf agent flow filtering rules
+ `rules` defines a list of filtering rules on the eBPF Agents. +When filtering is enabled, by default, flows that don't match any rule are rejected. +To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules.
false @@ -6106,7 +6108,7 @@ Kafka can provide better scalability, resiliency, and high availability (for mor exporters []object - `exporters` define additional optional exporters for custom consumption or storage.
+ `exporters` defines additional optional exporters for custom consumption or storage.
false @@ -6271,22 +6273,20 @@ Otherwise it is matched as a case-sensitive string.
[]enum List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
-- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting -the kernel debug filesystem, so the eBPF agent pods have to run as privileged. +- `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting +the kernel debug filesystem, so the eBPF agent pods must run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
-- `DNSTracking`: enable the DNS tracking feature.
-- `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
-- `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies. -This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. +- `DNSTracking`: Enable the DNS tracking feature.
+- `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
+- `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies. +This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. It requires using the OVN-Kubernetes network plugin with the Observability feature. -IMPORTANT: This feature is available as a Developer Preview.
-- `PacketTranslation`: enable enriching flows with packet's translation information.
-- `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
-IMPORTANT: This feature is available as a Developer Preview.
-- `UDNMapping`, to enable interfaces mappind to udn.
-This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. -It requires using the OVN-Kubernetes network plugin with the Observability feature. -IMPORTANT: This feature is available as a Developer Preview.

+IMPORTANT: This feature is available as a Technology Preview.
+- `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT.
+- `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage NetObserv eBPF programs. Prerequisite: the eBPF Manager operator (or upstream bpfman operator) must be installed.
+- `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN).
+This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. +It requires using the OVN-Kubernetes network plugin with the Observability feature.
false @@ -8283,14 +8283,17 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports: rules []object - `flowFilterRules` defines a list of ebpf agent flow filtering rules
+ `rules` defines a list of filtering rules on the eBPF Agents. +When filtering is enabled, by default, flows that don't match any rule are rejected. +To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. +[Unsupported (*)].
false sampling integer - `sampling` sampling rate for the matched flow
+ `sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.

Format: int32
@@ -8431,7 +8434,7 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports: sampling integer - `sampling` sampling rate for the matched flow
+ `sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.

Format: int32
@@ -14383,16 +14386,18 @@ such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk.
deduper object - `deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage. -IMPORTANT: This feature is available as a Developer Preview.
+ `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage. +[Unsupported (*)].
false filters []object - `filters` let you define custom filters to limit the amount of generated flows. -IMPORTANT: This feature is available as a Developer Preview.
+ `filters` lets you define custom filters to limit the amount of generated flows. +These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing to filter by Kubernetes namespace, +but with a lesser improvement in performance. +[Unsupported (*)].
false @@ -14458,7 +14463,7 @@ This setting is ignored when Kafka is disabled.
enum `logTypes` defines the desired record types to generate. Possible values are:
-- `Flows` (default) to export regular network flows.
+- `Flows` to export regular network flows. This is the default.
- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates.
- `EndedConversations` to generate only ended conversations events.
- `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint.

@@ -14628,7 +14633,7 @@ By convention, some values are forbidden. It must be greater than 1024 and diffe secondaryNetworks []object - Define secondary networks to be checked for resources identification. + Defines secondary networks to be checked for resources identification. To guarantee a correct identification, indexed values must form an unique identifier across the cluster. If the same index is used by several resources, those resources might be incorrectly labeled.
@@ -16425,8 +16430,8 @@ Fields absent from the 'k8s.v1.cni.cncf.io/network-status' annotation must not b -`deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage. -IMPORTANT: This feature is available as a Developer Preview. +`deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage. +[Unsupported (*)]. @@ -16442,8 +16447,8 @@ IMPORTANT: This feature is available as a Developer Preview.
enum Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes.
-- Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events.
-- Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500.
+- Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events.
+- Use `Sample` to randomly keep only one flow on 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate or keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500.
- Use `Disabled` to turn off Processor-based de-duplication.


Enum: Disabled, Drop, Sample
@@ -16470,7 +16475,7 @@ IMPORTANT: This feature is available as a Developer Preview. -`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions +`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions. @@ -16492,7 +16497,7 @@ IMPORTANT: This feature is available as a Developer Preview. @@ -16516,7 +16521,7 @@ IMPORTANT: This feature is available as a Developer Preview. -`FLPSingleFilter` defines the desired configuration for a single FLP-based filter +`FLPSingleFilter` defines the desired configuration for a single FLP-based filter.
outputTarget enum - If specified, this filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.
+ If specified, these filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.

Enum: , Loki, Metrics, Exporters
@@ -16531,15 +16536,15 @@ IMPORTANT: This feature is available as a Developer Preview. diff --git a/docs/FlowMetric.md b/docs/FlowMetric.md index afa5f90977..31c95eb70c 100644 --- a/docs/FlowMetric.md +++ b/docs/FlowMetric.md @@ -155,7 +155,7 @@ Refer to the documentation for the list of available fields: https://docs.opensh diff --git a/docs/flowcollector-flows-netobserv-io-v1beta2.adoc b/docs/flowcollector-flows-netobserv-io-v1beta2.adoc index 76865c55f4..762d9fb546 100644 --- a/docs/flowcollector-flows-netobserv-io-v1beta2.adoc +++ b/docs/flowcollector-flows-netobserv-io-v1beta2.adoc @@ -102,7 +102,7 @@ Kafka can provide better scalability, resiliency, and high availability (for mor | `exporters` | `array` -| `exporters` define additional optional exporters for custom consumption or storage. +| `exporters` defines additional optional exporters for custom consumption or storage. | `kafka` | `object` @@ -204,19 +204,27 @@ Otherwise it is matched as a case-sensitive string. | `array (string)` | List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are: + -- `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting -the kernel debug filesystem, so the eBPF agent pods have to run as privileged. +- `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting +the kernel debug filesystem, so the eBPF agent pods must run as privileged. If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported. + -- `DNSTracking`: enable the DNS tracking feature. + +- `DNSTracking`: Enable the DNS tracking feature. + -- `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic. + +- `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic. + -- `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies. 
-This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged. +- `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies. +This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. It requires using the OVN-Kubernetes network plugin with the Observability feature. -IMPORTANT: This feature is available as a Developer Preview. + +IMPORTANT: This feature is available as a Technology Preview. + +- `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT. + + +- `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage Network Observability eBPF programs. Pre-requisite: the eBPF Manager operator (or upstream bpfman operator) must be installed. + + +- `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN). + + +This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged. +It requires using the OVN-Kubernetes network plugin with the Observability feature. | `flowFilter` | `object` @@ -407,6 +415,11 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports: | `integer` | `icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by. +| `peerCIDR` +| `string` +| `peerCIDR` defines the Peer IP CIDR to filter flows by. +Examples: `10.10.10.0/24` or `100:100:100:100::/64` + | `peerIP` | `string` | `peerIP` optionally defines the remote IP address to filter flows by. @@ -427,6 +440,120 @@ To filter two ports, use a "port1,port2" in string format. For example, `ports: | `string` | `protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`. +| `rules` +| `array` +| `rules` defines a list of filtering rules on the eBPF Agents. 
+When filtering is enabled, by default, flows that don't match any rule are rejected. +To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. +[Unsupported (*)]. + +| `sampling` +| `integer` +| `sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`. + +| `sourcePorts` +| `integer-or-string` +| `sourcePorts` optionally defines the source ports to filter flows by. +To filter a single port, set a single port as an integer value. For example, `sourcePorts: 80`. +To filter a range of ports, use a "start-end" range in string format. For example, `sourcePorts: "80-100"`. +To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + +| `tcpFlags` +| `string` +| `tcpFlags` optionally defines TCP flags to filter flows by. +In addition to the standard flags (RFC-9293), you can also filter by one of the three following combinations: `SYN-ACK`, `FIN-ACK`, and `RST-ACK`. + +|=== +== .spec.agent.ebpf.flowFilter.rules +Description:: ++ +-- +`rules` defines a list of filtering rules on the eBPF Agents. +When filtering is enabled, by default, flows that don't match any rule are rejected. +To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules. +[Unsupported (*)]. +-- + +Type:: + `array` + + + + +== .spec.agent.ebpf.flowFilter.rules[] +Description:: ++ +-- +`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule. +-- + +Type:: + `object` + + + + +[cols="1,1,1",options="header"] +|=== +| Property | Type | Description + +| `action` +| `string` +| `action` defines the action to perform on the flows that match the filter. The available options are `Accept`, which is the default, and `Reject`. + +| `cidr` +| `string` +| `cidr` defines the IP CIDR to filter flows by. 
+Examples: `10.10.10.0/24` or `100:100:100:100::/64` + +| `destPorts` +| `integer-or-string` +| `destPorts` optionally defines the destination ports to filter flows by. +To filter a single port, set a single port as an integer value. For example, `destPorts: 80`. +To filter a range of ports, use a "start-end" range in string format. For example, `destPorts: "80-100"`. +To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + +| `direction` +| `string` +| `direction` optionally defines a direction to filter flows by. The available options are `Ingress` and `Egress`. + +| `icmpCode` +| `integer` +| `icmpCode`, for Internet Control Message Protocol (ICMP) traffic, optionally defines the ICMP code to filter flows by. + +| `icmpType` +| `integer` +| `icmpType`, for ICMP traffic, optionally defines the ICMP type to filter flows by. + +| `peerCIDR` +| `string` +| `peerCIDR` defines the Peer IP CIDR to filter flows by. +Examples: `10.10.10.0/24` or `100:100:100:100::/64` + +| `peerIP` +| `string` +| `peerIP` optionally defines the remote IP address to filter flows by. +Example: `10.10.10.10`. + +| `pktDrops` +| `boolean` +| `pktDrops` optionally filters only flows containing packet drops. + +| `ports` +| `integer-or-string` +| `ports` optionally defines the ports to filter flows by. It is used both for source and destination ports. +To filter a single port, set a single port as an integer value. For example, `ports: 80`. +To filter a range of ports, use a "start-end" range in string format. For example, `ports: "80-100"`. +To filter two ports, use a "port1,port2" in string format. For example, `ports: "80,100"`. + +| `protocol` +| `string` +| `protocol` optionally defines a protocol to filter flows by. The available options are `TCP`, `UDP`, `ICMP`, `ICMPv6`, and `SCTP`. + +| `sampling` +| `integer` +| `sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`. 
+ | `sourcePorts` | `integer-or-string` | `sourcePorts` optionally defines the source ports to filter flows by. @@ -937,7 +1064,7 @@ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-co Description:: + -- -`exporters` define additional optional exporters for custom consumption or storage. +`exporters` defines additional optional exporters for custom consumption or storage. -- Type:: @@ -2575,6 +2702,18 @@ such as `GOGC` and `GOMAXPROCS` env vars. Set these values at your own risk. | `string` | `clusterName` is the name of the cluster to appear in the flows data. This is useful in a multi-cluster context. When using {product-title}, leave empty to make it automatically determined. +| `deduper` +| `object` +| `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage. +[Unsupported (*)]. + +| `filters` +| `array` +| `filters` lets you define custom filters to limit the amount of generated flows. +These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing to filter by Kubernetes namespace, +but with a lesser improvement in performance. +[Unsupported (*)]. + | `imagePullPolicy` | `string` | `imagePullPolicy` is the Kubernetes pull policy for the image defined above @@ -2605,13 +2744,13 @@ This setting is ignored when Kafka is disabled. | `string` | `logTypes` defines the desired record types to generate. Possible values are: + -- `Flows` (default) to export regular network flows + +- `Flows` to export regular network flows. This is the default. + -- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates + +- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates. + -- `EndedConversations` to generate only ended conversations events + +- `EndedConversations` to generate only ended conversations events. 
+ -- `All` to generate both network flows and all conversations events + +- `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint. + | `metrics` @@ -2700,7 +2839,7 @@ By convention, some values are forbidden. It must be greater than 1024 and diffe | `secondaryNetworks` | `array` -| Define secondary networks to be checked for resources identification. +| Defines secondary networks to be checked for resources identification. To guarantee a correct identification, indexed values must form an unique identifier across the cluster. If the same index is used by several resources, those resources might be incorrectly labeled. @@ -2773,7 +2912,7 @@ Type:: Description:: + -- -Define secondary networks to be checked for resources identification. +Defines secondary networks to be checked for resources identification. To guarantee a correct identification, indexed values must form an unique identifier across the cluster. If the same index is used by several resources, those resources might be incorrectly labeled. -- @@ -2814,6 +2953,133 @@ Fields absent from the 'k8s.v1.cni.cncf.io/network-status' annotation must not b | `string` | `name` should match the network name as visible in the pods annotation 'k8s.v1.cni.cncf.io/network-status'. +|=== +== .spec.processor.deduper +Description:: ++ +-- +`deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage. +[Unsupported (*)]. +-- + +Type:: + `object` + + + + +[cols="1,1,1",options="header"] +|=== +| Property | Type | Description + +| `mode` +| `string` +| Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes. 
+ +
+- Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events. +
+ +
+- Use `Sample` to randomly keep only one flow in 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate or keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500. +
+ +
+- Use `Disabled` to turn off Processor-based de-duplication. +
+ +
+| `sampling`
+| `integer`
+| `sampling` is the sampling rate when deduper `mode` is `Sample`.
+
+|===
+== .spec.processor.filters
+Description::
++
+--
+`filters` lets you define custom filters to limit the amount of generated flows.
+These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing to filter by Kubernetes namespace,
+but with a lesser improvement in performance.
+[Unsupported (*)].
+--
+
+Type::
+  `array`
+
+
+
+
+== .spec.processor.filters[]
+Description::
++
+--
+`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions.
+--
+
+Type::
+  `object`
+
+
+
+
+[cols="1,1,1",options="header"]
+|===
+| Property | Type | Description
+
+| `allOf`
+| `array`
+| `filters` is a list of matches that must be all satisfied in order to remove a flow.
+
+| `outputTarget`
+| `string`
+| If specified, these filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.
+
+| `sampling`
+| `integer`
+| `sampling` is an optional sampling rate to apply to this filter.
+
+|===
+== .spec.processor.filters[].allOf
+Description::
++
+--
+`filters` is a list of matches that must be all satisfied in order to remove a flow.
+-- + +Type:: + `array` + + + + +== .spec.processor.filters[].allOf[] +Description:: ++ +-- +`FLPSingleFilter` defines the desired configuration for a single FLP-based filter. +-- + +Type:: + `object` + +Required:: + - `field` + - `matchType` + + + +[cols="1,1,1",options="header"] +|=== +| Property | Type | Description + +| `field` +| `string` +| Name of the field to filter on. +Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc. + +| `matchType` +| `string` +| Type of matching to apply. + +| `value` +| `string` +| Value to filter on. When `matchType` is `Equal` or `NotEqual`, you can use field injection with `$(SomeField)` to refer to any other field of the flow. + |=== == .spec.processor.kafkaConsumerAutoscaler Description:: @@ -2865,7 +3131,8 @@ Note that the more metrics you add, the bigger is the impact on Prometheus workl Metrics enabled by default are: `namespace_flows_total`, `node_ingress_bytes_total`, `node_egress_bytes_total`, `workload_ingress_bytes_total`, `workload_egress_bytes_total`, `namespace_drop_packets_total` (when `PacketDrop` feature is enabled), -`namespace_rtt_seconds` (when `FlowRTT` feature is enabled), `namespace_dns_latency_seconds` (when `DNSTracking` feature is enabled). +`namespace_rtt_seconds` (when `FlowRTT` feature is enabled), `namespace_dns_latency_seconds` (when `DNSTracking` feature is enabled), +`namespace_network_policy_events_total` (when `NetworkEvents` feature is enabled). 
More information, with full list of available metrics: https://github.com/netobserv/network-observability-operator/blob/main/docs/Metrics.md | `server` diff --git a/docs/flowmetric-flows-netobserv-io-v1alpha1.adoc b/docs/flowmetric-flows-netobserv-io-v1alpha1.adoc index 6bda2865cc..88647448dc 100644 --- a/docs/flowmetric-flows-netobserv-io-v1alpha1.adoc +++ b/docs/flowmetric-flows-netobserv-io-v1alpha1.adoc @@ -107,6 +107,11 @@ When set to `Egress`, it is equivalent to adding the regular expression filter o be used to eliminate duplicates: `Duplicate != "true"` and `FlowDirection = "0"`. Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html. +| `flatten` +| `array (string)` +| `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field. +For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`. + | `labels` | `array (string)` | `labels` is a list of fields that should be used as Prometheus labels, also known as dimensions. diff --git a/docs/flows-format.adoc b/docs/flows-format.adoc index 76f5c85d3c..d87ab52abe 100644 --- a/docs/flows-format.adoc +++ b/docs/flows-format.adoc @@ -99,6 +99,13 @@ The "Cardinality" column gives information about the implied metric cardinality | yes | fine | destination.k8s.namespace.name +| `DstK8S_NetworkName` +| string +| Destination network name +| `dst_network` +| no +| fine +| n/a | `DstK8S_OwnerName` | string | Name of the destination owner, such as Deployment name, StatefulSet name, etc. 
@@ -156,14 +163,14 @@ The "Cardinality" column gives information about the implied metric cardinality | fine | n/a | `Flags` -| number -| Logical OR combination of unique TCP flags comprised in the flow, as per RFC-9293, with additional custom flags to represent the following per-packet combinations: + -- SYN+ACK (0x100) + -- FIN+ACK (0x200) + -- RST+ACK (0x400) +| string[] +| List of TCP flags comprised in the flow, as per RFC-9293, with additional custom flags to represent the following per-packet combinations: + +- SYN_ACK + +- FIN_ACK + +- RST_ACK | `tcp_flags` | no -| fine +| careful | tcp.flags | `FlowDirection` | number @@ -190,7 +197,7 @@ The "Cardinality" column gives information about the implied metric cardinality | fine | icmp.type | `IfDirections` -| number +| number[] | Flow directions from the network interface observation point. Can be one of: + - 0: Ingress (interface incoming traffic) + - 1: Egress (interface outgoing traffic) @@ -199,7 +206,7 @@ The "Cardinality" column gives information about the implied metric cardinality | fine | interface.directions | `Interfaces` -| string +| string[] | Network interfaces | `interfaces` | no @@ -220,8 +227,14 @@ The "Cardinality" column gives information about the implied metric cardinality | fine | k8s.layer | `NetworkEvents` -| string -| Network events flow monitoring +| object[] +| Network events, such as network policy actions, composed of nested fields: + +- Feature (such as "acl" for network policies) + +- Type (such as an "AdminNetworkPolicy") + +- Namespace (namespace where the event applies, if any) + +- Name (name of the resource that triggered the event) + +- Action (such as "allow" or "drop") + +- Direction (Ingress or Egress) | `network_events` | no | avoid @@ -229,7 +242,7 @@ The "Cardinality" column gives information about the implied metric cardinality | `Packets` | number | Number of packets -| n/a +| `pkt_drop_cause` | no | avoid | packets @@ -275,6 +288,13 @@ The "Cardinality" column gives 
information about the implied metric cardinality | no | fine | protocol +| `Sampling` +| number +| Sampling rate used for this flow +| n/a +| no +| fine +| n/a | `SrcAddr` | string | Source IP address (ipv4 or ipv6) @@ -310,6 +330,13 @@ The "Cardinality" column gives information about the implied metric cardinality | yes | fine | source.k8s.namespace.name +| `SrcK8S_NetworkName` +| string +| Source network name +| `src_network` +| no +| fine +| n/a | `SrcK8S_OwnerName` | string | Name of the source owner, such as Deployment name, StatefulSet name, etc. @@ -387,6 +414,48 @@ The "Cardinality" column gives information about the implied metric cardinality | no | avoid | timereceived +| `Udns` +| string[] +| List of User Defined Networks +| `udns` +| no +| careful +| n/a +| `XlatDstAddr` +| string +| packet translation destination address +| `xlat_dst_address` +| no +| avoid +| n/a +| `XlatDstPort` +| number +| packet translation destination port +| `xlat_dst_port` +| no +| careful +| n/a +| `XlatSrcAddr` +| string +| packet translation source address +| `xlat_src_address` +| no +| avoid +| n/a +| `XlatSrcPort` +| number +| packet translation source port +| `xlat_src_port` +| no +| careful +| n/a +| `ZoneId` +| number +| packet translation zone id +| `xlat_zone_id` +| no +| avoid +| n/a | `_HashId` | string | In conversation tracking, the conversation identifier @@ -396,7 +465,7 @@ The "Cardinality" column gives information about the implied metric cardinality | n/a | `_RecordType` | string -| Type of record: 'flowLog' for regular flow logs, or 'newConnection', 'heartbeat', 'endConnection' for conversation tracking +| Type of record: `flowLog` for regular flow logs, or `newConnection`, `heartbeat`, `endConnection` for conversation tracking | `type` | yes | fine diff --git a/hack/asciidoc-flows-gen.sh b/hack/asciidoc-flows-gen.sh index 95443f7020..7019f44149 100755 --- a/hack/asciidoc-flows-gen.sh +++ b/hack/asciidoc-flows-gen.sh @@ -63,9 +63,9 @@ for i in $(seq 0 $(( 
$nbfields-1 )) ); do echo -e "| $otel" >> $ADOC done +echo -e '|===' >> $ADOC + if [[ $errors != "" ]]; then echo -e $errors exit 1 fi - -echo -e '|===' >> $ADOC diff --git a/pkg/helper/cardinality/cardinality.json b/pkg/helper/cardinality/cardinality.json index 1f19d555e7..84088a788c 100644 --- a/pkg/helper/cardinality/cardinality.json +++ b/pkg/helper/cardinality/cardinality.json @@ -13,6 +13,7 @@ "SrcK8S_HostIP": "fine", "SrcK8S_HostName": "fine", "SrcK8S_Zone": "fine", + "SrcK8S_NetworkName": "fine", "SrcSubnetLabel": "fine", "DstK8S_Name": "careful", "DstK8S_Type": "fine", @@ -25,6 +26,7 @@ "DstK8S_HostIP": "fine", "DstK8S_HostName": "fine", "DstK8S_Zone": "fine", + "DstK8S_NetworkName": "fine", "DstSubnetLabel": "fine", "K8S_FlowLayer": "fine", "Proto": "fine", @@ -64,6 +66,7 @@ "XlatIcmpId": "avoid", "XlatDstPort": "careful", "XlatDstAddr": "avoid", + "Udns": "careful", "_RecordType": "fine", "_HashId": "avoid" }
field string - Name of the field to filter on -Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html.
+ Name of the field to filter on. +Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc.
true
matchType enum - Type of matching to apply
+ Type of matching to apply.

Enum: Equal, NotEqual, Presence, Absence, MatchRegex, NotMatchRegex
Default: Equal
@@ -18231,7 +18236,7 @@ If the namespace is different, the config map or the secret is copied so that it
conditions []object - `conditions` represent the latest available observations of an object's state
+ `conditions` represents the latest available observations of an object's state
true
flatten []string - `flatten` is a list of list-type fields that must be flattened, such as Interfaces and NetworkEvents. Flattened fields generate one metric per item in that field. + `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field. For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`.
false