diff --git a/Makefile b/Makefile
index 4a60e3f2a8..89b54ae83d 100644
--- a/Makefile
+++ b/Makefile
@@ -12,17 +12,17 @@ IMAGE_ORG ?= $(USER)
REPO ?= quay.io/$(IMAGE_ORG)
# Component versions to use in bundle / release (do not use $VERSION for that)
-PREVIOUS_VERSION ?= v1.6.0-community
+PREVIOUS_VERSION ?= v1.6.1-community
-BUNDLE_VERSION ?= 1.6.1-community
+BUNDLE_VERSION ?= 1.8.0-community
#File based catalog
-FBC_VERSION ?= 1.6.1-community
+FBC_VERSION ?= 1.8.0-community
# console plugin
-export PLG_VERSION ?= v1.6.1-community
+export PLG_VERSION ?= v1.8.0-community
# flowlogs-pipeline
-export FLP_VERSION ?= v1.6.1-community
+export FLP_VERSION ?= v1.8.0-community
# eBPF agent
-export BPF_VERSION ?= v1.6.1-community
+export BPF_VERSION ?= v1.8.0-community
# Allows building bundles in Mac replacing BSD 'sed' command by GNU-compatible 'gsed'
ifeq (,$(shell which gsed 2>/dev/null))
diff --git a/RELEASE.md b/RELEASE.md
index 07d02f0d9c..508f1e4bc7 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -11,7 +11,7 @@ To release them, a tag in the format "v1.6.0-community" or "v1.6.0-crc0" must be
E.g:
```bash
-version="v1.6.1-community"
+version="v1.8.0-community"
git tag -a "$version" -m "$version"
git push upstream --tags
```
@@ -36,7 +36,7 @@ Edit the [Makefile](./Makefile) to update `PREVIOUS_VERSION`, `BUNDLE_VERSION`,
make update-bundle
# Set desired operator version - CAREFUL, no leading "v" here
-version="1.6.1-community"
+version="1.8.0-community"
vv=v$version
test_branch=test-$vv
@@ -84,7 +84,7 @@ Click on "Publish release".
Before publishing, we should check that upgrading the operator from a previous version isn't broken. We can use `operator-sdk` for that:
```bash
-previous=v1.6.0-community
+previous=v1.6.1-community
bin/operator-sdk run bundle quay.io/netobserv/network-observability-operator-bundle:$previous --timeout 5m
PORT_FWD=false make deploy-loki deploy-sample-cr
bin/operator-sdk run bundle-upgrade quay.io/netobserv/network-observability-operator-bundle:$vv --timeout 5m
diff --git a/apis/flowcollector/v1beta1/flowcollector_types.go b/apis/flowcollector/v1beta1/flowcollector_types.go
index 78e45fb195..12624823ec 100644
--- a/apis/flowcollector/v1beta1/flowcollector_types.go
+++ b/apis/flowcollector/v1beta1/flowcollector_types.go
@@ -154,7 +154,7 @@ type FlowCollectorIPFIX struct {
// - `NetworkEvents`, to track Network events.
// - `PacketTranslation`, to enrich flows with packets translation information.
// - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
-// - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+// - `UDNMapping`, to enable interfaces mapping to udn [Developer Preview].
// +kubebuilder:validation:Enum:="PacketDrop";"DNSTracking";"FlowRTT";"NetworkEvents";"PacketTranslation";"EbpfManager";"UDNMapping"
type AgentFeature string
@@ -272,13 +272,15 @@ type EBPFFlowFilter struct {
// Set `enable` to `true` to enable the eBPF flow filtering feature.
Enable *bool `json:"enable,omitempty"`
- // [deprecated (*)] this setting is not used anymore.
+ // [deprecated (*)] this setting is not used anymore. It is replaced with the `rules` list.
EBPFFlowFilterRule `json:",inline"`
- // `flowFilterRules` defines a list of ebpf agent flow filtering rules
+ // `rules` defines a list of filtering rules on the eBPF Agents.
+ // When filtering is enabled, by default, flows that don't match any rule are rejected.
+ // To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules.
// +kubebuilder:validation:MinItems:=1
// +kubebuilder:validation:MaxItems:=16
- FlowFilterRules []EBPFFlowFilterRule `json:"rules,omitempty"`
+ Rules []EBPFFlowFilterRule `json:"rules,omitempty"`
}
// `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information
@@ -364,7 +366,7 @@ type FlowCollectorEBPF struct {
// the kernel debug filesystem, so the eBPF pod has to run as privileged.
// - `PacketTranslation`: enable enriching flows with packet's translation information.
// - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- // - `UDNMapping`, to enable interfaces mappind to udn.
+ // - `UDNMapping`, to enable interfaces mapping to udn.
// +optional
Features []AgentFeature `json:"features,omitempty"`
diff --git a/apis/flowcollector/v1beta1/zz_generated.conversion.go b/apis/flowcollector/v1beta1/zz_generated.conversion.go
index 96f30bc6ba..aa0ccb4a14 100644
--- a/apis/flowcollector/v1beta1/zz_generated.conversion.go
+++ b/apis/flowcollector/v1beta1/zz_generated.conversion.go
@@ -530,7 +530,7 @@ func autoConvert_v1beta1_EBPFFlowFilter_To_v1beta2_EBPFFlowFilter(in *EBPFFlowFi
if err := Convert_v1beta1_EBPFFlowFilterRule_To_v1beta2_EBPFFlowFilterRule(&in.EBPFFlowFilterRule, &out.EBPFFlowFilterRule, s); err != nil {
return err
}
- out.FlowFilterRules = *(*[]v1beta2.EBPFFlowFilterRule)(unsafe.Pointer(&in.FlowFilterRules))
+ out.Rules = *(*[]v1beta2.EBPFFlowFilterRule)(unsafe.Pointer(&in.Rules))
return nil
}
@@ -544,7 +544,7 @@ func autoConvert_v1beta2_EBPFFlowFilter_To_v1beta1_EBPFFlowFilter(in *v1beta2.EB
if err := Convert_v1beta2_EBPFFlowFilterRule_To_v1beta1_EBPFFlowFilterRule(&in.EBPFFlowFilterRule, &out.EBPFFlowFilterRule, s); err != nil {
return err
}
- out.FlowFilterRules = *(*[]EBPFFlowFilterRule)(unsafe.Pointer(&in.FlowFilterRules))
+ out.Rules = *(*[]EBPFFlowFilterRule)(unsafe.Pointer(&in.Rules))
return nil
}
diff --git a/apis/flowcollector/v1beta1/zz_generated.deepcopy.go b/apis/flowcollector/v1beta1/zz_generated.deepcopy.go
index e79aca3d57..50bbbdbc53 100644
--- a/apis/flowcollector/v1beta1/zz_generated.deepcopy.go
+++ b/apis/flowcollector/v1beta1/zz_generated.deepcopy.go
@@ -131,8 +131,8 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) {
**out = **in
}
in.EBPFFlowFilterRule.DeepCopyInto(&out.EBPFFlowFilterRule)
- if in.FlowFilterRules != nil {
- in, out := &in.FlowFilterRules, &out.FlowFilterRules
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
*out = make([]EBPFFlowFilterRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
diff --git a/apis/flowcollector/v1beta2/flowcollector_types.go b/apis/flowcollector/v1beta2/flowcollector_types.go
index 16a386539f..885170817d 100644
--- a/apis/flowcollector/v1beta2/flowcollector_types.go
+++ b/apis/flowcollector/v1beta2/flowcollector_types.go
@@ -80,7 +80,7 @@ type FlowCollectorSpec struct {
// +optional
Kafka FlowCollectorKafka `json:"kafka,omitempty"`
- // `exporters` define additional optional exporters for custom consumption or storage.
+ // `exporters` defines additional optional exporters for custom consumption or storage.
// +optional
// +k8s:conversion-gen=false
Exporters []*FlowCollectorExporter `json:"exporters"`
@@ -174,10 +174,10 @@ type FlowCollectorIPFIX struct {
// - `PacketDrop`, to track packet drops.
// - `DNSTracking`, to track specific information on DNS traffic.
// - `FlowRTT`, to track TCP latency.
-// - `NetworkEvents`, to track Network events [Developer Preview].
-// - `PacketTranslation`, to enrich flows with packets translation information.
-// - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
-// - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+// - `NetworkEvents`, to track network events [Technology Preview].
+// - `PacketTranslation`, to enrich flows with packets translation information, such as Service NAT.
+// - `EbpfManager`, to enable using eBPF Manager to manage NetObserv eBPF programs. [Unsupported (*)].
+// - `UDNMapping`, to enable interfaces mapping to UDN. [Unsupported (*)].
// +kubebuilder:validation:Enum:="PacketDrop";"DNSTracking";"FlowRTT";"NetworkEvents";"PacketTranslation";"EbpfManager";"UDNMapping"
type AgentFeature string
@@ -285,7 +285,7 @@ type EBPFFlowFilterRule struct {
// +optional
PktDrops *bool `json:"pktDrops,omitempty"`
- // `sampling` sampling rate for the matched flow
+ // `sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.
// +optional
Sampling *uint32 `json:"sampling,omitempty"`
}
@@ -295,13 +295,16 @@ type EBPFFlowFilter struct {
// Set `enable` to `true` to enable the eBPF flow filtering feature.
Enable *bool `json:"enable,omitempty"`
- // [deprecated (*)] this setting is not used anymore.
+ // [Deprecated (*)]. This setting is not used anymore. It is replaced with the `rules` list.
EBPFFlowFilterRule `json:",inline"`
- // `flowFilterRules` defines a list of ebpf agent flow filtering rules
+ // `rules` defines a list of filtering rules on the eBPF Agents.
+ // When filtering is enabled, by default, flows that don't match any rule are rejected.
+ // To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules.
+ // [Unsupported (*)].
// +kubebuilder:validation:MinItems:=1
// +kubebuilder:validation:MaxItems:=16
- FlowFilterRules []EBPFFlowFilterRule `json:"rules,omitempty"`
+ Rules []EBPFFlowFilterRule `json:"rules,omitempty"`
}
// `FlowCollectorEBPF` defines a FlowCollector that uses eBPF to collect the flows information
@@ -378,22 +381,20 @@ type FlowCollectorEBPF struct {
Advanced *AdvancedAgentConfig `json:"advanced,omitempty"`
// List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- // - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting
- // the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ // - `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting
+ // the kernel debug filesystem, so the eBPF agent pods must run as privileged.
// If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- // - `DNSTracking`: enable the DNS tracking feature.
- // - `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
- // - `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies.
- // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ // - `DNSTracking`: Enable the DNS tracking feature.
+ // - `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
+ // - `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies.
+ // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged.
// It requires using the OVN-Kubernetes network plugin with the Observability feature.
- // IMPORTANT: This feature is available as a Developer Preview.
- // - `PacketTranslation`: enable enriching flows with packet's translation information.
- // - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- // IMPORTANT: This feature is available as a Developer Preview.
- // - `UDNMapping`, to enable interfaces mappind to udn.
- // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ // IMPORTANT: This feature is available as a Technology Preview.
+ // - `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT.
+	// - `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage NetObserv eBPF programs. Prerequisite: the eBPF Manager operator (or upstream bpfman operator) must be installed.
+ // - `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN).
+ // This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged.
// It requires using the OVN-Kubernetes network plugin with the Observability feature.
- // IMPORTANT: This feature is available as a Developer Preview.
// +optional
Features []AgentFeature `json:"features,omitempty"`
@@ -647,7 +648,7 @@ type FlowCollectorFLP struct {
KafkaConsumerBatchSize int `json:"kafkaConsumerBatchSize"`
// `logTypes` defines the desired record types to generate. Possible values are:
- // - `Flows` (default) to export regular network flows.
+ // - `Flows` to export regular network flows. This is the default.
// - `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates.
// - `EndedConversations` to generate only ended conversations events.
// - `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint.
@@ -676,13 +677,15 @@ type FlowCollectorFLP struct {
SubnetLabels SubnetLabels `json:"subnetLabels,omitempty"`
//+optional
- // `deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage.
- // IMPORTANT: This feature is available as a Developer Preview.
+ // `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage.
+ // [Unsupported (*)].
Deduper *FLPDeduper `json:"deduper,omitempty"`
// +optional
- // `filters` let you define custom filters to limit the amount of generated flows.
- // IMPORTANT: This feature is available as a Developer Preview.
+ // `filters` lets you define custom filters to limit the amount of generated flows.
+	// These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing you to filter by Kubernetes namespace,
+ // but with a lesser improvement in performance.
+ // [Unsupported (*)].
Filters []FLPFilterSet `json:"filters"`
// `advanced` allows setting some aspects of the internal configuration of the flow processor.
@@ -700,11 +703,11 @@ const (
FLPDeduperSample FLPDeduperMode = "Sample"
)
-// `FLPDeduper` defines the desired configuration for FLP-based deduper
+// `FLPDeduper` defines the desired configuration for FLP-based deduper.
type FLPDeduper struct {
// Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes.
- // - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events.
- // - Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500.
+ // - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events.
+	// - Use `Sample` to randomly keep only one flow out of 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate or keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500.
// - Use `Disabled` to turn off Processor-based de-duplication.
// +kubebuilder:validation:Enum:="Disabled";"Drop";"Sample"
// +kubebuilder:default:=Disabled
@@ -732,13 +735,13 @@ const (
FLPFilterTargetExporters FLPFilterTarget = "Exporters"
)
-// `FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions
+// `FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions.
type FLPFilterSet struct {
// `filters` is a list of matches that must be all satisfied in order to remove a flow.
// +optional
AllOf []FLPSingleFilter `json:"allOf"`
- // If specified, this filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.
+ // If specified, these filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.
// +optional
// +kubebuilder:validation:Enum:="";"Loki";"Metrics";"Exporters"
OutputTarget FLPFilterTarget `json:"outputTarget,omitempty"`
@@ -749,15 +752,15 @@ type FLPFilterSet struct {
Sampling int32 `json:"sampling,omitempty"`
}
-// `FLPSingleFilter` defines the desired configuration for a single FLP-based filter
+// `FLPSingleFilter` defines the desired configuration for a single FLP-based filter.
type FLPSingleFilter struct {
- // Type of matching to apply
+ // Type of matching to apply.
// +kubebuilder:validation:Enum:="Equal";"NotEqual";"Presence";"Absence";"MatchRegex";"NotMatchRegex"
// +kubebuilder:default:="Equal"
MatchType FLPFilterMatch `json:"matchType"`
- // Name of the field to filter on
- // Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html.
+ // Name of the field to filter on.
+ // Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc.
// +required
Field string `json:"field"`
@@ -1327,7 +1330,7 @@ type AdvancedProcessorConfig struct {
// +optional
Scheduling *SchedulingConfig `json:"scheduling,omitempty"`
- // Define secondary networks to be checked for resources identification.
+ // Defines secondary networks to be checked for resources identification.
// To guarantee a correct identification, indexed values must form an unique identifier across the cluster.
// If the same index is used by several resources, those resources might be incorrectly labeled.
// +optional
@@ -1417,7 +1420,7 @@ type AdvancedPluginConfig struct {
Scheduling *SchedulingConfig `json:"scheduling,omitempty"`
}
-// `SubnetLabels` allows to define custom labels on subnets and IPs or to enable automatic labelling of recognized subnets in OpenShift.
+// `SubnetLabels` allows you to define custom labels on subnets and IPs or to enable automatic labelling of recognized subnets in OpenShift.
type SubnetLabels struct {
// `openShiftAutoDetect` allows, when set to `true`, to detect automatically the machines, pods and services subnets based on the
// OpenShift install configuration and the Cluster Network Operator configuration. Indirectly, this is a way to accurately detect
@@ -1475,7 +1478,7 @@ type FlowCollectorExporter struct {
type FlowCollectorStatus struct {
// Important: Run "make" to regenerate code after modifying this file
- // `conditions` represent the latest available observations of an object's state
+ // `conditions` represents the latest available observations of an object's state
Conditions []metav1.Condition `json:"conditions"`
// Namespace where console plugin and flowlogs-pipeline have been deployed.
diff --git a/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go b/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go
index 3d9e66c828..480e0fc4a4 100644
--- a/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go
+++ b/apis/flowcollector/v1beta2/flowcollector_validation_webhook.go
@@ -119,8 +119,8 @@ func (r *FlowCollector) validateAgent(_ context.Context, fc *FlowCollectorSpec)
var errs []error
if fc.Agent.EBPF.FlowFilter != nil && fc.Agent.EBPF.FlowFilter.Enable != nil && *fc.Agent.EBPF.FlowFilter.Enable {
m := make(map[string]bool)
- for i := range fc.Agent.EBPF.FlowFilter.FlowFilterRules {
- rule := fc.Agent.EBPF.FlowFilter.FlowFilterRules[i]
+ for i := range fc.Agent.EBPF.FlowFilter.Rules {
+ rule := fc.Agent.EBPF.FlowFilter.Rules[i]
key := rule.CIDR + "-" + rule.PeerCIDR
if found := m[key]; found {
errs = append(errs, fmt.Errorf("flow filter rule CIDR and PeerCIDR %s already exists",
diff --git a/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go b/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go
index 0ff2dac08a..02c1d3993b 100644
--- a/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go
+++ b/apis/flowcollector/v1beta2/flowcollector_validation_webhook_test.go
@@ -44,7 +44,7 @@ func TestValidateAgent(t *testing.T) {
Sampling: ptr.To(int32(100)),
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Action: "Accept",
CIDR: "0.0.0.0/0",
@@ -73,7 +73,7 @@ func TestValidateAgent(t *testing.T) {
Sampling: ptr.To(int32(100)),
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Action: "Accept",
CIDR: "0.0.0.0/0",
@@ -181,7 +181,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Action: "Accept",
CIDR: "0.0.0.0/0",
@@ -208,7 +208,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Ports: intstr.FromString("abcd"),
},
@@ -232,7 +232,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Ports: intstr.FromString("80-255"),
},
@@ -255,7 +255,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Ports: intstr.FromString("255-80"),
},
@@ -279,7 +279,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Ports: intstr.FromString("80-?"),
},
@@ -303,7 +303,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Ports: intstr.FromString("255,80"),
},
@@ -326,7 +326,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
Ports: intstr.FromString("80,100,250"),
},
@@ -350,7 +350,7 @@ func TestValidateAgent(t *testing.T) {
EBPF: FlowCollectorEBPF{
FlowFilter: &EBPFFlowFilter{
Enable: ptr.To(true),
- FlowFilterRules: []EBPFFlowFilterRule{
+ Rules: []EBPFFlowFilterRule{
{
CIDR: "1.1.1.1",
},
diff --git a/apis/flowcollector/v1beta2/zz_generated.deepcopy.go b/apis/flowcollector/v1beta2/zz_generated.deepcopy.go
index 6461b82b21..3960ab41f9 100644
--- a/apis/flowcollector/v1beta2/zz_generated.deepcopy.go
+++ b/apis/flowcollector/v1beta2/zz_generated.deepcopy.go
@@ -290,8 +290,8 @@ func (in *EBPFFlowFilter) DeepCopyInto(out *EBPFFlowFilter) {
**out = **in
}
in.EBPFFlowFilterRule.DeepCopyInto(&out.EBPFFlowFilterRule)
- if in.FlowFilterRules != nil {
- in, out := &in.FlowFilterRules, &out.FlowFilterRules
+ if in.Rules != nil {
+ in, out := &in.Rules, &out.Rules
*out = make([]EBPFFlowFilterRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
diff --git a/apis/flowmetrics/v1alpha1/flowmetric_types.go b/apis/flowmetrics/v1alpha1/flowmetric_types.go
index 6b8ce9a675..dee58a025a 100644
--- a/apis/flowmetrics/v1alpha1/flowmetric_types.go
+++ b/apis/flowmetrics/v1alpha1/flowmetric_types.go
@@ -93,7 +93,7 @@ type FlowMetricSpec struct {
// +optional
Labels []string `json:"labels"`
- // `flatten` is a list of list-type fields that must be flattened, such as Interfaces and NetworkEvents. Flattened fields generate one metric per item in that field.
+ // `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field.
// For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`.
// +optional
Flatten []string `json:"flatten"`
diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
index 1d0d19fd19..46e2164bae 100644
--- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
+++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
@@ -137,7 +137,7 @@ spec:
the kernel debug filesystem, so the eBPF pod has to run as privileged.
- `PacketTranslation`: enable enriching flows with packet's translation information.
- `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- - `UDNMapping`, to enable interfaces mappind to udn.
+ - `UDNMapping`, to enable interfaces mapping to udn.
items:
description: |-
Agent feature, can be one of:
@@ -147,7 +147,7 @@ spec:
- `NetworkEvents`, to track Network events.
- `PacketTranslation`, to enrich flows with packets translation information.
- `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `UDNMapping`, to enable interfaces mapping to udn [Developer Preview].
enum:
- PacketDrop
- DNSTracking
@@ -242,8 +242,10 @@ spec:
- SCTP
type: string
rules:
- description: '`flowFilterRules` defines a list of ebpf
- agent flow filtering rules'
+ description: |-
+ `rules` defines a list of filtering rules on the eBPF Agents.
+ When filtering is enabled, by default, flows that don't match any rule are rejected.
+ To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules.
items:
description: '`EBPFFlowFilterRule` defines the desired
eBPF agent configuration regarding flow filtering
@@ -3950,32 +3952,30 @@ spec:
features:
description: |-
List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting
- the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ - `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting
+ the kernel debug filesystem, so the eBPF agent pods must run as privileged.
If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- - `DNSTracking`: enable the DNS tracking feature.
- - `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
- - `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies.
- This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ - `DNSTracking`: Enable the DNS tracking feature.
+ - `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
+ - `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies.
+ This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged.
It requires using the OVN-Kubernetes network plugin with the Observability feature.
- IMPORTANT: This feature is available as a Developer Preview.
- - `PacketTranslation`: enable enriching flows with packet's translation information.
- - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- IMPORTANT: This feature is available as a Developer Preview.
- - `UDNMapping`, to enable interfaces mappind to udn.
- This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ IMPORTANT: This feature is available as a Technology Preview.
+ - `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT.
+              - `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage NetObserv eBPF programs. Prerequisite: the eBPF Manager operator (or upstream bpfman operator) must be installed.
+ - `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN).
+ This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged.
It requires using the OVN-Kubernetes network plugin with the Observability feature.
- IMPORTANT: This feature is available as a Developer Preview.
items:
description: |-
Agent feature, can be one of:
- `PacketDrop`, to track packet drops.
- `DNSTracking`, to track specific information on DNS traffic.
- `FlowRTT`, to track TCP latency.
- - `NetworkEvents`, to track Network events [Developer Preview].
- - `PacketTranslation`, to enrich flows with packets translation information.
- - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `NetworkEvents`, to track network events [Technology Preview].
+ - `PacketTranslation`, to enrich flows with packets translation information, such as Service NAT.
+ - `EbpfManager`, to enable using eBPF Manager to manage NetObserv eBPF programs. [Unsupported (*)].
+ - `UDNMapping`, to enable interfaces mapping to UDN. [Unsupported (*)].
enum:
- PacketDrop
- DNSTracking
@@ -4070,8 +4070,11 @@ spec:
- SCTP
type: string
rules:
- description: '`flowFilterRules` defines a list of ebpf
- agent flow filtering rules'
+ description: |-
+ `rules` defines a list of filtering rules on the eBPF Agents.
+ When filtering is enabled, by default, flows that don't match any rule are rejected.
+ To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules.
+ [Unsupported (*)].
items:
description: '`EBPFFlowFilterRule` defines the desired
eBPF agent configuration regarding flow filtering
@@ -4155,7 +4158,8 @@ spec:
type: string
sampling:
description: '`sampling` sampling rate for the matched
- flow'
+ flows, overriding the global sampling defined
+ at `spec.agent.ebpf.sampling`.'
format: int32
type: integer
sourcePorts:
@@ -4190,7 +4194,7 @@ spec:
type: array
sampling:
description: '`sampling` sampling rate for the matched
- flow'
+ flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.'
format: int32
type: integer
sourcePorts:
@@ -6052,7 +6056,7 @@ spec:
- Kafka
type: string
exporters:
- description: '`exporters` define additional optional exporters for
+ description: '`exporters` defines additional optional exporters for
custom consumption or storage.'
items:
description: '`FlowCollectorExporter` defines an additional exporter
@@ -8155,7 +8159,7 @@ spec:
type: object
secondaryNetworks:
description: |-
- Define secondary networks to be checked for resources identification.
+ Defines secondary networks to be checked for resources identification.
To guarantee a correct identification, indexed values must form an unique identifier across the cluster.
If the same index is used by several resources, those resources might be incorrectly labeled.
items:
@@ -8193,15 +8197,15 @@ spec:
type: string
deduper:
description: |-
- `deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage.
- IMPORTANT: This feature is available as a Developer Preview.
+ `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage.
+ [Unsupported (*)].
properties:
mode:
default: Disabled
description: |-
Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes.
- - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events.
- - Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500.
+ - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events.
+              - Use `Sample` to randomly keep only one flow out of 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate or keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500.
- Use `Disabled` to turn off Processor-based de-duplication.
enum:
- Disabled
@@ -8218,27 +8222,29 @@ spec:
type: object
filters:
description: |-
- `filters` let you define custom filters to limit the amount of generated flows.
- IMPORTANT: This feature is available as a Developer Preview.
+ `filters` lets you define custom filters to limit the amount of generated flows.
+        These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing you to filter by Kubernetes namespace,
+ but with a lesser improvement in performance.
+ [Unsupported (*)].
items:
description: '`FLPFilterSet` defines the desired configuration
- for FLP-based filtering satisfying all conditions'
+ for FLP-based filtering satisfying all conditions.'
properties:
allOf:
description: '`filters` is a list of matches that must be
all satisfied in order to remove a flow.'
items:
description: '`FLPSingleFilter` defines the desired configuration
- for a single FLP-based filter'
+ for a single FLP-based filter.'
properties:
field:
description: |-
- Name of the field to filter on
- Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html.
+ Name of the field to filter on.
+ Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc.
type: string
matchType:
default: Equal
- description: Type of matching to apply
+ description: Type of matching to apply.
enum:
- Equal
- NotEqual
@@ -8259,8 +8265,8 @@ spec:
type: object
type: array
outputTarget:
- description: 'If specified, this filters only target a single
- output: `Loki`, `Metrics` or `Exporters`. By default,
+ description: 'If specified, these filters only target a
+ single output: `Loki`, `Metrics` or `Exporters`. By default,
all outputs are targeted.'
enum:
- ""
@@ -8623,7 +8629,7 @@ spec:
default: Flows
description: |-
`logTypes` defines the desired record types to generate. Possible values are:
- - `Flows` (default) to export regular network flows.
+ - `Flows` to export regular network flows. This is the default.
- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates.
- `EndedConversations` to generate only ended conversations events.
- `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint.
@@ -9041,7 +9047,7 @@ spec:
description: '`FlowCollectorStatus` defines the observed state of FlowCollector'
properties:
conditions:
- description: '`conditions` represent the latest available observations
+ description: '`conditions` represents the latest available observations
of an object''s state'
items:
description: Condition contains details for one aspect of the current
diff --git a/bundle/manifests/flows.netobserv.io_flowmetrics.yaml b/bundle/manifests/flows.netobserv.io_flowmetrics.yaml
index ad3b13f7d6..818da1b670 100644
--- a/bundle/manifests/flows.netobserv.io_flowmetrics.yaml
+++ b/bundle/manifests/flows.netobserv.io_flowmetrics.yaml
@@ -198,7 +198,7 @@ spec:
type: array
flatten:
description: |-
- `flatten` is a list of list-type fields that must be flattened, such as Interfaces and NetworkEvents. Flattened fields generate one metric per item in that field.
+ `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field.
For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`.
items:
type: string
diff --git a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml
index c49e5cea10..cf471b1632 100644
--- a/bundle/manifests/netobserv-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/netobserv-operator.clusterserviceversion.yaml
@@ -434,7 +434,7 @@ metadata:
capabilities: Seamless Upgrades
categories: Monitoring, Networking
console.openshift.io/plugins: '["netobserv-plugin"]'
- containerImage: quay.io/netobserv/network-observability-operator:1.6.1-community
+ containerImage: quay.io/netobserv/network-observability-operator:1.8.0-community
createdAt: ':created-at:'
description: Network flows collector and monitoring solution
operatorframework.io/initialization-resource: '{"apiVersion":"flows.netobserv.io/v1beta2",
@@ -450,7 +450,7 @@ metadata:
operatorframework.io/arch.ppc64le: supported
operatorframework.io/arch.s390x: supported
operatorframework.io/os.linux: supported
- name: netobserv-operator.v1.6.1-community
+ name: netobserv-operator.v1.8.0-community
namespace: placeholder
spec:
apiservicedefinitions: {}
@@ -890,7 +890,7 @@ spec:
## Configuration
- The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/config/samples/flows_v1beta2_flowcollector.yaml).
+ The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/config/samples/flows_v1beta2_flowcollector.yaml).
To edit configuration in cluster, run:
@@ -906,7 +906,7 @@ spec:
- Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you might have to configure differently if you used another installation method. Make sure to disable it (`spec.loki.enable`) if you don't want to use Loki.
- - Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/QuickFilters.md).
+ - Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/QuickFilters.md).
- Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created.
@@ -921,7 +921,7 @@ spec:
This documentation includes:
- An [overview](https://github.com/netobserv/network-observability-operator#openshift-console) of the features, with screenshots
- - More information on [configuring metrics](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/Metrics.md).
+ - More information on [configuring metrics](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/Metrics.md).
- A [performance](https://github.com/netobserv/network-observability-operator#performance-fine-tuning) section, for fine-tuning
- A [security](https://github.com/netobserv/network-observability-operator#securing-data-and-communications) section
- An [F.A.Q.](https://github.com/netobserv/network-observability-operator#faq--troubleshooting) section
@@ -1213,15 +1213,15 @@ spec:
- /manager
env:
- name: RELATED_IMAGE_EBPF_AGENT
- value: quay.io/netobserv/netobserv-ebpf-agent:v1.6.1-community
+ value: quay.io/netobserv/netobserv-ebpf-agent:v1.8.0-community
- name: RELATED_IMAGE_FLOWLOGS_PIPELINE
- value: quay.io/netobserv/flowlogs-pipeline:v1.6.1-community
+ value: quay.io/netobserv/flowlogs-pipeline:v1.8.0-community
- name: RELATED_IMAGE_CONSOLE_PLUGIN
- value: quay.io/netobserv/network-observability-console-plugin:v1.6.1-community
+ value: quay.io/netobserv/network-observability-console-plugin:v1.8.0-community
- name: DOWNSTREAM_DEPLOYMENT
value: "false"
- name: PROFILING_BIND_ADDRESS
- image: quay.io/netobserv/network-observability-operator:1.6.1-community
+ image: quay.io/netobserv/network-observability-operator:1.8.0-community
imagePullPolicy: Always
livenessProbe:
httpGet:
@@ -1353,14 +1353,14 @@ spec:
name: Red Hat
url: https://www.redhat.com
relatedImages:
- - image: quay.io/netobserv/netobserv-ebpf-agent:v1.6.1-community
+ - image: quay.io/netobserv/netobserv-ebpf-agent:v1.8.0-community
name: ebpf-agent
- - image: quay.io/netobserv/flowlogs-pipeline:v1.6.1-community
+ - image: quay.io/netobserv/flowlogs-pipeline:v1.8.0-community
name: flowlogs-pipeline
- - image: quay.io/netobserv/network-observability-console-plugin:v1.6.1-community
+ - image: quay.io/netobserv/network-observability-console-plugin:v1.8.0-community
name: console-plugin
- replaces: netobserv-operator.v1.6.0-community
- version: 1.6.1-community
+ replaces: netobserv-operator.v1.6.1-community
+ version: 1.8.0-community
webhookdefinitions:
- admissionReviewVersions:
- v1
diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
index 495a3a5e2d..223ea27cb3 100644
--- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
+++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
@@ -122,7 +122,7 @@ spec:
the kernel debug filesystem, so the eBPF pod has to run as privileged.
- `PacketTranslation`: enable enriching flows with packet's translation information.
- `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- - `UDNMapping`, to enable interfaces mappind to udn.
+ - `UDNMapping`, to enable interfaces mapping to udn.
items:
description: |-
Agent feature, can be one of:
@@ -132,7 +132,7 @@ spec:
- `NetworkEvents`, to track Network events.
- `PacketTranslation`, to enrich flows with packets translation information.
- `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `UDNMapping`, to enable interfaces mapping to udn [Developer Preview].
enum:
- PacketDrop
- DNSTracking
@@ -215,7 +215,10 @@ spec:
- SCTP
type: string
rules:
- description: '`flowFilterRules` defines a list of ebpf agent flow filtering rules'
+ description: |-
+ `rules` defines a list of filtering rules on the eBPF Agents.
+ When filtering is enabled, by default, flows that don't match any rule are rejected.
+ To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules.
items:
description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule.'
properties:
@@ -3632,32 +3635,30 @@ spec:
features:
description: |-
List of additional features to enable. They are all disabled by default. Enabling additional features might have performance impacts. Possible values are:
- - `PacketDrop`: enable the packets drop flows logging feature. This feature requires mounting
- the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ - `PacketDrop`: Enable the packets drop flows logging feature. This feature requires mounting
+ the kernel debug filesystem, so the eBPF agent pods must run as privileged.
If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.
- - `DNSTracking`: enable the DNS tracking feature.
- - `FlowRTT`: enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
- - `NetworkEvents`: enable the network events monitoring feature, such as correlating flows and network policies.
- This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ - `DNSTracking`: Enable the DNS tracking feature.
+ - `FlowRTT`: Enable flow latency (sRTT) extraction in the eBPF agent from TCP traffic.
+ - `NetworkEvents`: Enable the network events monitoring feature, such as correlating flows and network policies.
+ This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged.
It requires using the OVN-Kubernetes network plugin with the Observability feature.
- IMPORTANT: This feature is available as a Developer Preview.
- - `PacketTranslation`: enable enriching flows with packet's translation information.
- - `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
- IMPORTANT: This feature is available as a Developer Preview.
- - `UDNMapping`, to enable interfaces mappind to udn.
- This feature requires mounting the kernel debug filesystem, so the eBPF agent pods have to run as privileged.
+ IMPORTANT: This feature is available as a Technology Preview.
+ - `PacketTranslation`: Enable enriching flows with packet translation information, such as Service NAT.
+ - `EbpfManager`: [Unsupported (*)]. Use eBPF Manager to manage NetObserv eBPF programs. Pre-requisite: the eBPF Manager operator (or upstream bpfman operator) must be installed.
+ - `UDNMapping`: [Unsupported (*)]. Enable interfaces mapping to User Defined Networks (UDN).
+ This feature requires mounting the kernel debug filesystem, so the eBPF agent pods must run as privileged.
It requires using the OVN-Kubernetes network plugin with the Observability feature.
- IMPORTANT: This feature is available as a Developer Preview.
items:
description: |-
Agent feature, can be one of:
- `PacketDrop`, to track packet drops.
- `DNSTracking`, to track specific information on DNS traffic.
- `FlowRTT`, to track TCP latency.
- - `NetworkEvents`, to track Network events [Developer Preview].
- - `PacketTranslation`, to enrich flows with packets translation information.
- - `EbpfManager`, to enable using EBPF Manager to manage netobserv ebpf programs [Developer Preview].
- - `UDNMapping`, to enable interfaces mappind to udn [Developer Preview].
+ - `NetworkEvents`, to track network events [Technology Preview].
+ - `PacketTranslation`, to enrich flows with packets translation information, such as Service NAT.
+ - `EbpfManager`, to enable using eBPF Manager to manage NetObserv eBPF programs. [Unsupported (*)].
+ - `UDNMapping`, to enable interfaces mapping to UDN. [Unsupported (*)].
enum:
- PacketDrop
- DNSTracking
@@ -3740,7 +3741,11 @@ spec:
- SCTP
type: string
rules:
- description: '`flowFilterRules` defines a list of ebpf agent flow filtering rules'
+ description: |-
+ `rules` defines a list of filtering rules on the eBPF Agents.
+ When filtering is enabled, by default, flows that don't match any rule are rejected.
+ To change the default, you can define a rule that accepts everything: `{ action: "Accept", cidr: "0.0.0.0/0" }`, and then refine with rejecting rules.
+ [Unsupported (*)].
items:
description: '`EBPFFlowFilterRule` defines the desired eBPF agent configuration regarding flow filtering rule.'
properties:
@@ -3810,7 +3815,7 @@ spec:
- SCTP
type: string
sampling:
- description: '`sampling` sampling rate for the matched flow'
+ description: '`sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.'
format: int32
type: integer
sourcePorts:
@@ -3844,7 +3849,7 @@ spec:
minItems: 1
type: array
sampling:
- description: '`sampling` sampling rate for the matched flow'
+ description: '`sampling` sampling rate for the matched flows, overriding the global sampling defined at `spec.agent.ebpf.sampling`.'
format: int32
type: integer
sourcePorts:
@@ -5604,7 +5609,7 @@ spec:
- Kafka
type: string
exporters:
- description: '`exporters` define additional optional exporters for custom consumption or storage.'
+ description: '`exporters` defines additional optional exporters for custom consumption or storage.'
items:
description: '`FlowCollectorExporter` defines an additional exporter to send enriched flows to.'
properties:
@@ -7499,7 +7504,7 @@ spec:
type: object
secondaryNetworks:
description: |-
- Define secondary networks to be checked for resources identification.
+ Defines secondary networks to be checked for resources identification.
To guarantee a correct identification, indexed values must form an unique identifier across the cluster.
If the same index is used by several resources, those resources might be incorrectly labeled.
items:
@@ -7532,15 +7537,15 @@ spec:
type: string
deduper:
description: |-
- `deduper` allows to sample or drop flows identified as duplicates, in order to save on resource usage.
- IMPORTANT: This feature is available as a Developer Preview.
+ `deduper` allows you to sample or drop flows identified as duplicates, in order to save on resource usage.
+ [Unsupported (*)].
properties:
mode:
default: Disabled
description: |-
Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes.
- - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events.
- - Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500.
+ - Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events.
+                - Use `Sample` to randomly keep only one flow out of 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate and keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500.
- Use `Disabled` to turn off Processor-based de-duplication.
enum:
- Disabled
@@ -7556,24 +7561,26 @@ spec:
type: object
filters:
description: |-
- `filters` let you define custom filters to limit the amount of generated flows.
- IMPORTANT: This feature is available as a Developer Preview.
+ `filters` lets you define custom filters to limit the amount of generated flows.
+                These filters provide more flexibility than the eBPF Agent filters (in `spec.agent.ebpf.flowFilter`), such as allowing you to filter by Kubernetes namespace,
+ but with a lesser improvement in performance.
+ [Unsupported (*)].
items:
- description: '`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions'
+ description: '`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions.'
properties:
allOf:
description: '`filters` is a list of matches that must be all satisfied in order to remove a flow.'
items:
- description: '`FLPSingleFilter` defines the desired configuration for a single FLP-based filter'
+ description: '`FLPSingleFilter` defines the desired configuration for a single FLP-based filter.'
properties:
field:
description: |-
- Name of the field to filter on
- Refer to the documentation for the list of available fields: https://docs.openshift.com/container-platform/latest/observability/network_observability/json-flows-format-reference.html.
+ Name of the field to filter on.
+ Refer to the documentation for the list of available fields: https://github.com/netobserv/network-observability-operator/blob/main/docs/flows-format.adoc.
type: string
matchType:
default: Equal
- description: Type of matching to apply
+ description: Type of matching to apply.
enum:
- Equal
- NotEqual
@@ -7591,7 +7598,7 @@ spec:
type: object
type: array
outputTarget:
- description: 'If specified, this filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.'
+ description: 'If specified, these filters only target a single output: `Loki`, `Metrics` or `Exporters`. By default, all outputs are targeted.'
enum:
- ""
- Loki
@@ -7944,7 +7951,7 @@ spec:
default: Flows
description: |-
`logTypes` defines the desired record types to generate. Possible values are:
- - `Flows` (default) to export regular network flows.
+ - `Flows` to export regular network flows. This is the default.
- `Conversations` to generate events for started conversations, ended conversations as well as periodic "tick" updates.
- `EndedConversations` to generate only ended conversations events.
- `All` to generate both network flows and all conversations events. It is not recommended due to the impact on resources footprint.
@@ -8323,7 +8330,7 @@ spec:
description: '`FlowCollectorStatus` defines the observed state of FlowCollector'
properties:
conditions:
- description: '`conditions` represent the latest available observations of an object''s state'
+ description: '`conditions` represents the latest available observations of an object''s state'
items:
description: Condition contains details for one aspect of the current state of this API Resource.
properties:
diff --git a/config/crd/bases/flows.netobserv.io_flowmetrics.yaml b/config/crd/bases/flows.netobserv.io_flowmetrics.yaml
index f8d868b2af..5adc45fa18 100644
--- a/config/crd/bases/flows.netobserv.io_flowmetrics.yaml
+++ b/config/crd/bases/flows.netobserv.io_flowmetrics.yaml
@@ -188,7 +188,7 @@ spec:
type: array
flatten:
description: |-
- `flatten` is a list of list-type fields that must be flattened, such as Interfaces and NetworkEvents. Flattened fields generate one metric per item in that field.
+ `flatten` is a list of array-type fields that must be flattened, such as Interfaces or NetworkEvents. Flattened fields generate one metric per item in that field.
For instance, when flattening `Interfaces` on a bytes counter, a flow having Interfaces [br-ex, ens5] increases one counter for `br-ex` and another for `ens5`.
items:
type: string
diff --git a/config/csv/bases/netobserv-operator.clusterserviceversion.yaml b/config/csv/bases/netobserv-operator.clusterserviceversion.yaml
index 775919a310..74c5b763e5 100644
--- a/config/csv/bases/netobserv-operator.clusterserviceversion.yaml
+++ b/config/csv/bases/netobserv-operator.clusterserviceversion.yaml
@@ -353,5 +353,5 @@ spec:
provider:
name: Red Hat
url: https://www.redhat.com
- replaces: netobserv-operator.v1.6.0-community
+ replaces: netobserv-operator.v1.6.1-community
version: 0.0.0
diff --git a/config/descriptions/ocp.md b/config/descriptions/ocp.md
index 25fe797f2b..8f4aa7c8e1 100644
--- a/config/descriptions/ocp.md
+++ b/config/descriptions/ocp.md
@@ -38,7 +38,7 @@ In that case, you can still get the Prometheus metrics or export raw flows to a
## Configuration
-The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/config/samples/flows_v1beta2_flowcollector.yaml).
+The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/config/samples/flows_v1beta2_flowcollector.yaml).
To edit configuration in cluster, run:
@@ -54,7 +54,7 @@ A couple of settings deserve special attention:
- Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you might have to configure differently if you used another installation method. Make sure to disable it (`spec.loki.enable`) if you don't want to use Loki.
-- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/QuickFilters.md).
+- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/QuickFilters.md).
- Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created.
@@ -69,7 +69,7 @@ Please refer to the documentation on GitHub for more information.
This documentation includes:
- An [overview](https://github.com/netobserv/network-observability-operator#openshift-console) of the features, with screenshots
-- More information on [configuring metrics](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/Metrics.md).
+- More information on [configuring metrics](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/Metrics.md).
- A [performance](https://github.com/netobserv/network-observability-operator#performance-fine-tuning) section, for fine-tuning
- A [security](https://github.com/netobserv/network-observability-operator#securing-data-and-communications) section
- An [F.A.Q.](https://github.com/netobserv/network-observability-operator#faq--troubleshooting) section
diff --git a/config/descriptions/upstream.md b/config/descriptions/upstream.md
index 2f4045899d..fe297341fd 100644
--- a/config/descriptions/upstream.md
+++ b/config/descriptions/upstream.md
@@ -42,7 +42,7 @@ In that case, you can still get the Prometheus metrics or export raw flows to a
## Configuration
-The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/config/samples/flows_v1beta2_flowcollector.yaml).
+The `FlowCollector` resource is used to configure the operator and its managed components. A comprehensive documentation is [available here](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/FlowCollector.md), and a full sample file [there](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/config/samples/flows_v1beta2_flowcollector.yaml).
To edit configuration in cluster, run:
@@ -58,7 +58,7 @@ A couple of settings deserve special attention:
- Loki (`spec.loki`): configure here how to reach Loki. The default values match the Loki quick install paths mentioned above, but you might have to configure differently if you used another installation method. Make sure to disable it (`spec.loki.enable`) if you don't want to use Loki.
-- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/QuickFilters.md).
+- Quick filters (`spec.consolePlugin.quickFilters`): configure preset filters to be displayed in the Console plugin. They offer a way to quickly switch from filters to others, such as showing / hiding pods network, or infrastructure network, or application network, etc. They can be tuned to reflect the different workloads running on your cluster. For a list of available filters, [check this page](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/QuickFilters.md).
- Kafka (`spec.deploymentModel: KAFKA` and `spec.kafka`): when enabled, integrates the flow collection pipeline with Kafka, by splitting ingestion from transformation (kube enrichment, derived metrics, ...). Kafka can provide better scalability, resiliency and high availability ([view more details](https://www.redhat.com/en/topics/integration/what-is-apache-kafka)). Assumes Kafka is already deployed and a topic is created.
@@ -73,7 +73,7 @@ Please refer to the documentation on GitHub for more information.
This documentation includes:
- An [overview](https://github.com/netobserv/network-observability-operator#openshift-console) of the features, with screenshots
-- More information on [configuring metrics](https://github.com/netobserv/network-observability-operator/blob/1.6.1-community/docs/Metrics.md).
+- More information on [configuring metrics](https://github.com/netobserv/network-observability-operator/blob/1.8.0-community/docs/Metrics.md).
- A [performance](https://github.com/netobserv/network-observability-operator#performance-fine-tuning) section, for fine-tuning
- A [security](https://github.com/netobserv/network-observability-operator#securing-data-and-communications) section
- An [F.A.Q.](https://github.com/netobserv/network-observability-operator#faq--troubleshooting) section
diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml
index e3e208eab5..749bcaebba 100644
--- a/config/manager/kustomization.yaml
+++ b/config/manager/kustomization.yaml
@@ -16,6 +16,6 @@ kind: Kustomization
images:
- name: controller
newName: quay.io/netobserv/network-observability-operator
- newTag: 1.6.1-community
+ newTag: 1.8.0-community
commonLabels:
app: netobserv-operator
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
index 31fc8a49cb..621357481e 100644
--- a/config/manager/manager.yaml
+++ b/config/manager/manager.yaml
@@ -31,11 +31,11 @@ spec:
- --profiling-bind-address=$(PROFILING_BIND_ADDRESS)
env:
- name: RELATED_IMAGE_EBPF_AGENT
- value: quay.io/netobserv/netobserv-ebpf-agent:v1.6.1-community
+ value: quay.io/netobserv/netobserv-ebpf-agent:v1.8.0-community
- name: RELATED_IMAGE_FLOWLOGS_PIPELINE
- value: quay.io/netobserv/flowlogs-pipeline:v1.6.1-community
+ value: quay.io/netobserv/flowlogs-pipeline:v1.8.0-community
- name: RELATED_IMAGE_CONSOLE_PLUGIN
- value: quay.io/netobserv/network-observability-console-plugin:v1.6.1-community
+ value: quay.io/netobserv/network-observability-console-plugin:v1.8.0-community
- name: DOWNSTREAM_DEPLOYMENT
value: "false"
- name: PROFILING_BIND_ADDRESS
diff --git a/controllers/consoleplugin/config/static-frontend-config.yaml b/controllers/consoleplugin/config/static-frontend-config.yaml
index b9339b5716..7bd8064b35 100644
--- a/controllers/consoleplugin/config/static-frontend-config.yaml
+++ b/controllers/consoleplugin/config/static-frontend-config.yaml
@@ -1038,22 +1038,22 @@ filters:
name: Xlat Zone Id
component: number
- id: xlat_src_address
- name: Xlat src address
+ name: Xlat source address
component: text
category: source
hint: Specify a single IP or range.
- id: xlat_dst_address
- name: Xlat dst address
+ name: Xlat destination address
component: text
category: destination
hint: Specify a single IP or range.
- id: xlat_src_port
- name: Xlat src port
+ name: Xlat source port
component: autocomplete
category: source
hint: Specify a single port number or name.
- id: xlat_dst_port
- name: Xlat dst port
+ name: Xlat destination port
component: autocomplete
category: destination
hint: Specify a single port number or name.
@@ -1402,22 +1402,22 @@ fields:
description: packet translation zone id
- name: XlatSrcPort
type: number
- description: packet translation src port
+ description: packet translation source port
- name: XlatDstPort
type: number
- description: packet translation dst port
+ description: packet translation destination port
- name: XlatSrcAddr
type: string
- description: packet translation src address
+ description: packet translation source address
- name: XlatDstAddr
type: string
- description: packet translation dst address
+ description: packet translation destination address
- name: K8S_ClusterName
type: string
description: Cluster name or identifier
- name: _RecordType
type: string
- description: "Type of record: 'flowLog' for regular flow logs, or 'newConnection', 'heartbeat', 'endConnection' for conversation tracking"
+ description: "Type of record: `flowLog` for regular flow logs, or `newConnection`, `heartbeat`, `endConnection` for conversation tracking"
- name: _HashId
type: string
description: In conversation tracking, the conversation identifier
diff --git a/controllers/ebpf/agent_controller.go b/controllers/ebpf/agent_controller.go
index e4cea0f27b..1d9412ff9e 100644
--- a/controllers/ebpf/agent_controller.go
+++ b/controllers/ebpf/agent_controller.go
@@ -504,8 +504,8 @@ func (c *AgentController) envConfig(ctx context.Context, coll *flowslatest.FlowC
if helper.IsEBFPFlowFilterEnabled(&coll.Spec.Agent.EBPF) {
config = append(config, corev1.EnvVar{Name: envEnableFlowFilter, Value: "true"})
- if len(coll.Spec.Agent.EBPF.FlowFilter.FlowFilterRules) != 0 {
- if filterRules := c.configureFlowFiltersRules(coll.Spec.Agent.EBPF.FlowFilter.FlowFilterRules); filterRules != nil {
+ if len(coll.Spec.Agent.EBPF.FlowFilter.Rules) != 0 {
+ if filterRules := c.configureFlowFiltersRules(coll.Spec.Agent.EBPF.FlowFilter.Rules); filterRules != nil {
config = append(config, filterRules...)
}
} else {
diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md
index 030fecae85..df65197081 100644
--- a/docs/FlowCollector.md
+++ b/docs/FlowCollector.md
@@ -294,7 +294,7 @@ If the `spec.agent.ebpf.privileged` parameter is not set, an error is reported.<
the kernel debug filesystem, so the eBPF pod has to run as privileged.
- `PacketTranslation`: enable enriching flows with packet's translation information.
- `EbpfManager`: allow using eBPF manager to manage netobserv ebpf programs.
-- `UDNMapping`, to enable interfaces mappind to udn.
+- `UDNMapping`, to enable interfaces mapping to udn.
| enum |
 Set the Processor de-duplication mode. It comes in addition to the Agent-based deduplication because the Agent cannot de-duplicate same flows reported from different nodes.
-- Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially loosing some information such as the network interfaces used from peer, or network events.
-- Use `Sample` to randomly keep only 1 flow on 50 (by default) among the ones considered as duplicates. This is a compromise between dropping every duplicates or keeping every duplicates. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling are 50, the combined sampling is 1:2500.
+- Use `Drop` to drop every flow considered as duplicates, allowing saving more on resource usage but potentially losing some information such as the network interfaces used from peer, or network events.
+- Use `Sample` to randomly keep only one flow on 50, which is the default, among the ones considered as duplicates. This is a compromise between dropping every duplicate or keeping every duplicate. This sampling action comes in addition to the Agent-based sampling. If both Agent and Processor sampling values are `50`, the combined sampling is 1:2500.
 - Use `Disabled` to turn off Processor-based de-duplication.
 Enum: Disabled, Drop, Sample
@@ -16470,7 +16475,7 @@
 IMPORTANT: This feature is available as a Developer Preview.
-`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions
+`FLPFilterSet` defines the desired configuration for FLP-based filtering satisfying all conditions.
|