Skip to content

Commit

Permalink
Use standard collector selectors in target allocator config
Browse files Browse the repository at this point in the history
  • Loading branch information
swiatekm committed Dec 20, 2023
1 parent 9d99573 commit f50e03e
Show file tree
Hide file tree
Showing 14 changed files with 149 additions and 65 deletions.
16 changes: 16 additions & 0 deletions .chloggen/feat_targetallocator-selector.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: breaking

# The name of the component, or a single word describing the area of concern (e.g. operator, target allocator, github action)
component: target allocator

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Use standard K8s label selectors for collectors in target allocator config

# One or more tracking issues related to the change
issues: [2422]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext: This is a breaking change only for users of standalone target allocator. Operator users are unaffected.
9 changes: 6 additions & 3 deletions cmd/otel-allocator/collector/collector.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
Expand Down Expand Up @@ -63,11 +62,15 @@ func NewClient(logger logr.Logger, kubeConfig *rest.Config) (*Client, error) {
}, nil
}

func (k *Client) Watch(ctx context.Context, labelMap map[string]string, fn func(collectors map[string]*allocation.Collector)) error {
func (k *Client) Watch(ctx context.Context, labelSelector *metav1.LabelSelector, fn func(collectors map[string]*allocation.Collector)) error {
collectorMap := map[string]*allocation.Collector{}

selector, err := metav1.LabelSelectorAsSelector(labelSelector)
if err != nil {
return err
}
opts := metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labelMap).String(),
LabelSelector: selector.String(),
}
pods, err := k.k8sClient.CoreV1().Pods(ns).List(ctx, opts)
if err != nil {
Expand Down
13 changes: 7 additions & 6 deletions cmd/otel-allocator/collector/collector_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes/fake"

"github.com/open-telemetry/opentelemetry-operator/cmd/otel-allocator/allocation"
Expand All @@ -42,14 +41,16 @@ func getTestClient() (Client, watch.Interface) {
close: make(chan struct{}),
log: logger,
}

labelMap := map[string]string{
"app.kubernetes.io/instance": "default.test",
"app.kubernetes.io/managed-by": "opentelemetry-operator",
labelSelector := metav1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/instance": "default.test",
"app.kubernetes.io/managed-by": "opentelemetry-operator",
},
}
selector, err := metav1.LabelSelectorAsSelector(&labelSelector)

opts := metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labelMap).String(),
LabelSelector: selector.String(),
}
watcher, err := kubeClient.k8sClient.CoreV1().Pods("test-ns").Watch(context.Background(), opts)
if err != nil {
Expand Down
24 changes: 12 additions & 12 deletions cmd/otel-allocator/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ import (
_ "github.com/prometheus/prometheus/discovery/install"
"github.com/spf13/pflag"
"gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
Expand All @@ -39,17 +40,17 @@ const DefaultConfigFilePath string = "/conf/targetallocator.yaml"
const DefaultCRScrapeInterval model.Duration = model.Duration(time.Second * 30)

type Config struct {
ListenAddr string `yaml:"listen_addr,omitempty"`
KubeConfigFilePath string `yaml:"kube_config_file_path,omitempty"`
ClusterConfig *rest.Config `yaml:"-"`
RootLogger logr.Logger `yaml:"-"`
LabelSelector map[string]string `yaml:"label_selector,omitempty"`
PromConfig *promconfig.Config `yaml:"config"`
AllocationStrategy *string `yaml:"allocation_strategy,omitempty"`
FilterStrategy *string `yaml:"filter_strategy,omitempty"`
PrometheusCR PrometheusCRConfig `yaml:"prometheus_cr,omitempty"`
PodMonitorSelector map[string]string `yaml:"pod_monitor_selector,omitempty"`
ServiceMonitorSelector map[string]string `yaml:"service_monitor_selector,omitempty"`
ListenAddr string `yaml:"listen_addr,omitempty"`
KubeConfigFilePath string `yaml:"kube_config_file_path,omitempty"`
ClusterConfig *rest.Config `yaml:"-"`
RootLogger logr.Logger `yaml:"-"`
CollectorSelector metav1.LabelSelector `yaml:"collector_selector,omitempty"`
PromConfig *promconfig.Config `yaml:"config"`
AllocationStrategy *string `yaml:"allocation_strategy,omitempty"`
FilterStrategy *string `yaml:"filter_strategy,omitempty"`
PrometheusCR PrometheusCRConfig `yaml:"prometheus_cr,omitempty"`
PodMonitorSelector map[string]string `yaml:"pod_monitor_selector,omitempty"`
ServiceMonitorSelector map[string]string `yaml:"service_monitor_selector,omitempty"`
}

type PrometheusCRConfig struct {
Expand Down Expand Up @@ -114,7 +115,6 @@ func LoadFromCLI(target *Config, flagSet *pflag.FlagSet) error {
}

func unmarshal(cfg *Config, configFile string) error {

yamlFile, err := os.ReadFile(configFile)
if err != nil {
return err
Expand Down
17 changes: 11 additions & 6 deletions cmd/otel-allocator/config/config_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import (

commonconfig "github.com/prometheus/common/config"
promconfig "github.com/prometheus/prometheus/config"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/discovery"
Expand All @@ -44,9 +45,11 @@ func TestLoad(t *testing.T) {
file: "./testdata/config_test.yaml",
},
want: Config{
LabelSelector: map[string]string{
"app.kubernetes.io/instance": "default.test",
"app.kubernetes.io/managed-by": "opentelemetry-operator",
CollectorSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/instance": "default.test",
"app.kubernetes.io/managed-by": "opentelemetry-operator",
},
},
PrometheusCR: PrometheusCRConfig{
ScrapeInterval: model.Duration(time.Second * 60),
Expand Down Expand Up @@ -108,9 +111,11 @@ func TestLoad(t *testing.T) {
file: "./testdata/pod_service_selector_test.yaml",
},
want: Config{
LabelSelector: map[string]string{
"app.kubernetes.io/instance": "default.test",
"app.kubernetes.io/managed-by": "opentelemetry-operator",
CollectorSelector: metav1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/instance": "default.test",
"app.kubernetes.io/managed-by": "opentelemetry-operator",
},
},
PrometheusCR: PrometheusCRConfig{
ScrapeInterval: DefaultCRScrapeInterval,
Expand Down
7 changes: 4 additions & 3 deletions cmd/otel-allocator/config/testdata/config_test.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
label_selector:
app.kubernetes.io/instance: default.test
app.kubernetes.io/managed-by: opentelemetry-operator
collector_selector:
matchlabels:
app.kubernetes.io/instance: default.test
app.kubernetes.io/managed-by: opentelemetry-operator
prometheus_cr:
scrape_interval: 60s
config:
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
label_selector:
app.kubernetes.io/instance: default.test
app.kubernetes.io/managed-by: opentelemetry-operator
collector_selector:
matchlabels:
app.kubernetes.io/instance: default.test
app.kubernetes.io/managed-by: opentelemetry-operator
pod_monitor_selector:
release: test
service_monitor_selector:
Expand Down
2 changes: 1 addition & 1 deletion cmd/otel-allocator/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ func main() {
})
runGroup.Add(
func() error {
err := collectorWatcher.Watch(ctx, cfg.LabelSelector, allocator.SetCollectors)
err := collectorWatcher.Watch(ctx, &cfg.CollectorSelector, allocator.SetCollectors)
setupLog.Info("Collector watcher exited")
return err
},
Expand Down
7 changes: 4 additions & 3 deletions cmd/otel-allocator/target/testdata/test.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
label_selector:
app.kubernetes.io/instance: default.test
app.kubernetes.io/managed-by: opentelemetry-operator
collector_selector:
matchlabels:
app.kubernetes.io/instance: default.test
app.kubernetes.io/managed-by: opentelemetry-operator
config:
scrape_configs:
- job_name: prometheus
Expand Down
7 changes: 4 additions & 3 deletions cmd/otel-allocator/target/testdata/test_update.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
label_selector:
app.kubernetes.io/instance: default.test
app.kubernetes.io/managed-by: opentelemetry-operator
collector_selector:
matchlabels:
app.kubernetes.io/instance: default.test
app.kubernetes.io/managed-by: opentelemetry-operator
config:
scrape_configs:
- job_name: prometheus
Expand Down
56 changes: 52 additions & 4 deletions controllers/builder_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1316,7 +1316,31 @@ service:
Annotations: nil,
},
Data: map[string]string{
"targetallocator.yaml": "allocation_strategy: least-weighted\nconfig:\n scrape_configs:\n - job_name: example\n metric_relabel_configs:\n - replacement: $1_$2\n source_labels:\n - job\n target_label: job\n relabel_configs:\n - replacement: my_service_$1\n source_labels:\n - __meta_service_id\n target_label: job\n - replacement: $1\n source_labels:\n - __meta_service_name\n target_label: instance\nlabel_selector:\n app.kubernetes.io/component: opentelemetry-collector\n app.kubernetes.io/instance: test.test\n app.kubernetes.io/managed-by: opentelemetry-operator\n app.kubernetes.io/part-of: opentelemetry\n",
"targetallocator.yaml": `allocation_strategy: least-weighted
collector_selector:
matchlabels:
app.kubernetes.io/component: opentelemetry-collector
app.kubernetes.io/instance: test.test
app.kubernetes.io/managed-by: opentelemetry-operator
app.kubernetes.io/part-of: opentelemetry
config:
scrape_configs:
- job_name: example
metric_relabel_configs:
- replacement: $1_$2
source_labels:
- job
target_label: job
relabel_configs:
- replacement: my_service_$1
source_labels:
- __meta_service_id
target_label: job
- replacement: $1
source_labels:
- __meta_service_name
target_label: instance
`,
},
},
&appsv1.Deployment{
Expand Down Expand Up @@ -1346,7 +1370,7 @@ service:
"app.kubernetes.io/part-of": "opentelemetry",
},
Annotations: map[string]string{
"opentelemetry-targetallocator-config/hash": "4d1911fd40106e9e2dd3d928f067a6c8c9eab0c569f737ba3701c6f5a9aad6d7",
"opentelemetry-targetallocator-config/hash": "40afbbdb738923bf9cb4a117648cd86030cc5b748601d19120226eb1ee74c91a",
},
},
Spec: corev1.PodSpec{
Expand Down Expand Up @@ -1672,7 +1696,31 @@ service:
Annotations: nil,
},
Data: map[string]string{
"targetallocator.yaml": "allocation_strategy: least-weighted\nconfig:\n scrape_configs:\n - job_name: example\n metric_relabel_configs:\n - replacement: $1_$2\n source_labels:\n - job\n target_label: job\n relabel_configs:\n - replacement: my_service_$1\n source_labels:\n - __meta_service_id\n target_label: job\n - replacement: $1\n source_labels:\n - __meta_service_name\n target_label: instance\nlabel_selector:\n app.kubernetes.io/component: opentelemetry-collector\n app.kubernetes.io/instance: test.test\n app.kubernetes.io/managed-by: opentelemetry-operator\n app.kubernetes.io/part-of: opentelemetry\n",
"targetallocator.yaml": `allocation_strategy: least-weighted
collector_selector:
matchlabels:
app.kubernetes.io/component: opentelemetry-collector
app.kubernetes.io/instance: test.test
app.kubernetes.io/managed-by: opentelemetry-operator
app.kubernetes.io/part-of: opentelemetry
config:
scrape_configs:
- job_name: example
metric_relabel_configs:
- replacement: $1_$2
source_labels:
- job
target_label: job
relabel_configs:
- replacement: my_service_$1
source_labels:
- __meta_service_id
target_label: job
- replacement: $1
source_labels:
- __meta_service_name
target_label: instance
`,
},
},
&appsv1.Deployment{
Expand Down Expand Up @@ -1702,7 +1750,7 @@ service:
"app.kubernetes.io/part-of": "opentelemetry",
},
Annotations: map[string]string{
"opentelemetry-targetallocator-config/hash": "4d1911fd40106e9e2dd3d928f067a6c8c9eab0c569f737ba3701c6f5a9aad6d7",
"opentelemetry-targetallocator-config/hash": "40afbbdb738923bf9cb4a117648cd86030cc5b748601d19120226eb1ee74c91a",
},
},
Spec: corev1.PodSpec{
Expand Down
12 changes: 7 additions & 5 deletions controllers/reconcile_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -443,11 +443,13 @@ func TestOpenTelemetryCollectorReconciler_Reconcile(t *testing.T) {
assert.NoError(t, err)

taConfig := make(map[interface{}]interface{})
taConfig["label_selector"] = map[string]string{
"app.kubernetes.io/instance": "default.test",
"app.kubernetes.io/managed-by": "opentelemetry-operator",
"app.kubernetes.io/component": "opentelemetry-collector",
"app.kubernetes.io/part-of": "opentelemetry",
taConfig["collector_selector"] = map[string]any{
"matchlabels": map[string]string{
"app.kubernetes.io/instance": "default.test",
"app.kubernetes.io/managed-by": "opentelemetry-operator",
"app.kubernetes.io/component": "opentelemetry-collector",
"app.kubernetes.io/part-of": "opentelemetry",
},
}
taConfig["config"] = promConfig["config"]
taConfig["allocation_strategy"] = "least-weighted"
Expand Down
4 changes: 3 additions & 1 deletion internal/manifests/targetallocator/configmap.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,9 @@ func ConfigMap(params manifests.Params) (*corev1.ConfigMap, error) {

taConfig := make(map[interface{}]interface{})
prometheusCRConfig := make(map[interface{}]interface{})
taConfig["label_selector"] = manifestutils.SelectorLabels(params.OtelCol.ObjectMeta, collector.ComponentOpenTelemetryCollector)
taConfig["collector_selector"] = map[string]any{
"matchlabels": manifestutils.SelectorLabels(params.OtelCol.ObjectMeta, collector.ComponentOpenTelemetryCollector),
}
// We only take the "config" from the returned object, if it's present
if prometheusConfig, ok := prometheusReceiverConfig["config"]; ok {
taConfig["config"] = prometheusConfig
Expand Down
33 changes: 18 additions & 15 deletions internal/manifests/targetallocator/configmap_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,12 @@ func TestDesiredConfigMap(t *testing.T) {

expectedData := map[string]string{
"targetallocator.yaml": `allocation_strategy: least-weighted
collector_selector:
matchlabels:
app.kubernetes.io/component: opentelemetry-collector
app.kubernetes.io/instance: default.my-instance
app.kubernetes.io/managed-by: opentelemetry-operator
app.kubernetes.io/part-of: opentelemetry
config:
scrape_configs:
- job_name: otel-collector
Expand All @@ -48,11 +54,6 @@ config:
- targets:
- 0.0.0.0:8888
- 0.0.0.0:9999
label_selector:
app.kubernetes.io/component: opentelemetry-collector
app.kubernetes.io/instance: default.my-instance
app.kubernetes.io/managed-by: opentelemetry-operator
app.kubernetes.io/part-of: opentelemetry
`,
}
instance := collectorInstance()
Expand All @@ -76,6 +77,12 @@ label_selector:

expectedData := map[string]string{
"targetallocator.yaml": `allocation_strategy: least-weighted
collector_selector:
matchlabels:
app.kubernetes.io/component: opentelemetry-collector
app.kubernetes.io/instance: default.my-instance
app.kubernetes.io/managed-by: opentelemetry-operator
app.kubernetes.io/part-of: opentelemetry
config:
scrape_configs:
- job_name: otel-collector
Expand All @@ -84,11 +91,6 @@ config:
- targets:
- 0.0.0.0:8888
- 0.0.0.0:9999
label_selector:
app.kubernetes.io/component: opentelemetry-collector
app.kubernetes.io/instance: default.my-instance
app.kubernetes.io/managed-by: opentelemetry-operator
app.kubernetes.io/part-of: opentelemetry
pod_monitor_selector:
release: my-instance
service_monitor_selector:
Expand Down Expand Up @@ -122,6 +124,12 @@ service_monitor_selector:

expectedData := map[string]string{
"targetallocator.yaml": `allocation_strategy: least-weighted
collector_selector:
matchlabels:
app.kubernetes.io/component: opentelemetry-collector
app.kubernetes.io/instance: default.my-instance
app.kubernetes.io/managed-by: opentelemetry-operator
app.kubernetes.io/part-of: opentelemetry
config:
scrape_configs:
- job_name: otel-collector
Expand All @@ -130,11 +138,6 @@ config:
- targets:
- 0.0.0.0:8888
- 0.0.0.0:9999
label_selector:
app.kubernetes.io/component: opentelemetry-collector
app.kubernetes.io/instance: default.my-instance
app.kubernetes.io/managed-by: opentelemetry-operator
app.kubernetes.io/part-of: opentelemetry
prometheus_cr:
scrape_interval: 30s
`,
Expand Down

0 comments on commit f50e03e

Please sign in to comment.