diff --git a/api/dash0monitoring/v1alpha1/operator_configuration_types.go b/api/dash0monitoring/v1alpha1/operator_configuration_types.go index 663fad1e..95171354 100644 --- a/api/dash0monitoring/v1alpha1/operator_configuration_types.go +++ b/api/dash0monitoring/v1alpha1/operator_configuration_types.go @@ -146,6 +146,29 @@ func (d *Dash0OperatorConfiguration) EnsureResourceIsMarkedAsDegraded( }) } +func (d *Dash0OperatorConfiguration) HasDash0ApiAccessConfigured() bool { + return d.Spec.Export != nil && + d.Spec.Export.Dash0 != nil && + d.Spec.Export.Dash0.ApiEndpoint != "" && + (d.Spec.Export.Dash0.Authorization.Token != nil || d.Spec.Export.Dash0.Authorization.SecretRef != nil) +} + +func (d *Dash0OperatorConfiguration) GetDash0AuthorizationIfConfigured() *Authorization { + if d.Spec.Export == nil { + return nil + } + if d.Spec.Export.Dash0 == nil { + return nil + } + + authorization := d.Spec.Export.Dash0.Authorization + if (authorization.Token != nil && *authorization.Token != "") || + (authorization.SecretRef != nil && authorization.SecretRef.Name != "" && authorization.SecretRef.Key != "") { + return &authorization + } + return nil +} + func (d *Dash0OperatorConfiguration) GetResourceTypeName() string { return "Dash0OperatorConfiguration" } diff --git a/api/dash0monitoring/v1alpha1/types_common.go b/api/dash0monitoring/v1alpha1/types_common.go index ee108cfb..6d0b1d22 100644 --- a/api/dash0monitoring/v1alpha1/types_common.go +++ b/api/dash0monitoring/v1alpha1/types_common.go @@ -52,6 +52,15 @@ type Dash0Configuration struct { // // +kubebuilder:validation:Required Authorization Authorization `json:"authorization"` + + // The base URL of the Dash0 API to talk to. This is not where telemetry will be sent, but it is used for managing + // dashboards and check rules via the operator. This property is optional. The value needs to be the API endpoint + // of your Dash0 organization. 
The correct API endpoint can be copied fom https://app.dash0.com -> organization + // settings -> "Endpoints" -> "API". The correct endpoint value will always start with "https://api." and end in + // ".dash0.com" + // + // +kubebuilder:validation:Optional + ApiEndpoint string `json:"apiEndpoint,omitempty"` } // Authorization contains the authorization settings for Dash0. diff --git a/cmd/main.go b/cmd/main.go index 721b7b8a..51482767 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -16,10 +16,12 @@ import ( _ "k8s.io/client-go/plugin/pkg/client/auth" "github.com/go-logr/logr" + persesv1alpha1 "github.com/perses/perses-operator/api/v1alpha1" semconv "go.opentelemetry.io/collector/semconv/v1.27.0" otelmetric "go.opentelemetry.io/otel/metric" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes" @@ -40,7 +42,7 @@ import ( "github.com/dash0hq/dash0-operator/internal/dash0/controller" "github.com/dash0hq/dash0-operator/internal/dash0/instrumentation" "github.com/dash0hq/dash0-operator/internal/dash0/predelete" - "github.com/dash0hq/dash0-operator/internal/dash0/selfmonitoring" + "github.com/dash0hq/dash0-operator/internal/dash0/selfmonitoringapiaccess" "github.com/dash0hq/dash0-operator/internal/dash0/startup" "github.com/dash0hq/dash0-operator/internal/dash0/util" "github.com/dash0hq/dash0-operator/internal/dash0/webhooks" @@ -60,6 +62,7 @@ type environmentVariables struct { configurationReloaderImagePullPolicy corev1.PullPolicy filelogOffsetSynchImage string filelogOffsetSynchImagePullPolicy corev1.PullPolicy + selfMonitoringAndApiAuthToken string } const ( @@ -100,9 +103,11 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(dash0v1alpha1.AddToScheme(scheme)) - //+kubebuilder:scaffold:scheme + + // for perses dashboard controller, 
prometheus scrape config controller etc. + utilruntime.Must(apiextensionsv1.AddToScheme(scheme)) + utilruntime.Must(persesv1alpha1.AddToScheme(scheme)) } func main() { @@ -111,6 +116,7 @@ func main() { var operatorConfigurationToken string var operatorConfigurationSecretRefName string var operatorConfigurationSecretRefKey string + var operatorConfigurationApiEndpoint string var isUninstrumentAll bool var metricsAddr string var enableLeaderElection bool @@ -132,6 +138,8 @@ func main() { flag.StringVar(&operatorConfigurationSecretRefKey, "operator-configuration-secret-ref-key", "", "The key in an existing Kubernetes secret containing the Dash0 auth token, used to creating an operator "+ "configuration resource.") + flag.StringVar(&operatorConfigurationApiEndpoint, "operator-configuration-api-endpoint", "", + "The Dash0 API endpoint for managing dashboards and check rules via the operator.") flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") flag.BoolVar(&enableLeaderElection, "leader-elect", false, @@ -223,6 +231,9 @@ func main() { Key: operatorConfigurationSecretRefKey, }, } + if len(operatorConfigurationApiEndpoint) > 0 { + operatorConfiguration.ApiEndpoint = operatorConfigurationApiEndpoint + } } if err = startOperatorManager( @@ -389,6 +400,8 @@ func readEnvironmentVariables() error { filelogOffsetSynchImagePullPolicy := readOptionalPullPolicyFromEnvironmentVariable(filelogOffsetSynchImagePullPolicyEnvVarName) + selfMonitoringAndApiAuthToken := os.Getenv(util.SelfMonitoringAndApiAuthTokenEnvVarName) + envVars = environmentVariables{ operatorNamespace: operatorNamespace, deploymentName: deploymentName, @@ -402,6 +415,7 @@ func readEnvironmentVariables() error { configurationReloaderImagePullPolicy: configurationReloaderImagePullPolicy, filelogOffsetSynchImage: filelogOffsetSynchImage, 
filelogOffsetSynchImagePullPolicy: filelogOffsetSynchImagePullPolicy, + selfMonitoringAndApiAuthToken: selfMonitoringAndApiAuthToken, } return nil @@ -504,14 +518,27 @@ func startDash0Controllers( return fmt.Errorf("unable to set up the backend connection reconciler: %w", err) } + persesDashboardCrdReconciler := &controller.PersesDashboardCrdReconciler{ + AuthToken: envVars.selfMonitoringAndApiAuthToken, + } + if err := persesDashboardCrdReconciler.SetupWithManager(ctx, mgr, startupTasksK8sClient, &setupLog); err != nil { + return fmt.Errorf("unable to set up the Perses dashboard reconciler: %w", err) + } + persesDashboardCrdReconciler.InitializeSelfMonitoringMetrics( + meter, + metricNamePrefix, + &setupLog, + ) + operatorConfigurationReconciler := &controller.OperatorConfigurationReconciler{ - Client: k8sClient, - Clientset: clientset, - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("dash0-operator-configuration-controller"), - DeploymentSelfReference: deploymentSelfReference, - Images: images, - DevelopmentMode: developmentMode, + Client: k8sClient, + Clientset: clientset, + PersesDashboardCrdReconciler: persesDashboardCrdReconciler, + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("dash0-operator-configuration-controller"), + DeploymentSelfReference: deploymentSelfReference, + Images: images, + DevelopmentMode: developmentMode, } if err := operatorConfigurationReconciler.SetupWithManager(mgr); err != nil { return fmt.Errorf("unable to set up the operator configuration reconciler: %w", err) @@ -647,31 +674,27 @@ func instrumentAtStartup( } func logCurrentSelfMonitoringSettings(deploymentSelfReference *appsv1.Deployment) { - selfMonitoringConfiguration, err := - selfmonitoring.GetSelfMonitoringConfigurationFromControllerDeployment( + selfMonitoringAndApiAccessConfiguration, err := + selfmonitoringapiaccess.GetSelfMonitoringAndApiAccessConfigurationFromControllerDeployment( deploymentSelfReference, - controller.ManagerContainerName, + 
controller.ControllerContainerName, ) if err != nil { setupLog.Error(err, "cannot determine whether self-monitoring is enabled in the controller deployment") } - if selfMonitoringConfiguration.Enabled { - endpointAndHeaders := selfmonitoring.ConvertExportConfigurationToEnvVarSettings(selfMonitoringConfiguration.Export) - setupLog.Info( - "Self-monitoring settings on controller deployment:", - "enabled", - selfMonitoringConfiguration.Enabled, - "endpoint", - endpointAndHeaders.Endpoint, - ) - } else { - setupLog.Info( - "Self-monitoring settings on controller deployment:", - "enabled", - selfMonitoringConfiguration.Enabled, - ) - } + endpointAndHeaders := + selfmonitoringapiaccess.ConvertExportConfigurationToEnvVarSettings( + selfMonitoringAndApiAccessConfiguration.Export) + setupLog.Info( + "Self-monitoring/API access settings on controller deployment:", + "self-monitoring enabled", + selfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled, + "self-monitoring endpoint", + endpointAndHeaders.Endpoint, + "access to Dash0 API (API endpoint & authorization via token or secret-ref)", + selfMonitoringAndApiAccessConfiguration.HasDash0ApiAccessConfigured(), + ) } func createOperatorConfiguration( diff --git a/config/crd/bases/operator.dash0.com_dash0monitorings.yaml b/config/crd/bases/operator.dash0.com_dash0monitorings.yaml index ae4b5604..6d0a2d90 100644 --- a/config/crd/bases/operator.dash0.com_dash0monitorings.yaml +++ b/config/crd/bases/operator.dash0.com_dash0monitorings.yaml @@ -59,6 +59,14 @@ spec: description: The configuration of the Dash0 ingress endpoint to which telemetry data will be sent. properties: + apiEndpoint: + description: |- + The base URL of the Dash0 API to talk to. This is not where telemetry will be sent, but it is used for managing + dashboards and check rules via the operator. This property is optional. The value needs to be the API endpoint + of your Dash0 organization. 
The correct API endpoint can be copied from https://app.dash0.com -> organization + settings -> "Endpoints" -> "API". The correct endpoint value will always start with "https://api." and end in + ".dash0.com" + type: string authorization: description: Mandatory authorization settings for sending data to Dash0. diff --git a/config/crd/bases/operator.dash0.com_dash0operatorconfigurations.yaml b/config/crd/bases/operator.dash0.com_dash0operatorconfigurations.yaml index 56fcfa3c..edb3fec1 100644 --- a/config/crd/bases/operator.dash0.com_dash0operatorconfigurations.yaml +++ b/config/crd/bases/operator.dash0.com_dash0operatorconfigurations.yaml @@ -59,6 +59,14 @@ spec: description: The configuration of the Dash0 ingress endpoint to which telemetry data will be sent. properties: + apiEndpoint: + description: |- + The base URL of the Dash0 API to talk to. This is not where telemetry will be sent, but it is used for managing + dashboards and check rules via the operator. This property is optional. The value needs to be the API endpoint + of your Dash0 organization. The correct API endpoint can be copied from https://app.dash0.com -> organization + settings -> "Endpoints" -> "API". The correct endpoint value will always start with "https://api." and end in + ".dash0.com" + type: string authorization: description: Mandatory authorization settings for sending data to Dash0. 
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 9d798ebc..180edd7a 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -4,6 +4,14 @@ kind: ClusterRole metadata: name: manager-role rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch - apiGroups: - apps resources: @@ -111,3 +119,11 @@ rules: - get - patch - update +- apiGroups: + - perses.dev + resources: + - persesdashboards + verbs: + - get + - list + - watch diff --git a/go.mod b/go.mod index 58a1c473..4c5e9fac 100644 --- a/go.mod +++ b/go.mod @@ -9,21 +9,29 @@ require ( github.com/dash0hq/dash0-operator/images/pkg/common v0.0.0-00010101000000-000000000000 github.com/go-logr/logr v1.4.2 github.com/google/uuid v1.6.0 + github.com/h2non/gock v1.2.0 + github.com/json-iterator/go v1.1.12 github.com/onsi/ginkgo/v2 v2.20.2 github.com/onsi/gomega v1.34.2 + github.com/perses/perses v0.44.0 + github.com/perses/perses-operator v0.0.0-20240402153734-4ccf03f6c8e6 + github.com/wI2L/jsondiff v0.6.0 go.opentelemetry.io/collector/pdata v1.16.0 go.opentelemetry.io/collector/semconv v0.110.0 go.opentelemetry.io/otel/metric v1.30.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.31.1 + k8s.io/apiextensions-apiserver v0.31.0 k8s.io/apimachinery v0.31.1 k8s.io/client-go v0.31.1 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 sigs.k8s.io/controller-runtime v0.19.0 + sigs.k8s.io/yaml v1.4.0 ) require ( emperror.dev/errors v0.8.1 // indirect + github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -33,6 +41,7 @@ require ( github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-jose/go-jose/v3 v3.0.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect 
github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -47,20 +56,32 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect + github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect + github.com/jpillora/backoff v1.0.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/muhlemmer/gu v0.3.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/nexucis/lamenv v0.5.2 // indirect + github.com/perses/common v0.23.1 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.19.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/tidwall/gjson v1.17.1 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect github.com/x448/float16 v0.8.4 // indirect + github.com/zitadel/oidc/v3 v3.18.0 // indirect + github.com/zitadel/schema v1.3.0 // indirect go.opentelemetry.io/otel v1.30.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.30.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.30.0 // indirect @@ -70,6 +91,7 @@ require ( go.opentelemetry.io/proto/otlp v1.3.1 // indirect 
go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect + golang.org/x/crypto v0.27.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/net v0.29.0 // indirect golang.org/x/oauth2 v0.22.0 // indirect @@ -86,12 +108,10 @@ require ( gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/apiextensions-apiserver v0.31.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240620174524-b456828f718b // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect ) replace github.com/dash0hq/dash0-operator/images/pkg/common => ./images/pkg/common diff --git a/go.sum b/go.sum index 602f5d5b..b86ff6e5 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ emperror.dev/errors v0.8.1 h1:UavXZ5cSX/4u9iyvH6aDcuGkVjeexUGJ7Ij7G4VfQT0= emperror.dev/errors v0.8.1/go.mod h1:YcRvLPh626Ubn2xqtoprejnA5nFha+TJ+2vew48kWuE= +github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df h1:GSoSVRLoBaFpOOds6QyY1L8AX7uoY+Ln3BHc22W40X0= +github.com/barkimedes/go-deepcopy v0.0.0-20220514131651-17c30cfc62df/go.mod h1:hiVxq5OP2bUGBRNS3Z/bt/reCLFNbdcST6gISi1fiOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= @@ -22,6 +24,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-jose/go-jose/v3 v3.0.3 
h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -57,10 +61,16 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/h2non/gock v1.2.0 h1:K6ol8rfrRkUOefooBC8elXoaNGYkpp7y2qcxGG6BzUE= +github.com/h2non/gock v1.2.0/go.mod h1:tNhoxHYW2W42cYkYb1WqzdbYIieALC99kpYr7rH/BQk= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -76,12 +86,26 @@ github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/muhlemmer/gu v0.3.1 h1:7EAqmFrW7n3hETvuAdmFmn4hS8W+z3LgKtrnow+YzNM= +github.com/muhlemmer/gu v0.3.1/go.mod h1:YHtHR+gxM+bKEIIs7Hmi9sPT3ZDUvTN/i88wQpZkrdM= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4= +github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= +github.com/nexucis/lamenv v0.5.2 h1:tK/u3XGhCq9qIoVNcXsK9LZb8fKopm0A5weqSRvHd7M= +github.com/nexucis/lamenv v0.5.2/go.mod h1:HusJm6ltmmT7FMG8A750mOLuME6SHCsr2iFYxp5fFi0= github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= +github.com/perses/common v0.23.1 h1:agRsLOOFMoecwATZ0sb414ulOYC70z5vUvsi2RCdWQM= +github.com/perses/common v0.23.1/go.mod h1:CZ4xpwLMZ61vBM32dfSflIBpqxbnHOlNknIziFAjU0c= +github.com/perses/perses v0.44.0 h1:5OZRiT4+mfFyQ3fK2p89WIJzSseB5oQLr4ZWXG3kO4g= +github.com/perses/perses v0.44.0/go.mod 
h1:vSJpzFS1gjolahl+Of7buj38xohOC4euuLTjUWPnlOY= +github.com/perses/perses-operator v0.0.0-20240402153734-4ccf03f6c8e6 h1:wHOV+H/fjZiTVxdYhLa/DU6PIdaOxMgjOYFD33Bs5V8= +github.com/perses/perses-operator v0.0.0-20240402153734-4ccf03f6c8e6/go.mod h1:FTIGrH1+0cqFVeCY+XksI65CGmmAsgdqt2OiEyPrPOM= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -97,16 +121,36 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= +github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 
h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/wI2L/jsondiff v0.6.0 h1:zrsH3FbfVa3JO9llxrcDy/XLkYPLgoMX6Mz3T2PP2AI= +github.com/wI2L/jsondiff v0.6.0/go.mod h1:D6aQ5gKgPF9g17j+E9N7aasmU1O+XvfmWm1y8UMmNpw= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zitadel/oidc/v3 v3.18.0 h1:NGdxLIYbuvaIqc/Na1fu61wBXIbqufp7LsFNV1bXOQw= +github.com/zitadel/oidc/v3 v3.18.0/go.mod h1:tY75hMcm07McpPXzvgvFTNPefPYDnHRYZQZVn9gtAps= +github.com/zitadel/schema v1.3.0 h1:kQ9W9tvIwZICCKWcMvCEweXET1OcOyGEuFbHs4o5kg0= +github.com/zitadel/schema v1.3.0/go.mod h1:NptN6mkBDFvERUCvZHlvWmmME+gmZ44xzwRXwhzsbtc= go.opentelemetry.io/collector/pdata v1.16.0 h1:g02K8jlRnmQ7TQDuXpdgVL6vIxIVqr5Gbb1qIR27rto= go.opentelemetry.io/collector/pdata v1.16.0/go.mod h1:YZZJIt2ehxosYf/Y1pbvexjNWsIGNNrzzlCTO9jC1F4= go.opentelemetry.io/collector/semconv v0.110.0 h1:KHQnOHe3gUz0zsxe8ph9kN5OTypCFD4V+06AiBTfeNk= @@ -138,14 +182,24 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= 
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= @@ -153,15 +207,34 @@ golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.25.0 
h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= @@ -170,6 +243,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools 
v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -196,6 +271,7 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= diff --git a/helm-chart/dash0-operator/README.md b/helm-chart/dash0-operator/README.md index ff4a7327..3f51b6d1 100644 --- a/helm-chart/dash0-operator/README.md +++ b/helm-chart/dash0-operator/README.md @@ -31,7 +31,8 @@ To use the operator, you will need provide two configuration values: * `endpoint`: The URL of the Dash0 ingress endpoint backend to which telemetry data will be sent. This property is mandatory when installing the operator. This is the OTLP/gRPC endpoint of your Dash0 organization. - The correct OTLP/gRPC endpoint can be copied fom https://app.dash0.com -> organization settings -> "Endpoints". + The correct OTLP/gRPC endpoint can be copied fom https://app.dash0.com -> organization settings -> "Endpoints" + -> "OTLP/gRPC". Note that the correct endpoint value will always start with `ingress.` and end in `dash0.com:4317`. Including a protocol prefix (e.g. `https://`) is optional. 
* Either `token` or `secretRef`: Exactly one of these two properties needs to be provided when installing the operator. @@ -63,6 +64,7 @@ helm install \ --create-namespace \ --set operator.dash0Export.enabled=true \ --set operator.dash0Export.endpoint=REPLACE THIS WITH YOUR DASH0 INGRESS ENDPOINT \ + --set operator.dash0Export.apiEndpoint=REPLACE THIS WITH YOUR DASH0 API ENDPOINT \ --set operator.dash0Export.token=REPLACE THIS WITH YOUR DASH0 AUTH TOKEN \ dash0-operator \ dash0-operator/dash0-operator @@ -76,6 +78,7 @@ helm install \ --create-namespace \ --set operator.dash0Export.enabled=true \ --set operator.dash0Export.endpoint=REPLACE THIS WITH YOUR DASH0 INGRESS ENDPOINT \ + --set operator.dash0Export.apiEndpoint=REPLACE THIS WITH YOUR DASH0 API ENDPOINT \ --set operator.dash0Export.secretRef.name=REPLACE THIS WITH THE NAME OF AN EXISTING KUBERNETES SECRET \ --set operator.dash0Export.secretRef.key=REPLACE THIS WITH THE PROPERTY KEY IN THAT SECRET \ dash0-operator \ @@ -133,13 +136,16 @@ spec: authorization: # Provide the Dash0 authorization token as a string via the token property: token: auth_... # TODO needs to be replaced with the actual value, see below + + apiEndpoint: https://api.....dash0.com # TODO needs to be replaced with the actual value, see below ``` -You need to provide two configuration settings: -* `spec.export.dash0.endpoint`: The URL of the Dash0 ingress endpoint backend to which telemetry data will be sent. +You need to provide two mandatory configuration settings: +* `spec.export.dash0.endpoint`: The URL of the Dash0 ingress endpoint to which telemetry data will be sent. This property is mandatory. Replace the value in the example above with the OTLP/gRPC endpoint of your Dash0 organization. - The correct OTLP/gRPC endpoint can be copied fom https://app.dash0.com -> organization settings -> "Endpoints". + The correct OTLP/gRPC endpoint can be copied fom https://app.dash0.com -> organization settings -> "Endpoints" + -> "OTLP/gRPC". 
Note that the correct endpoint value will always start with `ingress.` and end in `dash0.com:4317`. Including a protocol prefix (e.g. `https://`) is optional. * `spec.export.dash0.authorization.token` or `spec.export.dash0.authorization.secretRef`: Exactly one of these two @@ -163,6 +169,11 @@ You need to provide two configuration settings: cluster will be able to read the value. Additional steps are required to make sure secret values are encrypted. See https://kubernetes.io/docs/concepts/configuration/secret/ for more information on Kubernetes secrets. +* `spec.export.dash0.apiEndpoint`: The base URL of the Dash0 API to talk to. This is not where telemetry will be sent, + but it is used for managing dashboards and check rules via the operator. This property is optional. The value needs + to be the API endpoint of your Dash0 organization. The correct API endpoint can be copied fom https://app.dash0.com + -> organization settings -> "Endpoints" -> "API". The correct endpoint value will always start with "https://api." and + end in ".dash0.com". If this property is omitted, managing dashboards and check rules via the operator will not work. After providing the required values, save the file and apply the resource to the Kubernetes cluster you want to monitor: @@ -294,6 +305,7 @@ helm install \ --namespace dash0-system \ --set operator.dash0Export.enabled=true \ --set operator.dash0Export.endpoint=REPLACE THIS WITH YOUR DASH0 INGRESS ENDPOINT \ + --set operator.dash0Export.apiEndpoint=REPLACE THIS WITH YOUR DASH0 API ENDPOINT \ --set operator.dash0Export.secretRef.name=dash0-authorization-secret \ --set operator.dash0Export.secretRef.key=token \ dash0-operator \ @@ -318,6 +330,8 @@ spec: secretRef: name: dash0-authorization-secret key: token + + apiEndpoint: https://api... 
# optional, see above ``` When deploying the operator configuration resource via `kubectl`, the following defaults apply: @@ -338,6 +352,8 @@ spec: authorization: secretRef: {} + + apiEndpoint: https://api... # optional, see above ``` Note: There are no defaults when using `--set operator.dash0Export.secretRef.name` and @@ -369,6 +385,8 @@ spec: authorization: # see above ... + + apiEndpoint: https://api... # optional, see above ``` ### Exporting Data to Other Observability Backends @@ -526,3 +544,38 @@ steps again: 1. set up a [Dash0 backend connection](#configuring-the-dash0-backend-connection) and 2. enable Dash0 monitoring in each namespace you want to monitor, see [Enable Dash0 Monitoring For a Namespace](#enable-dash0-monitoring-for-a-namespace). + +## Managing Dash0 Dashboards with the Operator + +You can manage your Dash0 dashboards via the Dash0 Kubernetes operator. + +Pre-requisites for this feature: +* A Dash0 operator configuration resource has to be installed in the cluster. +* The operator configuration resource must have the `apiEndpoint` property. +* The operator configuration resource must have a Dash0 export configured with authorization + (either `token` or `secretRef`). + +Furthermore, the custom resource definition for Perses dashboards needs to be installed in the cluster. There are two +ways to achieve this: +* Install the Perses dashboard custom resource definition with the following command: +```console +kubectl apply --server-side -f https://raw.githubusercontent.com/perses/perses-operator/main/config/crd/bases/perses.dev_persesdashboards.yaml +``` +* Alternatively, install the full Perses operator: Go to https://github.com/perses/perses-operator and follow the installation + instructions there. + +Note that the custom resource definition needs to be installed before the Dash0 operator is started. 
+If you have installed the Dash0 operator before installing the Perses dashboard custom resource definition, you need to +restart the Dash0 operator once, for example by deleting the operator's controller pod: +`kubectl --namespace dash0-system delete pod -l app.kubernetes.io/component=controller` + +With the prerequisites in place, you can manage Dash0 dashboards via the operator. +The Dash0 operator will watch for Perses dashboard resources in the cluster and synchronize them with the Dash0 backend: +* When a new Perses dashboard resource is created, the operator will create a corresponding dashboard via Dash0's API. +* When a Perses dashboard resource is changed, the operator will update the corresponding dashboard via Dash0's API. +* When a Perses dashboard resource is deleted, the operator will delete the corresponding dashboard via Dash0's API. + +The dashboards created by the operator will be in read-only mode in the Dash0 UI. + +If the Dash0 operator configuration resource has the `dataset` property set, the operator will create the dashboards +in that specified dataset, otherwise they will be created in the `default` dataset. diff --git a/helm-chart/dash0-operator/templates/operator/cluster-roles.yaml b/helm-chart/dash0-operator/templates/operator/cluster-roles.yaml index 4d51afcf..792eae3f 100644 --- a/helm-chart/dash0-operator/templates/operator/cluster-roles.yaml +++ b/helm-chart/dash0-operator/templates/operator/cluster-roles.yaml @@ -10,6 +10,16 @@ metadata: rules: +# Permissions required to watch for the foreign CRD (Perses dashboards, Prometheus scrape configs). +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch + # Permissions required to instrument workloads in the apps API group. - apiGroups: - apps @@ -75,6 +85,16 @@ rules: - get - list +# Permissions required to watch for the Perses dashboard resources. 
+- apiGroups: + - perses.dev + resources: + - persesdashboards + verbs: + - get + - list + - watch + # Permissions required to manage the Dash0 monitoring resource, its finalizers and status. - apiGroups: - operator.dash0.com diff --git a/helm-chart/dash0-operator/templates/operator/custom-resource-definition-monitoring.yaml b/helm-chart/dash0-operator/templates/operator/custom-resource-definition-monitoring.yaml index 32c49f12..9719fc3b 100644 --- a/helm-chart/dash0-operator/templates/operator/custom-resource-definition-monitoring.yaml +++ b/helm-chart/dash0-operator/templates/operator/custom-resource-definition-monitoring.yaml @@ -59,6 +59,14 @@ spec: description: The configuration of the Dash0 ingress endpoint to which telemetry data will be sent. properties: + apiEndpoint: + description: |- + The base URL of the Dash0 API to talk to. This is not where telemetry will be sent, but it is used for managing + dashboards and check rules via the operator. This property is optional. The value needs to be the API endpoint + of your Dash0 organization. The correct API endpoint can be copied fom https://app.dash0.com -> organization + settings -> "Endpoints" -> "API". The correct endpoint value will always start with "https://api." and end in + ".dash0.com" + type: string authorization: description: Mandatory authorization settings for sending data to Dash0. @@ -69,7 +77,7 @@ spec: description: |- A reference to a Kubernetes secret containing the Dash0 authorization token. This property is optional, and is ignored if the token property is set. The authorization token for your Dash0 organization can be copied from - https://app.dash0.com/settings. + https://app.dash0.com -> organization settings -> "Auth Tokens". properties: key: default: token @@ -89,7 +97,8 @@ spec: description: |- The Dash0 authorization token. This property is optional, but either this property or the SecretRef property has to be provided. 
If both are provided, the token will be used and SecretRef will be ignored. The authorization - token for your Dash0 organization can be copied from https://app.dash0.com/settings. + token for your Dash0 organization can be copied from https://app.dash0.com -> organization settings -> + "Auth Tokens". type: string type: object dataset: @@ -102,8 +111,8 @@ spec: description: |- The URL of the Dash0 ingress endpoint to which telemetry data will be sent. This property is mandatory. The value needs to be the OTLP/gRPC endpoint of your Dash0 organization. The correct OTLP/gRPC endpoint can be copied fom - https://app.dash0.com/settings. The correct endpoint value will always start with `ingress.` and end in - `dash0.com:4317`. + https://app.dash0.com -> organization settings -> "Endpoints". The correct endpoint value will always start with + `ingress.` and end in `dash0.com:4317`. type: string required: - authorization diff --git a/helm-chart/dash0-operator/templates/operator/custom-resource-definition-operator-configuration.yaml b/helm-chart/dash0-operator/templates/operator/custom-resource-definition-operator-configuration.yaml index ea02b997..3f366e06 100644 --- a/helm-chart/dash0-operator/templates/operator/custom-resource-definition-operator-configuration.yaml +++ b/helm-chart/dash0-operator/templates/operator/custom-resource-definition-operator-configuration.yaml @@ -59,6 +59,14 @@ spec: description: The configuration of the Dash0 ingress endpoint to which telemetry data will be sent. properties: + apiEndpoint: + description: |- + The base URL of the Dash0 API to talk to. This is not where telemetry will be sent, but it is used for managing + dashboards and check rules via the operator. This property is optional. The value needs to be the API endpoint + of your Dash0 organization. The correct API endpoint can be copied fom https://app.dash0.com -> organization + settings -> "Endpoints" -> "API". The correct endpoint value will always start with "https://api." 
and end in + ".dash0.com" + type: string authorization: description: Mandatory authorization settings for sending data to Dash0. @@ -69,7 +77,7 @@ spec: description: |- A reference to a Kubernetes secret containing the Dash0 authorization token. This property is optional, and is ignored if the token property is set. The authorization token for your Dash0 organization can be copied from - https://app.dash0.com/settings. + https://app.dash0.com -> organization settings -> "Auth Tokens". properties: key: default: token @@ -89,7 +97,8 @@ spec: description: |- The Dash0 authorization token. This property is optional, but either this property or the SecretRef property has to be provided. If both are provided, the token will be used and SecretRef will be ignored. The authorization - token for your Dash0 organization can be copied from https://app.dash0.com/settings. + token for your Dash0 organization can be copied from https://app.dash0.com -> organization settings -> + "Auth Tokens". type: string type: object dataset: @@ -102,8 +111,8 @@ spec: description: |- The URL of the Dash0 ingress endpoint to which telemetry data will be sent. This property is mandatory. The value needs to be the OTLP/gRPC endpoint of your Dash0 organization. The correct OTLP/gRPC endpoint can be copied fom - https://app.dash0.com/settings. The correct endpoint value will always start with `ingress.` and end in - `dash0.com:4317`. + https://app.dash0.com -> organization settings -> "Endpoints". The correct endpoint value will always start with + `ingress.` and end in `dash0.com:4317`. 
type: string required: - authorization diff --git a/helm-chart/dash0-operator/templates/operator/deployment-and-webhooks.yaml b/helm-chart/dash0-operator/templates/operator/deployment-and-webhooks.yaml index ef25b1ba..6673c4d1 100644 --- a/helm-chart/dash0-operator/templates/operator/deployment-and-webhooks.yaml +++ b/helm-chart/dash0-operator/templates/operator/deployment-and-webhooks.yaml @@ -100,6 +100,9 @@ spec: {{- else }} {{- fail "Error: operator.dash0Export.enabled is set to true, but neither operator.dash0Export.token nor operator.dash0Export.secretRef.name & operator.dash0Export.secretRef.key have been provided. Please refer to the installation instructions at https://github.com/dash0hq/dash0-operator/tree/main/helm-chart/dash0-operator." -}} {{- end }} +{{- if .Values.operator.dash0Export.apiEndpoint }} + - --operator-configuration-api-endpoint={{ .Values.operator.dash0Export.apiEndpoint }} +{{- end }} {{- end }} env: - name: DASH0_OPERATOR_NAMESPACE diff --git a/helm-chart/dash0-operator/tests/operator/__snapshot__/cluster-roles_test.yaml.snap b/helm-chart/dash0-operator/tests/operator/__snapshot__/cluster-roles_test.yaml.snap index c80d9dda..c8f1048b 100644 --- a/helm-chart/dash0-operator/tests/operator/__snapshot__/cluster-roles_test.yaml.snap +++ b/helm-chart/dash0-operator/tests/operator/__snapshot__/cluster-roles_test.yaml.snap @@ -13,6 +13,14 @@ cluster roles should match snapshot: helm.sh/chart: dash0-operator-0.0.0 name: dash0-operator-manager-role rules: + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch - apiGroups: - apps resources: @@ -66,6 +74,14 @@ cluster roles should match snapshot: - delete - get - list + - apiGroups: + - perses.dev + resources: + - persesdashboards + verbs: + - get + - list + - watch - apiGroups: - operator.dash0.com resources: diff --git a/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-monitoring_test.yaml.snap 
b/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-monitoring_test.yaml.snap index a8f7cace..d2c90328 100644 --- a/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-monitoring_test.yaml.snap +++ b/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-monitoring_test.yaml.snap @@ -59,6 +59,14 @@ custom resource definition should match snapshot: dash0: description: The configuration of the Dash0 ingress endpoint to which telemetry data will be sent. properties: + apiEndpoint: + description: |- + The base URL of the Dash0 API to talk to. This is not where telemetry will be sent, but it is used for managing + dashboards and check rules via the operator. This property is optional. The value needs to be the API endpoint + of your Dash0 organization. The correct API endpoint can be copied fom https://app.dash0.com -> organization + settings -> "Endpoints" -> "API". The correct endpoint value will always start with "https://api." and end in + ".dash0.com" + type: string authorization: description: Mandatory authorization settings for sending data to Dash0. maxProperties: 1 @@ -68,7 +76,7 @@ custom resource definition should match snapshot: description: |- A reference to a Kubernetes secret containing the Dash0 authorization token. This property is optional, and is ignored if the token property is set. The authorization token for your Dash0 organization can be copied from - https://app.dash0.com/settings. + https://app.dash0.com -> organization settings -> "Auth Tokens". properties: key: default: token @@ -86,7 +94,8 @@ custom resource definition should match snapshot: description: |- The Dash0 authorization token. This property is optional, but either this property or the SecretRef property has to be provided. If both are provided, the token will be used and SecretRef will be ignored. 
The authorization - token for your Dash0 organization can be copied from https://app.dash0.com/settings. + token for your Dash0 organization can be copied from https://app.dash0.com -> organization settings -> + "Auth Tokens". type: string type: object dataset: @@ -99,8 +108,8 @@ custom resource definition should match snapshot: description: |- The URL of the Dash0 ingress endpoint to which telemetry data will be sent. This property is mandatory. The value needs to be the OTLP/gRPC endpoint of your Dash0 organization. The correct OTLP/gRPC endpoint can be copied fom - https://app.dash0.com/settings. The correct endpoint value will always start with `ingress.` and end in - `dash0.com:4317`. + https://app.dash0.com -> organization settings -> "Endpoints". The correct endpoint value will always start with + `ingress.` and end in `dash0.com:4317`. type: string required: - authorization diff --git a/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-operator-configuration_test.yaml.snap b/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-operator-configuration_test.yaml.snap index eacd0707..c186eff9 100644 --- a/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-operator-configuration_test.yaml.snap +++ b/helm-chart/dash0-operator/tests/operator/__snapshot__/custom-resource-definition-operator-configuration_test.yaml.snap @@ -57,6 +57,14 @@ custom resource definition should match snapshot: dash0: description: The configuration of the Dash0 ingress endpoint to which telemetry data will be sent. properties: + apiEndpoint: + description: |- + The base URL of the Dash0 API to talk to. This is not where telemetry will be sent, but it is used for managing + dashboards and check rules via the operator. This property is optional. The value needs to be the API endpoint + of your Dash0 organization. 
The correct API endpoint can be copied fom https://app.dash0.com -> organization + settings -> "Endpoints" -> "API". The correct endpoint value will always start with "https://api." and end in + ".dash0.com" + type: string authorization: description: Mandatory authorization settings for sending data to Dash0. maxProperties: 1 @@ -66,7 +74,7 @@ custom resource definition should match snapshot: description: |- A reference to a Kubernetes secret containing the Dash0 authorization token. This property is optional, and is ignored if the token property is set. The authorization token for your Dash0 organization can be copied from - https://app.dash0.com/settings. + https://app.dash0.com -> organization settings -> "Auth Tokens". properties: key: default: token @@ -84,7 +92,8 @@ custom resource definition should match snapshot: description: |- The Dash0 authorization token. This property is optional, but either this property or the SecretRef property has to be provided. If both are provided, the token will be used and SecretRef will be ignored. The authorization - token for your Dash0 organization can be copied from https://app.dash0.com/settings. + token for your Dash0 organization can be copied from https://app.dash0.com -> organization settings -> + "Auth Tokens". type: string type: object dataset: @@ -97,8 +106,8 @@ custom resource definition should match snapshot: description: |- The URL of the Dash0 ingress endpoint to which telemetry data will be sent. This property is mandatory. The value needs to be the OTLP/gRPC endpoint of your Dash0 organization. The correct OTLP/gRPC endpoint can be copied fom - https://app.dash0.com/settings. The correct endpoint value will always start with `ingress.` and end in - `dash0.com:4317`. + https://app.dash0.com -> organization settings -> "Endpoints". The correct endpoint value will always start with + `ingress.` and end in `dash0.com:4317`. 
type: string required: - authorization diff --git a/helm-chart/dash0-operator/tests/operator/deployment-and-webhooks_test.yaml b/helm-chart/dash0-operator/tests/operator/deployment-and-webhooks_test.yaml index 2ade23c6..ba2625cb 100644 --- a/helm-chart/dash0-operator/tests/operator/deployment-and-webhooks_test.yaml +++ b/helm-chart/dash0-operator/tests/operator/deployment-and-webhooks_test.yaml @@ -245,6 +245,7 @@ tests: enabled: true endpoint: https://ingress.dash0.com token: "very-secret-dash0-auth-token" + apiEndpoint: https://api.dash0.com asserts: - equal: path: spec.template.spec.containers[0].args[3] @@ -252,6 +253,9 @@ tests: - equal: path: spec.template.spec.containers[0].args[4] value: --operator-configuration-token=very-secret-dash0-auth-token + - equal: + path: spec.template.spec.containers[0].args[5] + value: --operator-configuration-api-endpoint=https://api.dash0.com # Note: We deliberately do not have a test for the operator.dash0Export.secretRef variant, since this would trigger # a check whether the secret actually exists in the cluster, which of course would fail when runnig helm chart unit # tests. diff --git a/helm-chart/dash0-operator/values.yaml b/helm-chart/dash0-operator/values.yaml index cca4c402..869ccd48 100644 --- a/helm-chart/dash0-operator/values.yaml +++ b/helm-chart/dash0-operator/values.yaml @@ -5,23 +5,31 @@ operator: # Use the operator.dash0Export settings to configure the connection to the Dash0 backend; telemetry will be sent to - # the configured Dash0 backend by default. Under the hood, this will create a Dash0OperatorConfiguration resource - # right away, when starting the operator. If left empty, you can always create a Dash0OperatorConfiguration resource - # manually later. + # the configured Dash0 backend by default. Under the hood, this will make sure a Dash0OperatorConfiguration resource + # will be created right away, when starting the operator. 
If left empty, you can always create a + # Dash0OperatorConfiguration resource manually later. dash0Export: # Set this to true to enable the creation of a Dash0OperatorConfiguration resource at startup. If a # Dash0OperatorConfiguration already exists in the cluster, no action will be taken. Note that if this is set to - # true, you will also need to provide a valid endpoint (operator.dash0Export.endpoint), and either or an auth - # token (operator.dash0Export.token) or a reference to a Kubernetes secret containing that token + # true, you will also need to provide a valid endpoint (operator.dash0Export.endpoint), and either an auth token + # (operator.dash0Export.token) or a reference to a Kubernetes secret containing that token # (operator.dash0Export.secretRef). enabled: false # The URL of the Dash0 ingress endpoint to which telemetry data will be sent. This property is mandatory if # operator.dash0Export.enabled is true, otherwise it will be ignored. The value needs to be the OTLP/gRPC endpoint # of your Dash0 organization. The correct OTLP/gRPC endpoint can be copied fom https://app.dash0.com -> organization - # settings -> "Endpoints". The correct endpoint value will always start with `ingress.` and end in `dash0.com:4317`. + # settings -> "Endpoints" -> "OTLP/gRPC". The correct endpoint value will always start with `ingress.` and end in + # `dash0.com:4317`. endpoint: + # The base URL of the Dash0 API to talk to. This is not where telemetry will be sent, but it is used for managing + # dashboards and check rules via the operator. This property is optional. The value needs to be the API endpoint + # of your Dash0 organization. The correct API endpoint can be copied fom https://app.dash0.com -> organization + # settings -> "Endpoints" -> "API". The correct endpoint value will always start with "https://api." and end in + # ".dash0.com" + apiEndpoint: + # The Dash0 authorization token. 
This property is optional, but either this property or the secretRef configuration # has to be provided if operator.dash0Export.enabled is true. If operator.dash0Export.enabled is false, this # property will be ignored. diff --git a/images/instrumentation/Dockerfile b/images/instrumentation/Dockerfile index 3ec7a5a1..1515a8d0 100644 --- a/images/instrumentation/Dockerfile +++ b/images/instrumentation/Dockerfile @@ -2,7 +2,7 @@ FROM node:20.13.1-alpine3.19 AS build-node.js RUN mkdir -p /dash0-init-container/instrumentation/node.js WORKDIR /dash0-init-container/instrumentation/node.js -COPY node.js/package* . +COPY node.js/package* ./ COPY node.js/dash0hq-opentelemetry-*.tgz . RUN NPM_CONFIG_UPDATE_NOTIFIER=false \ npm ci \ diff --git a/internal/backendconnection/backendconnection_manager_test.go b/internal/backendconnection/backendconnection_manager_test.go index 02f0f650..1306b02c 100644 --- a/internal/backendconnection/backendconnection_manager_test.go +++ b/internal/backendconnection/backendconnection_manager_test.go @@ -21,7 +21,7 @@ import ( ) var ( - operatorNamespace = Dash0OperatorNamespace + operatorNamespace = OperatorNamespace dash0MonitoringResource = &dash0v1alpha1.Dash0Monitoring{ Spec: dash0v1alpha1.Dash0MonitoringSpec{ @@ -46,7 +46,7 @@ var _ = Describe("The backend connection manager", Ordered, func() { var manager *BackendConnectionManager BeforeAll(func() { - EnsureDash0OperatorNamespaceExists(ctx, k8sClient) + EnsureOperatorNamespaceExists(ctx, k8sClient) EnsureTestNamespaceExists(ctx, k8sClient) }) diff --git a/internal/backendconnection/otelcolresources/desired_state.go b/internal/backendconnection/otelcolresources/desired_state.go index 2c794167..4f7769e6 100644 --- a/internal/backendconnection/otelcolresources/desired_state.go +++ b/internal/backendconnection/otelcolresources/desired_state.go @@ -18,17 +18,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" dash0v1alpha1 "github.com/dash0hq/dash0-operator/api/dash0monitoring/v1alpha1" 
- "github.com/dash0hq/dash0-operator/internal/dash0/selfmonitoring" + "github.com/dash0hq/dash0-operator/internal/dash0/selfmonitoringapiaccess" "github.com/dash0hq/dash0-operator/internal/dash0/util" ) type oTelColConfig struct { - Namespace string - NamePrefix string - Export dash0v1alpha1.Export - SelfMonitoringConfiguration selfmonitoring.SelfMonitoringConfiguration - Images util.Images - DevelopmentMode bool + Namespace string + NamePrefix string + Export dash0v1alpha1.Export + SelfMonitoringAndApiAccessConfiguration selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration + Images util.Images + DevelopmentMode bool } type collectorConfigurationTemplateValues struct { @@ -407,7 +407,7 @@ func assembleCollectorDaemonSet(config *oTelColConfig, resourceSpecs *OTelColRes SecurityContext: &corev1.PodSecurityContext{}, // This setting is required to enable the configuration reloader process to send Unix signals to the // collector process. - ShareProcessNamespace: &util.True, + ShareProcessNamespace: ptr.To(true), InitContainers: []corev1.Container{assembleFileLogOffsetSynchInitContainer( config, resourceSpecs.CollectorDaemonSetFileLogOffsetSynchContainerResources, @@ -430,10 +430,10 @@ func assembleCollectorDaemonSet(config *oTelColConfig, resourceSpecs *OTelColRes }, } - if config.SelfMonitoringConfiguration.Enabled { - err = selfmonitoring.EnableSelfMonitoringInCollectorDaemonSet( + if config.SelfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled { + err = selfmonitoringapiaccess.EnableSelfMonitoringInCollectorDaemonSet( collectorDaemonSet, - config.SelfMonitoringConfiguration, + config.SelfMonitoringAndApiAccessConfiguration, config.Images.GetOperatorVersion(), config.DevelopmentMode, ) @@ -581,7 +581,7 @@ func assembleCollectorEnvVars(config *oTelColConfig, goMemLimit string) ([]corev if config.Export.Dash0 != nil { authTokenEnvVar, err := util.CreateEnvVarForAuthorization( - *config.Export.Dash0, + (*(config.Export.Dash0)).Authorization, 
authTokenEnvVarName, ) if err != nil { @@ -860,7 +860,7 @@ func assembleCollectorDeployment( SecurityContext: &corev1.PodSecurityContext{}, // This setting is required to enable the configuration reloader process to send Unix signals to the // collector process. - ShareProcessNamespace: &util.True, + ShareProcessNamespace: ptr.To(true), Containers: []corev1.Container{ collectorContainer, assembleConfigurationReloaderContainer( @@ -875,10 +875,10 @@ func assembleCollectorDeployment( }, } - if config.SelfMonitoringConfiguration.Enabled { - err = selfmonitoring.EnableSelfMonitoringInCollectorDeployment( + if config.SelfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled { + err = selfmonitoringapiaccess.EnableSelfMonitoringInCollectorDeployment( collectorDeployment, - config.SelfMonitoringConfiguration, + config.SelfMonitoringAndApiAccessConfiguration, config.Images.GetOperatorVersion(), config.DevelopmentMode, ) diff --git a/internal/backendconnection/otelcolresources/desired_state_test.go b/internal/backendconnection/otelcolresources/desired_state_test.go index d0e22170..a45c547f 100644 --- a/internal/backendconnection/otelcolresources/desired_state_test.go +++ b/internal/backendconnection/otelcolresources/desired_state_test.go @@ -11,7 +11,7 @@ import ( corev1 "k8s.io/api/core/v1" dash0v1alpha1 "github.com/dash0hq/dash0-operator/api/dash0monitoring/v1alpha1" - "github.com/dash0hq/dash0-operator/internal/dash0/selfmonitoring" + "github.com/dash0hq/dash0-operator/internal/dash0/selfmonitoringapiaccess" "github.com/dash0hq/dash0-operator/internal/dash0/util" . 
"github.com/onsi/ginkgo/v2" @@ -228,9 +228,9 @@ var _ = Describe("The desired state of the OpenTelemetry Collector resources", f Namespace: namespace, NamePrefix: namePrefix, Export: Dash0ExportWithEndpointAndToken(), - SelfMonitoringConfiguration: selfmonitoring.SelfMonitoringConfiguration{ - Enabled: true, - Export: Dash0ExportWithEndpointTokenAndInsightsDataset(), + SelfMonitoringAndApiAccessConfiguration: selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration{ + SelfMonitoringEnabled: true, + Export: Dash0ExportWithEndpointTokenAndInsightsDataset(), }, Images: TestImages, }, &DefaultOTelColResourceSpecs, false) @@ -239,7 +239,7 @@ var _ = Describe("The desired state of the OpenTelemetry Collector resources", f daemonSet := getDaemonSet(desiredState) selfMonitoringConfiguration, err := parseBackSelfMonitoringEnvVarsFromCollectorDaemonSet(daemonSet) Expect(err).NotTo(HaveOccurred()) - Expect(selfMonitoringConfiguration.Enabled).To(BeTrue()) + Expect(selfMonitoringConfiguration.SelfMonitoringEnabled).To(BeTrue()) Expect(selfMonitoringConfiguration.Export.Dash0).ToNot(BeNil()) Expect(selfMonitoringConfiguration.Export.Dash0.Endpoint).To(Equal(EndpointDash0WithProtocolTest)) Expect(selfMonitoringConfiguration.Export.Dash0.Dataset).To(Equal(util.DatasetInsights)) @@ -253,9 +253,9 @@ var _ = Describe("The desired state of the OpenTelemetry Collector resources", f Namespace: namespace, NamePrefix: namePrefix, Export: Dash0ExportWithEndpointAndToken(), - SelfMonitoringConfiguration: selfmonitoring.SelfMonitoringConfiguration{ - Enabled: false, - Export: Dash0ExportWithEndpointTokenAndInsightsDataset(), + SelfMonitoringAndApiAccessConfiguration: selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration{ + SelfMonitoringEnabled: false, + Export: Dash0ExportWithEndpointTokenAndInsightsDataset(), }, Images: TestImages, }, &DefaultOTelColResourceSpecs, false) @@ -264,7 +264,7 @@ var _ = Describe("The desired state of the OpenTelemetry Collector resources", f 
daemonSet := getDaemonSet(desiredState) selfMonitoringConfiguration, err := parseBackSelfMonitoringEnvVarsFromCollectorDaemonSet(daemonSet) Expect(err).NotTo(HaveOccurred()) - Expect(selfMonitoringConfiguration.Enabled).To(BeFalse()) + Expect(selfMonitoringConfiguration.SelfMonitoringEnabled).To(BeFalse()) Expect(selfMonitoringConfiguration.Export.Dash0).To(BeNil()) Expect(selfMonitoringConfiguration.Export.Grpc).To(BeNil()) Expect(selfMonitoringConfiguration.Export.Http).To(BeNil()) @@ -349,31 +349,22 @@ func findVolumeMountByName(objects []corev1.VolumeMount, name string) *corev1.Vo // However, this also tests the functionality used in // selfmonitoring.GetSelfMonitoringConfigurationFromControllerDeployment. func parseBackSelfMonitoringEnvVarsFromCollectorDaemonSet(collectorDemonSet *appsv1.DaemonSet) ( - selfmonitoring.SelfMonitoringConfiguration, + selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration, error, ) { - selfMonitoringConfigurations := make(map[string]selfmonitoring.SelfMonitoringConfiguration) - - // Check that we have the OTel environment variabless set on all init containers and regular containers. 
- // for _, container := range collectorDemonSet.Spec.Template.Spec.InitContainers { - // if selfMonitoringConfiguration, err := selfmonitoring.ParseSelfMonitoringConfigurationFromContainer(&container); err != nil { - // return selfmonitoring.SelfMonitoringConfiguration{}, err - // } else { - // selfMonitoringConfigurations[container.Name] = selfMonitoringConfiguration - // } - // } + selfMonitoringConfigurations := make(map[string]selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration) for _, container := range collectorDemonSet.Spec.Template.Spec.Containers { if selfMonitoringConfiguration, err := - selfmonitoring.ParseSelfMonitoringConfigurationFromContainer(&container); err != nil { - return selfmonitoring.SelfMonitoringConfiguration{}, err + selfmonitoringapiaccess.ParseSelfMonitoringConfigurationFromContainer(&container); err != nil { + return selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration{}, err } else { selfMonitoringConfigurations[container.Name] = selfMonitoringConfiguration } } // verify that the configurations on all init containers and regular containers are consistent - var referenceMonitoringConfiguration *selfmonitoring.SelfMonitoringConfiguration + var referenceMonitoringConfiguration *selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration for _, selfMonitoringConfiguration := range selfMonitoringConfigurations { // Note: Using a local var in the loop fixes golangci-lint complaint exportloopref, see // https://github.com/kyoh86/exportloopref. 
@@ -382,7 +373,7 @@ func parseBackSelfMonitoringEnvVarsFromCollectorDaemonSet(collectorDemonSet *app referenceMonitoringConfiguration = &loopLocalSelfMonitoringConfiguration } else { if !reflect.DeepEqual(*referenceMonitoringConfiguration, loopLocalSelfMonitoringConfiguration) { - return selfmonitoring.SelfMonitoringConfiguration{}, + return selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration{}, fmt.Errorf("inconsistent self-monitoring configurations: %v", selfMonitoringConfigurations) } } @@ -391,6 +382,6 @@ func parseBackSelfMonitoringEnvVarsFromCollectorDaemonSet(collectorDemonSet *app if referenceMonitoringConfiguration != nil { return *referenceMonitoringConfiguration, nil } else { - return selfmonitoring.SelfMonitoringConfiguration{}, nil + return selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration{}, nil } } diff --git a/internal/backendconnection/otelcolresources/otelcol_resources.go b/internal/backendconnection/otelcolresources/otelcol_resources.go index 68aff6f3..9e4bf159 100644 --- a/internal/backendconnection/otelcolresources/otelcol_resources.go +++ b/internal/backendconnection/otelcolresources/otelcol_resources.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" dash0v1alpha1 "github.com/dash0hq/dash0-operator/api/dash0monitoring/v1alpha1" - "github.com/dash0hq/dash0-operator/internal/dash0/selfmonitoring" + "github.com/dash0hq/dash0-operator/internal/dash0/selfmonitoringapiaccess" "github.com/dash0hq/dash0-operator/internal/dash0/util" ) @@ -79,23 +79,23 @@ func (m *OTelColResourceManager) CreateOrUpdateOpenTelemetryCollectorResources( } } - selfMonitoringConfiguration, err := selfmonitoring.ConvertOperatorConfigurationResourceToSelfMonitoringConfiguration( + selfMonitoringConfiguration, err := selfmonitoringapiaccess.ConvertOperatorConfigurationResourceToSelfMonitoringConfiguration( operatorConfigurationResource, logger, ) if err != nil { - selfMonitoringConfiguration = 
selfmonitoring.SelfMonitoringConfiguration{ - Enabled: false, + selfMonitoringConfiguration = selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration{ + SelfMonitoringEnabled: false, } } config := &oTelColConfig{ - Namespace: namespace, - NamePrefix: m.OTelCollectorNamePrefix, - Export: *export, - SelfMonitoringConfiguration: selfMonitoringConfiguration, - Images: images, - DevelopmentMode: m.DevelopmentMode, + Namespace: namespace, + NamePrefix: m.OTelCollectorNamePrefix, + Export: *export, + SelfMonitoringAndApiAccessConfiguration: selfMonitoringConfiguration, + Images: images, + DevelopmentMode: m.DevelopmentMode, } desiredState, err := assembleDesiredState(config, m.OTelColResourceSpecs, false) if err != nil { @@ -218,14 +218,6 @@ func (m *OTelColResourceManager) updateResource( desiredResource client.Object, logger *logr.Logger, ) (bool, error) { - if m.DevelopmentMode { - logger.Info(fmt.Sprintf( - "checking whether resource %s/%s requires update", - desiredResource.GetNamespace(), - desiredResource.GetName(), - )) - } - if err := m.setOwnerReference(desiredResource, logger); err != nil { return false, err } @@ -246,12 +238,6 @@ func (m *OTelColResourceManager) updateResource( } hasChanged := !patchResult.IsEmpty() && !isKnownIrrelevantPatch(patchResult) if !hasChanged { - if m.DevelopmentMode { - logger.Info(fmt.Sprintf("resource %s/%s is already up to date", - desiredResource.GetNamespace(), - desiredResource.GetName(), - )) - } return false, nil } @@ -338,10 +324,10 @@ func (m *OTelColResourceManager) DeleteResources( NamePrefix: m.OTelCollectorNamePrefix, // For deleting the resources, we do not need the actual export settings; we only use assembleDesiredState to // collect the kinds and names of all resources that need to be deleted. 
- Export: dash0v1alpha1.Export{}, - SelfMonitoringConfiguration: selfmonitoring.SelfMonitoringConfiguration{Enabled: false}, - Images: dummyImagesForDeletion, - DevelopmentMode: m.DevelopmentMode, + Export: dash0v1alpha1.Export{}, + SelfMonitoringAndApiAccessConfiguration: selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration{SelfMonitoringEnabled: false}, + Images: dummyImagesForDeletion, + DevelopmentMode: m.DevelopmentMode, } desiredResources, err := assembleDesiredState(config, m.OTelColResourceSpecs, true) if err != nil { diff --git a/internal/backendconnection/otelcolresources/otelcol_resources_test.go b/internal/backendconnection/otelcolresources/otelcol_resources_test.go index d06ebbb3..b672b582 100644 --- a/internal/backendconnection/otelcolresources/otelcol_resources_test.go +++ b/internal/backendconnection/otelcolresources/otelcol_resources_test.go @@ -29,7 +29,7 @@ var ( }, ObjectMeta: metav1.ObjectMeta{ Name: "test-config-map", - Namespace: Dash0OperatorNamespace, + Namespace: OperatorNamespace, Labels: map[string]string{ "label": "value", }, @@ -48,7 +48,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() var dash0MonitoringResource *dash0v1alpha1.Dash0Monitoring BeforeAll(func() { - EnsureDash0OperatorNamespaceExists(ctx, k8sClient) + EnsureOperatorNamespaceExists(ctx, k8sClient) EnsureTestNamespaceExists(ctx, k8sClient) dash0MonitoringResource = EnsureMonitoringResourceExists(ctx, k8sClient) }) @@ -67,13 +67,13 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() AfterEach(func() { Expect(oTelColResourceManager.DeleteResources( ctx, - Dash0OperatorNamespace, + OperatorNamespace, &logger, )).To(Succeed()) Eventually(func(g Gomega) { - VerifyCollectorResourcesDoNotExist(ctx, k8sClient, Dash0OperatorNamespace) + VerifyCollectorResourcesDoNotExist(ctx, k8sClient, OperatorNamespace) }, 500*time.Millisecond, 20*time.Millisecond).Should(Succeed()) - Expect(k8sClient.DeleteAllOf(ctx, 
&corev1.ConfigMap{}, client.InNamespace(Dash0OperatorNamespace))).To(Succeed()) + Expect(k8sClient.DeleteAllOf(ctx, &corev1.ConfigMap{}, client.InNamespace(OperatorNamespace))).To(Succeed()) }) Describe("when dealing with individual resources", func() { @@ -125,7 +125,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() resourcesHaveBeenCreated, resourcesHaveBeenUpdated, err := oTelColResourceManager.CreateOrUpdateOpenTelemetryCollectorResources( ctx, - Dash0OperatorNamespace, + OperatorNamespace, TestImages, dash0MonitoringResource, &logger, @@ -134,7 +134,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() Expect(resourcesHaveBeenCreated).To(BeTrue()) Expect(resourcesHaveBeenUpdated).To(BeFalse()) - VerifyCollectorResources(ctx, k8sClient, Dash0OperatorNamespace) + VerifyCollectorResources(ctx, k8sClient, OperatorNamespace) }) It("should fall back to the operator configuration export settings if the monitoring resource has no export", func() { @@ -144,7 +144,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() ) resourcesHaveBeenCreated, resourcesHaveBeenUpdated, err := oTelColResourceManager.CreateOrUpdateOpenTelemetryCollectorResources( ctx, - Dash0OperatorNamespace, + OperatorNamespace, TestImages, &dash0v1alpha1.Dash0Monitoring{ Spec: dash0v1alpha1.Dash0MonitoringSpec{}, @@ -154,13 +154,13 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() Expect(err).ToNot(HaveOccurred()) Expect(resourcesHaveBeenCreated).To(BeTrue()) Expect(resourcesHaveBeenUpdated).To(BeFalse()) - VerifyCollectorResources(ctx, k8sClient, Dash0OperatorNamespace) + VerifyCollectorResources(ctx, k8sClient, OperatorNamespace) }) It("should fail if the monitoring resource has no export and there is no operator configuration resource", func() { _, _, err := oTelColResourceManager.CreateOrUpdateOpenTelemetryCollectorResources( ctx, - Dash0OperatorNamespace, + 
OperatorNamespace, TestImages, &dash0v1alpha1.Dash0Monitoring{ Spec: dash0v1alpha1.Dash0MonitoringSpec{}, @@ -171,7 +171,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() MatchError( "the provided Dash0Monitoring resource does not have an export configuration and no " + "Dash0OperatorConfiguration resource has been found")) - VerifyCollectorResourcesDoNotExist(ctx, k8sClient, Dash0OperatorNamespace) + VerifyCollectorResourcesDoNotExist(ctx, k8sClient, OperatorNamespace) }) It("should fail if the monitoring resource has no export and the existing operator configuration "+ @@ -183,7 +183,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() ) _, _, err := oTelColResourceManager.CreateOrUpdateOpenTelemetryCollectorResources( ctx, - Dash0OperatorNamespace, + OperatorNamespace, TestImages, &dash0v1alpha1.Dash0Monitoring{ Spec: dash0v1alpha1.Dash0MonitoringSpec{}, @@ -192,7 +192,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() ) Expect(err).To(MatchError("the provided Dash0Monitoring resource does not have an export configuration " + "and the Dash0OperatorConfiguration resource does not have one either")) - VerifyCollectorResourcesDoNotExist(ctx, k8sClient, Dash0OperatorNamespace) + VerifyCollectorResourcesDoNotExist(ctx, k8sClient, OperatorNamespace) }) It("should delete outdated resources from older operator versions", func() { @@ -200,13 +200,13 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() Expect(k8sClient.Create(ctx, &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: nameOfOutdatedResources, - Namespace: Dash0OperatorNamespace, + Namespace: OperatorNamespace, }, })).To(Succeed()) Expect(k8sClient.Create(ctx, &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: nameOfOutdatedResources, - Namespace: Dash0OperatorNamespace, + Namespace: OperatorNamespace, }, Spec: appsv1.DaemonSetSpec{ Selector: &metav1.LabelSelector{ @@ 
-231,7 +231,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() _, _, err := oTelColResourceManager.CreateOrUpdateOpenTelemetryCollectorResources( ctx, - Dash0OperatorNamespace, + OperatorNamespace, TestImages, dash0MonitoringResource, &logger, @@ -241,14 +241,14 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() VerifyResourceDoesNotExist( ctx, k8sClient, - Dash0OperatorNamespace, + OperatorNamespace, nameOfOutdatedResources, &corev1.ConfigMap{}, ) VerifyResourceDoesNotExist( ctx, k8sClient, - Dash0OperatorNamespace, + OperatorNamespace, nameOfOutdatedResources, &appsv1.DaemonSet{}, ) @@ -260,7 +260,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() _, _, err := oTelColResourceManager.CreateOrUpdateOpenTelemetryCollectorResources( ctx, - Dash0OperatorNamespace, + OperatorNamespace, TestImages, dash0MonitoringResource, &logger, @@ -270,12 +270,12 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() // Change some arbitrary fields in some resources, then simulate a reconcile cycle and verify that all // resources are back in their desired state. 
- daemonSetConifgMap := GetOTelColDaemonSetConfigMap(ctx, k8sClient, Dash0OperatorNamespace) + daemonSetConifgMap := GetOTelColDaemonSetConfigMap(ctx, k8sClient, OperatorNamespace) daemonSetConifgMap.Data["config.yaml"] = "{}" daemonSetConifgMap.Data["bogus-key"] = "" Expect(k8sClient.Update(ctx, daemonSetConifgMap)).To(Succeed()) - daemonSet := GetOTelColDaemonSet(ctx, k8sClient, Dash0OperatorNamespace) + daemonSet := GetOTelColDaemonSet(ctx, k8sClient, OperatorNamespace) daemonSet.Spec.Template.Spec.InitContainers = []corev1.Container{} daemonSet.Spec.Template.Spec.Containers[0].Image = "wrong-collector-image:latest" daemonSet.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{ @@ -284,12 +284,12 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() } Expect(k8sClient.Update(ctx, daemonSet)).To(Succeed()) - deploymentConfigMap := GetOTelColDeploymentConfigMap(ctx, k8sClient, Dash0OperatorNamespace) + deploymentConfigMap := GetOTelColDeploymentConfigMap(ctx, k8sClient, OperatorNamespace) deploymentConfigMap.Data["config.yaml"] = "{}" deploymentConfigMap.Data["bogus-key"] = "" Expect(k8sClient.Update(ctx, deploymentConfigMap)).To(Succeed()) - deployment := GetOTelColDeployment(ctx, k8sClient, Dash0OperatorNamespace) + deployment := GetOTelColDeployment(ctx, k8sClient, OperatorNamespace) var changedReplicas int32 = 5 deployment.Spec.Replicas = &changedReplicas deployment.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{ @@ -300,7 +300,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() resourcesHaveBeenCreated, resourcesHaveBeenUpdated, err := oTelColResourceManager.CreateOrUpdateOpenTelemetryCollectorResources( ctx, - Dash0OperatorNamespace, + OperatorNamespace, TestImages, dash0MonitoringResource, &logger, @@ -309,7 +309,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() Expect(resourcesHaveBeenCreated).To(BeFalse()) 
Expect(resourcesHaveBeenUpdated).To(BeTrue()) - VerifyCollectorResources(ctx, k8sClient, Dash0OperatorNamespace) + VerifyCollectorResources(ctx, k8sClient, OperatorNamespace) }) }) @@ -318,7 +318,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() _, _, err := oTelColResourceManager.CreateOrUpdateOpenTelemetryCollectorResources( ctx, - Dash0OperatorNamespace, + OperatorNamespace, TestImages, dash0MonitoringResource, &logger, @@ -328,19 +328,19 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() // Delete some arbitrary resources, then simulate a reconcile cycle and verify that all resources have been // recreated. - daemonSetConifgMap := GetOTelColDaemonSetConfigMap(ctx, k8sClient, Dash0OperatorNamespace) + daemonSetConifgMap := GetOTelColDaemonSetConfigMap(ctx, k8sClient, OperatorNamespace) Expect(k8sClient.Delete(ctx, daemonSetConifgMap)).To(Succeed()) - deploymentConfigMap := GetOTelColDeploymentConfigMap(ctx, k8sClient, Dash0OperatorNamespace) + deploymentConfigMap := GetOTelColDeploymentConfigMap(ctx, k8sClient, OperatorNamespace) Expect(k8sClient.Delete(ctx, deploymentConfigMap)).To(Succeed()) - deployment := GetOTelColDeployment(ctx, k8sClient, Dash0OperatorNamespace) + deployment := GetOTelColDeployment(ctx, k8sClient, OperatorNamespace) Expect(k8sClient.Delete(ctx, deployment)).To(Succeed()) resourcesHaveBeenCreated, _, err := oTelColResourceManager.CreateOrUpdateOpenTelemetryCollectorResources( ctx, - Dash0OperatorNamespace, + OperatorNamespace, TestImages, dash0MonitoringResource, &logger, @@ -348,7 +348,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() Expect(err).ToNot(HaveOccurred()) Expect(resourcesHaveBeenCreated).To(BeTrue()) - VerifyCollectorResources(ctx, k8sClient, Dash0OperatorNamespace) + VerifyCollectorResources(ctx, k8sClient, OperatorNamespace) }) }) @@ -357,7 +357,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", 
Ordered, func() // create resources _, _, err := oTelColResourceManager.CreateOrUpdateOpenTelemetryCollectorResources( ctx, - Dash0OperatorNamespace, + OperatorNamespace, TestImages, dash0MonitoringResource, &logger, @@ -369,7 +369,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() // resources). resourcesHaveBeenCreated, resourcesHaveBeenUpdated, err := oTelColResourceManager.CreateOrUpdateOpenTelemetryCollectorResources( ctx, - Dash0OperatorNamespace, + OperatorNamespace, TestImages, dash0MonitoringResource, &logger, @@ -383,7 +383,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() resourcesHaveBeenCreated, resourcesHaveBeenUpdated, err = oTelColResourceManager.CreateOrUpdateOpenTelemetryCollectorResources( ctx, - Dash0OperatorNamespace, + OperatorNamespace, TestImages, dash0MonitoringResource, &logger, @@ -392,7 +392,7 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() Expect(resourcesHaveBeenCreated).To(BeFalse()) Expect(resourcesHaveBeenUpdated).To(BeFalse()) - VerifyCollectorResources(ctx, k8sClient, Dash0OperatorNamespace) + VerifyCollectorResources(ctx, k8sClient, OperatorNamespace) }) }) @@ -401,23 +401,23 @@ var _ = Describe("The OpenTelemetry Collector resource manager", Ordered, func() // create resources (so there is something to delete) _, _, err := oTelColResourceManager.CreateOrUpdateOpenTelemetryCollectorResources( ctx, - Dash0OperatorNamespace, + OperatorNamespace, TestImages, dash0MonitoringResource, &logger, ) Expect(err).ToNot(HaveOccurred()) - VerifyCollectorResources(ctx, k8sClient, Dash0OperatorNamespace) + VerifyCollectorResources(ctx, k8sClient, OperatorNamespace) // delete everything again err = oTelColResourceManager.DeleteResources( ctx, - Dash0OperatorNamespace, + OperatorNamespace, &logger, ) Expect(err).ToNot(HaveOccurred()) - VerifyCollectorResourcesDoNotExist(ctx, k8sClient, Dash0OperatorNamespace) + 
VerifyCollectorResourcesDoNotExist(ctx, k8sClient, OperatorNamespace) }) }) }) diff --git a/internal/dash0/controller/controller_suite_test.go b/internal/dash0/controller/controller_suite_test.go index 88388df8..2bb8fa82 100644 --- a/internal/dash0/controller/controller_suite_test.go +++ b/internal/dash0/controller/controller_suite_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + persesv1alpha1 "github.com/perses/perses-operator/api/v1alpha1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -28,12 +29,13 @@ import ( ) const ( - timeout = 10 * time.Second + timeout = 5 * time.Second consistentlyTimeout = 2 * time.Second pollingInterval = 50 * time.Millisecond ) var ( + mgr ctrl.Manager cfg *rest.Config k8sClient client.Client clientset *kubernetes.Clientset @@ -65,6 +67,7 @@ var _ = BeforeSuite(func() { Expect(cfg).NotTo(BeNil()) Expect(dash0v1alpha1.AddToScheme(scheme.Scheme)).To(Succeed()) + Expect(persesv1alpha1.AddToScheme(scheme.Scheme)).To(Succeed()) //+kubebuilder:scaffold:scheme @@ -76,7 +79,7 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) Expect(clientset).NotTo(BeNil()) - mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + mgr, err = ctrl.NewManager(cfg, ctrl.Options{ Scheme: scheme.Scheme, }) Expect(err).NotTo(HaveOccurred()) diff --git a/internal/dash0/controller/dash0_controller_test.go b/internal/dash0/controller/dash0_controller_test.go index 87c55b53..96cd48bd 100644 --- a/internal/dash0/controller/dash0_controller_test.go +++ b/internal/dash0/controller/dash0_controller_test.go @@ -36,10 +36,10 @@ import ( var ( namespace = TestNamespaceName extraDash0MonitoringResourceNames []types.NamespacedName - operatorNamespace = Dash0OperatorNamespace + operatorNamespace = OperatorNamespace ) -var _ = Describe("The Dash0 controller", Ordered, func() { +var _ = Describe("The monitoring resource controller", Ordered, func() { ctx := context.Background() var createdObjects []client.Object @@ -47,7 
+47,7 @@ var _ = Describe("The Dash0 controller", Ordered, func() { BeforeAll(func() { EnsureTestNamespaceExists(ctx, k8sClient) - EnsureDash0OperatorNamespaceExists(ctx, k8sClient) + EnsureOperatorNamespaceExists(ctx, k8sClient) }) BeforeEach(func() { @@ -77,7 +77,7 @@ var _ = Describe("The Dash0 controller", Ordered, func() { Clientset: clientset, Instrumenter: instrumenter, Images: TestImages, - OperatorNamespace: Dash0OperatorNamespace, + OperatorNamespace: OperatorNamespace, BackendConnectionManager: backendConnectionManager, DanglingEventsTimeouts: &DanglingEventsTimeoutsTest, } diff --git a/internal/dash0/controller/operator_configuration_controller.go b/internal/dash0/controller/operator_configuration_controller.go index 27c3153f..40bdcf37 100644 --- a/internal/dash0/controller/operator_configuration_controller.go +++ b/internal/dash0/controller/operator_configuration_controller.go @@ -19,23 +19,24 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" dash0v1alpha1 "github.com/dash0hq/dash0-operator/api/dash0monitoring/v1alpha1" - "github.com/dash0hq/dash0-operator/internal/dash0/selfmonitoring" + "github.com/dash0hq/dash0-operator/internal/dash0/selfmonitoringapiaccess" "github.com/dash0hq/dash0-operator/internal/dash0/util" ) type OperatorConfigurationReconciler struct { client.Client - Clientset *kubernetes.Clientset - Scheme *runtime.Scheme - Recorder record.EventRecorder - DeploymentSelfReference *appsv1.Deployment - DanglingEventsTimeouts *util.DanglingEventsTimeouts - Images util.Images - DevelopmentMode bool + Clientset *kubernetes.Clientset + PersesDashboardCrdReconciler *PersesDashboardCrdReconciler + Scheme *runtime.Scheme + Recorder record.EventRecorder + DeploymentSelfReference *appsv1.Deployment + DanglingEventsTimeouts *util.DanglingEventsTimeouts + Images util.Images + DevelopmentMode bool } const ( - ManagerContainerName = "manager" + ControllerContainerName = "manager" updateStatusFailedMessageOperatorConfiguration = "Failed to update Dash0 
operator configuration status " + "conditions, requeuing reconcile request." ) @@ -133,36 +134,17 @@ func (r *OperatorConfigurationReconciler) Reconcile(ctx context.Context, req ctr return ctrl.Result{}, nil } logger.Info("Reconciling the operator configuration resource", "name", req.Name) - } else { - logger.Info("Reconciling the deletion of the operator configuration resource", "name", req.Name) - } - - currentSelfMonitoringConfiguration, err := - selfmonitoring.GetSelfMonitoringConfigurationFromControllerDeployment( - r.DeploymentSelfReference, - ManagerContainerName, - ) - if err != nil { - logger.Error(err, "cannot get self-monitoring configuration from controller deployment") - return ctrl.Result{ - Requeue: true, - }, err } if resourceDeleted { - if currentSelfMonitoringConfiguration.Enabled { - if err = r.applySelfMonitoring(ctx, selfmonitoring.SelfMonitoringConfiguration{ - Enabled: false, - }); err != nil { - logger.Error(err, "cannot disable self-monitoring of the controller deployment, requeuing reconcile request.") - return ctrl.Result{ - Requeue: true, - }, nil - } else { - logger.Info("Self-monitoring of the controller deployment has been disabled") - } + logger.Info("Reconciling the deletion of the operator configuration resource", "name", req.Name) + if err = r.removeSelfMonitoringAndApiAccessAndUpdate(ctx); err != nil { + logger.Error(err, "cannot disable self-monitoring/API access of the controller deployment, requeuing reconcile request.") + return ctrl.Result{ + Requeue: true, + }, nil } else { - logger.Info("Self-monitoring configuration of the controller deployment is already disabled") + logger.Info("Self-monitoring of the controller deployment has been disabled") } return ctrl.Result{}, nil } @@ -178,69 +160,165 @@ func (r *OperatorConfigurationReconciler) Reconcile(ctx context.Context, req ctr return ctrl.Result{}, err } - newSelfMonitoringConfiguration, err := - 
selfmonitoring.ConvertOperatorConfigurationResourceToSelfMonitoringConfiguration(resource, &logger) + if resource.HasDash0ApiAccessConfigured() { + dataset := resource.Spec.Export.Dash0.Dataset + if dataset == "" { + dataset = "default" + } + r.PersesDashboardCrdReconciler.SetApiEndpointAndDataset(&ApiConfig{ + Endpoint: resource.Spec.Export.Dash0.ApiEndpoint, + Dataset: dataset, + }) + } else { + logger.Info("Settings required for managing dashboards via the operator are missing, the operator will not " + + "update dashboards in Dash0.") + r.PersesDashboardCrdReconciler.RemoveApiEndpointAndDataset() + } + + currentSelfMonitoringAndApiAccessConfiguration, err := + selfmonitoringapiaccess.GetSelfMonitoringAndApiAccessConfigurationFromControllerDeployment( + r.DeploymentSelfReference, + ControllerContainerName, + ) if err != nil { - logger.Error(err, "cannot generate self-monitoring configuration from operator configuration resource") + logger.Error(err, "cannot get self-monitoring/API access configuration from controller deployment") return ctrl.Result{ Requeue: true, }, err } - if reflect.DeepEqual(currentSelfMonitoringConfiguration, newSelfMonitoringConfiguration) { - logger.Info("Self-monitoring configuration of the controller deployment is up-to-date") - } else { - if err = r.applySelfMonitoring(ctx, newSelfMonitoringConfiguration); err != nil { - logger.Error(err, "Cannot apply self-monitoring configurations to the controller deployment") - resource.EnsureResourceIsMarkedAsDegraded("CannotApplySelfMonitoring", "Could not update the controller deployment to reflect the self-monitoring settings") - if statusUpdateErr := r.Status().Update(ctx, resource); statusUpdateErr != nil { - logger.Error(statusUpdateErr, "Failed to update Dash0 operator status conditions, requeuing reconcile request.") + newSelfMonitoringAndApiAccessConfiguration, err := + selfmonitoringapiaccess.ConvertOperatorConfigurationResourceToSelfMonitoringConfiguration(resource, &logger) + if err != 
nil { + logger.Error( + err, + "cannot generate self-monitoring/API access configuration from operator configuration resource", + ) + return ctrl.Result{}, err + } + + controllerDeployment := &appsv1.Deployment{} + if err = r.Client.Get(ctx, client.ObjectKeyFromObject(r.DeploymentSelfReference), controllerDeployment); err != nil { + return ctrl.Result{}, fmt.Errorf("cannot fetch the current controller deployment: %w", err) + } + if !reflect.DeepEqual(currentSelfMonitoringAndApiAccessConfiguration, newSelfMonitoringAndApiAccessConfiguration) { + if err = r.applySelfMonitoringAndApiAccess( + controllerDeployment, + newSelfMonitoringAndApiAccessConfiguration, + ); err != nil { + logger.Error(err, "cannot apply self-monitoring configuration to the controller deployment") + if statusUpdateErr := r.markAsDegraded( + ctx, + resource, + "CannotUpdatedControllerDeployment", + "Could not update the controller deployment to reflect the self-monitoring settings.", + &logger, + ); statusUpdateErr != nil { return ctrl.Result{}, statusUpdateErr } - return ctrl.Result{ - Requeue: true, - }, nil + return ctrl.Result{}, err } - logger.Info("Self-monitoring configurations applied to the controller deployment", "self-monitoring", newSelfMonitoringConfiguration) + if err = r.Client.Update(ctx, controllerDeployment); err != nil { + logger.Error(err, "cannot update the controller deployment") + if statusUpdateErr := r.markAsDegraded( + ctx, + resource, + "CannotUpdatedControllerDeployment", + "Could not update the controller deployment.", + &logger, + ); statusUpdateErr != nil { + return ctrl.Result{}, statusUpdateErr + } + return ctrl.Result{}, err + } + logger.Info("The controller deployment has been updated.") + } else { + logger.Info("The controller deployment is up to date.") } resource.EnsureResourceIsMarkedAsAvailable() if err = r.Status().Update(ctx, resource); err != nil { logger.Error(err, updateStatusFailedMessageOperatorConfiguration) - return ctrl.Result{}, 
fmt.Errorf("cannot mark Dash0 operator configuration resource as available: %w", err) + return ctrl.Result{}, fmt.Errorf("cannot mark the Dash0 operator configuration resource as available: %w", err) } return ctrl.Result{}, nil } -func (r *OperatorConfigurationReconciler) applySelfMonitoring( - ctx context.Context, - selfMonitoringConfiguration selfmonitoring.SelfMonitoringConfiguration, +func (r *OperatorConfigurationReconciler) applySelfMonitoringAndApiAccess( + controllerDeployment *appsv1.Deployment, + selfMonitoringAndApiAccessConfiguration selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration, ) error { - updatedDeployment := &appsv1.Deployment{} - if err := r.Client.Get(ctx, client.ObjectKeyFromObject(r.DeploymentSelfReference), updatedDeployment); err != nil { - return fmt.Errorf("cannot fetch the current controller deployment: %w", err) - } - - if selfMonitoringConfiguration.Enabled { - if err := selfmonitoring.EnableSelfMonitoringInControllerDeployment( - updatedDeployment, - ManagerContainerName, - selfMonitoringConfiguration, + if selfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled { + if err := selfmonitoringapiaccess.EnableSelfMonitoringInControllerDeployment( + controllerDeployment, + ControllerContainerName, + selfMonitoringAndApiAccessConfiguration, r.Images.GetOperatorVersion(), r.DevelopmentMode, ); err != nil { return fmt.Errorf("cannot apply settings to enable self-monitoring to the controller deployment: %w", err) } - } else { - if err := selfmonitoring.DisableSelfMonitoringInControllerDeployment( - updatedDeployment, - ManagerContainerName, + } else if selfMonitoringAndApiAccessConfiguration.HasDash0ApiAccessConfigured() { + if err := selfmonitoringapiaccess.UpdateApiTokenWithoutAddingSelfMonitoringToControllerDeployment( + controllerDeployment, + ControllerContainerName, + selfMonitoringAndApiAccessConfiguration.GetDash0Authorization(), + ); err != nil { + return fmt.Errorf("cannot add the Dash0 API token to the 
controller deployment: %w", err) + } + if err := selfmonitoringapiaccess.DisableSelfMonitoringInControllerDeployment( + controllerDeployment, + ControllerContainerName, + false, ); err != nil { return fmt.Errorf("cannot apply settings to disable self-monitoring to the controller deployment: %w", err) } + } else { + if err := selfmonitoringapiaccess.DisableSelfMonitoringInControllerDeployment( + controllerDeployment, + ControllerContainerName, + true, + ); err != nil { + return fmt.Errorf("cannot apply settings to the controller deployment to disable self-monitoring and API access: %w", err) + } + } + + return nil +} + +func (r *OperatorConfigurationReconciler) removeSelfMonitoringAndApiAccessAndUpdate(ctx context.Context) error { + updatedDeployment := &appsv1.Deployment{} + if err := r.Client.Get(ctx, client.ObjectKeyFromObject(r.DeploymentSelfReference), updatedDeployment); err != nil { + return fmt.Errorf("cannot fetch the current controller deployment: %w", err) + } + + if err := selfmonitoringapiaccess.DisableSelfMonitoringInControllerDeployment( + updatedDeployment, + ControllerContainerName, + true, + ); err != nil { + return fmt.Errorf("cannot apply settings to disable self-monitoring to the controller deployment: %w", err) } return r.Client.Update(ctx, updatedDeployment) } + +func (r *OperatorConfigurationReconciler) markAsDegraded( + ctx context.Context, + resource *dash0v1alpha1.Dash0OperatorConfiguration, + reason string, + message string, + logger *logr.Logger, +) error { + resource.EnsureResourceIsMarkedAsDegraded( + reason, + message, + ) + if err := r.Status().Update(ctx, resource); err != nil { + logger.Error(err, "Failed to update Dash0 operator status conditions, requeuing reconcile request.") + return err + } + return nil +} diff --git a/internal/dash0/controller/operator_configuration_controller_test.go b/internal/dash0/controller/operator_configuration_controller_test.go index 47487607..9454b723 100644 --- 
a/internal/dash0/controller/operator_configuration_controller_test.go +++ b/internal/dash0/controller/operator_configuration_controller_test.go @@ -5,16 +5,19 @@ package controller import ( "context" + "fmt" + "reflect" + json "github.com/json-iterator/go" + "github.com/wI2L/jsondiff" appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/reconcile" dash0v1alpha1 "github.com/dash0hq/dash0-operator/api/dash0monitoring/v1alpha1" - "github.com/dash0hq/dash0-operator/internal/dash0/selfmonitoring" + "github.com/dash0hq/dash0-operator/internal/dash0/selfmonitoringapiaccess" "github.com/dash0hq/dash0-operator/internal/dash0/util" . "github.com/onsi/ginkgo/v2" @@ -23,30 +26,335 @@ import ( . "github.com/dash0hq/dash0-operator/test/util" ) +type SelfMonitoringAndApiAccessTestConfig struct { + existingControllerDeployment func() *appsv1.Deployment + operatorConfigurationResourceSpec dash0v1alpha1.Dash0OperatorConfigurationSpec + expectedControllerDeploymentAfterReconcile func() *appsv1.Deployment + expectK8sClientUpdate bool +} + type SelfMonitoringTestConfig struct { createExport func() dash0v1alpha1.Export - verify func(Gomega, selfmonitoring.SelfMonitoringConfiguration) + verify func(Gomega, selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration, *appsv1.Deployment) } var ( reconciler *OperatorConfigurationReconciler ) -var _ = Describe("The Dash0 controller", Ordered, func() { +var _ = Describe("The operation configuration resource controller", Ordered, func() { ctx := context.Background() var controllerDeployment *appsv1.Deployment BeforeAll(func() { EnsureTestNamespaceExists(ctx, k8sClient) - EnsureDash0OperatorNamespaceExists(ctx, k8sClient) + EnsureOperatorNamespaceExists(ctx, k8sClient) + }) + + Describe("updates the controller deployment", func() { + AfterEach(func() { + 
RemoveOperatorConfigurationResource(ctx, k8sClient) + EnsureControllerDeploymentDoesNotExist(ctx, k8sClient, controllerDeployment) + }) + + DescribeTable("to reflect self-monitoring and API access auth settings:", func(config SelfMonitoringAndApiAccessTestConfig) { + controllerDeployment = config.existingControllerDeployment() + EnsureControllerDeploymentExists(ctx, k8sClient, controllerDeployment) + reconciler = createReconciler(controllerDeployment) + + initialVersion := controllerDeployment.ResourceVersion + + CreateOperatorConfigurationResourceWithSpec( + ctx, + k8sClient, + config.operatorConfigurationResourceSpec, + ) + + triggerOperatorConfigurationReconcileRequest(ctx, reconciler) + verifyOperatorConfigurationResourceIsAvailable(ctx) + + expectedDeploymentAfterReconcile := config.expectedControllerDeploymentAfterReconcile() + gomegaTimeout := timeout + gomgaWrapper := Eventually + if !config.expectK8sClientUpdate { + // For test cases where the initial controller deployment is already in the expected state (that is, it + // matches what the operator configuration resource says), we use gomega's Consistently instead of + // Eventually to make the test meaningful. We need to make sure that we do not update the controller + // deployment at all for these cases. 
+ gomgaWrapper = Consistently + gomegaTimeout = consistentlyTimeout + } + + gomgaWrapper(func(g Gomega) { + actualDeploymentAfterReconcile := LoadOperatorDeploymentOrFail(ctx, k8sClient, g) + + expectedSpec := expectedDeploymentAfterReconcile.Spec + actualSpec := actualDeploymentAfterReconcile.Spec + + for _, spec := range []*appsv1.DeploymentSpec{&expectedSpec, &actualSpec} { + // clean up defaults set by K8s, which are not relevant for the test + cleanUpDeploymentSpecForDiff(spec) + } + + matchesExpectations := reflect.DeepEqual(expectedSpec, actualSpec) + if !matchesExpectations { + patch, err := jsondiff.Compare(expectedSpec, actualSpec) + g.Expect(err).ToNot(HaveOccurred()) + humanReadableDiff, err := json.MarshalIndent(patch, "", " ") + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(matchesExpectations).To( + BeTrue(), + fmt.Sprintf("resulting deployment does not match expectations, here is a JSON patch of the differences:\n%s", humanReadableDiff), + ) + } + + if !config.expectK8sClientUpdate { + // make sure we did not execute an unnecessary update + g.Expect(actualDeploymentAfterReconcile.ResourceVersion).To(Equal(initialVersion)) + } else { + g.Expect(actualDeploymentAfterReconcile.ResourceVersion).ToNot(Equal(initialVersion)) + } + + }, gomegaTimeout, pollingInterval).Should(Succeed()) + }, + + // | previous deployment state | operator config res | expected deployment afterwards | + // |---------------------------------------|---------------------|----------------------------------| + // | no self-monitoring, no authorization | no sm, no auth | no sm, no auth | + // | no self-monitoring, no authorization | no sm, token | no sm, auth via token | + // | no self-monitoring, no authorization | no sm, secret-ref | no sm, auth via secret-ref | + // | no self-monitoring, no authorization | with sm, token | has sm, auth via token | + // | no self-monitoring, no authorization | with sm, secret-ref | has sm, auth via secret-ref | + // | 
--------------------------------------|---------------------|----------------------------------| + // | no self-monitoring, but token         | no sm, no auth      | no sm, no auth                   | + // | no self-monitoring, but secret-ref    | no sm, no auth      | no sm, no auth                   | + // | no self-monitoring, but token         | no sm, token        | no sm, auth via token            | + // | no self-monitoring, but token         | no sm, secret-ref   | no sm, auth via secret-ref       | + // | no self-monitoring, but secret-ref    | no sm, token        | no sm, auth via token            | + // | no self-monitoring, but secret-ref    | no sm, secret-ref   | no sm, auth via secret-ref       | + // | no self-monitoring, but token         | with sm, token      | has sm, auth via token           | + // | no self-monitoring, but token         | with sm, secret-ref | has sm, auth via secret-ref      | + // | no self-monitoring, but secret-ref    | with sm, token      | has sm, auth via token           | + // | no self-monitoring, but secret-ref    | with sm, secret-ref | has sm, auth via secret-ref      | + // | --------------------------------------|---------------------|----------------------------------| + // | self-monitoring with token            | no sm, no auth      | no sm, no auth                   | + // | self-monitoring with secret-ref       | no sm, no auth      | no sm, no auth                   | + // | self-monitoring with token            | no sm, token        | no sm, auth via token            | + // | self-monitoring with token            | no sm, secret-ref   | no sm, auth via secret-ref       | + // | self-monitoring with secret-ref       | no sm, token        | no sm, auth via token            | + // | self-monitoring with secret-ref       | no sm, secret-ref   | no sm, auth via secret-ref       | + // | self-monitoring with token            | with sm, token      | has sm, auth via token           | + // | self-monitoring with token            | with sm, secret-ref | has sm, auth via secret-ref      | + // | self-monitoring with secret-ref       | with sm, token      | has sm, auth via token           | + // | self-monitoring with secret-ref       | with sm, secret-ref | has sm, auth via secret-ref      | + // | --------------------------------------|---------------------|----------------------------------| + + // 
|---------------------------------------|---------------------|----------------------------------| + // | no self-monitoring, no authorization | no sm, no auth | no sm, no auth | + Entry("no self-monitoring, no auth -> no self-monitoring, no auth", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithoutSelfMonitoringWithoutAuth, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithoutSelfMonitoringWithoutAuth, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithoutSelfMonitoringWithoutAuth, + expectK8sClientUpdate: false, + }), + // | no self-monitoring, no authorization | no sm, token | no sm, auth via token | + Entry("no self-monitoring, no auth -> no self-monitoring, token", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithoutSelfMonitoringWithoutAuth, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithoutSelfMonitoringWithToken, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithoutSelfMonitoringWithToken, + expectK8sClientUpdate: true, + }), + // | no self-monitoring, no authorization | no sm, secret-ref | no sm, auth via secret-ref | + Entry("no self-monitoring, no auth -> no self-monitoring, secret-ref", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithoutSelfMonitoringWithoutAuth, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithoutSelfMonitoringWithSecretRef, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithoutSelfMonitoringWithSecretRef, + expectK8sClientUpdate: true, + }), + // | no self-monitoring, no authorization | with sm, token | has sm, auth via token | + Entry("no self-monitoring, no auth -> self-monitoring, token", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithoutSelfMonitoringWithoutAuth, + 
operatorConfigurationResourceSpec: OperatorConfigurationResourceWithSelfMonitoringWithToken, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithSelfMonitoringWithToken, + expectK8sClientUpdate: true, + }), + // | no self-monitoring, no authorization | with sm, secret-ref | has sm, auth via secret-ref | + Entry("no self-monitoring, no auth -> self-monitoring, secret-ref", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithoutSelfMonitoringWithoutAuth, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithSelfMonitoringWithSecretRef, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithSelfMonitoringWithSecretRef, + expectK8sClientUpdate: true, + }), + + // | --------------------------------------|---------------------|----------------------------------| + // | no self-monitoring, but token | no sm, no auth | no sm, no auth | + Entry("no self-monitoring, token -> no self-monitoring, no auth", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithoutSelfMonitoringWithToken, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithoutSelfMonitoringWithoutAuth, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithoutSelfMonitoringWithoutAuth, + expectK8sClientUpdate: true, + }), + // | no self-monitoring, but secret-ref | no sm, no auth | no sm, no auth | + Entry("no self-monitoring, secret-ref -> no self-monitoring, no auth", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithoutSelfMonitoringWithSecretRef, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithoutSelfMonitoringWithoutAuth, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithoutSelfMonitoringWithoutAuth, + expectK8sClientUpdate: true, + }), + // | no self-monitoring, but token | no sm, token | no sm, auth via token | + 
Entry("no self-monitoring, token -> no self-monitoring, token", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithoutSelfMonitoringWithToken, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithoutSelfMonitoringWithToken, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithoutSelfMonitoringWithToken, + expectK8sClientUpdate: false, + }), + + // | no self-monitoring, but token | no sm, secret-ref | no sm, auth via secret-ref | + Entry("no self-monitoring, token -> no self-monitoring, secret-ref", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithoutSelfMonitoringWithToken, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithoutSelfMonitoringWithSecretRef, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithoutSelfMonitoringWithSecretRef, + expectK8sClientUpdate: true, + }), + + // | no self-monitoring, but secret-ref | no sm, token | no sm, auth via token | + Entry("no self-monitoring, secret-ref -> no self-monitoring, token", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithoutSelfMonitoringWithSecretRef, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithoutSelfMonitoringWithToken, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithoutSelfMonitoringWithToken, + expectK8sClientUpdate: true, + }), + + // | no self-monitoring, but secret-ref | no sm, secret-ref | no sm, auth via secret-ref | + Entry("no self-monitoring, secret-ref -> no self-monitoring, secret-ref", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithoutSelfMonitoringWithSecretRef, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithoutSelfMonitoringWithSecretRef, + expectedControllerDeploymentAfterReconcile: 
CreateControllerDeploymentWithoutSelfMonitoringWithSecretRef, + expectK8sClientUpdate:                      false, + }), + + // | no self-monitoring, but token         | with sm, token      | has sm, auth via token           | + Entry("no self-monitoring, token -> self-monitoring, token", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment:               CreateControllerDeploymentWithoutSelfMonitoringWithToken, + operatorConfigurationResourceSpec:          OperatorConfigurationResourceWithSelfMonitoringWithToken, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithSelfMonitoringWithToken, + expectK8sClientUpdate:                      true, + }), + + // | no self-monitoring, but token         | with sm, secret-ref | has sm, auth via secret-ref      | + Entry("no self-monitoring, token -> self-monitoring, secret-ref", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment:               CreateControllerDeploymentWithoutSelfMonitoringWithToken, + operatorConfigurationResourceSpec:          OperatorConfigurationResourceWithSelfMonitoringWithSecretRef, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithSelfMonitoringWithSecretRef, + expectK8sClientUpdate:                      true, + }), + + // | no self-monitoring, but secret-ref    | with sm, token      | has sm, auth via token           | + Entry("no self-monitoring, secret-ref -> self-monitoring, token", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment:               CreateControllerDeploymentWithoutSelfMonitoringWithSecretRef, + operatorConfigurationResourceSpec:          OperatorConfigurationResourceWithSelfMonitoringWithToken, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithSelfMonitoringWithToken, + expectK8sClientUpdate:                      true, + }), + + // | no self-monitoring, but secret-ref    | with sm, secret-ref | has sm, auth via secret-ref      | + Entry("no self-monitoring, secret-ref -> self-monitoring, secret-ref", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment:               CreateControllerDeploymentWithoutSelfMonitoringWithSecretRef, + operatorConfigurationResourceSpec:          
OperatorConfigurationResourceWithSelfMonitoringWithSecretRef, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithSelfMonitoringWithSecretRef, + expectK8sClientUpdate: true, + }), + + // | --------------------------------------|---------------------|----------------------------------| + // | self-monitoring with token | no sm, no auth | no sm, no auth | + Entry("self-monitoring, token -> no self-monitoring, no auth", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithSelfMonitoringWithToken, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithoutSelfMonitoringWithoutAuth, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithoutSelfMonitoringWithoutAuth, + expectK8sClientUpdate: true, + }), + + // | self-monitoring with secret-ref | no sm, no auth | no sm, no auth | + Entry("self-monitoring, secret-ref -> no self-monitoring, no auth", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithSelfMonitoringWithSecretRef, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithoutSelfMonitoringWithoutAuth, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithoutSelfMonitoringWithoutAuth, + expectK8sClientUpdate: true, + }), + + // | self-monitoring with token | no sm, token | no sm, auth via token | + Entry("self-monitoring, token -> no self-monitoring, token", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithSelfMonitoringWithToken, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithoutSelfMonitoringWithToken, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithoutSelfMonitoringWithToken, + expectK8sClientUpdate: true, + }), + + // | self-monitoring with token | no sm, secret-ref | no sm, auth via secret-ref | + Entry("self-monitoring, token -> no self-monitoring, secret-ref", 
SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithSelfMonitoringWithToken, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithoutSelfMonitoringWithSecretRef, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithoutSelfMonitoringWithSecretRef, + expectK8sClientUpdate: true, + }), + + // | self-monitoring with secret-ref | no sm, token | no sm, auth via token | + Entry("self-monitoring, secret-ref -> no self-monitoring, token", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithSelfMonitoringWithSecretRef, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithoutSelfMonitoringWithToken, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithoutSelfMonitoringWithToken, + expectK8sClientUpdate: true, + }), + + // | self-monitoring with secret-ref | no sm, secret-ref | no sm, auth via secret-ref | + Entry("self-monitoring, secret-ref -> no self-monitoring, secret-ref", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithSelfMonitoringWithSecretRef, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithoutSelfMonitoringWithSecretRef, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithoutSelfMonitoringWithSecretRef, + expectK8sClientUpdate: true, + }), + + // | self-monitoring with token | with sm, token | has sm, auth via token | + Entry("self-monitoring, token -> self-monitoring, token", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithSelfMonitoringWithToken, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithSelfMonitoringWithToken, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithSelfMonitoringWithToken, + expectK8sClientUpdate: false, + }), + + // | self-monitoring with token | with sm, secret-ref | has sm, 
auth via secret-ref | + Entry("self-monitoring, token -> self-monitoring, secret-ref", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithSelfMonitoringWithToken, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithSelfMonitoringWithSecretRef, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithSelfMonitoringWithSecretRef, + expectK8sClientUpdate: true, + }), + + // | self-monitoring with secret-ref | with sm, token | has sm, auth via token | + Entry("self-monitoring, secret-ref -> self-monitoring, token", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithSelfMonitoringWithSecretRef, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithSelfMonitoringWithToken, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithSelfMonitoringWithToken, + expectK8sClientUpdate: true, + }), + + // | self-monitoring with secret-ref | with sm, secret-ref | has sm, auth via secret-ref | + Entry("self-monitoring, secret-ref -> self-monitoring, secret-ref", SelfMonitoringAndApiAccessTestConfig{ + existingControllerDeployment: CreateControllerDeploymentWithSelfMonitoringWithSecretRef, + operatorConfigurationResourceSpec: OperatorConfigurationResourceWithSelfMonitoringWithSecretRef, + expectedControllerDeploymentAfterReconcile: CreateControllerDeploymentWithSelfMonitoringWithSecretRef, + expectK8sClientUpdate: false, + }), + ) }) - Describe("when creating the Dash0Operator resource", func() { + Describe("when creating the operator configuration resource", func() { BeforeEach(func() { // When creating the resource, we assume the operator has no // self-monitoring enabled - controllerDeployment = controllerDeploymentWithoutSelfMonitoring() + controllerDeployment = CreateControllerDeploymentWithoutSelfMonitoringWithoutAuth() EnsureControllerDeploymentExists(ctx, k8sClient, controllerDeployment) reconciler = 
createReconciler(controllerDeployment) }) @@ -75,14 +383,14 @@ var _ = Describe("The Dash0 controller", Ordered, func() { verifyOperatorConfigurationResourceIsAvailable(ctx) Eventually(func(g Gomega) { updatedDeployment := LoadOperatorDeploymentOrFail(ctx, k8sClient, g) - selfMonitoringConfiguration, err := - selfmonitoring.GetSelfMonitoringConfigurationFromControllerDeployment( + selfMonitoringAndApiAccessConfiguration, err := + selfmonitoringapiaccess.GetSelfMonitoringAndApiAccessConfigurationFromControllerDeployment( updatedDeployment, - ManagerContainerName, + ControllerContainerName, ) - Expect(err).NotTo(HaveOccurred()) - Expect(selfMonitoringConfiguration.Enabled).To(BeTrue()) - config.verify(g, selfMonitoringConfiguration) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(selfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled).To(BeTrue()) + config.verify(g, selfMonitoringAndApiAccessConfiguration, updatedDeployment) }, timeout, pollingInterval).Should(Succeed()) }, Entry("with a Dash0 export with a token", SelfMonitoringTestConfig{ @@ -104,9 +412,46 @@ var _ = Describe("The Dash0 controller", Ordered, func() { ) }) + DescribeTable("it adds the auth token to the controller deployment even if self-monitoring is not enabled", + func(config SelfMonitoringTestConfig) { + CreateOperatorConfigurationResourceWithSpec( + ctx, + k8sClient, + dash0v1alpha1.Dash0OperatorConfigurationSpec{ + Export: ExportToPrt(config.createExport()), + SelfMonitoring: dash0v1alpha1.SelfMonitoring{ + Enabled: false, + }, + }, + ) + + triggerOperatorConfigurationReconcileRequest(ctx, reconciler) + verifyOperatorConfigurationResourceIsAvailable(ctx) + Eventually(func(g Gomega) { + updatedDeployment := LoadOperatorDeploymentOrFail(ctx, k8sClient, g) + selfMonitoringAndApiAccessConfiguration, err := + selfmonitoringapiaccess.GetSelfMonitoringAndApiAccessConfigurationFromControllerDeployment( + updatedDeployment, + ControllerContainerName, + ) + g.Expect(err).NotTo(HaveOccurred()) + 
g.Expect(selfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled).To(BeFalse()) + config.verify(g, selfMonitoringAndApiAccessConfiguration, updatedDeployment) + }, timeout, pollingInterval).Should(Succeed()) + }, + Entry("with a Dash0 export with a token", SelfMonitoringTestConfig{ + createExport: Dash0ExportWithEndpointAndTokenAndApiEndpoint, + verify: verifyNoSelfMontoringButAuthTokenEnvVarFromToken, + }), + Entry("with a Dash0 export with a secret ref", SelfMonitoringTestConfig{ + createExport: Dash0ExportWithEndpointAndSecretRefAndApiEndpoint, + verify: verifyNoSelfMonitoringButAuthTokenEnvVarFromSecretRef, + }), + ) + Describe("disabling self-monitoring", func() { - It("it does not change the controller deployment", func() { + It("it does not change the controller deployment (because self-monitoring was not enabled in the first place)", func() { CreateOperatorConfigurationResourceWithSpec( ctx, k8sClient, @@ -122,19 +467,19 @@ var _ = Describe("The Dash0 controller", Ordered, func() { verifyOperatorConfigurationResourceIsAvailable(ctx) Consistently(func(g Gomega) { updatedDeployment := LoadOperatorDeploymentOrFail(ctx, k8sClient, g) - selfMonitoringConfiguration, err := - selfmonitoring.GetSelfMonitoringConfigurationFromControllerDeployment( + selfMonitoringAndApiAccessConfiguration, err := + selfmonitoringapiaccess.GetSelfMonitoringAndApiAccessConfigurationFromControllerDeployment( updatedDeployment, - ManagerContainerName, + ControllerContainerName, ) - Expect(err).NotTo(HaveOccurred()) - Expect(selfMonitoringConfiguration.Enabled).To(BeFalse()) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(selfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled).To(BeFalse()) }, consistentlyTimeout, pollingInterval).Should(Succeed()) }) }) }) - Describe("when updating the Dash0Operator resource", func() { + Describe("when updating the operator configuration resource", func() { Describe("enabling self-monitoring", func() { @@ -143,7 +488,7 @@ var _ = 
Describe("The Dash0 controller", Ordered, func() { BeforeEach(func() { // When creating the resource, we assume the operator has // self-monitoring enabled - controllerDeployment = controllerDeploymentWithSelfMonitoring() + controllerDeployment = CreateControllerDeploymentWithSelfMonitoringWithToken() EnsureControllerDeploymentExists(ctx, k8sClient, controllerDeployment) reconciler = createReconciler(controllerDeployment) }) @@ -170,21 +515,21 @@ var _ = Describe("The Dash0 controller", Ordered, func() { Consistently(func(g Gomega) { updatedDeployment := LoadOperatorDeploymentOrFail(ctx, k8sClient, g) - selfMonitoringConfiguration, err := - selfmonitoring.GetSelfMonitoringConfigurationFromControllerDeployment( + selfMonitoringAndApiAccessConfiguration, err := + selfmonitoringapiaccess.GetSelfMonitoringAndApiAccessConfigurationFromControllerDeployment( updatedDeployment, - ManagerContainerName, + ControllerContainerName, ) - Expect(err).NotTo(HaveOccurred()) - Expect(selfMonitoringConfiguration.Enabled).To(BeTrue()) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(selfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled).To(BeTrue()) }, consistentlyTimeout, pollingInterval).Should(Succeed()) }) }) - Describe("when self-monitoring is disabled", func() { + Describe("when self-monitoring was previously disabled", func() { BeforeEach(func() { - controllerDeployment = controllerDeploymentWithoutSelfMonitoring() + controllerDeployment = CreateControllerDeploymentWithoutSelfMonitoringWithoutAuth() EnsureControllerDeploymentExists(ctx, k8sClient, controllerDeployment) reconciler = createReconciler(controllerDeployment) @@ -217,13 +562,13 @@ var _ = Describe("The Dash0 controller", Ordered, func() { verifyOperatorConfigurationResourceIsAvailable(ctx) Eventually(func(g Gomega) { updatedDeployment := LoadOperatorDeploymentOrFail(ctx, k8sClient, g) - selfMonitoringConfiguration, err := - selfmonitoring.GetSelfMonitoringConfigurationFromControllerDeployment( + 
selfMonitoringAndApiAccessConfiguration, err := + selfmonitoringapiaccess.GetSelfMonitoringAndApiAccessConfigurationFromControllerDeployment( updatedDeployment, - ManagerContainerName, + ControllerContainerName, ) - Expect(err).NotTo(HaveOccurred()) - Expect(selfMonitoringConfiguration.Enabled).To(BeTrue()) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(selfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled).To(BeTrue()) }, timeout, pollingInterval).Should(Succeed()) }) }) @@ -245,7 +590,7 @@ var _ = Describe("The Dash0 controller", Ordered, func() { }, ) - controllerDeployment = controllerDeploymentWithSelfMonitoring() + controllerDeployment = CreateControllerDeploymentWithSelfMonitoringWithToken() EnsureControllerDeploymentExists(ctx, k8sClient, controllerDeployment) reconciler = createReconciler(controllerDeployment) }) @@ -267,13 +612,13 @@ var _ = Describe("The Dash0 controller", Ordered, func() { verifyOperatorConfigurationResourceIsAvailable(ctx) Eventually(func(g Gomega) { updatedDeployment := LoadOperatorDeploymentOrFail(ctx, k8sClient, g) - selfMonitoringConfiguration, err := - selfmonitoring.GetSelfMonitoringConfigurationFromControllerDeployment( + selfMonitoringAndApiAccessConfiguration, err := + selfmonitoringapiaccess.GetSelfMonitoringAndApiAccessConfigurationFromControllerDeployment( updatedDeployment, - ManagerContainerName, + ControllerContainerName, ) - Expect(err).NotTo(HaveOccurred()) - Expect(selfMonitoringConfiguration.Enabled).To(BeFalse()) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(selfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled).To(BeFalse()) }, timeout, pollingInterval).Should(Succeed()) }) }) @@ -292,7 +637,7 @@ var _ = Describe("The Dash0 controller", Ordered, func() { }, ) - controllerDeployment = controllerDeploymentWithoutSelfMonitoring() + controllerDeployment = CreateControllerDeploymentWithoutSelfMonitoringWithoutAuth() EnsureControllerDeploymentExists(ctx, k8sClient, controllerDeployment) reconciler = 
createReconciler(controllerDeployment) }) @@ -314,20 +659,20 @@ var _ = Describe("The Dash0 controller", Ordered, func() { verifyOperatorConfigurationResourceIsAvailable(ctx) Consistently(func(g Gomega) { updatedDeployment := LoadOperatorDeploymentOrFail(ctx, k8sClient, g) - selfMonitoringConfiguration, err := - selfmonitoring.GetSelfMonitoringConfigurationFromControllerDeployment( + selfMonitoringAndApiAccessConfiguration, err := + selfmonitoringapiaccess.GetSelfMonitoringAndApiAccessConfigurationFromControllerDeployment( updatedDeployment, - ManagerContainerName, + ControllerContainerName, ) - Expect(err).NotTo(HaveOccurred()) - Expect(selfMonitoringConfiguration.Enabled).To(BeFalse()) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(selfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled).To(BeFalse()) }, consistentlyTimeout, pollingInterval).Should(Succeed()) }) }) }) }) - Describe("when deleting the Dash0Operator resource", func() { + Describe("when deleting the operator configuration resource", func() { Describe("when self-monitoring is enabled", func() { @@ -343,7 +688,7 @@ var _ = Describe("The Dash0 controller", Ordered, func() { }, ) - controllerDeployment = controllerDeploymentWithSelfMonitoring() + controllerDeployment = CreateControllerDeploymentWithSelfMonitoringWithToken() EnsureControllerDeploymentExists(ctx, k8sClient, controllerDeployment) reconciler = createReconciler(controllerDeployment) }) @@ -354,13 +699,13 @@ var _ = Describe("The Dash0 controller", Ordered, func() { }) It("it disables self-monitoring in the controller deployment", func() { - selfMonitoringConfiguration, err := - selfmonitoring.GetSelfMonitoringConfigurationFromControllerDeployment( + selfMonitoringAndApiAccessConfiguration, err := + selfmonitoringapiaccess.GetSelfMonitoringAndApiAccessConfigurationFromControllerDeployment( controllerDeployment, - ManagerContainerName, + ControllerContainerName, ) Expect(err).NotTo(HaveOccurred()) - 
Expect(selfMonitoringConfiguration.Enabled).To(BeTrue()) + Expect(selfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled).To(BeTrue()) resource := LoadOperatorConfigurationResourceOrFail(ctx, k8sClient, Default) Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) @@ -370,13 +715,13 @@ var _ = Describe("The Dash0 controller", Ordered, func() { Eventually(func(g Gomega) { updatedDeployment := LoadOperatorDeploymentOrFail(ctx, k8sClient, g) - selfMonitoringConfiguration, err := - selfmonitoring.GetSelfMonitoringConfigurationFromControllerDeployment( + selfMonitoringAndApiAccessConfiguration, err := + selfmonitoringapiaccess.GetSelfMonitoringAndApiAccessConfigurationFromControllerDeployment( updatedDeployment, - ManagerContainerName, + ControllerContainerName, ) - Expect(err).NotTo(HaveOccurred()) - Expect(selfMonitoringConfiguration.Enabled).To(BeFalse()) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(selfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled).To(BeFalse()) }, timeout, pollingInterval).Should(Succeed()) }) }) @@ -394,7 +739,7 @@ var _ = Describe("The Dash0 controller", Ordered, func() { }, ) - controllerDeployment = controllerDeploymentWithoutSelfMonitoring() + controllerDeployment = CreateControllerDeploymentWithoutSelfMonitoringWithoutAuth() EnsureControllerDeploymentExists(ctx, k8sClient, controllerDeployment) reconciler = createReconciler(controllerDeployment) }) @@ -405,13 +750,13 @@ var _ = Describe("The Dash0 controller", Ordered, func() { }) It("it does not change the controller deployment", func() { - selfMonitoringConfiguration, err := - selfmonitoring.GetSelfMonitoringConfigurationFromControllerDeployment( + selfMonitoringAndApiAccessConfiguration, err := + selfmonitoringapiaccess.GetSelfMonitoringAndApiAccessConfigurationFromControllerDeployment( controllerDeployment, - ManagerContainerName, + ControllerContainerName, ) Expect(err).NotTo(HaveOccurred()) - Expect(selfMonitoringConfiguration.Enabled).To(BeFalse()) + 
Expect(selfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled).To(BeFalse()) resource := LoadOperatorConfigurationResourceOrFail(ctx, k8sClient, Default) Expect(resource.Spec.SelfMonitoring.Enabled).To(BeFalse()) @@ -423,367 +768,46 @@ var _ = Describe("The Dash0 controller", Ordered, func() { Consistently(func(g Gomega) { updatedDeployment := LoadOperatorDeploymentOrFail(ctx, k8sClient, g) - selfMonitoringConfiguration, err := - selfmonitoring.GetSelfMonitoringConfigurationFromControllerDeployment( + selfMonitoringAndApiAccessConfiguration, err := + selfmonitoringapiaccess.GetSelfMonitoringAndApiAccessConfigurationFromControllerDeployment( updatedDeployment, - ManagerContainerName, + ControllerContainerName, ) - Expect(err).NotTo(HaveOccurred()) - Expect(selfMonitoringConfiguration.Enabled).To(BeFalse()) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(selfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled).To(BeFalse()) }, consistentlyTimeout, pollingInterval).Should(Succeed()) }) }) }) }) -func controllerDeploymentWithoutSelfMonitoring() *appsv1.Deployment { - replicaCount := int32(2) - falsy := false - truthy := true - terminationGracePeriodSeconds := int64(10) - secretMode := corev1.SecretVolumeSourceDefaultMode - - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: Dash0OperatorDeploymentName, - Namespace: Dash0OperatorNamespace, - Labels: map[string]string{ - "app.kubernetes.io/name": "dash0monitoring-operator", - "app.kubernetes.io/component": "controller", - "app.kubernetes.io/instance": "deployment", - "dash0.com/enable": "false", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicaCount, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app.kubernetes.io/name": "dash0monitoring-operator", - "app.kubernetes.io/component": "controller", - }, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "kubectl.kubernetes.io/default-container": 
"manager", - }, - Labels: map[string]string{ - "app.kubernetes.io/name": "dash0monitoring-operator", - "app.kubernetes.io/component": "controller", - "dash0.cert-digest": "1234567890", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "manager", - Image: "ghcr.io/dash0hq/operator-controller@latest", - Command: []string{"/manager"}, - Args: []string{ - "--health-probe-bind-address=:8081", - "--metrics-bind-address=127.0.0.1:8080", - "--leader-elect", - }, - Env: []corev1.EnvVar{ - { - Name: "DASH0_OPERATOR_NAMESPACE", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.namespace", - }, - }, - }, - { - Name: "DASH0_DEPLOYMENT_NAME", - Value: Dash0OperatorDeploymentName, - }, - { - Name: "OTEL_COLLECTOR_NAME_PREFIX", - Value: "dash0monitoring-system", - }, - { - Name: "DASH0_INIT_CONTAINER_IMAGE", - Value: "ghcr.io/dash0hq/instrumentation", - }, - { - Name: "DASH0_INIT_CONTAINER_IMAGE_PULL_POLICY", - Value: "", - }, - { - Name: "DASH0_COLLECTOR_IMAGE", - Value: "ghcr.io/dash0hq/collector", - }, - { - Name: "DASH0_COLLECTOR_IMAGE_PULL_POLICY", - Value: "", - }, - { - Name: "DASH0_CONFIGURATION_RELOADER_IMAGE", - Value: "ghcr.io/dash0hq/configuration-reloader@latest", - }, - { - Name: "DASH0_CONFIGURATION_RELOADER_IMAGE_PULL_POLICY", - Value: "", - }, - { - Name: "DASH0_FILELOG_OFFSET_SYNCH_IMAGE", - Value: "ghcr.io/dash0hq/filelog-offset-synch", - }, - { - Name: "DASH0_FILELOG_OFFSET_SYNCH_IMAGE_PULL_POLICY", - Value: "", - }, - { - Name: "DASH0_DEVELOPMENT_MODE", - Value: "false", - }, - }, - Ports: []corev1.ContainerPort{ - { - Name: "webhook-server", - ContainerPort: 9443, - Protocol: "TCP", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "certificates", - MountPath: "/tmp/k8s-webhook-server/serving-certs", - ReadOnly: true, - }, - }, - }, - { - Name: "kube-rbac-proxy", - Image: "quay.io/brancz/kube-rbac-proxy:v0.18.0", - Args: []string{ - "--secure-listen-address=0.0.0.0:8443", 
- "--upstream=http://127.0.0.1:8080/", - "--logtostderr=true", - "--v=0", - }, - Ports: []corev1.ContainerPort{ - { - Name: "https", - ContainerPort: 8443, - Protocol: "TCP", - }, - }, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: &falsy, - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{"ALL"}, - }, - }, - }, - }, - SecurityContext: &corev1.PodSecurityContext{ - RunAsNonRoot: &truthy, - SeccompProfile: &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - }, - }, - ServiceAccountName: "dash0-operator-service-account", - AutomountServiceAccountToken: &truthy, - TerminationGracePeriodSeconds: &terminationGracePeriodSeconds, - Volumes: []corev1.Volume{ - { - Name: "certificates", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - DefaultMode: &secretMode, - SecretName: "dash0-operator-certificates", - }, - }, - }, - }, - }, - }, - }, - } -} - -func controllerDeploymentWithSelfMonitoring() *appsv1.Deployment { - replicaCount := int32(2) - falsy := false - truthy := true - terminationGracePeriodSeconds := int64(10) - secretMode := corev1.SecretVolumeSourceDefaultMode - - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: Dash0OperatorDeploymentName, - Namespace: Dash0OperatorNamespace, - Labels: map[string]string{ - "app.kubernetes.io/name": "dash0monitoring-operator", - "app.kubernetes.io/component": "controller", - "app.kubernetes.io/instance": "deployment", - "dash0monitoring.com/enable": "false", - }, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicaCount, - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app.kubernetes.io/name": "dash0monitoring-operator", - "app.kubernetes.io/component": "controller", - }, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - "kubectl.kubernetes.io/default-container": "manager", - }, - Labels: map[string]string{ - "app.kubernetes.io/name": 
"dash0monitoring-operator", - "app.kubernetes.io/component": "controller", - "dash0monitoring.cert-digest": "1234567890", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "manager", - Image: "ghcr.io/dash0hq/operator-controller@latest", - Command: []string{"/manager"}, - Args: []string{ - "--health-probe-bind-address=:8081", - "--metrics-bind-address=127.0.0.1:8080", - "--leader-elect", - }, - Env: []corev1.EnvVar{ - { - Name: "DASH0_OPERATOR_NAMESPACE", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.namespace", - }, - }, - }, - { - Name: "DASH0_DEPLOYMENT_NAME", - Value: Dash0OperatorNamespace, - }, { - Name: "OTEL_COLLECTOR_NAME_PREFIX", - Value: "dash0monitoring-system", - }, - { - Name: "DASH0_INIT_CONTAINER_IMAGE", - Value: "ghcr.io/dash0hq/instrumentation", - }, - { - Name: "DASH0_INIT_CONTAINER_IMAGE_PULL_POLICY", - Value: "", - }, - { - Name: "DASH0_COLLECTOR_IMAGE", - Value: "ghcr.io/dash0hq/collector", - }, - { - Name: "DASH0_COLLECTOR_IMAGE_PULL_POLICY", - Value: "", - }, - { - Name: "DASH0_CONFIGURATION_RELOADER_IMAGE", - Value: "ghcr.io/dash0hq/configuration-reloader@latest", - }, - { - Name: "DASH0_CONFIGURATION_RELOADER_IMAGE_PULL_POLICY", - Value: "", - }, - { - Name: "DASH0_FILELOG_OFFSET_SYNCH_IMAGE", - Value: "ghcr.io/dash0hq/filelog-offset-synch", - }, - { - Name: "DASH0_FILELOG_OFFSET_SYNCH_IMAGE_PULL_POLICY", - Value: "", - }, - { - Name: "DASH0_DEVELOPMENT_MODE", - Value: "false", - }, - { - Name: "OTEL_EXPORTER_OTLP_ENDPOINT", - Value: "ingress.eu-west-1.aws.dash0monitoring-dev.com:4317", - }, - { - Name: "OTEL_EXPORTER_OTLP_HEADERS", - Value: "Authorization=Bearer 1234567890", - }, - }, - Ports: []corev1.ContainerPort{ - { - Name: "webhook-server", - ContainerPort: 9443, - Protocol: "TCP", - }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "certificates", - MountPath: "/tmp/k8s-webhook-server/serving-certs", - ReadOnly: true, - }, - }, - }, - { - 
Name: "kube-rbac-proxy", - Image: "quay.io/brancz/kube-rbac-proxy:v0.18.0", - Args: []string{ - "--secure-listen-address=0.0.0.0:8443", - "--upstream=http://127.0.0.1:8080/", - "--logtostderr=true", - "--v=0", - }, - Ports: []corev1.ContainerPort{ - { - Name: "https", - ContainerPort: 8443, - Protocol: "TCP", - }, - }, - SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: &falsy, - Capabilities: &corev1.Capabilities{ - Drop: []corev1.Capability{"ALL"}, - }, - }, - }, - }, - SecurityContext: &corev1.PodSecurityContext{ - RunAsNonRoot: &truthy, - SeccompProfile: &corev1.SeccompProfile{ - Type: corev1.SeccompProfileTypeRuntimeDefault, - }, - }, - ServiceAccountName: "dash0monitoring-operator-service-account", - AutomountServiceAccountToken: &truthy, - TerminationGracePeriodSeconds: &terminationGracePeriodSeconds, - Volumes: []corev1.Volume{ - { - Name: "certificates", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - DefaultMode: &secretMode, - SecretName: "dash0monitoring-operator-certificates", - }, - }, - }, - }, - }, - }, - }, +func cleanUpDeploymentSpecForDiff(spec *appsv1.DeploymentSpec) { + for i := range spec.Template.Spec.Containers { + spec.Template.Spec.Containers[i].TerminationMessagePath = "" + spec.Template.Spec.Containers[i].TerminationMessagePolicy = "" + spec.Template.Spec.Containers[i].ImagePullPolicy = "" } + spec.Template.Spec.RestartPolicy = "" + spec.Template.Spec.DNSPolicy = "" + spec.Template.Spec.DeprecatedServiceAccount = "" + spec.Template.Spec.SchedulerName = "" + spec.Strategy = appsv1.DeploymentStrategy{} + spec.RevisionHistoryLimit = nil + spec.ProgressDeadlineSeconds = nil } func createReconciler(controllerDeployment *appsv1.Deployment) *OperatorConfigurationReconciler { + persesDashboardCrdReconciler := &PersesDashboardCrdReconciler{ + persesDashboardReconciler: &PersesDashboardReconciler{}, + } return &OperatorConfigurationReconciler{ - Client: k8sClient, - Clientset: clientset, - Recorder: 
recorder, - DeploymentSelfReference: controllerDeployment, - DanglingEventsTimeouts: &DanglingEventsTimeoutsTest, + Client: k8sClient, + Clientset: clientset, + Recorder: recorder, + PersesDashboardCrdReconciler: persesDashboardCrdReconciler, + DeploymentSelfReference: controllerDeployment, + DanglingEventsTimeouts: &DanglingEventsTimeoutsTest, + Images: TestImages, } } @@ -817,9 +841,10 @@ func verifyOperatorConfigurationResourceIsAvailable(ctx context.Context) { func verifySelfMonitoringConfigurationDash0Token( g Gomega, - selfMonitoringConfiguration selfmonitoring.SelfMonitoringConfiguration, + selfMonitoringAndApiAccessConfiguration selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration, + _ *appsv1.Deployment, ) { - dash0ExportConfiguration := selfMonitoringConfiguration.Export.Dash0 + dash0ExportConfiguration := selfMonitoringAndApiAccessConfiguration.Export.Dash0 g.Expect(dash0ExportConfiguration).NotTo(BeNil()) g.Expect(dash0ExportConfiguration.Endpoint).To(Equal(EndpointDash0WithProtocolTest)) g.Expect(dash0ExportConfiguration.Dataset).To(Equal(util.DatasetInsights)) @@ -827,15 +852,16 @@ func verifySelfMonitoringConfigurationDash0Token( g.Expect(authorization).ToNot(BeNil()) g.Expect(*authorization.Token).To(Equal(AuthorizationTokenTest)) g.Expect(authorization.SecretRef).To(BeNil()) - g.Expect(selfMonitoringConfiguration.Export.Grpc).To(BeNil()) - g.Expect(selfMonitoringConfiguration.Export.Http).To(BeNil()) + g.Expect(selfMonitoringAndApiAccessConfiguration.Export.Grpc).To(BeNil()) + g.Expect(selfMonitoringAndApiAccessConfiguration.Export.Http).To(BeNil()) } func verifySelfMonitoringConfigurationDash0SecretRef( g Gomega, - selfMonitoringConfiguration selfmonitoring.SelfMonitoringConfiguration, + selfMonitoringAndApiAccessConfiguration selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration, + _ *appsv1.Deployment, ) { - dash0ExportConfiguration := selfMonitoringConfiguration.Export.Dash0 + dash0ExportConfiguration := 
selfMonitoringAndApiAccessConfiguration.Export.Dash0 g.Expect(dash0ExportConfiguration).NotTo(BeNil()) g.Expect(dash0ExportConfiguration.Endpoint).To(Equal(EndpointDash0WithProtocolTest)) g.Expect(dash0ExportConfiguration.Dataset).To(Equal(util.DatasetInsights)) @@ -844,15 +870,16 @@ func verifySelfMonitoringConfigurationDash0SecretRef( g.Expect(authorization.SecretRef).ToNot(BeNil()) g.Expect(authorization.SecretRef.Name).To(Equal(SecretRefTest.Name)) g.Expect(authorization.SecretRef.Key).To(Equal(SecretRefTest.Key)) - g.Expect(selfMonitoringConfiguration.Export.Grpc).To(BeNil()) - g.Expect(selfMonitoringConfiguration.Export.Http).To(BeNil()) + g.Expect(selfMonitoringAndApiAccessConfiguration.Export.Grpc).To(BeNil()) + g.Expect(selfMonitoringAndApiAccessConfiguration.Export.Http).To(BeNil()) } func verifySelfMonitoringConfigurationGrpc( g Gomega, - selfMonitoringConfiguration selfmonitoring.SelfMonitoringConfiguration, + selfMonitoringAndApiAccessConfiguration selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration, + _ *appsv1.Deployment, ) { - grpcExportConfiguration := selfMonitoringConfiguration.Export.Grpc + grpcExportConfiguration := selfMonitoringAndApiAccessConfiguration.Export.Grpc g.Expect(grpcExportConfiguration).NotTo(BeNil()) g.Expect(grpcExportConfiguration.Endpoint).To(Equal("dns://" + EndpointGrpcTest)) headers := grpcExportConfiguration.Headers @@ -861,15 +888,16 @@ func verifySelfMonitoringConfigurationGrpc( g.Expect(headers[0].Value).To(Equal("Value")) g.Expect(headers[1].Name).To(Equal(util.Dash0DatasetHeaderName)) g.Expect(headers[1].Value).To(Equal(util.DatasetInsights)) - g.Expect(selfMonitoringConfiguration.Export.Dash0).To(BeNil()) - g.Expect(selfMonitoringConfiguration.Export.Http).To(BeNil()) + g.Expect(selfMonitoringAndApiAccessConfiguration.Export.Dash0).To(BeNil()) + g.Expect(selfMonitoringAndApiAccessConfiguration.Export.Http).To(BeNil()) } func verifySelfMonitoringConfigurationHttp( g Gomega, - selfMonitoringConfiguration 
selfmonitoring.SelfMonitoringConfiguration, + selfMonitoringAndApiAccessConfiguration selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration, + _ *appsv1.Deployment, ) { - httpExportConfiguration := selfMonitoringConfiguration.Export.Http + httpExportConfiguration := selfMonitoringAndApiAccessConfiguration.Export.Http g.Expect(httpExportConfiguration).NotTo(BeNil()) g.Expect(httpExportConfiguration.Endpoint).To(Equal(EndpointHttpTest)) g.Expect(httpExportConfiguration.Encoding).To(Equal(dash0v1alpha1.Proto)) @@ -879,6 +907,28 @@ func verifySelfMonitoringConfigurationHttp( g.Expect(headers[0].Value).To(Equal("Value")) g.Expect(headers[1].Name).To(Equal(util.Dash0DatasetHeaderName)) g.Expect(headers[1].Value).To(Equal(util.DatasetInsights)) - g.Expect(selfMonitoringConfiguration.Export.Dash0).To(BeNil()) - g.Expect(selfMonitoringConfiguration.Export.Grpc).To(BeNil()) + g.Expect(selfMonitoringAndApiAccessConfiguration.Export.Dash0).To(BeNil()) + g.Expect(selfMonitoringAndApiAccessConfiguration.Export.Grpc).To(BeNil()) +} + +func verifyNoSelfMontoringButAuthTokenEnvVarFromToken( + g Gomega, + selfMonitoringAndApiAccessConfiguration selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration, + controllerDeployment *appsv1.Deployment, +) { + g.Expect(selfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled).To(BeFalse()) + container := controllerDeployment.Spec.Template.Spec.Containers[0] + g.Expect(container.Env).To( + ContainElement(MatchEnvVar("SELF_MONITORING_AND_API_AUTH_TOKEN", AuthorizationTokenTest))) +} + +func verifyNoSelfMonitoringButAuthTokenEnvVarFromSecretRef( + g Gomega, + selfMonitoringAndApiAccessConfiguration selfmonitoringapiaccess.SelfMonitoringAndApiAccessConfiguration, + controllerDeployment *appsv1.Deployment, +) { + g.Expect(selfMonitoringAndApiAccessConfiguration.SelfMonitoringEnabled).To(BeFalse()) + container := controllerDeployment.Spec.Template.Spec.Containers[0] + g.Expect(container.Env).To( + 
ContainElement(MatchEnvVarValueFrom("SELF_MONITORING_AND_API_AUTH_TOKEN", "secret-ref", "key"))) } diff --git a/internal/dash0/controller/perses_dashboards_controller.go b/internal/dash0/controller/perses_dashboards_controller.go new file mode 100644 index 00000000..39e85420 --- /dev/null +++ b/internal/dash0/controller/perses_dashboards_controller.go @@ -0,0 +1,475 @@ +// SPDX-FileCopyrightText: Copyright 2024 Dash0 Inc. +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "sync/atomic" + "time" + + "github.com/go-logr/logr" + persesv1alpha1 "github.com/perses/perses-operator/api/v1alpha1" + persesv1common "github.com/perses/perses/pkg/model/api/v1/common" + otelmetric "go.opentelemetry.io/otel/metric" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/dash0hq/dash0-operator/internal/dash0/util" +) + +type PersesDashboardCrdReconciler struct { + AuthToken string + mgr ctrl.Manager + skipNameValidation bool + persesDashboardReconciler *PersesDashboardReconciler +} + +//+kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch + +var ( + // persesDashboardCrdReconcileRequestMetric otelmetric.Int64Counter + persesDashboardReconcileRequestMetric otelmetric.Int64Counter + + retrySettings = wait.Backoff{ + Duration: 5 * time.Second, + Factor: 1.5, + Steps: 3, + } +) + +func (r *PersesDashboardCrdReconciler) 
SetupWithManager(
+	ctx context.Context,
+	mgr ctrl.Manager,
+	startupK8sClient client.Client,
+	logger *logr.Logger,
+) error {
+	kubeSystemNamespace := &corev1.Namespace{}
+	if err := startupK8sClient.Get(ctx, client.ObjectKey{Name: "kube-system"}, kubeSystemNamespace); err != nil {
+		msg := "unable to get the kube-system namespace uid"
+		logger.Error(err, msg)
+		return fmt.Errorf("%s: %w", msg, err)
+	}
+
+	r.mgr = mgr
+	r.persesDashboardReconciler = &PersesDashboardReconciler{
+		pseudoClusterUid: kubeSystemNamespace.UID,
+		httpClient:       &http.Client{},
+		authToken:        r.AuthToken,
+	}
+
+	if err := startupK8sClient.Get(ctx, client.ObjectKey{
+		Name: "persesdashboards.perses.dev",
+	}, &apiextensionsv1.CustomResourceDefinition{}); err != nil {
+		if apierrors.IsNotFound(err) {
+			logger.Info("The persesdashboards.perses.dev custom resource definition does not exist in this " +
+				"cluster, the operator will not watch for Perses dashboard resources.")
+		} else {
+			logger.Error(err, "unable to get the persesdashboards.perses.dev custom resource definition")
+			return err
+		}
+	} else {
+		logger.Info("The persesdashboards.perses.dev custom resource definition is present in this " +
+			"cluster, the operator will watch for Perses dashboard resources.")
+		if err = r.startWatchingPersesDashboardResources(ctx, logger); err != nil {
+			return err
+		}
+	}
+
+	// For now, we are not watching for the PersesDashboard CRD. Watching for a foreign CRD and reacting appropriately
+	// to its creation/deletion is work in progress in the prometheus scraping branch. Once that is finished, we can
+	// employ the same approach here.
+ return nil +} + +//+kubebuilder:rbac:groups=perses.dev,resources=persesdashboards,verbs=get;list;watch + +func (r *PersesDashboardCrdReconciler) InitializeSelfMonitoringMetrics( + meter otelmetric.Meter, + metricNamePrefix string, + logger *logr.Logger, +) { + // Note: The persesDashboardCrdReconcileRequestMetric is unused until we actually implement watching the + // PersesDashboard _CRD_, see comment above in SetupWithManager. + + // reconcileRequestMetricName := fmt.Sprintf("%s%s", metricNamePrefix, "persesdashboardcrd.reconcile_requests") + // var err error + // if persesDashboardCrdReconcileRequestMetric, err = meter.Int64Counter( + // reconcileRequestMetricName, + // otelmetric.WithUnit("1"), + // otelmetric.WithDescription("Counter for persesdashboard CRD reconcile requests"), + // ); err != nil { + // logger.Error(err, "Cannot initialize the metric %s.") + // } + + r.persesDashboardReconciler.InitializeSelfMonitoringMetrics( + meter, + metricNamePrefix, + logger, + ) +} + +func (r *PersesDashboardCrdReconciler) startWatchingPersesDashboardResources( + _ context.Context, + logger *logr.Logger, +) error { + logger.Info("Setting up a watch for Perses dashboard custom resources.") + + controllerBuilder := ctrl.NewControllerManagedBy(r.mgr). + Named("dash0_perses_dashboard_controller"). + Watches( + &persesv1alpha1.PersesDashboard{}, + // Deliberately not using a convenience mechanism like &handler.EnqueueRequestForObject{} (which would + // feed all events into the Reconcile method) here, since using the lower-level TypedEventHandler interface + // directly allows us to distinguish between create and delete events more easily. 
+ r.persesDashboardReconciler, + ) + if r.skipNameValidation { + controllerBuilder = controllerBuilder.WithOptions(controller.TypedOptions[reconcile.Request]{ + SkipNameValidation: ptr.To(true), + }) + } + if err := controllerBuilder.Complete(r.persesDashboardReconciler); err != nil { + logger.Error(err, "unable to create a new controller for watching Perses Dashboards") + return err + } + r.persesDashboardReconciler.isWatching = true + + return nil +} + +func (r *PersesDashboardCrdReconciler) SetApiEndpointAndDataset(apiConfig *ApiConfig) { + r.persesDashboardReconciler.apiConfig.Store(apiConfig) +} + +func (r *PersesDashboardCrdReconciler) RemoveApiEndpointAndDataset() { + r.persesDashboardReconciler.apiConfig.Store(nil) +} + +type ApiConfig struct { + Endpoint string + Dataset string +} + +type PersesDashboardReconciler struct { + isWatching bool + pseudoClusterUid types.UID + httpClient *http.Client + apiConfig atomic.Pointer[ApiConfig] + authToken string +} + +func (r *PersesDashboardReconciler) InitializeSelfMonitoringMetrics( + meter otelmetric.Meter, + metricNamePrefix string, + logger *logr.Logger, +) { + reconcileRequestMetricName := fmt.Sprintf("%s%s", metricNamePrefix, "persesdashboard.reconcile_requests") + var err error + if persesDashboardReconcileRequestMetric, err = meter.Int64Counter( + reconcileRequestMetricName, + otelmetric.WithUnit("1"), + otelmetric.WithDescription("Counter for perses dashboard reconcile requests"), + ); err != nil { + logger.Error(err, "Cannot initialize the metric %s.") + } +} + +func (r *PersesDashboardReconciler) Create( + ctx context.Context, + e event.TypedCreateEvent[client.Object], + _ workqueue.TypedRateLimitingInterface[reconcile.Request], +) { + if persesDashboardReconcileRequestMetric != nil { + persesDashboardReconcileRequestMetric.Add(ctx, 1) + } + + logger := log.FromContext(ctx) + logger.Info( + "Detected a new Perses dashboard resource", + "namespace", + e.Object.GetNamespace(), + "name", + 
e.Object.GetName(), + ) + if err := r.UpsertDashboard(e.Object.(*persesv1alpha1.PersesDashboard), &logger); err != nil { + logger.Error(err, "unable to upsert the dashboard") + } +} + +func (r *PersesDashboardReconciler) Update( + ctx context.Context, + e event.TypedUpdateEvent[client.Object], + _ workqueue.TypedRateLimitingInterface[reconcile.Request], +) { + if persesDashboardReconcileRequestMetric != nil { + persesDashboardReconcileRequestMetric.Add(ctx, 1) + } + + logger := log.FromContext(ctx) + logger.Info( + "Detected a change for a Perses dashboard resource", + "namespace", + e.ObjectNew.GetNamespace(), + "name", + e.ObjectNew.GetName(), + ) + + _ = util.RetryWithCustomBackoff( + "upsert dashboard", + func() error { + return r.UpsertDashboard(e.ObjectNew.(*persesv1alpha1.PersesDashboard), &logger) + }, + retrySettings, + true, + &logger, + ) +} + +func (r *PersesDashboardReconciler) Delete( + ctx context.Context, + e event.TypedDeleteEvent[client.Object], + _ workqueue.TypedRateLimitingInterface[reconcile.Request], +) { + if persesDashboardReconcileRequestMetric != nil { + persesDashboardReconcileRequestMetric.Add(ctx, 1) + } + + logger := log.FromContext(ctx) + logger.Info( + "Detected the deletion of a Perses dashboard resource", + "namespace", + e.Object.GetNamespace(), + "name", + e.Object.GetName(), + ) + + _ = util.RetryWithCustomBackoff( + "delete dashboard", + func() error { + return r.DeleteDashboard(e.Object.(*persesv1alpha1.PersesDashboard), &logger) + }, + retrySettings, + true, + &logger, + ) +} + +func (r *PersesDashboardReconciler) Generic( + _ context.Context, + _ event.TypedGenericEvent[client.Object], + _ workqueue.TypedRateLimitingInterface[reconcile.Request], +) { + // ignoring generic events +} + +func (r *PersesDashboardReconciler) Reconcile( + context.Context, + reconcile.Request, +) (reconcile.Result, error) { + // Reconcile should not be called on the PersesDashboardReconciler, as we are using the TypedEventHandler interface + // 
directly when setting up the watch. We still need to implement the method, as the controller builder's Complete + // method requires implementing the Reconciler interface. + return reconcile.Result{}, nil +} + +func (r *PersesDashboardReconciler) UpsertDashboard( + persesDashboard *persesv1alpha1.PersesDashboard, + logger *logr.Logger, +) error { + apiConfig := r.apiConfig.Load() + dashboardUrl, dashboardOrigin, authToken, executeRequest := r.validateConfigAndRenderUrl( + persesDashboard, + apiConfig, + logger, + ) + if !executeRequest { + return nil + } + + if persesDashboard.Spec.Display == nil { + persesDashboard.Spec.Display = &persesv1common.Display{} + } + if persesDashboard.Spec.Display.Name == "" { + // Let the dashboard name default to the perses dashboard resource's namespace + name, if unset. + persesDashboard.Spec.Display.Name = fmt.Sprintf("%s/%s", persesDashboard.Namespace, persesDashboard.Name) + } + + serializedDashboard, _ := json.Marshal( + map[string]interface{}{ + "kind": "Dashboard", + "spec": persesDashboard.Spec, + "metadata": map[string]interface{}{ + "dash0Extensions": map[string]interface{}{ + "origin": dashboardOrigin, + }, + }, + }) + requestPayload := bytes.NewBuffer(serializedDashboard) + + req, err := http.NewRequest( + http.MethodPut, + dashboardUrl, + requestPayload, + ) + if err != nil { + logger.Error(err, "unable to create a new HTTP request to upsert the dashboard") + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", authToken)) + logger.Info(fmt.Sprintf("Updating/creating dashboard %s in Dash0", dashboardOrigin)) + res, err := r.httpClient.Do(req) + if err != nil { + logger.Error(err, fmt.Sprintf("unable to execute the HTTP request to update the dashboard %s", dashboardOrigin)) + return err + } + + if res.StatusCode < http.StatusOK || res.StatusCode >= http.StatusMultipleChoices { + return r.handleNon2xxStatusCode(res, dashboardOrigin, logger) + } + + 
// http status code was 2xx, discard the response body and close it + defer func() { + _, _ = io.Copy(io.Discard, res.Body) + _ = res.Body.Close() + }() + + return nil +} + +func (r *PersesDashboardReconciler) DeleteDashboard( + persesDashboard *persesv1alpha1.PersesDashboard, + logger *logr.Logger, +) error { + apiConfig := r.apiConfig.Load() + dashboardUrl, dashboardOrigin, authToken, executeRequest := r.validateConfigAndRenderUrl( + persesDashboard, + apiConfig, + logger, + ) + if !executeRequest { + return nil + } + + req, err := http.NewRequest( + http.MethodDelete, + dashboardUrl, + nil, + ) + if err != nil { + logger.Error(err, "unable to create a new HTTP request to delete the dashboard") + return err + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", authToken)) + logger.Info(fmt.Sprintf("Deleting dashboard %s in Dash0", dashboardOrigin)) + res, err := r.httpClient.Do(req) + if err != nil { + logger.Error(err, fmt.Sprintf("unable to execute the HTTP request to delete the dashboard %s", dashboardOrigin)) + return err + } + + if res.StatusCode < http.StatusOK || res.StatusCode >= http.StatusMultipleChoices { + return r.handleNon2xxStatusCode(res, dashboardOrigin, logger) + } + + // http status code was 2xx, discard the response body and close it + defer func() { + _, _ = io.Copy(io.Discard, res.Body) + _ = res.Body.Close() + }() + + return nil +} + +func (r *PersesDashboardReconciler) validateConfigAndRenderUrl( + persesDashboard *persesv1alpha1.PersesDashboard, + apiConfig *ApiConfig, + logger *logr.Logger, +) (string, string, string, bool) { + if apiConfig == nil || apiConfig.Endpoint == "" { + logger.Info("No Dash0 API endpoint has been provided via the operator configuration resource, the dashboard " + + "will not be updated in Dash0.") + return "", "", "", false + } + if r.authToken == "" { + logger.Info("No auth token is set on the controller deployment, the dashboard will not be updated " + + "in Dash0.") + return "", "", "", false + } + + 
dataset := apiConfig.Dataset
+	if dataset == "" {
+		dataset = "default"
+	}
+	dashboardUrl, dashboardOrigin := r.renderDashboardUrl(apiConfig.Endpoint, persesDashboard, dataset)
+	return dashboardUrl, dashboardOrigin, r.authToken, true
+}
+
+func (r *PersesDashboardReconciler) renderDashboardUrl(
+	dash0ApiEndpoint string,
+	persesDashboard *persesv1alpha1.PersesDashboard,
+	dataset string,
+) (string, string) {
+	dashboardOrigin := fmt.Sprintf(
+		// we deliberately use _ as the separator, since that is an illegal character in Kubernetes names. This avoids
+		// any potential naming collisions (e.g. namespace="abc" & name="def-ghi" vs. namespace="abc-def" & name="ghi").
+		"dash0-operator_%s_%s_%s_%s",
+		r.pseudoClusterUid,
+		dataset,
+		persesDashboard.Namespace,
+		persesDashboard.Name,
+	)
+	if !strings.HasSuffix(dash0ApiEndpoint, "/") {
+		dash0ApiEndpoint += "/"
+	}
+	return fmt.Sprintf(
+		"%sapi/dashboards/%s?dataset=%s",
+		dash0ApiEndpoint,
+		dashboardOrigin,
+		dataset,
+	), dashboardOrigin
+}
+
+func (r *PersesDashboardReconciler) handleNon2xxStatusCode(
+	res *http.Response,
+	dashboardOrigin string,
+	logger *logr.Logger,
+) error {
+	defer func() {
+		_ = res.Body.Close()
+	}()
+	responseBody, readErr := io.ReadAll(res.Body)
+	if readErr != nil {
+		readBodyErr := fmt.Errorf("unable to read the API response payload after receiving status code %d when "+
+			"trying to update/create/delete the dashboard %s", res.StatusCode, dashboardOrigin)
+		logger.Error(readBodyErr, "unable to read the API response payload")
+		return readBodyErr
+	}
+
+	statusCodeErr := fmt.Errorf(
+		"unexpected status code %d when updating/creating/deleting the dashboard %s, response body is %s",
+		res.StatusCode,
+		dashboardOrigin,
+		string(responseBody),
+	)
+	logger.Error(statusCodeErr, "unexpected status code")
+	return statusCodeErr
+}
diff --git a/internal/dash0/controller/perses_dashboards_controller_test.go b/internal/dash0/controller/perses_dashboards_controller_test.go
new file mode 
100644 index 00000000..62beb4ca --- /dev/null +++ b/internal/dash0/controller/perses_dashboards_controller_test.go @@ -0,0 +1,261 @@ +// SPDX-FileCopyrightText: Copyright 2024 Dash0 Inc. +// SPDX-License-Identifier: Apache-2.0 + +package controller + +import ( + "context" + + "github.com/h2non/gock" + persesv1alpha1 "github.com/perses/perses-operator/api/v1alpha1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + . "github.com/dash0hq/dash0-operator/test/util" +) + +var ( + crdReconciler *PersesDashboardCrdReconciler + crd *apiextensionsv1.CustomResourceDefinition + + crdQualifiedName = types.NamespacedName{ + Name: "persesdashboards.perses.dev", + } +) + +var _ = Describe("The Perses dashboard controller", Ordered, func() { + ctx := context.Background() + logger := log.FromContext(ctx) + + BeforeAll(func() { + EnsureTestNamespaceExists(ctx, k8sClient) + EnsureOperatorNamespaceExists(ctx, k8sClient) + }) + + Describe("the Perses dashboard CRD reconciler", func() { + BeforeEach(func() { + crdReconciler = &PersesDashboardCrdReconciler{ + AuthToken: AuthorizationTokenTest, + + // We create the controller multiple times in tests, this option is required, otherwise the controller + // runtime will complain. 
+ skipNameValidation: true, + } + }) + + AfterEach(func() { + ensurePersesDashboardCrdDoesNotExist(ctx) + }) + + It("does not start watching Perses dashboards if the CRD does not exist", func() { + Expect(crdReconciler.SetupWithManager(ctx, mgr, k8sClient, &logger)).To(Succeed()) + Expect(crdReconciler.persesDashboardReconciler.isWatching).To(BeFalse()) + }) + + It("starts watching Perses dashboards if the CRD exists", func() { + ensurePersesDashboardCrdExists(ctx) + Expect(crdReconciler.SetupWithManager(ctx, mgr, k8sClient, &logger)).To(Succeed()) + Expect(crdReconciler.persesDashboardReconciler.isWatching).To(BeTrue()) + }) + }) + + Describe("the Perses dashboard resource reconciler", func() { + var persesDashboardReconciler *PersesDashboardReconciler + + BeforeAll(func() { + crdReconciler = &PersesDashboardCrdReconciler{ + AuthToken: AuthorizationTokenTest, + + // We create the controller multiple times in tests, this option is required, otherwise the controller + // runtime will complain. 
+ skipNameValidation: true, + } + ensurePersesDashboardCrdExists(ctx) + + Expect(crdReconciler.SetupWithManager(ctx, mgr, k8sClient, &logger)).To(Succeed()) + Expect(crdReconciler.persesDashboardReconciler.isWatching).To(BeTrue()) + }) + + BeforeEach(func() { + crdReconciler.SetApiEndpointAndDataset(&ApiConfig{ + Endpoint: ApiEndpointTest, + Dataset: DatasetTest, + }) + + persesDashboardReconciler = crdReconciler.persesDashboardReconciler + }) + + AfterAll(func() { + ensurePersesDashboardCrdDoesNotExist(ctx) + }) + + It("creates a Perses dashboard resource", func() { + expectPutRequest() + defer gock.Off() + + dashboardResource := createDashboardResource() + persesDashboardReconciler.Create( + ctx, + event.TypedCreateEvent[client.Object]{ + Object: &dashboardResource, + }, + &controllertest.TypedQueue[reconcile.Request]{}, + ) + + Expect(gock.IsDone()).To(BeTrue()) + }) + + It("updates a Perses dashboard resource", func() { + expectPutRequest() + defer gock.Off() + + dashboardResource := createDashboardResource() + persesDashboardReconciler.Update( + ctx, + event.TypedUpdateEvent[client.Object]{ + ObjectNew: &dashboardResource, + }, + &controllertest.TypedQueue[reconcile.Request]{}, + ) + + Expect(gock.IsDone()).To(BeTrue()) + }) + + It("deletes a Perses dashboard resource", func() { + expectDeleteRequest() + defer gock.Off() + + dashboardResource := createDashboardResource() + persesDashboardReconciler.Delete( + ctx, + event.TypedDeleteEvent[client.Object]{ + Object: &dashboardResource, + }, + &controllertest.TypedQueue[reconcile.Request]{}, + ) + + Expect(gock.IsDone()).To(BeTrue()) + }) + + It("it ignores Perses dashboard resource changes if API endpoint is not configured", func() { + expectPutRequest() + defer gock.Off() + + crdReconciler.SetApiEndpointAndDataset(nil) + + dashboardResource := createDashboardResource() + persesDashboardReconciler.Create( + ctx, + event.TypedCreateEvent[client.Object]{ + Object: &dashboardResource, + }, + 
&controllertest.TypedQueue[reconcile.Request]{}, + ) + + Expect(gock.IsPending()).To(BeTrue()) + }) + }) +}) + +func expectPutRequest() { + gock.New(ApiEndpointTest). + Put("/api/dashboards/.*"). + MatchParam("dataset", DatasetTest). + Reply(200). + JSON(map[string]string{}) +} + +func expectDeleteRequest() { + gock.New(ApiEndpointTest). + Delete("/api/dashboards/.*"). + MatchParam("dataset", DatasetTest). + Reply(200). + JSON(map[string]string{}) +} + +func createDashboardResource() persesv1alpha1.PersesDashboard { + return persesv1alpha1.PersesDashboard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-dashboard", + Namespace: TestNamespaceName, + }, + Spec: persesv1alpha1.Dashboard{}, + } +} + +func ensurePersesDashboardCrdExists(ctx context.Context) { + crd_ := EnsureKubernetesObjectExists( + ctx, + k8sClient, + crdQualifiedName, + &apiextensionsv1.CustomResourceDefinition{}, + &apiextensionsv1.CustomResourceDefinition{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apiextensions.k8s.io/v1", + Kind: "CustomResourceDefinition", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "persesdashboards.perses.dev", + }, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "perses.dev", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Kind: "PersesDashboard", + ListKind: "PersesDashboardList", + Plural: "persesdashboards", + Singular: "persesdashboard", + }, + Scope: "Namespaced", + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "apiVersion": {Type: "string"}, + "kind": {Type: "string"}, + "metadata": {Type: "object"}, + "spec": {Type: "object"}, + }, + Required: []string{ + "kind", + "spec", + }, + }, + }, + Served: true, + Storage: true, + }, + }, + }, + }, + ) + + crd = crd_.(*apiextensionsv1.CustomResourceDefinition) +} + +func 
ensurePersesDashboardCrdDoesNotExist(ctx context.Context) { + if crd != nil { + Expect(k8sClient.Delete(ctx, crd, &client.DeleteOptions{ + GracePeriodSeconds: new(int64), + })).To(Succeed()) + + Eventually(func(g Gomega) { + err := k8sClient.Get(ctx, crdQualifiedName, &apiextensionsv1.CustomResourceDefinition{}) + g.Expect(err).To(HaveOccurred()) + g.Expect(apierrors.IsNotFound(err)).To(BeTrue()) + }).Should(Succeed()) + } + +} diff --git a/internal/dash0/instrumentation/instrumenter_test.go b/internal/dash0/instrumentation/instrumenter_test.go index d62df002..ff300762 100644 --- a/internal/dash0/instrumentation/instrumenter_test.go +++ b/internal/dash0/instrumentation/instrumenter_test.go @@ -40,7 +40,7 @@ var _ = Describe("The instrumenter", Ordered, func() { BeforeAll(func() { EnsureTestNamespaceExists(ctx, k8sClient) - EnsureDash0OperatorNamespaceExists(ctx, k8sClient) + EnsureOperatorNamespaceExists(ctx, k8sClient) }) BeforeEach(func() { diff --git a/internal/dash0/predelete/operator_pre_delete_handler_test.go b/internal/dash0/predelete/operator_pre_delete_handler_test.go index bf738365..a854aeec 100644 --- a/internal/dash0/predelete/operator_pre_delete_handler_test.go +++ b/internal/dash0/predelete/operator_pre_delete_handler_test.go @@ -49,7 +49,7 @@ var _ = Describe("Uninstalling the Dash0 Kubernetes operator", Ordered, func() { ) BeforeAll(func() { - EnsureDash0OperatorNamespaceExists(ctx, k8sClient) + EnsureOperatorNamespaceExists(ctx, k8sClient) }) BeforeEach(func() { diff --git a/internal/dash0/predelete/pre_delete_suite_test.go b/internal/dash0/predelete/pre_delete_suite_test.go index ffbc2097..a69b78d5 100644 --- a/internal/dash0/predelete/pre_delete_suite_test.go +++ b/internal/dash0/predelete/pre_delete_suite_test.go @@ -113,7 +113,7 @@ var _ = BeforeSuite(func() { Clientset: clientset, Images: TestImages, Instrumenter: instrumenter, - OperatorNamespace: Dash0OperatorNamespace, + OperatorNamespace: OperatorNamespace, BackendConnectionManager: 
backendConnectionManager, DanglingEventsTimeouts: &DanglingEventsTimeoutsTest, } diff --git a/internal/dash0/selfmonitoring/self_monitoring.go b/internal/dash0/selfmonitoringapiaccess/self_monitoring_and_api_access.go similarity index 66% rename from internal/dash0/selfmonitoring/self_monitoring.go rename to internal/dash0/selfmonitoringapiaccess/self_monitoring_and_api_access.go index a2fa87bf..4b726f4e 100644 --- a/internal/dash0/selfmonitoring/self_monitoring.go +++ b/internal/dash0/selfmonitoringapiaccess/self_monitoring_and_api_access.go @@ -1,7 +1,7 @@ // SPDX-FileCopyrightText: Copyright 2024 Dash0 Inc. // SPDX-License-Identifier: Apache-2.0 -package selfmonitoring +package selfmonitoringapiaccess import ( "fmt" @@ -20,9 +20,9 @@ import ( type OtlpProtocol string -type SelfMonitoringConfiguration struct { - Enabled bool - Export dash0v1alpha1.Export +type SelfMonitoringAndApiAccessConfiguration struct { + SelfMonitoringEnabled bool + Export dash0v1alpha1.Export } type EndpointAndHeaders struct { @@ -37,59 +37,57 @@ const ( otelExporterOtlpProtocolEnvVarName = "OTEL_EXPORTER_OTLP_PROTOCOL" otelResourceAttribtuesEnvVarName = "OTEL_RESOURCE_ATTRIBUTES" otelLogLevelEnvVarName = "OTEL_LOG_LEVEL" - - selfMonitoringauthTokenEnvVarName = "SELF_MONITORING_AUTH_TOKEN" ) var ( dash0IngressEndpointRegex = regexp.MustCompile(`dash0(?:-dev)?\.com`) // See https://kubernetes.io/docs/tasks/inject-data-application/define-interdependent-environment-variables/ - authHeaderValue = fmt.Sprintf("Bearer $(%s)", selfMonitoringauthTokenEnvVarName) + authHeaderValue = fmt.Sprintf("Bearer $(%s)", util.SelfMonitoringAndApiAuthTokenEnvVarName) ) +func (c *SelfMonitoringAndApiAccessConfiguration) HasDash0ApiAccessConfigured() bool { + return c.Export.Dash0 != nil && + c.Export.Dash0.ApiEndpoint != "" && + (c.Export.Dash0.Authorization.Token != nil || c.Export.Dash0.Authorization.SecretRef != nil) +} + +func (c *SelfMonitoringAndApiAccessConfiguration) GetDash0Authorization() 
dash0v1alpha1.Authorization { + return c.Export.Dash0.Authorization +} + func ConvertOperatorConfigurationResourceToSelfMonitoringConfiguration( resource *dash0v1alpha1.Dash0OperatorConfiguration, logger *logr.Logger, -) (SelfMonitoringConfiguration, error) { +) (SelfMonitoringAndApiAccessConfiguration, error) { if resource == nil { - return SelfMonitoringConfiguration{ - Enabled: false, - }, nil - } - - if !resource.Spec.SelfMonitoring.Enabled { - return SelfMonitoringConfiguration{ - Enabled: false, - }, nil + return SelfMonitoringAndApiAccessConfiguration{}, nil } export := resource.Spec.Export if export == nil { logger.Info("Invalid configuration of Dash0OperatorConfiguration resource: Self-monitoring is enabled but no " + "export configuration is set. Self-monitoring telemetry will not be sent.") - return SelfMonitoringConfiguration{ - Enabled: false, - }, nil + return SelfMonitoringAndApiAccessConfiguration{}, nil } if export.Dash0 != nil { - return convertResourceToDash0ExportConfiguration(export, logger) + return convertResourceToDash0ExportConfiguration(export, resource.Spec.SelfMonitoring.Enabled, logger) } if export.Grpc != nil { - return convertResourceToGrpcExportConfiguration(export, logger) + return convertResourceToGrpcExportConfiguration(export, resource.Spec.SelfMonitoring.Enabled, logger) } if export.Http != nil { - return convertResourceToHttpExportConfiguration(export) + return convertResourceToHttpExportConfiguration(export, resource.Spec.SelfMonitoring.Enabled) } - return SelfMonitoringConfiguration{ - Enabled: false, - }, fmt.Errorf("no export configuration for self-monitoring has been provided, no self-monitoring telemetry will be sent") + return SelfMonitoringAndApiAccessConfiguration{}, + fmt.Errorf("no export configuration for self-monitoring has been provided, no self-monitoring telemetry will be sent") } func convertResourceToDash0ExportConfiguration( export *dash0v1alpha1.Export, + selfMonitoringEnabled bool, logger *logr.Logger, -) 
(SelfMonitoringConfiguration, error) { +) (SelfMonitoringAndApiAccessConfiguration, error) { if export.Grpc != nil { logger.Info( fmt.Sprintf( @@ -104,13 +102,14 @@ func convertResourceToDash0ExportConfiguration( } dash0Export := export.Dash0 - return SelfMonitoringConfiguration{ - Enabled: true, + return SelfMonitoringAndApiAccessConfiguration{ + SelfMonitoringEnabled: selfMonitoringEnabled, Export: dash0v1alpha1.Export{ Dash0: &dash0v1alpha1.Dash0Configuration{ Endpoint: dash0Export.Endpoint, Dataset: util.DatasetInsights, Authorization: dash0Export.Authorization, + ApiEndpoint: dash0Export.ApiEndpoint, }, }, }, nil @@ -118,8 +117,9 @@ func convertResourceToDash0ExportConfiguration( func convertResourceToGrpcExportConfiguration( export *dash0v1alpha1.Export, + selfMonitoringEnabled bool, logger *logr.Logger, -) (SelfMonitoringConfiguration, error) { +) (SelfMonitoringAndApiAccessConfiguration, error) { if export.Http != nil { logger.Info( fmt.Sprintf( @@ -128,8 +128,8 @@ func convertResourceToGrpcExportConfiguration( } grpcExport := export.Grpc - return SelfMonitoringConfiguration{ - Enabled: true, + return SelfMonitoringAndApiAccessConfiguration{ + SelfMonitoringEnabled: selfMonitoringEnabled, Export: dash0v1alpha1.Export{ Grpc: &dash0v1alpha1.GrpcConfiguration{ Endpoint: grpcExport.Endpoint, @@ -147,15 +147,16 @@ func convertResourceToGrpcExportConfiguration( func convertResourceToHttpExportConfiguration( export *dash0v1alpha1.Export, -) (SelfMonitoringConfiguration, error) { + selfMonitoringEnabled bool, +) (SelfMonitoringAndApiAccessConfiguration, error) { httpExport := export.Http if httpExport.Encoding == dash0v1alpha1.Json { - return SelfMonitoringConfiguration{ - Enabled: false, + return SelfMonitoringAndApiAccessConfiguration{ + SelfMonitoringEnabled: false, }, fmt.Errorf("using an HTTP exporter with JSON encoding self-monitoring is not supported") } - return SelfMonitoringConfiguration{ - Enabled: true, + return SelfMonitoringAndApiAccessConfiguration{ 
+ SelfMonitoringEnabled: selfMonitoringEnabled, Export: dash0v1alpha1.Export{ Http: &dash0v1alpha1.HttpConfiguration{ Endpoint: httpExport.Endpoint, @@ -185,7 +186,7 @@ func (c *cannotFindContainerByNameError) Error() string { func EnableSelfMonitoringInCollectorDaemonSet( collectorDaemonSet *appsv1.DaemonSet, - selfMonitoringConfiguration SelfMonitoringConfiguration, + selfMonitoringConfiguration SelfMonitoringAndApiAccessConfiguration, operatorVersion string, developmentMode bool, ) error { @@ -199,7 +200,7 @@ func EnableSelfMonitoringInCollectorDaemonSet( func EnableSelfMonitoringInCollectorDeployment( collectorDeployment *appsv1.Deployment, - selfMonitoringConfiguration SelfMonitoringConfiguration, + selfMonitoringConfiguration SelfMonitoringAndApiAccessConfiguration, operatorVersion string, developmentMode bool, ) error { @@ -213,7 +214,7 @@ func EnableSelfMonitoringInCollectorDeployment( func enableSelfMonitoringInCollector( collectorContainers []corev1.Container, - selfMonitoringConfiguration SelfMonitoringConfiguration, + selfMonitoringConfiguration SelfMonitoringAndApiAccessConfiguration, operatorVersion string, developmentMode bool, ) error { @@ -221,8 +222,8 @@ func enableSelfMonitoringInCollector( var authTokenEnvVar *corev1.EnvVar if selfMonitoringExport.Dash0 != nil { envVar, err := util.CreateEnvVarForAuthorization( - *selfMonitoringExport.Dash0, - selfMonitoringauthTokenEnvVarName, + (*(selfMonitoringExport.Dash0)).Authorization, + util.SelfMonitoringAndApiAuthTokenEnvVarName, ) if err != nil { return err @@ -259,127 +260,221 @@ func enableSelfMonitoringInCollector( return nil } -func GetSelfMonitoringConfigurationFromControllerDeployment( +func GetSelfMonitoringAndApiAccessConfigurationFromControllerDeployment( controllerDeployment *appsv1.Deployment, - managerContainerName string, -) (SelfMonitoringConfiguration, error) { - managerContainerIdx := slices.IndexFunc(controllerDeployment.Spec.Template.Spec.Containers, func(c corev1.Container) bool { - 
return c.Name == managerContainerName - }) + controllerContainerName string, +) (SelfMonitoringAndApiAccessConfiguration, error) { + controllerContainerIdx, err := findControllerContainer(controllerDeployment, controllerContainerName) + if err != nil { + return SelfMonitoringAndApiAccessConfiguration{}, &cannotFindContainerByNameError{ + ContainerName: controllerContainerName, + WorkloadGKV: controllerDeployment.GroupVersionKind(), + WorkloadNamespace: controllerDeployment.Namespace, + WorkloadName: controllerDeployment.Name, + } + } + + return ParseSelfMonitoringConfigurationFromContainer(&controllerDeployment.Spec.Template.Spec.Containers[controllerContainerIdx]) +} + +func ParseSelfMonitoringConfigurationFromContainer(controllerContainer *corev1.Container) (SelfMonitoringAndApiAccessConfiguration, error) { + endpoint, err := parseEndpoint(controllerContainer) + if err != nil { + return SelfMonitoringAndApiAccessConfiguration{}, err + } + + dash0Authorization := parseDash0AuthorizationFromEnvVars(controllerContainer) + if endpoint == "" { + if dash0Authorization != nil { + return SelfMonitoringAndApiAccessConfiguration{ + SelfMonitoringEnabled: false, + Export: dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: "", + Authorization: *dash0Authorization, + }, + }, + }, nil + } else { + return SelfMonitoringAndApiAccessConfiguration{}, nil + } + } + + protocolFromEnvVar := "grpc" + otelExporterOtlpProtocolEnvVarIdx := slices.IndexFunc(controllerContainer.Env, matchOtelExporterOtlpProtocolEnvVar) + if otelExporterOtlpProtocolEnvVarIdx >= 0 { + protocolFromEnvVar = controllerContainer.Env[otelExporterOtlpProtocolEnvVarIdx].Value + } + + headers := parseHeadersFromEnvVar(controllerContainer) + + switch protocolFromEnvVar { + case "grpc": + return createDash0OrGrpcConfigurationFromContainer(controllerContainer, endpoint, headers), nil + case "http/json": + return createHttpJsonConfigurationFromContainer(endpoint, headers), nil + case 
"http/protobuf": + return createHttpProtobufConfigurationFromContainer(endpoint, headers), nil + + default: + return SelfMonitoringAndApiAccessConfiguration{}, fmt.Errorf("unsupported protocol %v", protocolFromEnvVar) + } +} + +func parseEndpoint(container *corev1.Container) (string, error) { + otelExporterOtlpEndpointEnvVarIdx := slices.IndexFunc(container.Env, matchOtelExporterOtlpEndpointEnvVar) + if otelExporterOtlpEndpointEnvVarIdx < 0 { + return "", nil + } + otelExporterOtlpEndpointEnvVar := container.Env[otelExporterOtlpEndpointEnvVarIdx] + if otelExporterOtlpEndpointEnvVar.Value == "" && otelExporterOtlpEndpointEnvVar.ValueFrom != nil { + return "", fmt.Errorf("retrieving the endpoint from OTEL_EXPORTER_OTLP_ENDPOINT with a ValueFrom source is not supported") + } else if otelExporterOtlpEndpointEnvVar.Value == "" { + return "", fmt.Errorf("no OTEL_EXPORTER_OTLP_ENDPOINT is set") + } + return otelExporterOtlpEndpointEnvVar.Value, nil +} - if managerContainerIdx < 0 { - return SelfMonitoringConfiguration{ - Enabled: false, - }, &cannotFindContainerByNameError{ - ContainerName: managerContainerName, - WorkloadGKV: controllerDeployment.GroupVersionKind(), - WorkloadNamespace: controllerDeployment.Namespace, - WorkloadName: controllerDeployment.Name, +func parseHeadersFromEnvVar(container *corev1.Container) []dash0v1alpha1.Header { + otelExporterOtlpHeadersEnvVarValue := "" + var headers []dash0v1alpha1.Header + if otelExporterOtlpHeadersEnvVarIdx := + slices.IndexFunc(container.Env, matchOtelExporterOtlpHeadersEnvVar); otelExporterOtlpHeadersEnvVarIdx >= 0 { + otelExporterOtlpHeadersEnvVarValue = container.Env[otelExporterOtlpHeadersEnvVarIdx].Value + keyValuePairs := strings.Split(otelExporterOtlpHeadersEnvVarValue, ",") + for _, keyValuePair := range keyValuePairs { + parts := strings.Split(keyValuePair, "=") + if len(parts) == 2 { + headers = append(headers, dash0v1alpha1.Header{ + Name: parts[0], + Value: parts[1], + }) } + } } - return 
ParseSelfMonitoringConfigurationFromContainer(&controllerDeployment.Spec.Template.Spec.Containers[managerContainerIdx]) + return headers +} + +func parseDash0AuthorizationFromEnvVars(container *corev1.Container) *dash0v1alpha1.Authorization { + if idx := slices.IndexFunc(container.Env, matchSelfMonitoringAndApiAccessAuthTokenEnvVar); idx >= 0 { + authTokenEnvVar := container.Env[idx] + if authTokenEnvVar.Value != "" { + return &dash0v1alpha1.Authorization{ + Token: &authTokenEnvVar.Value, + } + } else if authTokenEnvVar.ValueFrom != nil && + authTokenEnvVar.ValueFrom.SecretKeyRef != nil && + authTokenEnvVar.ValueFrom.SecretKeyRef.LocalObjectReference.Name != "" && + authTokenEnvVar.ValueFrom.SecretKeyRef.Key != "" { + return &dash0v1alpha1.Authorization{ + SecretRef: &dash0v1alpha1.SecretRef{ + Name: authTokenEnvVar.ValueFrom.SecretKeyRef.LocalObjectReference.Name, + Key: authTokenEnvVar.ValueFrom.SecretKeyRef.Key, + }, + } + } + } + return nil } func DisableSelfMonitoringInControllerDeployment( controllerDeployment *appsv1.Deployment, - managerContainerName string, + controllerContainerName string, + removeAuthToken bool, ) error { - managerContainerIdx := slices.IndexFunc(controllerDeployment.Spec.Template.Spec.Containers, func(c corev1.Container) bool { - return c.Name == managerContainerName - }) - - if managerContainerIdx < 0 { - return &cannotFindContainerByNameError{ - ContainerName: managerContainerName, - WorkloadGKV: controllerDeployment.GroupVersionKind(), - WorkloadNamespace: controllerDeployment.Namespace, - WorkloadName: controllerDeployment.Name, - } + controllerContainerIdx, err := findControllerContainer(controllerDeployment, controllerContainerName) + if err != nil { + return err } - managerContainer := controllerDeployment.Spec.Template.Spec.Containers[managerContainerIdx] - disableSelfMonitoringInContainer(&managerContainer) - controllerDeployment.Spec.Template.Spec.Containers[managerContainerIdx] = managerContainer + controllerContainer := 
controllerDeployment.Spec.Template.Spec.Containers[controllerContainerIdx] + disableSelfMonitoringInContainer(&controllerContainer, removeAuthToken) + controllerDeployment.Spec.Template.Spec.Containers[controllerContainerIdx] = controllerContainer return nil } func EnableSelfMonitoringInControllerDeployment( controllerDeployment *appsv1.Deployment, - managerContainerName string, - selfMonitoringConfiguration SelfMonitoringConfiguration, + controllerContainerName string, + selfMonitoringConfiguration SelfMonitoringAndApiAccessConfiguration, operatorVersion string, developmentMode bool, ) error { - managerContainerIdx := slices.IndexFunc( - controllerDeployment.Spec.Template.Spec.Containers, - func(c corev1.Container) bool { - return c.Name == managerContainerName - }) - - if managerContainerIdx < 0 { - return &cannotFindContainerByNameError{ - ContainerName: managerContainerName, - WorkloadGKV: controllerDeployment.GroupVersionKind(), - WorkloadNamespace: controllerDeployment.Namespace, - WorkloadName: controllerDeployment.Name, - } + controllerContainerIdx, err := findControllerContainer(controllerDeployment, controllerContainerName) + if err != nil { + return err } selfMonitoringExport := selfMonitoringConfiguration.Export var authTokenEnvVar *corev1.EnvVar if selfMonitoringExport.Dash0 != nil { envVar, err := util.CreateEnvVarForAuthorization( - *selfMonitoringExport.Dash0, - selfMonitoringauthTokenEnvVarName, + (*(selfMonitoringExport.Dash0)).Authorization, + util.SelfMonitoringAndApiAuthTokenEnvVarName, ) if err != nil { return err } authTokenEnvVar = &envVar } - managerContainer := controllerDeployment.Spec.Template.Spec.Containers[managerContainerIdx] + controllerContainer := controllerDeployment.Spec.Template.Spec.Containers[controllerContainerIdx] enableSelfMonitoringInContainer( - &managerContainer, + &controllerContainer, selfMonitoringExport, authTokenEnvVar, operatorVersion, developmentMode, ) - 
controllerDeployment.Spec.Template.Spec.Containers[managerContainerIdx] = managerContainer + controllerDeployment.Spec.Template.Spec.Containers[controllerContainerIdx] = controllerContainer return nil } -func ParseSelfMonitoringConfigurationFromContainer(container *corev1.Container) (SelfMonitoringConfiguration, error) { - endpoint, err := parseEndpoint(container) +func UpdateApiTokenWithoutAddingSelfMonitoringToControllerDeployment( + controllerDeployment *appsv1.Deployment, + controllerContainerName string, + authorization dash0v1alpha1.Authorization, +) error { + controllerContainerIdx, err := findControllerContainer(controllerDeployment, controllerContainerName) if err != nil { - return SelfMonitoringConfiguration{}, err - } else if endpoint == "" { - return SelfMonitoringConfiguration{ - Enabled: false, - }, nil + return err } - protocolFromEnvVar := "grpc" - otelExporterOtlpProtocolEnvVarIdx := slices.IndexFunc(container.Env, matchOtelExporterOtlpProtocolEnvVar) - if otelExporterOtlpProtocolEnvVarIdx >= 0 { - protocolFromEnvVar = container.Env[otelExporterOtlpProtocolEnvVarIdx].Value + var authTokenEnvVar *corev1.EnvVar + envVar, err := util.CreateEnvVarForAuthorization( + authorization, + util.SelfMonitoringAndApiAuthTokenEnvVarName, + ) + if err != nil { + return err } + authTokenEnvVar = &envVar - headers := parseHeadersFromEnvVar(container) + controllerContainer := controllerDeployment.Spec.Template.Spec.Containers[controllerContainerIdx] + addAuthTokenToContainer( + &controllerContainer, + authTokenEnvVar, + ) + controllerDeployment.Spec.Template.Spec.Containers[controllerContainerIdx] = controllerContainer - switch protocolFromEnvVar { - case "grpc": - return createDash0OrGrpcConfigurationFromContainer(container, endpoint, headers), nil - case "http/json": - return createHttpJsonConfigurationFromContainer(endpoint, headers), nil - case "http/protobuf": - return createHttpProtobufConfigurationFromContainer(endpoint, headers), nil + return nil +} - 
default: - return SelfMonitoringConfiguration{}, fmt.Errorf("unsupported protocol %v", protocolFromEnvVar) +func findControllerContainer(controllerDeployment *appsv1.Deployment, controllerContainerName string) (int, error) { + controllerContainerIdx := slices.IndexFunc(controllerDeployment.Spec.Template.Spec.Containers, func(c corev1.Container) bool { + return c.Name == controllerContainerName + }) + if controllerContainerIdx >= 0 { + return controllerContainerIdx, nil + } + + return 0, &cannotFindContainerByNameError{ + ContainerName: controllerContainerName, + WorkloadGKV: controllerDeployment.GroupVersionKind(), + WorkloadNamespace: controllerDeployment.Namespace, + WorkloadName: controllerDeployment.Name, } } @@ -390,7 +485,7 @@ func isDash0Export(endpoint string, headers []dash0v1alpha1.Header) bool { }) } -func createDash0OrGrpcConfigurationFromContainer(container *corev1.Container, endpoint string, headers []dash0v1alpha1.Header) SelfMonitoringConfiguration { +func createDash0OrGrpcConfigurationFromContainer(container *corev1.Container, endpoint string, headers []dash0v1alpha1.Header) SelfMonitoringAndApiAccessConfiguration { if isDash0Export(endpoint, headers) { return createDash0ConfigurationFromContainer(container, endpoint, headers) } else { @@ -398,7 +493,7 @@ func createDash0OrGrpcConfigurationFromContainer(container *corev1.Container, en } } -func createDash0ConfigurationFromContainer(container *corev1.Container, endpoint string, headers []dash0v1alpha1.Header) SelfMonitoringConfiguration { +func createDash0ConfigurationFromContainer(container *corev1.Container, endpoint string, headers []dash0v1alpha1.Header) SelfMonitoringAndApiAccessConfiguration { referencesTokenEnvVar := false dataset := "" for _, header := range headers { @@ -419,17 +514,17 @@ func createDash0ConfigurationFromContainer(container *corev1.Container, endpoint dash0Configuration.Authorization = *authorization } } - return SelfMonitoringConfiguration{ - Enabled: true, + return 
SelfMonitoringAndApiAccessConfiguration{ + SelfMonitoringEnabled: true, Export: dash0v1alpha1.Export{ Dash0: dash0Configuration, }, } } -func createGrpcConfigurationFromContainer(endpoint string, headers []dash0v1alpha1.Header) SelfMonitoringConfiguration { - return SelfMonitoringConfiguration{ - Enabled: true, +func createGrpcConfigurationFromContainer(endpoint string, headers []dash0v1alpha1.Header) SelfMonitoringAndApiAccessConfiguration { + return SelfMonitoringAndApiAccessConfiguration{ + SelfMonitoringEnabled: true, Export: dash0v1alpha1.Export{ Grpc: &dash0v1alpha1.GrpcConfiguration{ Endpoint: endpoint, @@ -439,9 +534,9 @@ func createGrpcConfigurationFromContainer(endpoint string, headers []dash0v1alph } } -func createHttpProtobufConfigurationFromContainer(endpoint string, headers []dash0v1alpha1.Header) SelfMonitoringConfiguration { - return SelfMonitoringConfiguration{ - Enabled: true, +func createHttpProtobufConfigurationFromContainer(endpoint string, headers []dash0v1alpha1.Header) SelfMonitoringAndApiAccessConfiguration { + return SelfMonitoringAndApiAccessConfiguration{ + SelfMonitoringEnabled: true, Export: dash0v1alpha1.Export{ Http: &dash0v1alpha1.HttpConfiguration{ Endpoint: endpoint, @@ -452,9 +547,9 @@ func createHttpProtobufConfigurationFromContainer(endpoint string, headers []das } } -func createHttpJsonConfigurationFromContainer(endpoint string, headers []dash0v1alpha1.Header) SelfMonitoringConfiguration { - return SelfMonitoringConfiguration{ - Enabled: true, +func createHttpJsonConfigurationFromContainer(endpoint string, headers []dash0v1alpha1.Header) SelfMonitoringAndApiAccessConfiguration { + return SelfMonitoringAndApiAccessConfiguration{ + SelfMonitoringEnabled: true, Export: dash0v1alpha1.Export{ Http: &dash0v1alpha1.HttpConfiguration{ Endpoint: endpoint, @@ -465,63 +560,6 @@ func createHttpJsonConfigurationFromContainer(endpoint string, headers []dash0v1 } } -func parseEndpoint(container *corev1.Container) (string, error) { - 
otelExporterOtlpEndpointEnvVarIdx := slices.IndexFunc(container.Env, matchOtelExporterOtlpEndpointEnvVar) - if otelExporterOtlpEndpointEnvVarIdx < 0 { - return "", nil - } - otelExporterOtlpEndpointEnvVar := container.Env[otelExporterOtlpEndpointEnvVarIdx] - if otelExporterOtlpEndpointEnvVar.Value == "" && otelExporterOtlpEndpointEnvVar.ValueFrom != nil { - return "", fmt.Errorf("retrieving the endpoint from OTEL_EXPORTER_OTLP_ENDPOINT with a ValueFrom source is not supported") - } else if otelExporterOtlpEndpointEnvVar.Value == "" { - return "", fmt.Errorf("no OTEL_EXPORTER_OTLP_ENDPOINT is set") - } - return otelExporterOtlpEndpointEnvVar.Value, nil -} - -func parseHeadersFromEnvVar(container *corev1.Container) []dash0v1alpha1.Header { - otelExporterOtlpHeadersEnvVarValue := "" - var headers []dash0v1alpha1.Header - if otelExporterOtlpHeadersEnvVarIdx := - slices.IndexFunc(container.Env, matchOtelExporterOtlpHeadersEnvVar); otelExporterOtlpHeadersEnvVarIdx >= 0 { - otelExporterOtlpHeadersEnvVarValue = container.Env[otelExporterOtlpHeadersEnvVarIdx].Value - keyValuePairs := strings.Split(otelExporterOtlpHeadersEnvVarValue, ",") - for _, keyValuePair := range keyValuePairs { - parts := strings.Split(keyValuePair, "=") - if len(parts) == 2 { - headers = append(headers, dash0v1alpha1.Header{ - Name: parts[0], - Value: parts[1], - }) - } - } - } - - return headers -} - -func parseDash0AuthorizationFromEnvVars(container *corev1.Container) *dash0v1alpha1.Authorization { - if idx := slices.IndexFunc(container.Env, matchSelfMonitoringAuthTokenEnvVar); idx >= 0 { - authTokenEnvVar := container.Env[idx] - if authTokenEnvVar.Value != "" { - return &dash0v1alpha1.Authorization{ - Token: &authTokenEnvVar.Value, - } - } else if authTokenEnvVar.ValueFrom != nil && - authTokenEnvVar.ValueFrom.SecretKeyRef != nil && - authTokenEnvVar.ValueFrom.SecretKeyRef.LocalObjectReference.Name != "" && - authTokenEnvVar.ValueFrom.SecretKeyRef.Key != "" { - return &dash0v1alpha1.Authorization{ 
- SecretRef: &dash0v1alpha1.SecretRef{ - Name: authTokenEnvVar.ValueFrom.SecretKeyRef.LocalObjectReference.Name, - Key: authTokenEnvVar.ValueFrom.SecretKeyRef.Key, - }, - } - } - } - return nil -} - func enableSelfMonitoringInContainer( container *corev1.Container, selfMonitoringExport dash0v1alpha1.Export, @@ -530,21 +568,7 @@ func enableSelfMonitoringInContainer( developmentMode bool, ) { if authTokenEnvVar != nil { - authTokenEnvVarIdx := slices.IndexFunc(container.Env, matchSelfMonitoringAuthTokenEnvVar) - if authTokenEnvVarIdx == 0 { - // update the existing value - container.Env[authTokenEnvVarIdx] = *authTokenEnvVar - } else if authTokenEnvVarIdx > 0 { - // Since we reference this env var in the OTEL_EXPORTER_OTLP_HEADERS env var, we want to have this as the - // very first env var, to make sure it is defined before OTEL_EXPORTER_OTLP_HEADERS. (This is a requirement - // for using - // https://kubernetes.io/docs/tasks/inject-data-application/define-interdependent-environment-variables/.) 
- container.Env = slices.Delete(container.Env, authTokenEnvVarIdx, authTokenEnvVarIdx+1) - container.Env = slices.Insert(container.Env, 0, *authTokenEnvVar) - } else { - // the env var is not present yet, add it to the start of the list - container.Env = slices.Insert(container.Env, 0, *authTokenEnvVar) - } + addAuthTokenToContainer(container, authTokenEnvVar) } exportSettings := ConvertExportConfigurationToEnvVarSettings(selfMonitoringExport) @@ -584,6 +608,24 @@ func enableSelfMonitoringInContainer( } } +func addAuthTokenToContainer(container *corev1.Container, authTokenEnvVar *corev1.EnvVar) { + authTokenEnvVarIdx := slices.IndexFunc(container.Env, matchSelfMonitoringAndApiAccessAuthTokenEnvVar) + if authTokenEnvVarIdx == 0 { + // update the existing value + container.Env[authTokenEnvVarIdx] = *authTokenEnvVar + } else if authTokenEnvVarIdx > 0 { + // Since we reference this env var in the OTEL_EXPORTER_OTLP_HEADERS env var, we want to have this as the + // very first env var, to make sure it is defined before OTEL_EXPORTER_OTLP_HEADERS. (This is a requirement + // for using + // https://kubernetes.io/docs/tasks/inject-data-application/define-interdependent-environment-variables/.) 
+ container.Env = slices.Delete(container.Env, authTokenEnvVarIdx, authTokenEnvVarIdx+1) + container.Env = slices.Insert(container.Env, 0, *authTokenEnvVar) + } else { + // the env var is not present yet, add it to the start of the list + container.Env = slices.Insert(container.Env, 0, *authTokenEnvVar) + } +} + func ConvertExportConfigurationToEnvVarSettings(selfMonitoringExport dash0v1alpha1.Export) EndpointAndHeaders { if selfMonitoringExport.Dash0 != nil { dash0Export := selfMonitoringExport.Dash0 @@ -645,11 +687,14 @@ func convertHeadersToEnvVarValue(headers []dash0v1alpha1.Header) string { return strings.Join(keyValuePairs, ",") } -func disableSelfMonitoringInContainer(container *corev1.Container) { +func disableSelfMonitoringInContainer(container *corev1.Container, removeAuthToken bool) { + if removeAuthToken { + removeEnvVar(container, util.SelfMonitoringAndApiAuthTokenEnvVarName) + } removeEnvVar(container, otelExporterOtlpEndpointEnvVarName) removeEnvVar(container, otelExporterOtlpProtocolEnvVarName) removeEnvVar(container, otelExporterOtlpHeadersEnvVarName) - removeEnvVar(container, selfMonitoringauthTokenEnvVarName) + removeEnvVar(container, otelResourceAttribtuesEnvVarName) } func updateOrAppendEnvVar(container *corev1.Container, name string, value string) { @@ -689,6 +734,6 @@ func matchOtelExporterOtlpProtocolEnvVar(e corev1.EnvVar) bool { return e.Name == otelExporterOtlpProtocolEnvVarName } -func matchSelfMonitoringAuthTokenEnvVar(e corev1.EnvVar) bool { - return e.Name == selfMonitoringauthTokenEnvVarName +func matchSelfMonitoringAndApiAccessAuthTokenEnvVar(e corev1.EnvVar) bool { + return e.Name == util.SelfMonitoringAndApiAuthTokenEnvVarName } diff --git a/internal/dash0/startup/auto_operator_configuration_handler.go b/internal/dash0/startup/auto_operator_configuration_handler.go index 48f26c86..cc929c08 100644 --- a/internal/dash0/startup/auto_operator_configuration_handler.go +++ b/internal/dash0/startup/auto_operator_configuration_handler.go 
@@ -29,6 +29,7 @@ type OperatorConfigurationValues struct { Endpoint string Token string SecretRef + ApiEndpoint string } type AutoOperatorConfigurationResourceHandler struct { @@ -198,7 +199,16 @@ func (r *AutoOperatorConfigurationResourceHandler) createOperatorConfigurationRe } } - if err := r.Create(ctx, &dash0v1alpha1.Dash0OperatorConfiguration{ + dash0Export := dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: operatorConfiguration.Endpoint, + Authorization: authorization, + }, + } + if operatorConfiguration.ApiEndpoint != "" { + dash0Export.Dash0.ApiEndpoint = operatorConfiguration.ApiEndpoint + } + operatorConfigurationResource := dash0v1alpha1.Dash0OperatorConfiguration{ ObjectMeta: metav1.ObjectMeta{ Name: operatorConfigurationAutoResourceName, }, @@ -206,14 +216,10 @@ func (r *AutoOperatorConfigurationResourceHandler) createOperatorConfigurationRe SelfMonitoring: dash0v1alpha1.SelfMonitoring{ Enabled: true, }, - Export: &dash0v1alpha1.Export{ - Dash0: &dash0v1alpha1.Dash0Configuration{ - Endpoint: operatorConfiguration.Endpoint, - Authorization: authorization, - }, - }, + Export: &dash0Export, }, - }); err != nil { + } + if err := r.Create(ctx, &operatorConfigurationResource); err != nil { return fmt.Errorf("failed to create the Dash0 operator configuration resource: %w", err) } diff --git a/internal/dash0/startup/auto_operator_configuration_handler_test.go b/internal/dash0/startup/auto_operator_configuration_handler_test.go index 9150280c..e85d1b50 100644 --- a/internal/dash0/startup/auto_operator_configuration_handler_test.go +++ b/internal/dash0/startup/auto_operator_configuration_handler_test.go @@ -39,7 +39,7 @@ var _ = Describe("Create an operator configuration resource at startup", Ordered logger := log.FromContext(ctx) BeforeAll(func() { - EnsureDash0OperatorNamespaceExists(ctx, k8sClient) + EnsureOperatorNamespaceExists(ctx, k8sClient) }) AfterEach(func() { diff --git a/internal/dash0/startup/startup_suite_test.go 
b/internal/dash0/startup/startup_suite_test.go index 57c24b9d..05487620 100644 --- a/internal/dash0/startup/startup_suite_test.go +++ b/internal/dash0/startup/startup_suite_test.go @@ -63,7 +63,7 @@ var _ = BeforeSuite(func() { handler = &AutoOperatorConfigurationResourceHandler{ Client: k8sClient, - OperatorNamespace: Dash0OperatorNamespace, + OperatorNamespace: OperatorNamespace, NamePrefix: OTelCollectorNamePrefixTest, bypassWebhookCheck: true, } diff --git a/internal/dash0/util/constants.go b/internal/dash0/util/constants.go index a7105d2b..d6ecbf8e 100644 --- a/internal/dash0/util/constants.go +++ b/internal/dash0/util/constants.go @@ -7,4 +7,6 @@ const ( AuthorizationHeaderName = "Authorization" Dash0DatasetHeaderName = "Dash0-Dataset" DatasetInsights = "dash0-internal" + + SelfMonitoringAndApiAuthTokenEnvVarName = "SELF_MONITORING_AND_API_AUTH_TOKEN" ) diff --git a/internal/dash0/util/controller.go b/internal/dash0/util/controller.go index 174e0e07..8b11b885 100644 --- a/internal/dash0/util/controller.go +++ b/internal/dash0/util/controller.go @@ -404,11 +404,11 @@ func addFinalizerIfNecessary( } func CreateEnvVarForAuthorization( - dash0ExportConfiguration dash0v1alpha1.Dash0Configuration, + dash0Authorization dash0v1alpha1.Authorization, envVarName string, ) (corev1.EnvVar, error) { - token := dash0ExportConfiguration.Authorization.Token - secretRef := dash0ExportConfiguration.Authorization.SecretRef + token := dash0Authorization.Token + secretRef := dash0Authorization.SecretRef if token != nil && *token != "" { return corev1.EnvVar{ Name: envVarName, diff --git a/internal/dash0/util/values.go b/internal/dash0/util/values.go deleted file mode 100644 index 245cdf3a..00000000 --- a/internal/dash0/util/values.go +++ /dev/null @@ -1,9 +0,0 @@ -// SPDX-FileCopyrightText: Copyright 2024 Dash0 Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -package util - -var ( - True = true - False = false -) diff --git a/internal/dash0/webhooks/attach_dangling_events_test.go b/internal/dash0/webhooks/attach_dangling_events_test.go index 0418c903..3bf58813 100644 --- a/internal/dash0/webhooks/attach_dangling_events_test.go +++ b/internal/dash0/webhooks/attach_dangling_events_test.go @@ -33,7 +33,7 @@ var _ = Describe("The Dash0 webhook and the Dash0 controller", Ordered, func() { var createdObjects []client.Object BeforeAll(func() { - EnsureDash0OperatorNamespaceExists(ctx, k8sClient) + EnsureOperatorNamespaceExists(ctx, k8sClient) recorder := manager.GetEventRecorderFor("dash0-monitoring-controller") instrumenter := &instrumentation.Instrumenter{ @@ -61,7 +61,7 @@ var _ = Describe("The Dash0 webhook and the Dash0 controller", Ordered, func() { Clientset: clientset, Instrumenter: instrumenter, Images: TestImages, - OperatorNamespace: Dash0OperatorNamespace, + OperatorNamespace: OperatorNamespace, BackendConnectionManager: backendConnectionManager, DanglingEventsTimeouts: &DanglingEventsTimeoutsTest, } diff --git a/test-resources/bin/test-cleanup.sh b/test-resources/bin/test-cleanup.sh index aec4ceca..c0036e2f 100755 --- a/test-resources/bin/test-cleanup.sh +++ b/test-resources/bin/test-cleanup.sh @@ -38,8 +38,15 @@ kubectl delete secret \ kubectl delete ns dash0-system --ignore-not-found +# deliberately deleting the dashboard after undeploying the operator to avoid deleting the dashboard in Dash0 every time. 
+kubectl delete -n ${target_namespace} -f test-resources/customresources/persesdashboard/persesdashboard.yaml || true
+
 kubectl delete --ignore-not-found=true customresourcedefinition dash0monitorings.operator.dash0.com
 kubectl delete --ignore-not-found=true customresourcedefinition dash0operatorconfigurations.operator.dash0.com
+# also remove the Perses custom resource definitions that install_foreign_crds applies:
+kubectl delete --ignore-not-found=true customresourcedefinition perses.perses.dev
+kubectl delete --ignore-not-found=true customresourcedefinition persesdashboards.perses.dev
+kubectl delete --ignore-not-found=true customresourcedefinition persesdatasources.perses.dev
 
 # The following resources are deleted automatically with helm uninstall, unless for example when the operator manager
 # crashes and the helm pre-delete helm hook cannot run, then they might be left behind.
diff --git a/test-resources/bin/test-scenario-01-aum-operator-cr.sh b/test-resources/bin/test-scenario-01-aum-operator-cr.sh
index 65e26bd2..454a709c 100755
--- a/test-resources/bin/test-scenario-01-aum-operator-cr.sh
+++ b/test-resources/bin/test-scenario-01-aum-operator-cr.sh
@@ -15,42 +15,66 @@ load_env_file
 verify_kubectx
 setup_test_environment
 
-echo "STEP 1: remove old test resources"
+step_counter=1
+
+echo "STEP $step_counter: remove old test resources"
 test-resources/bin/test-cleanup.sh ${target_namespace} false
-test-resources/bin/ensure-namespace-exists.sh ${target_namespace}
-echo
-echo
+finish_step
 
-echo "STEP 2: creating target namespace (if necessary)"
+echo "STEP $step_counter: creating target namespace (if necessary)"
 test-resources/bin/ensure-namespace-exists.sh ${target_namespace}
-echo
-echo
+finish_step
 
-echo "STEP 3: creating operator namespace and authorization token secret"
+echo "STEP $step_counter: creating operator namespace and authorization token secret"
 test-resources/bin/ensure-namespace-exists.sh dash0-system
 kubectl create secret \
generic \ dash0-authorization-secret \ --namespace dash0-system \ --from-literal=token="${DASH0_AUTHORIZATION_TOKEN}" -echo -echo +finish_step -echo "STEP 4: rebuild images" +echo "STEP $step_counter: install foreign custom resource definitions" +install_foreign_crds +finish_step + +echo "STEP $step_counter: rebuild images" build_all_images -echo -echo +finish_step -echo "STEP 5: deploy application under monitoring" -test-resources/node.js/express/deploy.sh ${target_namespace} ${kind} -echo -echo +if [[ "${DEPLOY_APPLICATION_UNDER_MONITORING:-}" != false ]]; then + echo "STEP $step_counter: deploy application under monitoring" + test-resources/node.js/express/deploy.sh ${target_namespace} ${kind} + finish_step +fi -echo "STEP 6: deploy the Dash0 operator using helm" +echo "STEP $step_counter: deploy the Dash0 operator using helm" deploy_via_helm -echo -echo +finish_step + +if [[ "${DEPLOY_OPERATOR_CONFIGURATION_VIA_HELM:-}" == false ]]; then + # if no operator configuration resource has been deployed via the helm chart, deploy one now + echo "STEP $step_counter: deploy the Dash0 operator configuration resource" + install_operator_configuration_resource + finish_step +else + echo "not deploying a Dash0 operator configuration resource (has been deployed with the helm chart already)" + echo +fi -echo "STEP 7: deploy the Dash0 monitoring resource to namespace ${target_namespace}" -install_monitoring_resource +if [[ "${DEPLOY_MONITORING_RESOURCE:-}" != false ]]; then + echo "STEP $step_counter: deploy the Dash0 monitoring resource to namespace ${target_namespace}" + install_monitoring_resource + finish_step +else + echo "not deploying a Dash0 monitoring resource" + echo +fi +if [[ "${DEPLOY_PERSES_DASHBOARD:-}" == true ]]; then + echo "Waiting 30 seconds before deploying a Perses dashboard resource." 
+ sleep 30 + echo "STEP $step_counter: deploy a Perses dashboard resource to namespace ${target_namespace}" + kubectl apply -n ${target_namespace} -f test-resources/customresources/persesdashboard/persesdashboard.yaml + finish_step +fi diff --git a/test-resources/bin/test-scenario-02-operator-cr-aum.sh b/test-resources/bin/test-scenario-02-operator-cr-aum.sh index 92237e0b..71f6f8cb 100755 --- a/test-resources/bin/test-scenario-02-operator-cr-aum.sh +++ b/test-resources/bin/test-scenario-02-operator-cr-aum.sh @@ -15,41 +15,66 @@ load_env_file verify_kubectx setup_test_environment -echo "STEP 1: remove old test resources" +step_counter=1 + +echo "STEP $step_counter: remove old test resources" test-resources/bin/test-cleanup.sh ${target_namespace} false -echo -echo +finish_step -echo "STEP 2: creating target namespace (if necessary)" +echo "STEP $step_counter: creating target namespace (if necessary)" test-resources/bin/ensure-namespace-exists.sh ${target_namespace} -echo -echo +finish_step -echo "STEP 3: creating operator namespace and authorization token secret" +echo "STEP $step_counter: creating operator namespace and authorization token secret" test-resources/bin/ensure-namespace-exists.sh dash0-system kubectl create secret \ generic \ dash0-authorization-secret \ --namespace dash0-system \ --from-literal=token="${DASH0_AUTHORIZATION_TOKEN}" -echo -echo +finish_step + +echo "STEP $step_counter: install foreign custom resource definitions" +install_foreign_crds +finish_step -echo "STEP 4: rebuild images" +echo "STEP $step_counter: rebuild images" build_all_images -echo -echo +finish_step -echo "STEP 5: deploy the Dash0 operator using helm" +echo "STEP $step_counter: deploy the Dash0 operator using helm" deploy_via_helm -echo -echo +finish_step + +if [[ "${DEPLOY_OPERATOR_CONFIGURATION_VIA_HELM:-}" == false ]]; then + # if no operator configuration resource has been deployed via the helm chart, deploy one now + echo "STEP $step_counter: deploy the Dash0 operator 
configuration resource" + install_operator_configuration_resource + finish_step +else + echo "not deploying a Dash0 operator configuration resource (has been deployed with the helm chart already)" + echo +fi -echo "STEP 6: deploy the Dash0 monitoring resource to namespace ${target_namespace}" -install_monitoring_resource -echo -echo +if [[ "${DEPLOY_MONITORING_RESOURCE:-}" != false ]]; then + echo "STEP $step_counter: deploy the Dash0 monitoring resource to namespace ${target_namespace}" + install_monitoring_resource + finish_step +else + echo "not deploying a Dash0 monitoring resource" + echo +fi -echo "STEP 7: deploy application under monitoring" -test-resources/node.js/express/deploy.sh ${target_namespace} ${kind} +if [[ "${DEPLOY_APPLICATION_UNDER_MONITORING:-}" != false ]]; then + echo "STEP $step_counter: deploy application under monitoring" + test-resources/node.js/express/deploy.sh ${target_namespace} ${kind} + finish_step +fi +if [[ "${DEPLOY_PERSES_DASHBOARD:-}" == true ]]; then + echo "Waiting 30 seconds before deploying a Perses dashboard resource." + sleep 30 + echo "STEP $step_counter: deploy a Perses dashboard resource to namespace ${target_namespace}" + kubectl apply -n ${target_namespace} -f test-resources/customresources/persesdashboard/persesdashboard.yaml + finish_step +fi diff --git a/test-resources/bin/util b/test-resources/bin/util index 04a249b3..18afd709 100644 --- a/test-resources/bin/util +++ b/test-resources/bin/util @@ -1,3 +1,5 @@ +#!/usr/bin/env bash + # SPDX-FileCopyrightText: Copyright 2024 Dash0 Inc. # SPDX-License-Identifier: Apache-2.0 @@ -25,6 +27,12 @@ setup_test_environment () { test-resources/bin/render-templates.sh } +finish_step() { + ((step_counter++)) + echo + echo +} + build_all_images() { make docker-build } @@ -101,10 +109,17 @@ deploy_via_helm() { fi # Deploy an operator configuration right away. 
- helm_install_command+=" --set operator.dash0Export.enabled=true" - helm_install_command+=" --set operator.dash0Export.endpoint=ingress.eu-west-1.aws.dash0-dev.com:4317" - helm_install_command+=" --set operator.dash0Export.secretRef.name=dash0-authorization-secret" - helm_install_command+=" --set operator.dash0Export.secretRef.key=token" + if [[ "${DEPLOY_OPERATOR_CONFIGURATION_VIA_HELM:-}" != false ]]; then + helm_install_command+=" --set operator.dash0Export.enabled=true" + helm_install_command+=" --set operator.dash0Export.endpoint=ingress.eu-west-1.aws.dash0-dev.com:4317" + if [[ "${OPERATOR_CONFIGURATION_VIA_HELM_USE_TOKEN:-}" == true ]]; then + helm_install_command+=" --set operator.dash0Export.token=${DASH0_AUTHORIZATION_TOKEN}" + else + helm_install_command+=" --set operator.dash0Export.secretRef.name=dash0-authorization-secret" + helm_install_command+=" --set operator.dash0Export.secretRef.key=token" + fi + helm_install_command+=" --set operator.dash0Export.apiEndpoint=https://api.eu-west-1.aws.dash0-dev.com" + fi helm_install_command+=" dash0-operator" helm_install_command+=" ${OPERATOR_HELM_CHART:-helm-chart/dash0-operator}" @@ -132,16 +147,18 @@ wait_for_operator_manager_and_webhook() { # We deploy an operator configuration at startup via operator.dash0Export.enabled=true, wait for that resource to # become available as well. 
- echo "waiting for the automatically created operator configuration resource to become available" - for ((i=0; i<=20; i++)); do - # wait until the resource has been created - if kubectl get dash0operatorconfigurations.operator.dash0.com/dash0-operator-configuration-auto-resource; then - break; - fi - sleep 1 - done - # wait until the resource has been reconciled and is marked as available - kubectl wait dash0operatorconfigurations.operator.dash0.com/dash0-operator-configuration-auto-resource --for condition=Available --timeout 30s + if [[ "${DEPLOY_OPERATOR_CONFIGURATION_VIA_HELM:-}" != false ]]; then + echo "waiting for the automatically created operator configuration resource to become available" + for ((i=0; i<=20; i++)); do + # wait until the resource has been created + if kubectl get dash0operatorconfigurations.operator.dash0.com/dash0-operator-configuration-auto-resource; then + break; + fi + sleep 1 + done + # wait until the resource has been reconciled and is marked as available + kubectl wait dash0operatorconfigurations.operator.dash0.com/dash0-operator-configuration-auto-resource --for condition=Available --timeout 30s + fi } has_been_set_to_empty_string() { @@ -175,3 +192,8 @@ install_monitoring_resource() { echo "waiting for the monitoring resource to become available" kubectl wait --namespace ${target_namespace} dash0monitorings.operator.dash0.com/dash0-monitoring-resource --for condition=Available } + +install_foreign_crds() { + kubectl apply --server-side -f https://raw.githubusercontent.com/perses/perses-operator/main/config/crd/bases/perses.dev_persesdashboards.yaml +} + diff --git a/test-resources/customresources/dash0operatorconfiguration/dash0operatorconfiguration.secret.yaml b/test-resources/customresources/dash0operatorconfiguration/dash0operatorconfiguration.secret.yaml index be2eea58..a94c8c64 100644 --- a/test-resources/customresources/dash0operatorconfiguration/dash0operatorconfiguration.secret.yaml +++ 
b/test-resources/customresources/dash0operatorconfiguration/dash0operatorconfiguration.secret.yaml @@ -8,3 +8,4 @@ spec: endpoint: ingress.eu-west-1.aws.dash0-dev.com:4317 authorization: secretRef: {} + apiEndpoint: https://api.eu-west-1.aws.dash0-dev.com diff --git a/test-resources/customresources/dash0operatorconfiguration/dash0operatorconfiguration.token.yaml.template b/test-resources/customresources/dash0operatorconfiguration/dash0operatorconfiguration.token.yaml.template index 7d24037f..7c2fd09d 100644 --- a/test-resources/customresources/dash0operatorconfiguration/dash0operatorconfiguration.token.yaml.template +++ b/test-resources/customresources/dash0operatorconfiguration/dash0operatorconfiguration.token.yaml.template @@ -8,3 +8,4 @@ spec: endpoint: ingress.eu-west-1.aws.dash0-dev.com:4317 authorization: token: "$DASH0_AUTHORIZATION_TOKEN" + apiEndpoint: https://api.eu-west-1.aws.dash0-dev.com diff --git a/test-resources/customresources/persesdashboard/persesdashboard.yaml b/test-resources/customresources/persesdashboard/persesdashboard.yaml new file mode 100644 index 00000000..bbeb3fae --- /dev/null +++ b/test-resources/customresources/persesdashboard/persesdashboard.yaml @@ -0,0 +1,565 @@ +apiVersion: perses.dev/v1alpha1 +kind: PersesDashboard +metadata: + name: perses-dashboard-test + labels: + app.kubernetes.io/name: perses-dashboard + app.kubernetes.io/instance: perses-dashboard-sample + app.kubernetes.io/part-of: perses-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: perses-operator +spec: + duration: 5m + datasources: + PrometheusLocal: + default: false + plugin: + kind: PrometheusDatasource + spec: + proxy: + kind: HTTPProxy + spec: + url: http://localhost:9090 + variables: + - kind: ListVariable + spec: + name: job + allowMultiple: false + allowAllValue: false + plugin: + kind: PrometheusLabelValuesVariable + spec: + labelName: job + - kind: ListVariable + spec: + name: instance + allowMultiple: false + 
allowAllValue: false + plugin: + kind: PrometheusLabelValuesVariable + spec: + labelName: instance + matchers: + - up{job=~"$job"} + - kind: ListVariable + spec: + name: interval + plugin: + kind: StaticListVariable + spec: + values: + - 1m + - 5m + - kind: TextVariable + spec: + name: text + value: test + constant: true + panels: + defaultTimeSeriesChart: + kind: Panel + spec: + display: + name: Default Time Series Panel + plugin: + kind: TimeSeriesChart + spec: {} + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + query: up + seriesTest: + kind: Panel + spec: + display: + name: "~130 Series" + description: This is a line chart + plugin: + kind: TimeSeriesChart + spec: + yAxis: + format: + unit: bytes + shortValues: true + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + query: rate(caddy_http_response_duration_seconds_sum[$interval]) + basicEx: + kind: Panel + spec: + display: + name: Single Query + plugin: + kind: TimeSeriesChart + spec: + yAxis: + format: + unit: decimal + legend: + position: right + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + seriesNameFormat: Node memory - {{device}} {{instance}} + query: + 1 - node_filesystem_free_bytes{job='$job',instance=~'$instance',fstype!="rootfs",mountpoint!~"/(run|var).*",mountpoint!=""} + / node_filesystem_size_bytes{job='$job',instance=~'$instance'} + legendEx: + kind: Panel + spec: + display: + name: Legend Example + plugin: + kind: TimeSeriesChart + spec: + legend: + position: bottom + yAxis: + show: true + format: + unit: bytes + shortValues: true + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + seriesNameFormat: Node memory total + query: + node_memory_MemTotal_bytes{job='$job',instance=~'$instance'} + - node_memory_MemFree_bytes{job='$job',instance=~'$instance'} - + 
node_memory_Buffers_bytes{job='$job',instance=~'$instance'} - node_memory_Cached_bytes{job='$job',instance=~'$instance'} + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + seriesNameFormat: Memory (buffers) - {{instance}} + query: node_memory_Buffers_bytes{job='$job',instance=~'$instance'} + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + seriesNameFormat: Cached Bytes + query: node_memory_Cached_bytes{job='$job',instance=~'$instance'} + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + seriesNameFormat: MemFree Bytes + query: node_memory_MemFree_bytes{job='$job',instance=~'$instance'} + testNodeQuery: + kind: Panel + spec: + display: + name: Test Query + description: Description text + plugin: + kind: TimeSeriesChart + spec: + yAxis: + format: + unit: decimal + decimalPlaces: 2 + legend: + position: right + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + query: node_load15{instance=~"(demo.do.prometheus.io:9100)",job='$job'} + seriesNameFormat: Test {{job}} {{instance}} + testQueryAlt: + kind: Panel + spec: + display: + name: Test Query Alt + description: Description text + plugin: + kind: TimeSeriesChart + spec: + legend: + position: right + yAxis: + format: + unit: percent-decimal + decimalPlaces: 1 + thresholds: + steps: + - value: 0.4 + name: "Alert: Warning condition example" + - value: 0.75 + name: "Alert: Critical condition example" + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + query: node_load1{instance=~"(demo.do.prometheus.io:9100)",job='$job'} + cpuLine: + kind: Panel + spec: + display: + name: CPU - Line (Multi Series) + description: This is a line chart test + plugin: + kind: TimeSeriesChart + spec: + yAxis: + show: false + label: CPU Label + format: + unit: percent-decimal + decimalPlaces: 0 + legend: + position: bottom + thresholds: + 
steps: + - value: 0.2 + - value: 0.35 + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + seriesNameFormat: "{{mode}} mode - {{job}} {{instance}}" + query: avg without (cpu)(rate(node_cpu_seconds_total{job='$job',instance=~'$instance',mode!="nice",mode!="steal",mode!="irq"}[$interval])) + cpuGauge: + kind: Panel + spec: + display: + name: CPU - Gauge (Multi Series) + description: This is a gauge chart test + plugin: + kind: GaugeChart + spec: + calculation: last-number + format: + unit: percent-decimal + thresholds: + steps: + - value: 0.2 + - value: 0.35 + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + seriesNameFormat: "{{mode}} mode - {{job}} {{instance}}" + query: avg without (cpu)(rate(node_cpu_seconds_total{job='$job',instance=~'$instance',mode!="nice",mode!="steal",mode!="irq"}[$interval])) + statSm: + kind: Panel + spec: + display: + name: Stat Sm + plugin: + kind: StatChart + spec: + calculation: mean + format: + unit: decimal + decimalPlaces: 1 + shortValues: true + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + query: node_time_seconds{job='$job',instance=~'$instance'} - node_boot_time_seconds{job='$job',instance=~'$instance'} + gaugeRAM: + kind: Panel + spec: + display: + name: RAM Used + description: This is a stat chart + plugin: + kind: GaugeChart + spec: + calculation: last-number + format: + unit: percent + thresholds: + steps: + - value: 85 + - value: 95 + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + query: + 100 - ((node_memory_MemAvailable_bytes{job='$job',instance=~'$instance'} + * 100) / node_memory_MemTotal_bytes{job='$job',instance=~'$instance'}) + statRAM: + kind: Panel + spec: + display: + name: RAM Used + description: This is a stat chart + plugin: + kind: StatChart + spec: + calculation: last-number + format: + unit: percent + queries: + - 
kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + query: + 100 - ((node_memory_MemAvailable_bytes{job='$job',instance=~'$instance'} + * 100) / node_memory_MemTotal_bytes{job='$job',instance=~'$instance'}) + statTotalRAM: + kind: Panel + spec: + display: + name: RAM Total + description: This is a stat chart + plugin: + kind: StatChart + spec: + calculation: last-number + format: + unit: bytes + decimalPlaces: 1 + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + query: node_memory_MemTotal_bytes{job='$job',instance=~'$instance'} + statMd: + kind: Panel + spec: + display: + name: Stat Md + plugin: + kind: StatChart + spec: + calculation: sum + format: + unit: decimal + decimalPlaces: 2 + shortValues: true + sparkline: + color: "#e65013" + width: 1.5 + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + query: + avg(node_load15{job='node',instance=~'$instance'}) / count(count(node_cpu_seconds_total{job='node',instance=~'$instance'}) + by (cpu)) * 100 + statLg: + kind: Panel + spec: + display: + name: Stat Lg + description: This is a stat chart + plugin: + kind: StatChart + spec: + calculation: mean + format: + unit: percent + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + query: + (((count(count(node_cpu_seconds_total{job='$job',instance=~'$instance'}) + by (cpu))) - avg(sum by (mode)(rate(node_cpu_seconds_total{mode="idle",job='$job',instance=~'$instance'}[$interval])))) + * 100) / count(count(node_cpu_seconds_total{job='$job',instance=~'$instance'}) + by (cpu)) + gaugeEx: + kind: Panel + spec: + display: + name: Gauge Ex + description: This is a gauge chart + plugin: + kind: GaugeChart + spec: + calculation: last-number + format: + unit: percent + thresholds: + steps: + - value: 85 + - value: 95 + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + 
query: + (((count(count(node_cpu_seconds_total{job='$job',instance=~'$instance'}) + by (cpu))) - avg(sum by (mode)(rate(node_cpu_seconds_total{mode="idle",job='$job',instance=~'$instance'}[$interval])))) + * 100) / count(count(node_cpu_seconds_total{job='$job',instance=~'$instance'}) + by (cpu)) + gaugeAltEx: + kind: Panel + spec: + display: + name: Gauge Alt Ex + description: GaugeChart description text + plugin: + kind: GaugeChart + spec: + calculation: last-number + format: + unit: percent-decimal + decimalPlaces: 1 + thresholds: + steps: + - value: 0.5 + name: "Alert: Warning condition example" + - value: 0.75 + name: "Alert: Critical condition example" + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + query: node_load15{instance=~'$instance',job='$job'} + gaugeFormatTest: + kind: Panel + spec: + display: + name: Gauge Format Test + plugin: + kind: GaugeChart + spec: + calculation: last-number + format: + unit: bytes + max: 95000000 + thresholds: + steps: + - value: 71000000 + - value: 82000000 + queries: + - kind: TimeSeriesQuery + spec: + plugin: + kind: PrometheusTimeSeriesQuery + spec: + query: node_time_seconds{job='$job',instance=~'$instance'} - node_boot_time_seconds{job='$job',instance=~'$instance'} + layouts: + - kind: Grid + spec: + display: + title: Row 1 + collapse: + open: true + items: + - x: 0 + "y": 0 + width: 2 + height: 3 + content: + "$ref": "#/spec/panels/statRAM" + - x: 0 + "y": 4 + width: 2 + height: 3 + content: + "$ref": "#/spec/panels/statTotalRAM" + - x: 2 + "y": 0 + width: 4 + height: 6 + content: + "$ref": "#/spec/panels/statMd" + - x: 6 + "y": 0 + width: 10 + height: 6 + content: + "$ref": "#/spec/panels/statLg" + - x: 16 + "y": 0 + width: 4 + height: 6 + content: + "$ref": "#/spec/panels/gaugeFormatTest" + - x: 20 + "y": 0 + width: 4 + height: 6 + content: + "$ref": "#/spec/panels/gaugeRAM" + - kind: Grid + spec: + display: + title: Row 2 + collapse: + open: true + items: + - x: 0 + 
"y": 0 + width: 12 + height: 6 + content: + "$ref": "#/spec/panels/legendEx" + - x: 12 + "y": 0 + width: 12 + height: 6 + content: + "$ref": "#/spec/panels/basicEx" + - kind: Grid + spec: + display: + title: Row 3 + collapse: + open: false + items: + - x: 0 + "y": 0 + width: 24 + height: 6 + content: + "$ref": "#/spec/panels/cpuGauge" + - x: 0 + "y": 6 + width: 12 + height: 8 + content: + "$ref": "#/spec/panels/cpuLine" + - x: 12 + "y": 0 + width: 12 + height: 8 + content: + "$ref": "#/spec/panels/defaultTimeSeriesChart" diff --git a/test/util/constants.go b/test/util/constants.go index 9b1e483b..be818e19 100644 --- a/test/util/constants.go +++ b/test/util/constants.go @@ -17,7 +17,7 @@ import ( const ( TestNamespaceName = "test-namespace" - Dash0OperatorNamespace = "dash0-system" + OperatorNamespace = "dash0-system" OTelCollectorNamePrefixTest = "unit-test" CronJobNamePrefix = "cronjob" @@ -40,6 +40,9 @@ const ( EndpointDash0WithProtocolTest = "https://endpoint.dash0.com:4317" EndpointGrpcTest = "endpoint.backend.com:4317" EndpointHttpTest = "https://endpoint.backend.com:4318" + + ApiEndpointTest = "https://api.dash0.com" + DatasetTest = "test-dataset" ) var ( @@ -65,7 +68,7 @@ var ( DeploymentSelfReference = &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Namespace: Dash0OperatorNamespace, + Namespace: OperatorNamespace, Name: "unit-test-dash0-operator-controller", UID: "2f009c75-d69f-4b02-9d9d-fa17e76f5c1d", }, @@ -93,6 +96,18 @@ func Dash0ExportWithEndpointAndToken() dash0v1alpha1.Export { } } +func Dash0ExportWithEndpointAndTokenAndApiEndpoint() dash0v1alpha1.Export { + return dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointDash0Test, + Authorization: dash0v1alpha1.Authorization{ + Token: &AuthorizationTokenTest, + }, + ApiEndpoint: ApiEndpointTest, + }, + } +} + func Dash0ExportWithEndpointTokenAndInsightsDataset() dash0v1alpha1.Export { return dash0v1alpha1.Export{ Dash0: &dash0v1alpha1.Dash0Configuration{ @@ -116,6 
+131,18 @@ func Dash0ExportWithEndpointAndSecretRef() dash0v1alpha1.Export { } } +func Dash0ExportWithEndpointAndSecretRefAndApiEndpoint() dash0v1alpha1.Export { + return dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointDash0Test, + Authorization: dash0v1alpha1.Authorization{ + SecretRef: &SecretRefTest, + }, + ApiEndpoint: ApiEndpointTest, + }, + } +} + func ExportToPrt(export dash0v1alpha1.Export) *dash0v1alpha1.Export { return &export } diff --git a/test/util/controller_deployment.go b/test/util/controller_deployment.go new file mode 100644 index 00000000..b57ea110 --- /dev/null +++ b/test/util/controller_deployment.go @@ -0,0 +1,251 @@ +// SPDX-FileCopyrightText: Copyright 2024 Dash0 Inc. +// SPDX-License-Identifier: Apache-2.0 + +package util + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/dash0hq/dash0-operator/internal/dash0/util" +) + +func CreateControllerDeploymentWithoutSelfMonitoringWithoutAuth() *appsv1.Deployment { + return createControllerDeployment(createDefaultEnvVars()) +} + +func CreateControllerDeploymentWithoutSelfMonitoringWithToken() *appsv1.Deployment { + tokenEnvVar := corev1.EnvVar{ + Name: util.SelfMonitoringAndApiAuthTokenEnvVarName, + Value: AuthorizationTokenTest, + } + env := append([]corev1.EnvVar{tokenEnvVar}, createDefaultEnvVars()...) + return createControllerDeployment(env) +} + +func CreateControllerDeploymentWithoutSelfMonitoringWithSecretRef() *appsv1.Deployment { + env := append([]corev1.EnvVar{createSecretRefEnvVar()}, createDefaultEnvVars()...) + return createControllerDeployment(env) +} + +func CreateControllerDeploymentWithSelfMonitoringWithToken() *appsv1.Deployment { + tokenEnvVar := corev1.EnvVar{ + Name: util.SelfMonitoringAndApiAuthTokenEnvVarName, + Value: AuthorizationTokenTest, + } + env := append([]corev1.EnvVar{tokenEnvVar}, createDefaultEnvVars()...) 
+ return createControllerDeployment(appendSelfMonitoringEnvVars(env)) +} + +func CreateControllerDeploymentWithSelfMonitoringWithSecretRef() *appsv1.Deployment { + env := append([]corev1.EnvVar{createSecretRefEnvVar()}, createDefaultEnvVars()...) + return createControllerDeployment(appendSelfMonitoringEnvVars(env)) +} + +func createSecretRefEnvVar() corev1.EnvVar { + return corev1.EnvVar{ + Name: util.SelfMonitoringAndApiAuthTokenEnvVarName, + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: SecretRefTest.Name, + }, + Key: SecretRefTest.Key, + }, + }, + } +} + +func appendSelfMonitoringEnvVars(env []corev1.EnvVar) []corev1.EnvVar { + return append(env, + corev1.EnvVar{ + Name: "OTEL_EXPORTER_OTLP_ENDPOINT", + Value: EndpointDash0WithProtocolTest, + }, + corev1.EnvVar{ + Name: "OTEL_EXPORTER_OTLP_HEADERS", + Value: "Authorization=Bearer $(SELF_MONITORING_AND_API_AUTH_TOKEN),Dash0-Dataset=dash0-internal", + }, + corev1.EnvVar{ + Name: "OTEL_EXPORTER_OTLP_PROTOCOL", + Value: "grpc", + }, + corev1.EnvVar{ + Name: "OTEL_RESOURCE_ATTRIBUTES", + Value: "service.namespace=dash0.operator,service.name=manager,service.version=1.2.3", + }, + ) +} + +func createDefaultEnvVars() []corev1.EnvVar { + return []corev1.EnvVar{ + { + Name: "DASH0_OPERATOR_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "DASH0_DEPLOYMENT_NAME", + Value: OperatorDeploymentName, + }, + { + Name: "OTEL_COLLECTOR_NAME_PREFIX", + Value: "dash0-system", + }, + { + Name: "DASH0_INIT_CONTAINER_IMAGE", + Value: "ghcr.io/dash0hq/instrumentation", + }, + { + Name: "DASH0_INIT_CONTAINER_IMAGE_PULL_POLICY", + Value: "", + }, + { + Name: "DASH0_COLLECTOR_IMAGE", + Value: "ghcr.io/dash0hq/collector", + }, + { + Name: "DASH0_COLLECTOR_IMAGE_PULL_POLICY", + Value: "", + }, + { + Name: 
"DASH0_CONFIGURATION_RELOADER_IMAGE", + Value: "ghcr.io/dash0hq/configuration-reloader@latest", + }, + { + Name: "DASH0_CONFIGURATION_RELOADER_IMAGE_PULL_POLICY", + Value: "", + }, + { + Name: "DASH0_FILELOG_OFFSET_SYNCH_IMAGE", + Value: "ghcr.io/dash0hq/filelog-offset-synch", + }, + { + Name: "DASH0_FILELOG_OFFSET_SYNCH_IMAGE_PULL_POLICY", + Value: "", + }, + { + Name: "DASH0_DEVELOPMENT_MODE", + Value: "false", + }, + } +} + +func createControllerDeployment(env []corev1.EnvVar) *appsv1.Deployment { + replicaCount := int32(2) + falsy := false + truthy := true + terminationGracePeriodSeconds := int64(10) + secretMode := corev1.SecretVolumeSourceDefaultMode + + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: OperatorDeploymentName, + Namespace: OperatorNamespace, + Labels: map[string]string{ + "app.kubernetes.io/name": "dash0-operator", + "app.kubernetes.io/component": "controller", + "app.kubernetes.io/instance": "deployment", + "dash0.com/enable": "false", + }, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicaCount, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "dash0-operator", + "app.kubernetes.io/component": "controller", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "kubectl.kubernetes.io/default-container": "manager", + }, + Labels: map[string]string{ + "app.kubernetes.io/name": "dash0-operator", + "app.kubernetes.io/component": "controller", + "dash0.cert-digest": "1234567890", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "manager", + Image: "ghcr.io/dash0hq/operator-controller@latest", + Command: []string{"/manager"}, + Args: []string{ + "--health-probe-bind-address=:8081", + "--metrics-bind-address=127.0.0.1:8080", + "--leader-elect", + }, + Env: env, + Ports: []corev1.ContainerPort{ + { + Name: "webhook-server", + ContainerPort: 9443, + Protocol: "TCP", + }, + }, + 
VolumeMounts: []corev1.VolumeMount{ + { + Name: "certificates", + MountPath: "/tmp/k8s-webhook-server/serving-certs", + ReadOnly: true, + }, + }, + }, + { + Name: "kube-rbac-proxy", + Image: "quay.io/brancz/kube-rbac-proxy:v0.18.0", + Args: []string{ + "--secure-listen-address=0.0.0.0:8443", + "--upstream=http://127.0.0.1:8080/", + "--logtostderr=true", + "--v=0", + }, + Ports: []corev1.ContainerPort{ + { + Name: "https", + ContainerPort: 8443, + Protocol: "TCP", + }, + }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: &falsy, + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, + }, + }, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: &truthy, + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, + ServiceAccountName: "dash0-operator-service-account", + AutomountServiceAccountToken: &truthy, + TerminationGracePeriodSeconds: &terminationGracePeriodSeconds, + Volumes: []corev1.Volume{ + { + Name: "certificates", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + DefaultMode: &secretMode, + SecretName: "dash0-operator-certificates", + }, + }, + }, + }, + }, + }, + }, + } +} diff --git a/test/util/matchers.go b/test/util/matchers.go index 103c003f..cb8247d2 100644 --- a/test/util/matchers.go +++ b/test/util/matchers.go @@ -49,6 +49,56 @@ func (matcher *MatchEnvVarMatcher) NegatedFailureMessage(actual interface{}) (me return format.Message(actual, fmt.Sprintf("not %s", matcher.message())) } +func MatchEnvVarValueFrom(name string, secretName string, secretKey string, args ...interface{}) gomega.OmegaMatcher { + return &MatchEnvVarValueFromSecretMatcher{ + Name: name, + SecretName: secretName, + SecretKey: secretKey, + Args: args, + } +} + +type MatchEnvVarValueFromSecretMatcher struct { + Name string + SecretName string + SecretKey string + Args []interface{} +} + +func (matcher *MatchEnvVarValueFromSecretMatcher) Match(actual 
interface{}) (success bool, err error) { + envVar, ok := actual.(corev1.EnvVar) + if !ok { + return false, + fmt.Errorf( + "MatchEnvVarValueFromSecretMatcher matcher requires a corev1.EnvVar. Got:\n%s", + format.Object(actual, 1), + ) + } + return matcher.Name == envVar.Name && + envVar.ValueFrom != nil && + envVar.ValueFrom.SecretKeyRef != nil && + matcher.SecretName == envVar.ValueFrom.SecretKeyRef.Name && + matcher.SecretKey == envVar.ValueFrom.SecretKeyRef.Key, + nil +} + +func (matcher *MatchEnvVarValueFromSecretMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, matcher.message()) +} + +func (matcher *MatchEnvVarValueFromSecretMatcher) message() string { + return fmt.Sprintf( + "to contain env var with name %s and value from secret %s/%s", + matcher.Name, + matcher.SecretName, + matcher.SecretKey, + ) +} + +func (matcher *MatchEnvVarValueFromSecretMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, fmt.Sprintf("not %s", matcher.message())) +} + func MatchVolumeMount(name string, mountPath string, args ...interface{}) gomega.OmegaMatcher { return &MatchVolumeMountMatcher{ Name: name, diff --git a/test/util/operator_resource.go b/test/util/operator_resource.go index 9b457f2c..9b79446d 100644 --- a/test/util/operator_resource.go +++ b/test/util/operator_resource.go @@ -20,7 +20,7 @@ import ( ) const ( - Dash0OperatorDeploymentName = "controller-deployment" + OperatorDeploymentName = "controller-deployment" OperatorConfigurationResourceName = "dash0-operator-configuration-test" ) @@ -28,7 +28,42 @@ var ( OperatorConfigurationResourceDefaultObjectMeta = metav1.ObjectMeta{ Name: OperatorConfigurationResourceName, } - OperatorConfigurationResourceDefaultSpec = dash0v1alpha1.Dash0OperatorConfigurationSpec{ + + OperatorConfigurationResourceWithoutSelfMonitoringWithoutAuth = dash0v1alpha1.Dash0OperatorConfigurationSpec{ + SelfMonitoring: dash0v1alpha1.SelfMonitoring{ + Enabled: 
false, + }, + } + + OperatorConfigurationResourceWithoutSelfMonitoringWithToken = dash0v1alpha1.Dash0OperatorConfigurationSpec{ + SelfMonitoring: dash0v1alpha1.SelfMonitoring{ + Enabled: false, + }, + Export: &dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + ApiEndpoint: ApiEndpointTest, + Authorization: dash0v1alpha1.Authorization{ + Token: &AuthorizationTokenTest, + }, + }, + }, + } + + OperatorConfigurationResourceWithoutSelfMonitoringWithSecretRef = dash0v1alpha1.Dash0OperatorConfigurationSpec{ + SelfMonitoring: dash0v1alpha1.SelfMonitoring{ + Enabled: false, + }, + Export: &dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + ApiEndpoint: ApiEndpointTest, + Authorization: dash0v1alpha1.Authorization{ + SecretRef: &SecretRefTest, + }, + }, + }, + } + + OperatorConfigurationResourceWithSelfMonitoringWithToken = dash0v1alpha1.Dash0OperatorConfigurationSpec{ SelfMonitoring: dash0v1alpha1.SelfMonitoring{ Enabled: true, }, @@ -41,6 +76,22 @@ var ( }, }, } + + OperatorConfigurationResourceWithSelfMonitoringWithSecretRef = dash0v1alpha1.Dash0OperatorConfigurationSpec{ + SelfMonitoring: dash0v1alpha1.SelfMonitoring{ + Enabled: true, + }, + Export: &dash0v1alpha1.Export{ + Dash0: &dash0v1alpha1.Dash0Configuration{ + Endpoint: EndpointDash0Test, + Authorization: dash0v1alpha1.Authorization{ + SecretRef: &SecretRefTest, + }, + }, + }, + } + + OperatorConfigurationResourceDefaultSpec = OperatorConfigurationResourceWithSelfMonitoringWithToken ) func EnsureControllerDeploymentExists( @@ -133,7 +184,7 @@ func LoadOperatorDeploymentOrFail( deployment := &appsv1.Deployment{} if err := k8sClient.Get( ctx, - types.NamespacedName{Namespace: Dash0OperatorNamespace, Name: Dash0OperatorDeploymentName}, + types.NamespacedName{Namespace: OperatorNamespace, Name: OperatorDeploymentName}, deployment, ); err != nil { g.Expect(err).NotTo(HaveOccurred()) diff --git a/test/util/resources.go b/test/util/resources.go index fd00424f..1d758356 100644 --- 
a/test/util/resources.go +++ b/test/util/resources.go @@ -17,10 +17,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/dash0hq/dash0-operator/internal/dash0/util" - . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) @@ -35,10 +34,10 @@ var ( Value: "/__dash0__", }}, SecurityContext: &corev1.SecurityContext{ - AllowPrivilegeEscalation: &util.False, - Privileged: &util.False, - ReadOnlyRootFilesystem: &util.True, - RunAsNonRoot: &util.False, + AllowPrivilegeEscalation: ptr.To(false), + Privileged: ptr.To(false), + ReadOnlyRootFilesystem: ptr.To(true), + RunAsNonRoot: ptr.To(false), RunAsUser: &ArbitraryNumer, RunAsGroup: &ArbitraryNumer, }, @@ -63,11 +62,11 @@ func EnsureTestNamespaceExists( return EnsureNamespaceExists(ctx, k8sClient, TestNamespaceName) } -func EnsureDash0OperatorNamespaceExists( +func EnsureOperatorNamespaceExists( ctx context.Context, k8sClient client.Client, ) *corev1.Namespace { - return EnsureNamespaceExists(ctx, k8sClient, Dash0OperatorNamespace) + return EnsureNamespaceExists(ctx, k8sClient, OperatorNamespace) } func EnsureNamespaceExists( @@ -701,7 +700,7 @@ func DeploymentWithExistingDash0Artifacts(namespace string, name string) *appsv1 Value: "value", }, { - // Dash0 does not support injecting into containers that already have NODE_OPTIONS set via a + // The operator does not support injecting into containers that already have NODE_OPTIONS set via a // ValueFrom clause, thus this env var will not be modified. Name: "NODE_OPTIONS", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}},