From 434e6fafc9b0573b1b5c78a24549a2c092b97f30 Mon Sep 17 00:00:00 2001
From: Todd Short
Date: Thu, 29 Jan 2026 15:57:53 -0500
Subject: [PATCH] Add HTTP proxy support for operator deployments

Fixes: OCPBUGS-61082

Add support for HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment
variables in operator deployments. Proxy configuration is read from
operator-controller's environment at startup and injected into all
containers in deployed operator Deployments.

When operator-controller restarts with changed proxy configuration,
existing operator deployments are automatically updated via triggered
reconciliation.

Supports both helm and boxcutter appliers. Includes unit and e2e tests.

Signed-off-by: Todd Short
Assisted-By: Claude
---
 Makefile                                      |   2 +-
 cmd/operator-controller/main.go               |  82 ++-
 .../operator-controller/applier/boxcutter.go  |  36 +-
 .../operator-controller/applier/provider.go   |  12 +
 .../controllers/boxcutter_reconcile_steps.go  |   7 +-
 .../boxcutter_reconcile_steps_apply_test.go   |   2 +
 internal/operator-controller/proxy/proxy.go   | 126 +++++
 .../operator-controller/proxy/proxy_test.go   | 143 +++++
 .../registryv1/generators/generators.go       |  16 +-
 .../render/registryv1/generators/resources.go |  58 ++
 .../registryv1/generators/resources_test.go   | 112 ++++
 .../rukpak/render/render.go                   |   8 +
 test/e2e/features/proxy.feature               |  97 ++++
 test/e2e/steps/steps.go                       | 534 +++++++++++++++++-
 14 files changed, 1217 insertions(+), 18 deletions(-)
 create mode 100644 internal/operator-controller/proxy/proxy.go
 create mode 100644 internal/operator-controller/proxy/proxy_test.go
 create mode 100644 test/e2e/features/proxy.feature

diff --git a/Makefile b/Makefile
index 41397e9cc4..4eadbc56bb 100644
--- a/Makefile
+++ b/Makefile
@@ -243,7 +243,7 @@ test: manifests generate fmt lint test-unit test-e2e test-regression #HELP Run a
 .PHONY: e2e
 e2e: #EXHELP Run the e2e tests.
- go test -count=1 -v ./test/e2e/features_test.go + $(if $(GODOG_TAGS),go test -timeout=30m -count=1 -v ./test/e2e/features_test.go -args --godog.tags="$(GODOG_TAGS)",go test -timeout=30m -count=1 -v ./test/e2e/features_test.go) E2E_REGISTRY_NAME := docker-registry E2E_REGISTRY_NAMESPACE := operator-controller-e2e diff --git a/cmd/operator-controller/main.go b/cmd/operator-controller/main.go index dd0bc98f6d..d7e097b3a9 100644 --- a/cmd/operator-controller/main.go +++ b/cmd/operator-controller/main.go @@ -68,6 +68,7 @@ import ( "github.com/operator-framework/operator-controller/internal/operator-controller/controllers" "github.com/operator-framework/operator-controller/internal/operator-controller/features" "github.com/operator-framework/operator-controller/internal/operator-controller/finalizers" + "github.com/operator-framework/operator-controller/internal/operator-controller/proxy" "github.com/operator-framework/operator-controller/internal/operator-controller/resolve" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/preflights/crdupgradesafety" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/render" @@ -219,6 +220,7 @@ func validateMetricsFlags() error { } return nil } + func run() error { setupLog.Info("starting up the controller", "version info", version.String()) @@ -474,11 +476,17 @@ func run() error { } certProvider := getCertificateProvider() + + // Create proxy configuration from environment variables + // The fingerprint is calculated once during construction and cached + proxyConfig := proxy.NewFromEnv() + regv1ManifestProvider := &applier.RegistryV1ManifestProvider{ BundleRenderer: registryv1.Renderer, CertificateProvider: certProvider, IsWebhookSupportEnabled: certProvider != nil, IsSingleOwnNamespaceEnabled: features.OperatorControllerFeatureGate.Enabled(features.SingleOwnNamespaceInstallSupport), + Proxy: proxyConfig, } var cerCfg reconcilerConfigurator if features.OperatorControllerFeatureGate.Enabled(features.BoxcutterRuntime) { @@ -540,6 +548,73 @@ func run() error { return err } + // Add a runnable to trigger reconciliation of all ClusterExtensions on startup. + // This ensures existing deployments get updated when proxy configuration changes + // (added, modified, or removed). 
+ if err := mgr.Add(manager.RunnableFunc(func(ctx context.Context) error { + // Wait for the cache to sync + if !mgr.GetCache().WaitForCacheSync(ctx) { + return fmt.Errorf("failed to wait for cache sync") + } + + // Always trigger reconciliation on startup to handle proxy config changes + if proxyConfig != nil { + setupLog.Info("proxy configuration detected, triggering reconciliation of all ClusterExtensions", + "httpProxy", proxy.SanitizeURL(proxyConfig.HTTPProxy), "httpsProxy", proxy.SanitizeURL(proxyConfig.HTTPSProxy), "noProxy", proxyConfig.NoProxy) + } else { + setupLog.Info("no proxy configuration detected, triggering reconciliation to remove proxy vars from existing deployments") + } + + extList := &ocv1.ClusterExtensionList{} + if err := cl.List(ctx, extList); err != nil { + setupLog.Error(err, "failed to list ClusterExtensions for proxy update") + return nil // Don't fail startup + } + + for i := range extList.Items { + ext := &extList.Items[i] + + // Get current and desired proxy hash + currentHash, exists := ext.Annotations[proxy.ConfigHashKey] + desiredHash := proxyConfig.Fingerprint() + + // Skip if annotation matches desired state + if exists && currentHash == desiredHash { + continue + } + // Skip if neither proxy nor annotation exists + if !exists && desiredHash == "" { + continue + } + + // Use Patch instead of Update to avoid resourceVersion conflicts + // This ensures the annotation is set even if the ClusterExtension is modified concurrently + patch := client.MergeFrom(ext.DeepCopy()) + if ext.Annotations == nil { + ext.Annotations = make(map[string]string) + } + + // Delete annotation if no proxy is configured, otherwise set it + if desiredHash == "" { + delete(ext.Annotations, proxy.ConfigHashKey) + } else { + ext.Annotations[proxy.ConfigHashKey] = desiredHash + } + + if err := cl.Patch(ctx, ext, patch); err != nil { + setupLog.Error(err, "failed to set proxy hash on ClusterExtension", "name", ext.Name) + // Continue with other ClusterExtensions + } + } + + setupLog.Info("triggered reconciliation for existing ClusterExtensions", "count", len(extList.Items)) + + return nil + })); err != nil { + setupLog.Error(err, "unable to add startup reconciliation trigger") + return err + } + setupLog.Info("starting manager") ctx := ctrl.SetupSignalHandler() if err := mgr.Start(ctx); err != nil { @@ -625,13 +700,18 @@ func (c *boxcutterReconcilerConfigurator) Configure(ceReconciler *controllers.Cl ActionClientGetter: acg, RevisionGenerator: rg, } + // Get the ManifestProvider to extract proxy fingerprint + regv1Provider, ok := c.regv1ManifestProvider.(*applier.RegistryV1ManifestProvider) + if !ok { + return fmt.Errorf("manifest provider is not of type *applier.RegistryV1ManifestProvider") + } ceReconciler.ReconcileSteps = []controllers.ReconcileStepFunc{ controllers.HandleFinalizers(c.finalizers), controllers.MigrateStorage(storageMigrator), controllers.RetrieveRevisionStates(revisionStatesGetter), controllers.ResolveBundle(c.resolver, c.mgr.GetClient()), controllers.UnpackBundle(c.imagePuller, c.imageCache), - controllers.ApplyBundleWithBoxcutter(appl.Apply), + controllers.ApplyBundleWithBoxcutter(appl.Apply, regv1Provider.ProxyFingerprint), } baseDiscoveryClient, err := discovery.NewDiscoveryClientForConfig(c.mgr.GetConfig()) diff --git a/internal/operator-controller/applier/boxcutter.go b/internal/operator-controller/applier/boxcutter.go index 5f10716c77..2a69b21591 100644 --- a/internal/operator-controller/applier/boxcutter.go +++ 
b/internal/operator-controller/applier/boxcutter.go @@ -33,6 +33,7 @@ import ( ocv1 "github.com/operator-framework/operator-controller/api/v1" "github.com/operator-framework/operator-controller/internal/operator-controller/authorization" "github.com/operator-framework/operator-controller/internal/operator-controller/labels" + "github.com/operator-framework/operator-controller/internal/operator-controller/proxy" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle/source" "github.com/operator-framework/operator-controller/internal/shared/util/cache" ) @@ -485,17 +486,28 @@ func (bc *Boxcutter) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.Clust desiredRevision.Spec.Revision = currentRevision.Spec.Revision desiredRevision.Name = currentRevision.Name - err := bc.createOrUpdate(ctx, getUserInfo(ext), desiredRevision) - switch { - case apierrors.IsInvalid(err): - // We could not update the current revision due to trying to update an immutable field. - // Therefore, we need to create a new revision. + // Check if proxy configuration changed by comparing annotations. + // The Phases field has CEL immutability validation (self == oldSelf) which prevents + // updating nested content like env vars. When proxy config changes, we must create + // a new revision rather than attempting to patch the existing one. + // Comparing proxy hash annotations is more reliable than comparing phases content, + // which has false positives due to serialization differences in unstructured objects. + currentProxyHash := currentRevision.Annotations[proxy.ConfigHashKey] + desiredProxyHash := desiredRevision.Annotations[proxy.ConfigHashKey] + if currentProxyHash != desiredProxyHash { state = StateNeedsUpgrade - case err == nil: - // inplace patch was successful, no changes in phases - state = StateUnchanged - default: - return false, "", fmt.Errorf("patching %s Revision: %w", desiredRevision.Name, err) + } else { + err = bc.createOrUpdate(ctx, getUserInfo(ext), desiredRevision) + switch { + case apierrors.IsInvalid(err): + // We could not update the current revision due to trying to update an immutable field. + // Therefore, we need to create a new revision. 
+ state = StateNeedsUpgrade + case err == nil: + state = StateUnchanged + default: + return false, "", fmt.Errorf("patching %s Revision: %w", desiredRevision.Name, err) + } } } @@ -530,6 +542,10 @@ func (bc *Boxcutter) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.Clust desiredRevision.Name = fmt.Sprintf("%s-%d", ext.Name, revisionNumber) desiredRevision.Spec.Revision = revisionNumber + // Clear server-added metadata fields from previous patch operation + desiredRevision.SetResourceVersion("") + desiredRevision.SetUID("") + desiredRevision.SetManagedFields(nil) if err = bc.garbageCollectOldRevisions(ctx, prevRevisions); err != nil { return false, "", fmt.Errorf("garbage collecting old revisions: %w", err) diff --git a/internal/operator-controller/applier/provider.go b/internal/operator-controller/applier/provider.go index 4602803ab6..6e2e440690 100644 --- a/internal/operator-controller/applier/provider.go +++ b/internal/operator-controller/applier/provider.go @@ -14,6 +14,7 @@ import ( ocv1 "github.com/operator-framework/operator-controller/api/v1" "github.com/operator-framework/operator-controller/internal/operator-controller/config" + "github.com/operator-framework/operator-controller/internal/operator-controller/proxy" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle/source" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/render" @@ -33,6 +34,7 @@ type RegistryV1ManifestProvider struct { CertificateProvider render.CertificateProvider IsWebhookSupportEnabled bool IsSingleOwnNamespaceEnabled bool + Proxy *proxy.Proxy } func (r *RegistryV1ManifestProvider) Get(bundleFS fs.FS, ext *ocv1.ClusterExtension) ([]client.Object, error) { @@ -68,6 +70,9 @@ func (r *RegistryV1ManifestProvider) Get(bundleFS fs.FS, ext *ocv1.ClusterExtens opts := []render.Option{ render.WithCertificateProvider(r.CertificateProvider), + // Always include proxy option to ensure manifests reflect current proxy state + // When r.Proxy is nil, this ensures proxy env vars are removed from manifests + render.WithProxy(r.Proxy), } if r.IsSingleOwnNamespaceEnabled { @@ -111,6 +116,13 @@ func (r *RegistryV1ManifestProvider) extractBundleConfigOptions(rv1 *bundle.Regi return opts, nil } +// ProxyFingerprint returns a stable hash of the proxy configuration. +// This is used to detect when proxy settings change and a new revision is needed. +// The hash is calculated once during proxy construction and cached in the Proxy itself. 
+func (r *RegistryV1ManifestProvider) ProxyFingerprint() string { + return r.Proxy.Fingerprint() +} + // RegistryV1HelmChartProvider creates a Helm-Chart from a registry+v1 bundle and its associated ClusterExtension type RegistryV1HelmChartProvider struct { ManifestProvider ManifestProvider diff --git a/internal/operator-controller/controllers/boxcutter_reconcile_steps.go b/internal/operator-controller/controllers/boxcutter_reconcile_steps.go index dfb5dad145..c4961bc728 100644 --- a/internal/operator-controller/controllers/boxcutter_reconcile_steps.go +++ b/internal/operator-controller/controllers/boxcutter_reconcile_steps.go @@ -32,6 +32,7 @@ import ( ocv1 "github.com/operator-framework/operator-controller/api/v1" "github.com/operator-framework/operator-controller/internal/operator-controller/labels" + "github.com/operator-framework/operator-controller/internal/operator-controller/proxy" ) type BoxcutterRevisionStatesGetter struct { @@ -96,7 +97,7 @@ func MigrateStorage(m StorageMigrator) ReconcileStepFunc { } } -func ApplyBundleWithBoxcutter(apply func(ctx context.Context, contentFS fs.FS, ext *ocv1.ClusterExtension, objectLabels, revisionAnnotations map[string]string) (bool, string, error)) ReconcileStepFunc { +func ApplyBundleWithBoxcutter(apply func(ctx context.Context, contentFS fs.FS, ext *ocv1.ClusterExtension, objectLabels, revisionAnnotations map[string]string) (bool, string, error), proxyFingerprint func() string) ReconcileStepFunc { return func(ctx context.Context, state *reconcileState, ext *ocv1.ClusterExtension) (*ctrl.Result, error) { l := log.FromContext(ctx) revisionAnnotations := map[string]string{ @@ -105,6 +106,10 @@ func ApplyBundleWithBoxcutter(apply func(ctx context.Context, contentFS fs.FS, e labels.BundleVersionKey: state.resolvedRevisionMetadata.Version, labels.BundleReferenceKey: state.resolvedRevisionMetadata.Image, } + // Add proxy config fingerprint to detect when proxy settings change + if fp := proxyFingerprint(); fp != "" { + revisionAnnotations[proxy.ConfigHashKey] = fp + } objLbls := map[string]string{ labels.OwnerKindKey: ocv1.ClusterExtensionKind, labels.OwnerNameKey: ext.GetName(), diff --git a/internal/operator-controller/controllers/boxcutter_reconcile_steps_apply_test.go b/internal/operator-controller/controllers/boxcutter_reconcile_steps_apply_test.go index 78adb70090..edce06c9fa 100644 --- a/internal/operator-controller/controllers/boxcutter_reconcile_steps_apply_test.go +++ b/internal/operator-controller/controllers/boxcutter_reconcile_steps_apply_test.go @@ -135,6 +135,8 @@ func TestApplyBundleWithBoxcutter(t *testing.T) { stepFunc := ApplyBundleWithBoxcutter(func(_ context.Context, _ fs.FS, _ *ocv1.ClusterExtension, _, _ map[string]string) (bool, string, error) { return true, "", nil + }, func() string { + return "" // empty proxy fingerprint for tests }) result, err := stepFunc(ctx, state, ext) require.NoError(t, err) diff --git a/internal/operator-controller/proxy/proxy.go b/internal/operator-controller/proxy/proxy.go new file mode 100644 index 0000000000..8b2b5f74a4 --- /dev/null +++ b/internal/operator-controller/proxy/proxy.go @@ -0,0 +1,126 @@ +// Package proxy defines HTTP proxy configuration types used across applier implementations. +package proxy + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "net/url" + "os" + "strings" +) + +const ( + // ConfigHashKey is the annotation key used to record the proxy configuration hash. + // This annotation is set on both ClusterExtensions and ClusterExtensionRevisions. 
+ // When the hash changes on a ClusterExtension, it triggers reconciliation. + // Comparing hashes between ClusterExtension and its current revision determines + // if a new revision is needed due to proxy configuration changes. + ConfigHashKey = "olm.operatorframework.io/proxy-config-hash" +) + +// Proxy holds HTTP proxy configuration values that are applied to rendered resources. +// These values are typically set as environment variables on generated Pods to enable +// operators to function correctly in environments that require HTTP proxies for outbound +// connections. +type Proxy struct { + // HTTPProxy is the HTTP proxy URL (e.g., "http://proxy.example.com:8080"). + // An empty value means no HTTP proxy is configured. + HTTPProxy string + // HTTPSProxy is the HTTPS proxy URL (e.g., "https://proxy.example.com:8443"). + // An empty value means no HTTPS proxy is configured. + HTTPSProxy string + // NoProxy is a comma-separated list of hosts, domains, or CIDR ranges that should + // bypass the proxy (e.g., "localhost,127.0.0.1,.cluster.local"). + // An empty value means all traffic will use the proxy (if configured). + NoProxy string + // fingerprint is a cached hash of the proxy configuration, calculated once during construction. + // This is used to detect when proxy settings change and a new revision is needed. + fingerprint string +} + +// NewFromEnv creates a new Proxy from environment variables. +// Returns nil if no proxy environment variables are set. +// The fingerprint is calculated once during construction and cached. +func NewFromEnv() *Proxy { + httpProxy := os.Getenv("HTTP_PROXY") + httpsProxy := os.Getenv("HTTPS_PROXY") + noProxy := os.Getenv("NO_PROXY") + + // If no proxy variables are set, return nil + if httpProxy == "" && httpsProxy == "" && noProxy == "" { + return nil + } + + p := &Proxy{ + HTTPProxy: httpProxy, + HTTPSProxy: httpsProxy, + NoProxy: noProxy, + } + + // Calculate and cache the fingerprint + p.fingerprint = calculateFingerprint(p) + + return p +} + +// calculateFingerprint computes a stable hash of the proxy configuration. +func calculateFingerprint(p *Proxy) string { + if p == nil { + return "" + } + data, err := json.Marshal(p) + if err != nil { + // This should never happen for a simple struct with string fields, + // but return empty string if it does + return "" + } + hash := sha256.Sum256(data) + return fmt.Sprintf("%x", hash[:8]) +} + +// Fingerprint returns the cached hash of the proxy configuration. +// This is used to detect when proxy settings change and a new revision is needed. +// Returns an empty string if the proxy is nil. +func (p *Proxy) Fingerprint() string { + if p == nil { + return "" + } + return p.fingerprint +} + +// SanitizeURL removes credentials from a proxy URL for safe logging. +// Returns the original string if it's not a valid URL or doesn't contain credentials. +// If the string contains @ but credentials can't be parsed out, returns a redacted version. 
+func SanitizeURL(proxyURL string) string { + if proxyURL == "" { + return "" + } + + u, err := url.Parse(proxyURL) + if err != nil { + // If we can't parse it, check if it might contain credentials (user:pass@host pattern) + // If so, redact it to avoid leaking credentials in logs + if strings.Contains(proxyURL, "@") { + return "" + } + // Otherwise return as-is (might be a hostname or other format without credentials) + return proxyURL + } + + // If there's user info, remove it and return sanitized URL + if u.User != nil { + u.User = nil + return u.String() + } + + // If no user info was parsed but the string contains @, it might be a schemelessly-formatted + // URL like "user:pass@host:port" which url.Parse doesn't recognize as having credentials. + // Redact it to be safe. + if strings.Contains(proxyURL, "@") { + return "" + } + + // No credentials detected, return as-is + return proxyURL +} diff --git a/internal/operator-controller/proxy/proxy_test.go b/internal/operator-controller/proxy/proxy_test.go new file mode 100644 index 0000000000..f595d27fe4 --- /dev/null +++ b/internal/operator-controller/proxy/proxy_test.go @@ -0,0 +1,143 @@ +package proxy + +import ( + "os" + "testing" +) + +func TestSanitizeURL(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "empty string", + input: "", + expected: "", + }, + { + name: "valid URL without credentials", + input: "http://proxy.example.com:8080", + expected: "http://proxy.example.com:8080", + }, + { + name: "valid URL with credentials", + input: "http://user:pass@proxy.example.com:8080", + expected: "http://proxy.example.com:8080", + }, + { + name: "hostname without credentials", + input: "proxy.example.com:8080", + expected: "proxy.example.com:8080", + }, + { + name: "hostname with credentials (unparseable URL)", + input: "user:pass@proxy.example.com:8080", + expected: "", + }, + { + name: "invalid format with @ but no credentials", + input: "something@weird", + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := SanitizeURL(tt.input) + if result != tt.expected { + t.Errorf("SanitizeURL(%q) = %q, want %q", tt.input, result, tt.expected) + } + }) + } +} + +func TestNewFromEnv(t *testing.T) { + // Save original env vars + origHTTP := os.Getenv("HTTP_PROXY") + origHTTPS := os.Getenv("HTTPS_PROXY") + origNO := os.Getenv("NO_PROXY") + + // Restore at end + defer func() { + os.Setenv("HTTP_PROXY", origHTTP) + os.Setenv("HTTPS_PROXY", origHTTPS) + os.Setenv("NO_PROXY", origNO) + }() + + tests := []struct { + name string + httpProxy string + httpsProxy string + noProxy string + expectNil bool + expectHash bool + }{ + { + name: "all empty returns nil", + httpProxy: "", + httpsProxy: "", + noProxy: "", + expectNil: true, + }, + { + name: "http proxy only", + httpProxy: "http://proxy.example.com:8080", + httpsProxy: "", + noProxy: "", + expectNil: false, + expectHash: true, + }, + { + name: "all proxies set", + httpProxy: "http://proxy.example.com:8080", + httpsProxy: "https://proxy.example.com:8443", + noProxy: "localhost,.cluster.local", + expectNil: false, + expectHash: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + os.Setenv("HTTP_PROXY", tt.httpProxy) + os.Setenv("HTTPS_PROXY", tt.httpsProxy) + os.Setenv("NO_PROXY", tt.noProxy) + + p := NewFromEnv() + + if tt.expectNil { + if p != nil { + t.Errorf("NewFromEnv() = %v, want nil", p) + } + return + } + + if p == nil { + t.Fatal("NewFromEnv() = nil, want 
non-nil") + } + + if p.HTTPProxy != tt.httpProxy { + t.Errorf("HTTPProxy = %q, want %q", p.HTTPProxy, tt.httpProxy) + } + if p.HTTPSProxy != tt.httpsProxy { + t.Errorf("HTTPSProxy = %q, want %q", p.HTTPSProxy, tt.httpsProxy) + } + if p.NoProxy != tt.noProxy { + t.Errorf("NoProxy = %q, want %q", p.NoProxy, tt.noProxy) + } + + if tt.expectHash { + fingerprint := p.Fingerprint() + if fingerprint == "" { + t.Error("Fingerprint() = empty string, want non-empty hash") + } + // Verify fingerprint is cached (calling again returns same value) + if p.Fingerprint() != fingerprint { + t.Error("Fingerprint() not cached properly") + } + } + }) + } +} diff --git a/internal/operator-controller/rukpak/render/registryv1/generators/generators.go b/internal/operator-controller/rukpak/render/registryv1/generators/generators.go index 8f45bb7620..39fbacb066 100644 --- a/internal/operator-controller/rukpak/render/registryv1/generators/generators.go +++ b/internal/operator-controller/rukpak/render/registryv1/generators/generators.go @@ -88,11 +88,23 @@ func BundleCSVDeploymentGenerator(rv1 *bundle.RegistryV1, opts render.Options) ( // See https://github.com/operator-framework/operator-lifecycle-manager/blob/dfd0b2bea85038d3c0d65348bc812d297f16b8d2/pkg/controller/install/deployment.go#L177-L180 depSpec.Spec.RevisionHistoryLimit = ptr.To(int32(1)) + // Always call WithProxy to ensure proxy env vars are managed correctly + // When proxy is nil, this will remove any existing proxy env vars + var httpProxy, httpsProxy, noProxy string + if opts.Proxy != nil { + httpProxy = opts.Proxy.HTTPProxy + httpsProxy = opts.Proxy.HTTPSProxy + noProxy = opts.Proxy.NoProxy + } + deploymentOpts := []ResourceCreatorOption{ + WithDeploymentSpec(depSpec.Spec), + WithLabels(depSpec.Label), + WithProxy(httpProxy, httpsProxy, noProxy), + } deploymentResource := CreateDeploymentResource( depSpec.Name, opts.InstallNamespace, - WithDeploymentSpec(depSpec.Spec), - WithLabels(depSpec.Label), + deploymentOpts..., ) secretInfo := render.CertProvisionerFor(depSpec.Name, opts).GetCertSecretInfo() diff --git a/internal/operator-controller/rukpak/render/registryv1/generators/resources.go b/internal/operator-controller/rukpak/render/registryv1/generators/resources.go index ed1cf6552f..a707b243cd 100644 --- a/internal/operator-controller/rukpak/render/registryv1/generators/resources.go +++ b/internal/operator-controller/rukpak/render/registryv1/generators/resources.go @@ -115,6 +115,64 @@ func WithMutatingWebhooks(webhooks ...admissionregistrationv1.MutatingWebhook) f } } +// WithProxy applies HTTP proxy environment variables to Deployment resources. +// Proxy env vars are applied to both regular containers and init containers. 
+func WithProxy(httpProxy, httpsProxy, noProxy string) func(client.Object) { + return func(obj client.Object) { + switch o := obj.(type) { + case *appsv1.Deployment: + addProxyEnvVars(httpProxy, httpsProxy, noProxy, o.Spec.Template.Spec.Containers) + addProxyEnvVars(httpProxy, httpsProxy, noProxy, o.Spec.Template.Spec.InitContainers) + } + } +} + +func addProxyEnvVars(httpProxy, httpsProxy, noProxy string, containers []corev1.Container) { + proxyEnvNames := []string{"HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"} + + for i := range containers { + // First, remove any existing proxy env vars to ensure clean slate + // This allows us to remove proxy vars when they're no longer configured + var newEnv []corev1.EnvVar + if len(containers[i].Env) > 0 { + newEnv = make([]corev1.EnvVar, 0, len(containers[i].Env)) + for _, env := range containers[i].Env { + isProxyVar := false + for _, proxyName := range proxyEnvNames { + if env.Name == proxyName { + isProxyVar = true + break + } + } + if !isProxyVar { + newEnv = append(newEnv, env) + } + } + } + containers[i].Env = newEnv + + // Then add the proxy env vars if they're configured + if len(httpProxy) > 0 { + containers[i].Env = append(containers[i].Env, corev1.EnvVar{ + Name: "HTTP_PROXY", + Value: httpProxy, + }) + } + if len(httpsProxy) > 0 { + containers[i].Env = append(containers[i].Env, corev1.EnvVar{ + Name: "HTTPS_PROXY", + Value: httpsProxy, + }) + } + if len(noProxy) > 0 { + containers[i].Env = append(containers[i].Env, corev1.EnvVar{ + Name: "NO_PROXY", + Value: noProxy, + }) + } + } +} + // CreateServiceAccountResource creates a ServiceAccount resource with name 'name', namespace 'namespace', and applying // any ServiceAccount related options in opts func CreateServiceAccountResource(name string, namespace string, opts ...ResourceCreatorOption) *corev1.ServiceAccount { diff --git a/internal/operator-controller/rukpak/render/registryv1/generators/resources_test.go b/internal/operator-controller/rukpak/render/registryv1/generators/resources_test.go index aa3227987e..4f337a601b 100644 --- a/internal/operator-controller/rukpak/render/registryv1/generators/resources_test.go +++ b/internal/operator-controller/rukpak/render/registryv1/generators/resources_test.go @@ -6,8 +6,10 @@ import ( "strings" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -276,3 +278,113 @@ func Test_WithMutatingWebhook(t *testing.T) { {Name: "wh-two"}, }, wh.Webhooks) } + +func Test_WithProxy(t *testing.T) { + depSpec := appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: "init-container", + Env: []corev1.EnvVar{ + { + Name: "INIT_VAR", + Value: "init-value", + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "c1", + Env: []corev1.EnvVar{ + { + Name: "TEST", + Value: "xxx", + }, + }, + }, + { + Name: "c2", + Env: []corev1.EnvVar{ + { + Name: "APP_ENV", + Value: "production", + }, + }, + }, + }, + }, + }, + } + + depl := generators.CreateDeploymentResource( + "test", + "test-ns", + generators.WithDeploymentSpec(depSpec), + generators.WithProxy("http://proxy.example.com:8080", "https://proxy.example.com:8443", "localhost,.cluster.local"), + ) + + // Verify init container has proxy env vars + expectedInitEnv := []corev1.EnvVar{ + { + Name: "INIT_VAR", + 
Value: "init-value", + }, + { + Name: "HTTP_PROXY", + Value: "http://proxy.example.com:8080", + }, + { + Name: "HTTPS_PROXY", + Value: "https://proxy.example.com:8443", + }, + { + Name: "NO_PROXY", + Value: "localhost,.cluster.local", + }, + } + assert.Equal(t, expectedInitEnv, depl.Spec.Template.Spec.InitContainers[0].Env, "init container should have proxy env vars") + + // Verify first regular container has proxy env vars + expectedC1Env := []corev1.EnvVar{ + { + Name: "TEST", + Value: "xxx", + }, + { + Name: "HTTP_PROXY", + Value: "http://proxy.example.com:8080", + }, + { + Name: "HTTPS_PROXY", + Value: "https://proxy.example.com:8443", + }, + { + Name: "NO_PROXY", + Value: "localhost,.cluster.local", + }, + } + assert.Equal(t, expectedC1Env, depl.Spec.Template.Spec.Containers[0].Env, "container c1 should have proxy env vars") + + // Verify second regular container has proxy env vars + expectedC2Env := []corev1.EnvVar{ + { + Name: "APP_ENV", + Value: "production", + }, + { + Name: "HTTP_PROXY", + Value: "http://proxy.example.com:8080", + }, + { + Name: "HTTPS_PROXY", + Value: "https://proxy.example.com:8443", + }, + { + Name: "NO_PROXY", + Value: "localhost,.cluster.local", + }, + } + assert.Equal(t, expectedC2Env, depl.Spec.Template.Spec.Containers[1].Env, "container c2 should have proxy env vars") +} diff --git a/internal/operator-controller/rukpak/render/render.go b/internal/operator-controller/rukpak/render/render.go index 86eb2ff492..5253406974 100644 --- a/internal/operator-controller/rukpak/render/render.go +++ b/internal/operator-controller/rukpak/render/render.go @@ -11,6 +11,7 @@ import ( "github.com/operator-framework/api/pkg/operators/v1alpha1" "github.com/operator-framework/operator-controller/internal/operator-controller/config" + "github.com/operator-framework/operator-controller/internal/operator-controller/proxy" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle" "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/util" hashutil "github.com/operator-framework/operator-controller/internal/shared/util/hash" @@ -66,6 +67,7 @@ type Options struct { // DeploymentConfig contains optional customizations to apply to CSV deployments. // If nil, no customizations are applied. DeploymentConfig *config.DeploymentConfig + Proxy *proxy.Proxy } func (o *Options) apply(opts ...Option) *Options { @@ -121,6 +123,12 @@ func WithDeploymentConfig(deploymentConfig *config.DeploymentConfig) Option { } } +func WithProxy(proxyVal *proxy.Proxy) Option { + return func(o *Options) { + o.Proxy = proxyVal + } +} + type BundleRenderer struct { BundleValidator BundleValidator ResourceGenerators []ResourceGenerator diff --git a/test/e2e/features/proxy.feature b/test/e2e/features/proxy.feature new file mode 100644 index 0000000000..e5c3d6a9c1 --- /dev/null +++ b/test/e2e/features/proxy.feature @@ -0,0 +1,97 @@ +Feature: HTTP Proxy Support for Operator Deployments + + As an OLM user I would like operators installed from catalogs to inherit + HTTP proxy configuration from the operator-controller, enabling them to + function correctly in environments that require HTTP proxies for outbound + connections. 
+ + Background: + Given OLM is available + And ClusterCatalog "test" serves bundles + And ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE} + + Scenario: Operator deployment has no proxy env vars in default configuration + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And resource "deployment/test-operator" is installed + And resource "deployment/test-operator" has no proxy environment variables + When ClusterExtension is removed + Then the ClusterExtension's constituent resources are removed + + Scenario: Operator deployment inherits proxy env vars from operator-controller + Given operator-controller has environment variable "NO_PROXY" set to "localhost,.cluster.local" + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And resource "deployment/test-operator" is installed + And resource "deployment/test-operator" has environment variable "NO_PROXY" set to "localhost,.cluster.local" + When operator-controller has environment variable "NO_PROXY" removed + When ClusterExtension is removed + Then the ClusterExtension's constituent resources are removed + + Scenario: Operator deployment inherits proxy env vars and updates when they are removed + Given operator-controller has environment variable "NO_PROXY" set to "localhost,.cluster.local" + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtension is rolled out + And ClusterExtension is available + And resource "deployment/test-operator" is installed + And resource "deployment/test-operator" has environment variable "NO_PROXY" set to "localhost,.cluster.local" + When operator-controller has environment variable "NO_PROXY" removed + Then ClusterExtension is rolled out + And ClusterExtension is available + And ClusterExtension revision manifests have no proxy environment variables + And resource "deployment/test-operator" has no proxy environment variables + When ClusterExtension is removed + Then the ClusterExtension's constituent resources are removed diff --git a/test/e2e/steps/steps.go b/test/e2e/steps/steps.go index 483514e403..9bba775341 100644 --- a/test/e2e/steps/steps.go +++ b/test/e2e/steps/steps.go @@ -5,6 +5,7 @@ import ( "compress/gzip" "context" "crypto/tls" + "encoding/base64" "encoding/json" "fmt" "io" @@ -41,9 +42,10 @@ import ( ) const ( - olmDeploymentName = "operator-controller-controller-manager" - timeout = 5 * time.Minute - tick = 1 * time.Second + olmDeploymentName = "operator-controller-controller-manager" + 
timeout = 10 * time.Minute + deploymentUpdateTimeout = 20 * time.Minute // Longer timeout for deployment updates via boxcutter + tick = 1 * time.Second ) var ( @@ -107,6 +109,13 @@ func RegisterSteps(sc *godog.ScenarioContext) { sc.Step(`^(?i)Prometheus metrics are returned in the response$`, PrometheusMetricsAreReturned) sc.Step(`^(?i)min value for (ClusterExtension|ClusterExtensionRevision) ((?:\.[a-zA-Z]+)+) is set to (\d+)$`, SetCRDFieldMinValue) + + sc.Step(`^(?i)resource "([^"]+)" has no proxy environment variables$`, ResourceHasNoProxyEnvVars) + sc.Step(`^(?i)ClusterExtension revision manifests have no proxy environment variables$`, ClusterExtensionRevisionHasNoProxyEnvVars) + sc.Step(`^(?i)operator-controller has environment variable "([^"]+)" set to "([^"]+)"$`, OperatorControllerHasEnvVar) + sc.Step(`^(?i)resource "([^"]+)" has environment variable "([^"]+)" set to "([^"]+)"$`, ResourceHasEnvVar) + sc.Step(`^(?i)operator-controller has environment variable "([^"]+)" removed$`, OperatorControllerEnvVarRemoved) + sc.Step(`^(?i)resource "([^"]+)" has no environment variable "([^"]+)"$`, ResourceHasNoEnvVar) } func init() { @@ -352,6 +361,33 @@ func ClusterExtensionResourcesRemoved(ctx context.Context) error { return err } } + + // Wait for CRDs owned by this ClusterExtension to be fully deleted + // This is critical to avoid CRD ownership conflicts when running multiple test scenarios + // that install the same bundle (which creates the same cluster-scoped CRDs) + logger.Info("waiting for CRDs owned by ClusterExtension to be deleted", "clusterExtension", sc.clusterExtensionName) + + // Use a longer timeout for CRD deletion since finalizers and cleanup can take time + crdTimeout := 2 * timeout // Double the normal timeout for CRD deletion + require.Eventually(godog.T(ctx), func() bool { + // Get all CRDs that are owned by this ClusterExtension + output, err := k8sClient("get", "crd", "-l", fmt.Sprintf("olm.operatorframework.io/owner-name=%s", sc.clusterExtensionName), "-o", "jsonpath={.items[*].metadata.name}") + if err != nil { + logger.V(1).Info("error checking for CRDs", "error", err) + return false + } + + // If output is empty, all CRDs are deleted + crdNames := strings.TrimSpace(output) + if crdNames == "" { + logger.Info("all CRDs owned by ClusterExtension have been deleted", "clusterExtension", sc.clusterExtensionName) + return true + } + + logger.V(1).Info("still waiting for CRDs to be deleted", "crds", crdNames) + return false + }, crdTimeout, tick, "CRDs owned by ClusterExtension not deleted after timeout") + return nil } @@ -359,6 +395,10 @@ func waitFor(ctx context.Context, conditionFn func() bool) { require.Eventually(godog.T(ctx), conditionFn, timeout, tick) } +func waitForWithTimeout(ctx context.Context, conditionFn func() bool, customTimeout time.Duration) { + require.Eventually(godog.T(ctx), conditionFn, customTimeout, tick) +} + type msgMatchFn func(string) bool func alwaysMatch(_ string) bool { return true } @@ -394,6 +434,78 @@ func waitForExtensionCondition(ctx context.Context, conditionType, conditionStat return waitForCondition(ctx, "clusterextension", sc.clusterExtensionName, conditionType, conditionStatus, conditionReason, msgCmp) } +// waitForOperatorControllerStartup waits for the operator-controller pod to be Ready +// and for the startup hook to complete. This ensures the manager cache is synced +// and the controller is ready to handle new ClusterExtensions with the current configuration. 
+func waitForOperatorControllerStartup(ctx context.Context) { + // Wait for the operator-controller pod to be Running and Ready + var podName string + var podStartTime *metav1.Time + waitFor(ctx, func() bool { + raw, err := k8sClient("get", "pods", "-n", olmNamespace, "-l", "app.kubernetes.io/name=operator-controller", "-o", "json") + if err != nil { + return false + } + var podList corev1.PodList + if err := json.Unmarshal([]byte(raw), &podList); err != nil { + return false + } + if len(podList.Items) == 0 { + return false + } + + // Find a Running and Ready pod (preferably the newest by start time) + var readyPod *corev1.Pod + for i := range podList.Items { + pod := &podList.Items[i] + if pod.Status.Phase != corev1.PodRunning { + continue + } + + // Check if pod is Ready + isReady := false + for _, cond := range pod.Status.Conditions { + if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue { + isReady = true + break + } + } + + if isReady { + // Select this pod if we don't have one yet, or if it's newer than the current selection + if readyPod == nil || (pod.Status.StartTime != nil && readyPod.Status.StartTime != nil && pod.Status.StartTime.After(readyPod.Status.StartTime.Time)) { + readyPod = pod + } + } + } + + if readyPod != nil { + podName = readyPod.Name + podStartTime = readyPod.Status.StartTime + return true + } + return false + }) + + // Wait for the startup hook to complete + // The startup hook logs "triggered reconciliation for existing ClusterExtensions" after processing + // We check all logs since pod start to ensure we don't miss the message + waitFor(ctx, func() bool { + // Build kubectl logs arguments conditionally based on whether we have a start time + args := []string{"logs", "-n", olmNamespace, podName} + if podStartTime != nil { + args = append(args, "--since-time="+podStartTime.Format(time.RFC3339)) + } + + logs, err := k8sClient(args...) 
+ if err != nil { + return false + } + // Look for the log message that indicates startup hook completion + return strings.Contains(logs, "triggered reconciliation for existing ClusterExtensions") + }) +} + func ClusterExtensionReportsCondition(ctx context.Context, conditionType, conditionStatus, conditionReason string, msg *godog.DocString) error { msgCmp := alwaysMatch if msg != nil { @@ -1168,3 +1280,419 @@ func latestActiveRevisionForExtension(extName string) (*ocv1.ClusterExtensionRev return latest, nil } + +// ResourceHasNoProxyEnvVars verifies that a deployment resource does NOT have proxy env vars +func ResourceHasNoProxyEnvVars(ctx context.Context, resource string) error { + sc := scenarioCtx(ctx) + resource = substituteScenarioVars(resource, sc) + rtype, name, found := strings.Cut(resource, "/") + if !found { + return fmt.Errorf("resource %s is not in the format /", resource) + } + + if rtype != "deployment" { + return fmt.Errorf("resource type %s is not supported for proxy env var checking, only deployment is supported", rtype) + } + + // Use longer timeout for deployment updates (boxcutter can take >10 minutes) + waitForWithTimeout(ctx, func() bool { + raw, err := k8sClient("get", rtype, name, "-n", sc.namespace, "-o", "json") + if err != nil { + return false + } + + // IMPORTANT: Create fresh struct on each iteration so old data doesn't persist + var dep appsv1.Deployment + if err := json.Unmarshal([]byte(raw), &dep); err != nil { + return false + } + + // Check that NO containers have proxy env vars + for _, container := range dep.Spec.Template.Spec.Containers { + for _, env := range container.Env { + if env.Name == "HTTP_PROXY" || env.Name == "HTTPS_PROXY" || env.Name == "NO_PROXY" { + return false + } + } + } + // Also check that NO init containers have proxy env vars + for _, container := range dep.Spec.Template.Spec.InitContainers { + for _, env := range container.Env { + if env.Name == "HTTP_PROXY" || env.Name == "HTTPS_PROXY" || env.Name == "NO_PROXY" { + return false + } + } + } + return true + }, deploymentUpdateTimeout) + + return nil +} + +// OperatorControllerHasEnvVar patches the operator-controller deployment to add an environment variable +func OperatorControllerHasEnvVar(ctx context.Context, envName, envValue string) error { + // Get the current deployment first + raw, err := k8sClient("get", "deployment", olmDeploymentName, "-n", olmNamespace, "-o", "json") + if err != nil { + return fmt.Errorf("failed to get operator-controller deployment: %w", err) + } + var dep appsv1.Deployment + if err := json.Unmarshal([]byte(raw), &dep); err != nil { + return fmt.Errorf("failed to unmarshal deployment: %w", err) + } + + // Check if the env var already exists + if len(dep.Spec.Template.Spec.Containers) > 0 { + for _, env := range dep.Spec.Template.Spec.Containers[0].Env { + if env.Name == envName { + if env.Value == envValue { + // Already has the correct value, no need to patch + return nil + } + // Env var exists with different value, need to update + break + } + } + } + + // Use strategic merge patch to add/update the env var + patchObj := map[string]interface{}{ + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []map[string]interface{}{ + { + "name": "manager", + "env": []map[string]interface{}{ + { + "name": envName, + "value": envValue, + }, + }, + }, + }, + }, + }, + }, + } + patchBytes, err := json.Marshal(patchObj) + if err != nil { + return fmt.Errorf("failed to marshal deployment patch: 
%w", err) + } + patch := string(patchBytes) + _, err = k8sClient("patch", "deployment", olmDeploymentName, "-n", olmNamespace, "--type=strategic", "-p", patch) + if err != nil { + return fmt.Errorf("failed to patch operator-controller deployment: %w", err) + } + + // Wait for the deployment to roll out with the new env var + waitFor(ctx, func() bool { + raw, err := k8sClient("get", "deployment", olmDeploymentName, "-n", olmNamespace, "-o", "json") + if err != nil { + return false + } + var dep appsv1.Deployment + if err := json.Unmarshal([]byte(raw), &dep); err != nil { + return false + } + + // Check deployment is available + available := false + for _, cond := range dep.Status.Conditions { + if cond.Type == appsv1.DeploymentAvailable && cond.Status == corev1.ConditionTrue { + available = true + break + } + } + if !available { + return false + } + + // Check all replicas are updated + if dep.Status.UpdatedReplicas != dep.Status.Replicas { + return false + } + + // Check the env var is present in the first container (manager) + if len(dep.Spec.Template.Spec.Containers) == 0 { + return false + } + for _, env := range dep.Spec.Template.Spec.Containers[0].Env { + if env.Name == envName && env.Value == envValue { + return true + } + } + return false + }) + + // Wait for operator-controller to fully start up with the new configuration + waitForOperatorControllerStartup(ctx) + + return nil +} + +// ResourceHasEnvVar verifies that a deployment resource has a specific environment variable with a specific value +func ResourceHasEnvVar(ctx context.Context, resource, envName, envValue string) error { + sc := scenarioCtx(ctx) + resource = substituteScenarioVars(resource, sc) + rtype, name, found := strings.Cut(resource, "/") + if !found { + return fmt.Errorf("resource %s is not in the format /", resource) + } + + if rtype != "deployment" { + return fmt.Errorf("resource type %s is not supported for env var checking, only deployment is supported", rtype) + } + + waitFor(ctx, func() bool { + var dep appsv1.Deployment + raw, err := k8sClient("get", rtype, name, "-n", sc.namespace, "-o", "json") + if err != nil { + return false + } + if err := json.Unmarshal([]byte(raw), &dep); err != nil { + return false + } + + // Check that ALL containers have the env var with the correct value + if len(dep.Spec.Template.Spec.Containers) == 0 { + return false + } + for _, container := range dep.Spec.Template.Spec.Containers { + found := false + for _, env := range container.Env { + if env.Name == envName && env.Value == envValue { + found = true + break + } + } + if !found { + return false + } + } + // Also check that ALL init containers have the env var with the correct value + for _, container := range dep.Spec.Template.Spec.InitContainers { + found := false + for _, env := range container.Env { + if env.Name == envName && env.Value == envValue { + found = true + break + } + } + if !found { + return false + } + } + return true + }) + + return nil +} + +// OperatorControllerEnvVarRemoved removes an environment variable from the operator-controller deployment +func OperatorControllerEnvVarRemoved(ctx context.Context, envName string) error { + // Use kubectl set env to remove the environment variable by name + // This is more stable than JSON patch with array indices, which can break if env var ordering changes + _, err := k8sClient("set", "env", "deployment/"+olmDeploymentName, "-n", olmNamespace, envName+"-") + if err != nil { + return fmt.Errorf("failed to remove env var %s from operator-controller deployment: %w", 
envName, err) + } + + // Wait for the deployment to roll out without the env var + waitFor(ctx, func() bool { + raw, err := k8sClient("get", "deployment", olmDeploymentName, "-n", olmNamespace, "-o", "json") + if err != nil { + return false + } + var dep appsv1.Deployment + if err := json.Unmarshal([]byte(raw), &dep); err != nil { + return false + } + + // Check deployment is available + available := false + for _, cond := range dep.Status.Conditions { + if cond.Type == appsv1.DeploymentAvailable && cond.Status == corev1.ConditionTrue { + available = true + break + } + } + if !available { + return false + } + + // Check all replicas are updated + if dep.Status.UpdatedReplicas != dep.Status.Replicas { + return false + } + + // Check the env var is NOT present in the first container (manager) + if len(dep.Spec.Template.Spec.Containers) == 0 { + return false + } + for _, env := range dep.Spec.Template.Spec.Containers[0].Env { + if env.Name == envName { + return false + } + } + return true + }) + + // Wait for operator-controller to fully start up with the new configuration + waitForOperatorControllerStartup(ctx) + logger.Info("operator-controller startup hook completed, proxy config applied") + + // Wait for reconciliation to complete - the ClusterExtension should reach rolled out state + // with the new proxy configuration (or lack thereof) + return ClusterExtensionIsRolledOut(ctx) +} + +// ResourceHasNoEnvVar verifies that a deployment resource does NOT have a specific environment variable +func ResourceHasNoEnvVar(ctx context.Context, resource, envName string) error { + sc := scenarioCtx(ctx) + resource = substituteScenarioVars(resource, sc) + rtype, name, found := strings.Cut(resource, "/") + if !found { + return fmt.Errorf("resource %s is not in the format /", resource) + } + + if rtype != "deployment" { + return fmt.Errorf("resource type %s is not supported for env var checking, only deployment is supported", rtype) + } + + waitFor(ctx, func() bool { + var dep appsv1.Deployment + raw, err := k8sClient("get", rtype, name, "-n", sc.namespace, "-o", "json") + if err != nil { + return false + } + if err := json.Unmarshal([]byte(raw), &dep); err != nil { + return false + } + + // Check that NO containers have the env var + for _, container := range dep.Spec.Template.Spec.Containers { + for _, env := range container.Env { + if env.Name == envName { + return false + } + } + } + // Also check that NO init containers have the env var + for _, container := range dep.Spec.Template.Spec.InitContainers { + for _, env := range container.Env { + if env.Name == envName { + return false + } + } + } + return true + }) + + return nil +} + +// ClusterExtensionRevisionHasNoProxyEnvVars verifies that the active ClusterExtensionRevision +// manifests do NOT contain proxy environment variables in deployment resources +// NOTE: This step only works with BoxcutterRuntime enabled. When BoxcutterRuntime is disabled, +// Rukpak/Helm is used instead, which doesn't create ClusterExtensionRevisions. 
+func ClusterExtensionRevisionHasNoProxyEnvVars(ctx context.Context) error { + // Skip this check if BoxcutterRuntime is disabled + // When using Rukpak/Helm, there are no ClusterExtensionRevisions to check + if !featureGates[features.BoxcutterRuntime] { + logger.Info("Skipping ClusterExtensionRevision manifest check - BoxcutterRuntime is disabled") + return nil + } + + sc := scenarioCtx(ctx) + + var revisionName string + // Wait for active revision to be available - there might be a brief transition period + // where the ClusterExtension is updating and activeRevisions is temporarily empty + require.Eventually(godog.T(ctx), func() bool { + raw, err := k8sClient("get", "clusterextension", sc.clusterExtensionName, "-o", "jsonpath={.status.activeRevisions[0].name}") + if err != nil { + logger.V(1).Info("failed to get active revision name", "error", err) + return false + } + revisionName = strings.TrimSpace(raw) + return revisionName != "" + }, timeout, tick, "active revision not found after timeout") + + // Get the ClusterExtensionRevision + revRaw, err := k8sClient("get", "clusterextensionrevision", revisionName, "-o", "json") + if err != nil { + return fmt.Errorf("failed to get revision %s: %w", revisionName, err) + } + + var rev map[string]interface{} + if err := json.Unmarshal([]byte(revRaw), &rev); err != nil { + return fmt.Errorf("failed to unmarshal revision: %w", err) + } + + // Check phases for deployment resources + specVal, ok := rev["spec"] + if !ok { + return fmt.Errorf("revision %s has no spec field", revisionName) + } + + specMap, ok := specVal.(map[string]interface{}) + if !ok { + return fmt.Errorf("revision %s has unexpected spec structure (got %T)", revisionName, specVal) + } + + phasesVal, found := specMap["phases"] + if !found || phasesVal == nil { + return fmt.Errorf("no phases found in revision %s", revisionName) + } + + phases, ok := phasesVal.([]interface{}) + if !ok { + return fmt.Errorf("revision %s has phases field with unexpected type (got %T)", revisionName, phasesVal) + } + + for _, phase := range phases { + phaseMap, ok := phase.(map[string]interface{}) + if !ok { + return fmt.Errorf("revision %s has phase with unexpected structure (got %T)", revisionName, phase) + } + kindVal, ok := phaseMap["kind"].(string) + if !ok || kindVal != "Deployment" { + continue + } + + // Get manifests from this phase + manifestsVal, exists := phaseMap["manifests"] + if !exists || manifestsVal == nil { + continue + } + manifests, ok := manifestsVal.([]interface{}) + if !ok { + return fmt.Errorf("revision %s has manifests field with unexpected type (got %T)", revisionName, manifestsVal) + } + + for _, manifestData := range manifests { + // manifestData is base64-encoded YAML + manifestStr, ok := manifestData.(string) + if !ok { + return fmt.Errorf("unexpected manifest data type %T in revision %s", manifestData, revisionName) + } + + // Decode base64 + decoded, err := base64.StdEncoding.DecodeString(manifestStr) + if err != nil { + return fmt.Errorf("failed to base64 decode manifest in revision %s: %w", revisionName, err) + } + + // Check if the manifest contains proxy env vars + manifestYaml := string(decoded) + if strings.Contains(manifestYaml, "NO_PROXY") || + strings.Contains(manifestYaml, "HTTP_PROXY") || + strings.Contains(manifestYaml, "HTTPS_PROXY") { + return fmt.Errorf("revision %s contains proxy environment variables in manifests", revisionName) + } + } + } + + return nil +}
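---

Reviewer note (not part of the patch): below is a minimal sketch of how the new
proxy package introduced above is intended to be used. It assumes the code lives
inside this module (the internal/ path is not importable from other modules); the
example proxy URL and the main() wiring are illustrative only, while the package
identifiers (NewFromEnv, SanitizeURL, Fingerprint, ConfigHashKey) match the patch.

package main

import (
	"fmt"
	"os"

	"github.com/operator-framework/operator-controller/internal/operator-controller/proxy"
)

func main() {
	// Illustrative proxy settings; in operator-controller these come from the
	// Deployment's environment, not from code.
	os.Setenv("HTTPS_PROXY", "https://user:secret@proxy.example.com:8443")
	os.Setenv("NO_PROXY", "localhost,.cluster.local")

	// Read HTTP_PROXY/HTTPS_PROXY/NO_PROXY once at startup; nil means no proxy
	// is configured. The fingerprint is computed and cached at construction time.
	cfg := proxy.NewFromEnv()
	if cfg != nil {
		// SanitizeURL strips credentials so the value is safe to log.
		fmt.Println("https proxy:", proxy.SanitizeURL(cfg.HTTPSProxy)) // https://proxy.example.com:8443
	}

	// The fingerprint is what the startup runnable writes into the
	// olm.operatorframework.io/proxy-config-hash annotation (proxy.ConfigHashKey)
	// on each ClusterExtension, and what the boxcutter applier compares against
	// the current revision's annotation to decide whether a new revision is needed.
	desired := cfg.Fingerprint() // "" when cfg is nil, which means the annotation is removed
	current := ""                // stand-in for ext.Annotations[proxy.ConfigHashKey]
	if current != desired {
		fmt.Println("proxy configuration changed; reconcile and roll a new revision")
	}
}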