Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
77 changes: 77 additions & 0 deletions test/e2e/features/user-managed-fields.feature
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
@BoxcutterRuntime
Feature: Preserve user-managed fields on deployed resources
OLM uses Server-Side Apply with specific field ownership. Fields that OLM does
not declare ownership of (e.g. user-applied annotations and labels) belong to
other managers and must be preserved across reconciliation cycles.
Related: https://github.com/operator-framework/operator-lifecycle-manager/issues/3392

Background:
Given OLM is available
And ClusterCatalog "test" serves bundles
And ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE}

Scenario: User-added annotations and labels coexist with bundle-defined labels after reconciliation
When ClusterExtension is applied
"""
apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: ${NAME}
spec:
namespace: ${TEST_NAMESPACE}
serviceAccount:
name: olm-sa
source:
sourceType: Catalog
catalog:
packageName: test
selector:
matchLabels:
"olm.operatorframework.io/metadata.name": test-catalog
"""
Then ClusterExtension is rolled out
And ClusterExtension is available
And resource "deployment/test-operator" is available
# The bundle defines labels on the deployment via the CSV spec; verify they are present
And resource "deployment/test-operator" has label "app.kubernetes.io/name" with value "test-operator"
# Simulate a user hand-editing the deployed resource with kubectl annotate/label
When user adds annotation "example.com/custom-annotation=my-value" to "deployment/test-operator"
And user adds label "example.com/custom-label=my-value" to "deployment/test-operator"
Then resource "deployment/test-operator" has annotation "example.com/custom-annotation" with value "my-value"
And resource "deployment/test-operator" has label "example.com/custom-label" with value "my-value"
# Force a spec change so the controller runs a full reconcile of the latest generation,
# then assert the user-managed fields survived the re-apply
When ClusterExtension reconciliation is triggered
And ClusterExtension has been reconciled the latest generation
Then resource "deployment/test-operator" has annotation "example.com/custom-annotation" with value "my-value"
And resource "deployment/test-operator" has label "example.com/custom-label" with value "my-value"
# Bundle-defined labels must still be intact after reconciliation
And resource "deployment/test-operator" has label "app.kubernetes.io/name" with value "test-operator"

Scenario: Deployment rollout restart persists after OLM reconciliation
When ClusterExtension is applied
"""
apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
name: ${NAME}
spec:
namespace: ${TEST_NAMESPACE}
serviceAccount:
name: olm-sa
source:
sourceType: Catalog
catalog:
packageName: test
selector:
matchLabels:
"olm.operatorframework.io/metadata.name": test-catalog
"""
Then ClusterExtension is rolled out
And ClusterExtension is available
And resource "deployment/test-operator" is available
# "kubectl rollout restart" sets the kubectl.kubernetes.io/restartedAt annotation
# on the pod template, which spins up a second ReplicaSet
When user performs rollout restart on "deployment/test-operator"
Then deployment "test-operator" has restart annotation
And deployment "test-operator" rollout is complete
And deployment "test-operator" has 2 replica sets
# Reconciliation must not strip the restart annotation or roll pods back
When ClusterExtension reconciliation is triggered
And ClusterExtension has been reconciled the latest generation
Then deployment "test-operator" has restart annotation
And deployment "test-operator" rollout is complete
306 changes: 306 additions & 0 deletions test/e2e/steps/steps.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,15 @@ func RegisterSteps(sc *godog.ScenarioContext) {
sc.Step(`^(?i)resource apply fails with error msg containing "([^"]+)"$`, ResourceApplyFails)
sc.Step(`^(?i)resource "([^"]+)" is eventually restored$`, ResourceRestored)
sc.Step(`^(?i)resource "([^"]+)" matches$`, ResourceMatches)
sc.Step(`^(?i)user performs rollout restart on "([^"]+)"$`, UserPerformsRolloutRestart)
sc.Step(`^(?i)user adds annotation "([^"]+)" to "([^"]+)"$`, UserAddsAnnotation)
sc.Step(`^(?i)user adds label "([^"]+)" to "([^"]+)"$`, UserAddsLabel)
sc.Step(`^(?i)resource "([^"]+)" has annotation "([^"]+)" with value "([^"]+)"$`, ResourceHasAnnotation)
sc.Step(`^(?i)resource "([^"]+)" has label "([^"]+)" with value "([^"]+)"$`, ResourceHasLabel)
sc.Step(`^(?i)deployment "([^"]+)" has restart annotation$`, DeploymentHasRestartAnnotation)
sc.Step(`^(?i)deployment "([^"]+)" rollout is complete$`, DeploymentRolloutIsComplete)
sc.Step(`^(?i)deployment "([^"]+)" has (\d+) replica sets?$`, DeploymentHasReplicaSets)
sc.Step(`^(?i)ClusterExtension reconciliation is triggered$`, TriggerClusterExtensionReconciliation)

sc.Step(`^(?i)ServiceAccount "([^"]*)" with needed permissions is available in test namespace$`, ServiceAccountWithNeededPermissionsIsAvailableInNamespace)
sc.Step(`^(?i)ServiceAccount "([^"]*)" with needed permissions is available in \${TEST_NAMESPACE}$`, ServiceAccountWithNeededPermissionsIsAvailableInNamespace)
Expand Down Expand Up @@ -1309,3 +1318,300 @@ func latestActiveRevisionForExtension(extName string) (*ocv1.ClusterExtensionRev

return latest, nil
}

// UserAddsAnnotation adds a custom annotation to a resource using kubectl annotate.
func UserAddsAnnotation(ctx context.Context, annotation, resourceName string) error {
sc := scenarioCtx(ctx)
resourceName = substituteScenarioVars(resourceName, sc)

kind, name, ok := strings.Cut(resourceName, "/")
if !ok {
return fmt.Errorf("invalid resource name format: %q (expected kind/name)", resourceName)
}

out, err := k8sClient("annotate", kind, name, annotation, "--overwrite", "-n", sc.namespace)
if err != nil {
return fmt.Errorf("failed to annotate %s: %w; stderr: %s", resourceName, err, stderrOutput(err))
}
logger.V(1).Info("Annotation added", "resource", resourceName, "annotation", annotation, "output", out)
return nil
}

// UserAddsLabel adds a custom label to a resource using kubectl label.
func UserAddsLabel(ctx context.Context, label, resourceName string) error {
sc := scenarioCtx(ctx)
resourceName = substituteScenarioVars(resourceName, sc)

kind, name, ok := strings.Cut(resourceName, "/")
if !ok {
return fmt.Errorf("invalid resource name format: %q (expected kind/name)", resourceName)
}

out, err := k8sClient("label", kind, name, label, "--overwrite", "-n", sc.namespace)
if err != nil {
return fmt.Errorf("failed to label %s: %w; stderr: %s", resourceName, err, stderrOutput(err))
}
logger.V(1).Info("Label added", "resource", resourceName, "label", label, "output", out)
return nil
}

// ResourceHasAnnotation waits for a resource to have the given annotation key with the expected value.
func ResourceHasAnnotation(ctx context.Context, resourceName, annotationKey, expectedValue string) error {
sc := scenarioCtx(ctx)
resourceName = substituteScenarioVars(resourceName, sc)

kind, name, ok := strings.Cut(resourceName, "/")
if !ok {
return fmt.Errorf("invalid resource name format: %q (expected kind/name)", resourceName)
}

waitFor(ctx, func() bool {
out, err := k8sClient("get", kind, name, "-n", sc.namespace, "-o", "json")
if err != nil {
return false
}
var obj unstructured.Unstructured
if err := json.Unmarshal([]byte(out), &obj); err != nil {
return false
}
annotations := obj.GetAnnotations()
if v, found := annotations[annotationKey]; found && v == expectedValue {
logger.V(1).Info("Annotation found", "resource", resourceName, "key", annotationKey, "value", v)
return true
}
logger.V(1).Info("Annotation not yet present or value mismatch", "resource", resourceName, "key", annotationKey, "annotations", annotations)
return false
})
return nil
}

// ResourceHasLabel waits for a resource to have the given label key with the expected value.
func ResourceHasLabel(ctx context.Context, resourceName, labelKey, expectedValue string) error {
sc := scenarioCtx(ctx)
resourceName = substituteScenarioVars(resourceName, sc)

kind, name, ok := strings.Cut(resourceName, "/")
if !ok {
return fmt.Errorf("invalid resource name format: %q (expected kind/name)", resourceName)
}

waitFor(ctx, func() bool {
out, err := k8sClient("get", kind, name, "-n", sc.namespace, "-o", "json")
if err != nil {
return false
}
var obj unstructured.Unstructured
if err := json.Unmarshal([]byte(out), &obj); err != nil {
return false
}
labels := obj.GetLabels()
if v, found := labels[labelKey]; found && v == expectedValue {
logger.V(1).Info("Label found", "resource", resourceName, "key", labelKey, "value", v)
return true
}
logger.V(1).Info("Label not yet present or value mismatch", "resource", resourceName, "key", labelKey, "labels", labels)
return false
})
return nil
}

// UserPerformsRolloutRestart simulates a user running "kubectl rollout restart deployment/<name>".
// See: https://github.com/operator-framework/operator-lifecycle-manager/issues/3392
func UserPerformsRolloutRestart(ctx context.Context, resourceName string) error {
sc := scenarioCtx(ctx)
resourceName = substituteScenarioVars(resourceName, sc)

kind, deploymentName, ok := strings.Cut(resourceName, "/")
if !ok {
return fmt.Errorf("invalid resource name format: %q (expected kind/name)", resourceName)
}

if kind != "deployment" {
return fmt.Errorf("only deployment resources are supported for restart annotation, got: %q", kind)
}

// Run kubectl rollout restart to add the restart annotation.
// This is the real command users run, so we test actual user behavior.
out, err := k8sClient("rollout", "restart", resourceName, "-n", sc.namespace)
if err != nil {
return fmt.Errorf("failed to rollout restart %s: %w; stderr: %s", resourceName, err, stderrOutput(err))
}

logger.V(1).Info("Rollout restart initiated", "deployment", deploymentName, "output", out)

return nil
}

// DeploymentHasRestartAnnotation waits for the deployment's pod template to have
// the kubectl.kubernetes.io/restartedAt annotation. Uses JSON parsing to avoid
// JSONPath issues with dots in annotation keys. Polls with timeout.
// DeploymentHasRestartAnnotation polls until the deployment's pod template
// carries the kubectl.kubernetes.io/restartedAt annotation (written by
// "kubectl rollout restart"). The object is decoded from full JSON rather
// than queried via JSONPath because the annotation key contains dots.
func DeploymentHasRestartAnnotation(ctx context.Context, deploymentName string) error {
	scen := scenarioCtx(ctx)
	deploymentName = substituteScenarioVars(deploymentName, scen)

	const restartAnnotationKey = "kubectl.kubernetes.io/restartedAt"
	waitFor(ctx, func() bool {
		raw, err := k8sClient("get", "deployment", deploymentName, "-n", scen.namespace, "-o", "json")
		if err != nil {
			return false
		}
		var dep appsv1.Deployment
		if json.Unmarshal([]byte(raw), &dep) != nil {
			return false
		}
		when, present := dep.Spec.Template.Annotations[restartAnnotationKey]
		if !present {
			logger.V(1).Info("Restart annotation not yet present", "deployment", deploymentName, "annotations", dep.Spec.Template.Annotations)
			return false
		}
		logger.V(1).Info("Restart annotation found", "deployment", deploymentName, "restartedAt", when)
		return true
	})
	return nil
}

// TriggerClusterExtensionReconciliation patches the ClusterExtension spec to bump
// its metadata generation, forcing the controller to run a full reconciliation loop.
// Use with "ClusterExtension has been reconciled the latest generation" to confirm
// the controller processed the change before asserting on the cluster state.
//
// We flip install.preflight.crdUpgradeSafety.enforcement between "None" and "Strict"
// because it is a real spec field that the API server will persist (unlike unknown
// fields, which are pruned by structural schemas). Toggling ensures that each call
// results in a spec change, reliably bumping .metadata.generation.
// TriggerClusterExtensionReconciliation patches the ClusterExtension spec to bump
// its metadata generation, forcing the controller to run a full reconciliation loop.
// Use with "ClusterExtension has been reconciled the latest generation" to confirm
// the controller processed the change before asserting on the cluster state.
//
// We flip install.preflight.crdUpgradeSafety.enforcement between "None" and "Strict"
// because it is a real spec field that the API server will persist (unlike unknown
// fields, which are pruned by structural schemas). Toggling ensures that each call
// results in a spec change, reliably bumping .metadata.generation.
func TriggerClusterExtensionReconciliation(ctx context.Context) error {
	sc := scenarioCtx(ctx)

	out, err := k8sClient("get", "clusterextension", sc.clusterExtensionName, "-o", "json")
	if err != nil {
		return fmt.Errorf("failed to get ClusterExtension %s: %w; stderr: %s", sc.clusterExtensionName, err, stderrOutput(err))
	}

	var obj map[string]interface{}
	if err := json.Unmarshal([]byte(out), &obj); err != nil {
		return fmt.Errorf("failed to unmarshal ClusterExtension %s JSON: %w", sc.clusterExtensionName, err)
	}

	// NestedString walks the nested maps safely; on any missing level or type
	// mismatch it returns "", which is exactly the "field not set" default the
	// hand-rolled type-assertion chain produced before.
	currentEnforcement, _, _ := unstructured.NestedString(obj, "spec", "install", "preflight", "crdUpgradeSafety", "enforcement")

	// Toggle the value so every invocation is a real spec change.
	newEnforcement := "None"
	if currentEnforcement == "None" {
		newEnforcement = "Strict"
	}

	payload := fmt.Sprintf(`{"spec":{"install":{"preflight":{"crdUpgradeSafety":{"enforcement":%q}}}}}`, newEnforcement)
	_, err = k8sClient("patch", "clusterextension", sc.clusterExtensionName,
		"--type=merge",
		"-p", payload)
	if err != nil {
		return fmt.Errorf("failed to trigger reconciliation for ClusterExtension %s: %w; stderr: %s", sc.clusterExtensionName, err, stderrOutput(err))
	}
	return nil
}

// DeploymentRolloutIsComplete verifies that a deployment rollout has completed successfully.
// This ensures the new ReplicaSet is fully scaled up and the old one is scaled down.
func DeploymentRolloutIsComplete(ctx context.Context, deploymentName string) error {
sc := scenarioCtx(ctx)
deploymentName = substituteScenarioVars(deploymentName, sc)

waitFor(ctx, func() bool {
out, err := k8sClient("rollout", "status", "deployment/"+deploymentName, "-n", sc.namespace, "--watch=false")
if err != nil {
logger.V(1).Info("Failed to get rollout status", "deployment", deploymentName, "error", err)
return false
}
// Successful rollout shows "successfully rolled out"
if strings.Contains(out, "successfully rolled out") {
logger.V(1).Info("Rollout completed successfully", "deployment", deploymentName)
return true
}
logger.V(1).Info("Rollout not yet complete", "deployment", deploymentName, "status", out)
return false
})
return nil
}

// DeploymentHasReplicaSets verifies that a deployment has the expected number of ReplicaSets
// and that at least one owned ReplicaSet is active with pods running.
func DeploymentHasReplicaSets(ctx context.Context, deploymentName string, expectedCountStr string) error {
sc := scenarioCtx(ctx)
deploymentName = substituteScenarioVars(deploymentName, sc)

expectedCount := 2 // Default to 2 (original + restarted)
if n, err := fmt.Sscanf(expectedCountStr, "%d", &expectedCount); err != nil || n != 1 {
logger.V(1).Info("Failed to parse expected count, using default", "input", expectedCountStr, "default", 2)
expectedCount = 2
}

waitFor(ctx, func() bool {
deploymentOut, err := k8sClient("get", "deployment", deploymentName, "-n", sc.namespace, "-o", "json")
if err != nil {
logger.V(1).Info("Failed to get deployment", "deployment", deploymentName, "error", err)
return false
}

var deployment appsv1.Deployment
if err := json.Unmarshal([]byte(deploymentOut), &deployment); err != nil {
logger.V(1).Info("Failed to parse deployment", "error", err)
return false
}

out, err := k8sClient("get", "rs", "-n", sc.namespace, "-o", "json")
if err != nil {
logger.V(1).Info("Failed to get ReplicaSets", "deployment", deploymentName, "error", err)
return false
}

var allRsList struct {
Items []appsv1.ReplicaSet `json:"items"`
}
if err := json.Unmarshal([]byte(out), &allRsList); err != nil {
logger.V(1).Info("Failed to parse ReplicaSets", "error", err)
return false
}

var rsList []appsv1.ReplicaSet
for _, rs := range allRsList.Items {
for _, owner := range rs.OwnerReferences {
if owner.Kind == "Deployment" && owner.UID == deployment.UID {
rsList = append(rsList, rs)
break
}
}
}

if len(rsList) != expectedCount {
logger.V(1).Info("ReplicaSet count does not match expected value yet", "deployment", deploymentName, "current", len(rsList), "expected", expectedCount)
return false
}

// Verify at least one ReplicaSet has active replicas
hasActiveRS := false
for _, rs := range rsList {
if rs.Status.Replicas > 0 && rs.Status.ReadyReplicas > 0 {
hasActiveRS = true
logger.V(1).Info("Found active ReplicaSet", "name", rs.Name, "replicas", rs.Status.Replicas, "ready", rs.Status.ReadyReplicas)
}
}

if !hasActiveRS {
logger.V(1).Info("No active ReplicaSet found yet", "deployment", deploymentName)
return false
}

logger.V(1).Info("ReplicaSet verification passed", "deployment", deploymentName, "count", len(rsList))
return true
})
return nil
}
Loading