@@ -140,11 +140,13 @@ func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev
return ctrl.Result{}, fmt.Errorf("converting to boxcutter revision: %v", err)
}

//
// Teardown
//
if !rev.DeletionTimestamp.IsZero() || rev.Spec.LifecycleState == ocv1.ClusterExtensionRevisionLifecycleStateArchived {
return c.teardown(ctx, rev)
return c.teardown(ctx, revision, rev)
}

revVersion := rev.GetAnnotations()[labels.BundleVersionKey]
//
// Reconcile
//
@@ -203,6 +205,7 @@ func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev
}
}

revVersion := rev.GetAnnotations()[labels.BundleVersionKey]
if !rres.InTransition() {
markAsProgressing(rev, ocv1.ReasonSucceeded, fmt.Sprintf("Revision %s has rolled out.", revVersion))
} else {
@@ -275,18 +278,32 @@ func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev
return ctrl.Result{}, nil
}

func (c *ClusterExtensionRevisionReconciler) teardown(ctx context.Context, rev *ocv1.ClusterExtensionRevision) (ctrl.Result, error) {
if err := c.TrackingCache.Free(ctx, rev); err != nil {
markAsAvailableUnknown(rev, ocv1.ClusterExtensionRevisionReasonReconciling, err.Error())
func (c *ClusterExtensionRevisionReconciler) teardown(ctx context.Context, revision *boxcutter.Revision, cer *ocv1.ClusterExtensionRevision) (ctrl.Result, error) {
if err := c.TrackingCache.Free(ctx, cer); err != nil {
markAsAvailableUnknown(cer, ocv1.ClusterExtensionRevisionReasonReconciling, err.Error())
return ctrl.Result{}, fmt.Errorf("error stopping informers: %v", err)
}

// Ensure conditions are set before removing the finalizer when archiving
if rev.Spec.LifecycleState == ocv1.ClusterExtensionRevisionLifecycleStateArchived && markAsArchived(rev) {
return ctrl.Result{}, nil
if cer.Spec.LifecycleState == ocv1.ClusterExtensionRevisionLifecycleStateArchived {
revisionEngine, err := c.RevisionEngineFactory.CreateRevisionEngine(ctx, cer)
if err != nil {
setRetryingConditions(cer, fmt.Sprintf("error archiving: %v", err))
return ctrl.Result{}, fmt.Errorf("failed to create revision engine: %v", err)
}
tdres, err := revisionEngine.Teardown(ctx, *revision)
if err != nil {
setRetryingConditions(cer, fmt.Sprintf("error archiving: %v", err))
return ctrl.Result{}, fmt.Errorf("error tearing down revision: %v", err)
}
if tdres != nil && !tdres.IsComplete() {
setRetryingConditions(cer, "tearing down revision")
return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
}
// Ensure conditions are set before removing the finalizer when archiving
if markAsArchived(cer) {
return ctrl.Result{}, nil
}
}

if err := c.removeFinalizer(ctx, rev, clusterExtensionRevisionTeardownFinalizer); err != nil {
if err := c.removeFinalizer(ctx, cer, clusterExtensionRevisionTeardownFinalizer); err != nil {
return ctrl.Result{}, fmt.Errorf("error removing teardown finalizer: %v", err)
}
return ctrl.Result{}, nil
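
Note (not part of the diff): the new teardown path calls setRetryingConditions, which is defined outside this hunk. Below is a minimal sketch of such a helper, inferred from the test expectations further down (Progressing=True with reason Retrying and the given message); the import paths and the exact body are assumptions.

package controllers

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	// ocv1 import path is an assumption based on the surrounding code
	ocv1 "github.com/operator-framework/operator-controller/api/v1"
)

// setRetryingConditions marks the revision as still progressing with reason
// Retrying, so callers can return early (and requeue) without reporting a
// terminal failure. Sketch only; the real helper may differ.
func setRetryingConditions(rev *ocv1.ClusterExtensionRevision, message string) {
	meta.SetStatusCondition(&rev.Status.Conditions, metav1.Condition{
		Type:               ocv1.ClusterExtensionRevisionTypeProgressing,
		Status:             metav1.ConditionTrue,
		Reason:             ocv1.ClusterExtensionRevisionReasonRetrying,
		Message:            message,
		ObservedGeneration: rev.Generation,
	})
}

In the teardown above it is called before every early return, so the revision never sits in an unexplained state while the controller retries.
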
@@ -641,9 +641,11 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) {
existingObjs func() []client.Object
revisionResult machinery.RevisionResult
revisionEngineTeardownFn func(*testing.T) func(context.Context, machinerytypes.Revision, ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error)
revisionEngineFactoryErr error
validate func(*testing.T, client.Client)
trackingCacheFreeFn func(context.Context, client.Object) error
expectedErr string
expectedResult ctrl.Result
}{
{
name: "teardown finalizer is removed",
@@ -775,6 +777,109 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) {
require.Equal(t, int64(1), cond.ObservedGeneration)
},
},
{
name: "set Progressing:True:Retrying and requeue when archived revision teardown is incomplete",
revisionResult: mockRevisionResult{},
existingObjs: func() []client.Object {
ext := newTestClusterExtension()
rev1 := newTestClusterExtensionRevision(t, clusterExtensionRevisionName, ext, testScheme)
rev1.Finalizers = []string{
"olm.operatorframework.io/teardown",
}
rev1.Spec.LifecycleState = ocv1.ClusterExtensionRevisionLifecycleStateArchived
return []client.Object{rev1, ext}
},
revisionEngineTeardownFn: func(t *testing.T) func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) {
return func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) {
return &mockRevisionTeardownResult{
isComplete: false,
}, nil
}
},
expectedResult: ctrl.Result{RequeueAfter: 5 * time.Second},
validate: func(t *testing.T, c client.Client) {
rev := &ocv1.ClusterExtensionRevision{}
err := c.Get(t.Context(), client.ObjectKey{
Name: clusterExtensionRevisionName,
}, rev)
require.NoError(t, err)
cond := meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing)
require.NotNil(t, cond)
require.Equal(t, metav1.ConditionTrue, cond.Status)
require.Equal(t, ocv1.ClusterExtensionRevisionReasonRetrying, cond.Reason)
require.Equal(t, "tearing down revision", cond.Message)

// Finalizer should still be present
require.Contains(t, rev.Finalizers, "olm.operatorframework.io/teardown")
},
},
{
name: "return error and set retrying conditions when archived revision teardown fails",
revisionResult: mockRevisionResult{},
existingObjs: func() []client.Object {
ext := newTestClusterExtension()
rev1 := newTestClusterExtensionRevision(t, clusterExtensionRevisionName, ext, testScheme)
rev1.Finalizers = []string{
"olm.operatorframework.io/teardown",
}
rev1.Spec.LifecycleState = ocv1.ClusterExtensionRevisionLifecycleStateArchived
return []client.Object{rev1, ext}
},
revisionEngineTeardownFn: func(t *testing.T) func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) {
return func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) {
return nil, fmt.Errorf("teardown failed: connection refused")
}
},
expectedErr: "error tearing down revision",
validate: func(t *testing.T, c client.Client) {
rev := &ocv1.ClusterExtensionRevision{}
err := c.Get(t.Context(), client.ObjectKey{
Name: clusterExtensionRevisionName,
}, rev)
require.NoError(t, err)
cond := meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing)
require.NotNil(t, cond)
require.Equal(t, metav1.ConditionTrue, cond.Status)
require.Equal(t, ocv1.ClusterExtensionRevisionReasonRetrying, cond.Reason)
require.Contains(t, cond.Message, "teardown failed: connection refused")

// Finalizer should still be present
require.Contains(t, rev.Finalizers, "olm.operatorframework.io/teardown")
},
},
{
name: "return error and set retrying conditions when factory fails to create engine during archived teardown",
revisionResult: mockRevisionResult{},
existingObjs: func() []client.Object {
ext := newTestClusterExtension()
rev1 := newTestClusterExtensionRevision(t, clusterExtensionRevisionName, ext, testScheme)
rev1.Finalizers = []string{
"olm.operatorframework.io/teardown",
}
rev1.Spec.LifecycleState = ocv1.ClusterExtensionRevisionLifecycleStateArchived
return []client.Object{rev1, ext}
},
revisionEngineTeardownFn: func(t *testing.T) func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) {
return nil
},
revisionEngineFactoryErr: fmt.Errorf("token getter failed"),
expectedErr: "failed to create revision engine",
validate: func(t *testing.T, c client.Client) {
rev := &ocv1.ClusterExtensionRevision{}
err := c.Get(t.Context(), client.ObjectKey{
Name: clusterExtensionRevisionName,
}, rev)
require.NoError(t, err)
cond := meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing)
require.NotNil(t, cond)
require.Equal(t, metav1.ConditionTrue, cond.Status)
require.Equal(t, ocv1.ClusterExtensionRevisionReasonRetrying, cond.Reason)
require.Contains(t, cond.Message, "token getter failed")

// Finalizer should still be present
require.Contains(t, rev.Finalizers, "olm.operatorframework.io/teardown")
},
},
{
name: "revision is torn down when in archived state and finalizer is removed",
revisionResult: mockRevisionResult{},
@@ -833,9 +938,10 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) {
},
teardown: tc.revisionEngineTeardownFn(t),
}
factory := &mockRevisionEngineFactory{engine: mockEngine, createErr: tc.revisionEngineFactoryErr}
result, err := (&controllers.ClusterExtensionRevisionReconciler{
Client: testClient,
RevisionEngineFactory: &mockRevisionEngineFactory{engine: mockEngine},
RevisionEngineFactory: factory,
TrackingCache: &mockTrackingCache{
client: testClient,
freeFn: tc.trackingCacheFreeFn,
@@ -847,7 +953,7 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) {
})

// reconcile cluster extension revision
require.Equal(t, ctrl.Result{}, result)
require.Equal(t, tc.expectedResult, result)
if tc.expectedErr != "" {
require.Contains(t, err.Error(), tc.expectedErr)
} else {
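
Note (not part of the diff): the table entries above use mockRevisionTeardownResult and a createErr field on mockRevisionEngineFactory, both defined elsewhere in this test file. A sketch of what the teardown-result mock might look like, assuming machinery.RevisionTeardownResult is an interface and that the boxcutter import path below is correct; only IsComplete is exercised by these cases.

package controllers_test

import (
	// import path is an assumption
	"pkg.package-operator.run/boxcutter/machinery"
)

// mockRevisionTeardownResult satisfies machinery.RevisionTeardownResult for
// the deletion tests above. Embedding the interface stubs the methods these
// cases never call (invoking one of them would panic).
type mockRevisionTeardownResult struct {
	machinery.RevisionTeardownResult
	isComplete bool
}

func (m *mockRevisionTeardownResult) IsComplete() bool {
	return m.isComplete
}
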
4 changes: 3 additions & 1 deletion test/e2e/features/update.feature
@@ -181,7 +181,7 @@ Feature: Update ClusterExtension
Then bundle "test-operator.1.3.0" is installed in version "1.3.0"

@BoxcutterRuntime
Scenario: Each update creates a new revision
Scenario: Each update creates a new revision and resources not present in the new revision are removed from the cluster
Given ClusterExtension is applied
"""
apiVersion: olm.operatorframework.io/v1
@@ -212,6 +212,8 @@ Feature: Update ClusterExtension
And ClusterExtensionRevision "${NAME}-2" reports Progressing as True with Reason Succeeded
And ClusterExtensionRevision "${NAME}-2" reports Available as True with Reason ProbesSucceeded
And ClusterExtensionRevision "${NAME}-1" is archived
# dummy-configmap exists only in v1.0.0 - once the v1.0.0 revision is archived, it should be gone from the cluster
And resource "configmap/dummy-configmap" is eventually not found

@BoxcutterRuntime
Scenario: Report all active revisions on ClusterExtension
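
Note (not part of the diff): the new step `resource "configmap/dummy-configmap" is eventually not found` implies a polling absence check in the e2e harness. A hedged sketch of such a check using a controller-runtime client; the package, function name, namespace handling, and timeouts are assumptions, not the project's actual step definition.

package features_test

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// resourceEventuallyNotFound polls until the named ConfigMap is gone or the
// timeout expires. Hypothetical helper: the real step definition lives in the
// e2e harness and is not part of this change.
func resourceEventuallyNotFound(ctx context.Context, c client.Client, namespace, name string) error {
	return wait.PollUntilContextTimeout(ctx, time.Second, 2*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &corev1.ConfigMap{})
			switch {
			case apierrors.IsNotFound(err):
				return true, nil // gone, as the scenario expects
			case err != nil:
				return false, err // unexpected error aborts polling
			default:
				return false, nil // still present, keep polling
			}
		})
}
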
@@ -0,0 +1,6 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: dummy-configmap
data:
why: "this ConfigMap does not exist in higher versions of the bundle - it is useful to test that resources removed between versions are also removed from the cluster"