Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions coriolis/tests/integration/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -230,6 +230,7 @@ def f(*args, **kwargs):
class ReplicaIntegrationTestBase(CoriolisIntegrationTestBase):

_CREATE_MINION_POOLS = False
_SCSI_DEBUG_SIZE_MB = 16

@classmethod
def setUpClass(cls):
Expand Down Expand Up @@ -280,6 +281,10 @@ def setUpClass(cls):
"Pool did not reach ALLOCATED (got %s)" % pool_obj.status,
)

# (re)init the scsi_debug module.
test_utils.destroy_scsi_debug()
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are we planning to switch to loopback devices btw? I think we'll need to in order to parallelize the tests.

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm afraid we'll be using those for now. loopback devices are fine on the importer (coriolis-writer doesn't care what type of device it writes to), but on the exporter, coriolis-replicator ignores loopback devices.

test_utils.init_scsi_debug(size_mb=cls._SCSI_DEBUG_SIZE_MB)

def setUp(self):
super().setUp()

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@

from coriolis import constants
from coriolis.tests.integration import base
from coriolis.tests.integration.providers.test_provider import imp
from coriolis.tests.integration.test_provider import imp


class ReplicaDeploymentIntegrationTest(base.ReplicaIntegrationTestBase):
Expand Down
42 changes: 42 additions & 0 deletions coriolis/tests/integration/deployments/test_osmorphing.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
# Copyright 2026 Cloudbase Solutions Srl
# All Rights Reserved.

"""Integration tests for the OS morphing deployments.

Exercises deployments with skip_os_morphing=False, OS detection, and package
installation in the target OS.
"""

from coriolis.tests.integration import base as integration_base
from coriolis.tests.integration import utils as test_utils


class OsMorphingDeploymentTest(integration_base.ReplicaIntegrationTestBase):

# NOTE(claudiub): Size must be high enough to contain the tested OS and
# any new packages to be added during OS morphing.
_SCSI_DEBUG_SIZE_MB = 256

    def setUp(self):
        """Prepare the source disk with a real Ubuntu 24.04 rootfs.

        OS morphing needs an actual, detectable OS on the source device;
        the base setUp only provides an empty scsi_debug disk.
        """
        super().setUp()
        # write_os_image_to_disk formats the device ext4 and extracts the
        # container image's filesystem onto it (see integration utils).
        test_utils.write_os_image_to_disk(self._src_device, "ubuntu:24.04")

def test_deployment_with_os_morphing(self):
self.assertFalse(
Copy link
Copy Markdown
Member

@petrutlucian94 petrutlucian94 May 15, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This feels a bit brittle. So if jq ever gets included in the Ubuntu image, this test will start to fail.

On the other hand, we can keep it for the sake of simplicity. We'll use a different package if that happens :).

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Agreed. In this example, we're basically using a container image, which is supposed to be a minimal base image to be further used for other purposes; it shouldn't have a situational package such as jq.

I've added this assertion as a sanity check, so we don't have a potentially false positive test (expect package to be installed by OS morphing, but it already existed anyways).

I'd keep jq for now, as it's very small. We can change it if needed later.

test_utils.path_exists_on_device(self._src_device, "usr/bin/jq"),
"jq was found on the source device before OS morphing",
)

self._execute_and_wait(self._transfer.id)

deployment = self._client.deployments.create_from_transfer(
self._transfer.id,
skip_os_morphing=False,
)
self.addCleanup(self._client.deployments.delete, deployment.id)

self.assertDeploymentCompleted(deployment.id)
self.assertTrue(
test_utils.path_exists_on_device(self._dst_device, "usr/bin/jq"),
"jq was not found on the destination device after OS morphing",
)
2 changes: 2 additions & 0 deletions coriolis/tests/integration/dockerfiles/data-minion/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,10 @@ FROM ubuntu:24.04

# dbus is required for systemd to fully manage units;
# sudo is used by replicator / writer setup.
# kmod is required during OS morphing (modprobe is being called).
RUN apt-get update && apt-get install -y --no-install-recommends \
dbus \
kmod \
openssh-server \
sudo \
systemd \
Expand Down
5 changes: 2 additions & 3 deletions coriolis/tests/integration/harness.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,10 +66,10 @@
# Dotted paths to the export (source) and import (destination) provider
# classes.
_TEST_EXPORT_PROVIDER = (
"coriolis.tests.integration.providers.test_provider.exp.TestExportProvider"
"coriolis.tests.integration.test_provider.exp.TestExportProvider"
)
_TEST_IMPORT_PROVIDER = (
"coriolis.tests.integration.providers.test_provider.imp.TestImportProvider"
"coriolis.tests.integration.test_provider.imp.TestImportProvider"
)

# Fixed project used for all test requests.
Expand Down Expand Up @@ -298,7 +298,6 @@ def __init__(self):
group='minion_manager')

coriolis_utils.setup_logging()
test_utils.init_scsi_debug()

# Policy enforcer: reset so it re-reads the new CONF (no policy file).
policy_module.reset()
Expand Down
Empty file.
2 changes: 1 addition & 1 deletion coriolis/tests/integration/test_failure_recovery.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
from unittest import mock

from coriolis.tests.integration import base
from coriolis.tests.integration.providers.test_provider import imp
from coriolis.tests.integration.test_provider import imp


class TransferFailureIntegrationTest(base.ReplicaIntegrationTestBase):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
from coriolis.providers.base import BaseReplicaImportProvider
from coriolis.providers.base import BaseReplicaImportValidationProvider
from coriolis.providers.base import BaseUpdateDestinationReplicaProvider
from coriolis.tests.integration.test_provider import osmorphing
from coriolis.tests.integration import utils as test_utils
from coriolis import utils as coriolis_utils

Expand Down Expand Up @@ -155,8 +156,9 @@ def deploy_replica_disks(

def deploy_replica_target_resources(
self, ctxt, connection_info, target_environment, volumes_info):
devices = [vol["volume_dev"] for vol in volumes_info]
result = self._create_minion(
"coriolis-writer", connection_info, volumes_info)
"coriolis-writer", connection_info, devices)

return {
"volumes_info": volumes_info,
Expand All @@ -165,18 +167,18 @@ def deploy_replica_target_resources(
}

def _create_minion(
self, name_prefix, connection_info, volumes_info,
device_cgroup_rules=None):
self, name_prefix, connection_info, devices=None, volumes=None,
device_cgroup_rules=None, setup_writer=True):
pkey_path = connection_info["pkey_path"]
dest_devices = [vol["volume_dev"] for vol in volumes_info]
container_name = "%s-%s" % (name_prefix, uuid.uuid4().hex[:8])

container_id = test_utils.run_container(
test_utils.DATA_MINION_IMAGE,
container_name,
is_systemd=True,
ssh_key=f"{pkey_path}.pub",
devices=dest_devices,
devices=devices,
volumes=volumes,
device_cgroup_rules=device_cgroup_rules,
)

Expand All @@ -189,20 +191,23 @@ def _create_minion(
"ip": container_ip,
"port": 22,
"username": "root",
"pkey": pkey,
"pkey": coriolis_utils.serialize_key(pkey),
}
bootstrapper = backup_writers.HTTPBackupWriterBootstrapper(
ssh_conn_info, WRITER_TEST_PORT)
writer_conn_details = bootstrapper.setup_writer()

return {
info = {
"container_id": container_id,
"ssh_connection_info": ssh_conn_info,
"backup_writer_connection_info": {
}
if setup_writer:
bootstrapper = backup_writers.HTTPBackupWriterBootstrapper(
ssh_conn_info, WRITER_TEST_PORT)
writer_conn_details = bootstrapper.setup_writer()
info["backup_writer_connection_info"] = {
"backend": "http_backup_writer",
"connection_details": writer_conn_details,
},
}
}

return info
except Exception:
test_utils.remove_container(container_id)
raise
Expand Down Expand Up @@ -265,19 +270,47 @@ def cleanup_failed_replica_instance_deployment(
# BaseInstanceProvider

def get_os_morphing_tools(self, os_type, osmorphing_info):
return []
return osmorphing.OS_MORPHERS

# BaseImportInstanceProvider

def deploy_os_morphing_resources(
self, ctxt, connection_info, target_environment,
instance_deployment_info):
return {}
devices = list(target_environment.get("devices", []))

# lsblk inside the container sees all the host block devices because
# Docker containers share the host kernel's sysfs (/sys/block/).
# Populate ignore_devices with every host disk except the target
# so osmorphing only considers the devices we actually attached.
ignore_devices = list(
test_utils.get_host_disk_devices() - set(devices)
)

# Mount the host's /lib/modules tree so that modprobe can
# resolve built-in modules.
volumes = ["/lib/modules:/lib/modules:ro"]
result = self._create_minion(
"coriolis-osmorphing", connection_info, devices,
volumes, setup_writer=False,
)

return {
"os_morphing_resources": {"container_id": result["container_id"]},
"osmorphing_connection_info": result["ssh_connection_info"],
"osmorphing_info": {
"os_type": instance_deployment_info.get("os_type", "linux"),
"ignore_devices": ignore_devices,
},
}

def delete_os_morphing_resources(
self, ctxt, connection_info, target_environment,
os_morphing_resources):
pass
if os_morphing_resources:
container_id = os_morphing_resources.get("container_id")
if container_id:
test_utils.remove_container(container_id)

# BaseReplicaImportValidationProvider

Expand Down
10 changes: 10 additions & 0 deletions coriolis/tests/integration/test_provider/osmorphing/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Copyright 2026 Cloudbase Solutions Srl
# All Rights Reserved.

from coriolis.osmorphing import base
from coriolis.tests.integration.test_provider.osmorphing import ubuntu


# OS morphing tool classes exposed to the test import provider.
# BUG FIX: the list holds morphing-tool *classes*, not instances, so the
# annotation must be list[type[...]]; the previous
# list[base.BaseLinuxOSMorphingTools] incorrectly claimed a list of instances.
OS_MORPHERS: list[type[base.BaseLinuxOSMorphingTools]] = [
    ubuntu.TestUbuntuOSMorphingTools,
]
18 changes: 18 additions & 0 deletions coriolis/tests/integration/test_provider/osmorphing/ubuntu.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
# Copyright 2026 Cloudbase Solutions Srl
# All Rights Reserved.

"""
Ubuntu OS Morphing tools.
"""

from coriolis.osmorphing import ubuntu


class TestUbuntuOSMorphingTools(ubuntu.BaseUbuntuMorphingTools):
    """Ubuntu OSMorphing tools for integration tests."""

    # Package meant to be installed during OS morphing.
    # jq is a very small package which is not available by default.
    # NOTE(review): mapping shape follows the base morphing tools' package
    # spec — presumably {hypervisor_or_None: [(package_name, flag)]}, with
    # the None key applying regardless of hypervisor; confirm the tuple's
    # boolean semantics against coriolis.osmorphing.base.
    _packages = {
        None: [("jq", True)],
    }
2 changes: 1 addition & 1 deletion coriolis/tests/integration/transfers/test_executions.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

from coriolis import constants
from coriolis.tests.integration import base
from coriolis.tests.integration.providers.test_provider import imp
from coriolis.tests.integration.test_provider import imp


class TransferExecutionsTests(base.ReplicaIntegrationTestBase):
Expand Down
73 changes: 68 additions & 5 deletions coriolis/tests/integration/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,12 @@
DATA_MINION_IMAGE = "coriolis-data-minion:test"


def get_host_disk_devices() -> set:
    """Return the /dev paths of disk-type block devices visible on the host."""
    # Prefix each lsblk-reported disk name with /dev/ to get full paths.
    return {"/dev/%s" % name for name in _lsblk_disk_names()}


def _lsblk_disk_names() -> set:
"""Return the set of disk-type block device names visible to lsblk."""
result = _run(["lsblk", "-Jb", "-o", "NAME,TYPE"], check=False)
Expand Down Expand Up @@ -62,12 +68,13 @@ def _poll_for_new_disks(before, count, timeout=_SETTLE_TIMEOUT):
)


def init_scsi_debug(size_mb=64):
"""Load scsi_debug with per_host_store=1.
def init_scsi_debug(size_mb=16):
"""Load scsi_debug with per_host_store=1 and size_mb per device.

Must be called once per process before any ``add_scsi_debug_device``
calls. With ``per_host_store=1`` every host added via the sysfs knob
gets its own independent backing store, so devices never share storage.
Call ``destroy_scsi_debug`` first if the module is already loaded with a
different size. With ``per_host_store=1`` every host added via the sysfs
knob gets its own independent backing store, so devices never share
storage.
"""
_run([
"modprobe",
Expand Down Expand Up @@ -303,3 +310,59 @@ def unplug_device_from_container(container_id, device_path):
"nsenter", "--target", str(pid), "--mount", "--",
"rm", "-f", device_path,
], check=False)


# OS Morphing utils


def write_os_image_to_disk(device_path, container_image):
    """Write a real Linux rootfs to *device_path*.

    Exports the filesystem of a container image via ``docker export`` and
    extracts it onto an ext4-formatted device, giving a chroot-able root with
    that container OS' standard filesystem and binaries present.

    :raises subprocess.CalledProcessError: if the ``tar`` extraction or the
        ``docker export`` stream fails.
    """
    _run(["mkfs.ext4", "-F", device_path])

    result = _run(["docker", "create", container_image])
    container_id = result.stdout.decode().strip()

    try:
        with tempfile.TemporaryDirectory() as mount_point:
            _run(["mount", device_path, mount_point])

            try:
                export = subprocess.Popen(
                    ["docker", "export", container_id],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.DEVNULL,
                )
                try:
                    subprocess.run(
                        ["tar", "-x", "-C", mount_point],
                        stdin=export.stdout,
                        stdout=subprocess.DEVNULL,
                        stderr=subprocess.DEVNULL,
                        check=True,
                    )
                finally:
                    # Close our end of the pipe and reap the child even if
                    # tar raises, so the export process is never leaked.
                    export.stdout.close()
                    export_rc = export.wait()
                # BUG FIX: the export's exit status was previously ignored,
                # so a failed `docker export` (bad image, daemon error)
                # silently produced an empty or partial rootfs.
                if export_rc != 0:
                    raise subprocess.CalledProcessError(
                        export_rc, ["docker", "export", container_id])
            finally:
                _run(["umount", mount_point])

    finally:
        _run(["docker", "rm", "-f", container_id], check=False)


def path_exists_on_device(device_path, rel_path):
    """Checks if *path* exists on the filesystem of *device_path*.

    Mounts the device read-only into a temporary directory, checks for the
    path, then unmounts.
    """
    with tempfile.TemporaryDirectory() as mount_point:
        _run(["mount", "-o", "ro", device_path, mount_point])
        try:
            target = os.path.join(mount_point, rel_path)
            found = os.path.exists(target)
        finally:
            # Unmount before TemporaryDirectory removes the mount point.
            _run(["umount", mount_point])
        return found
Loading