From e8e5820eaaf1809a5cef07b63a867b75066cd0f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Jandre?= Date: Fri, 30 May 2025 17:54:17 -0300 Subject: [PATCH 1/2] KNIB --- agent/conf/agent.properties | 4 + .../agent/properties/AgentProperties.java | 3 +- .../cloud/agent/api/to/DataObjectType.java | 2 +- .../com/cloud/hypervisor/HypervisorGuru.java | 5 +- .../main/java/com/cloud/storage/Volume.java | 11 +- .../com/cloud/storage/VolumeApiService.java | 2 +- .../java/com/cloud/vm/VirtualMachine.java | 18 +- .../java/com/cloud/vm/VmDetailConstants.java | 3 + .../apache/cloudstack/api/ApiConstants.java | 26 + .../cloudstack/api/ResponseGenerator.java | 4 + .../command/user/backup/CreateBackupCmd.java | 10 + .../user/backup/CreateBackupScheduleCmd.java | 10 + .../backup/ListBackupCompressionJobsCmd.java | 104 + .../command/user/backup/RestoreBackupCmd.java | 20 +- ...storeVolumeFromBackupAndAttachToVMCmd.java | 19 +- .../CreateNativeBackupOfferingCmd.java | 126 + .../DeleteNativeBackupOfferingCmd.java | 74 + .../ListNativeBackupOfferingsCmd.java | 105 + .../user/vm/CreateVMFromBackupCmd.java | 9 + .../BackupCompressionJobResponse.java | 79 + .../api/response/BackupResponse.java | 24 + .../api/response/BackupScheduleResponse.java | 8 + .../NativeBackupOfferingResponse.java | 80 + .../org/apache/cloudstack/backup/Backup.java | 13 +- .../cloudstack/backup/BackupManager.java | 14 +- .../cloudstack/backup/BackupProvider.java | 17 +- .../cloudstack/backup/BackupSchedule.java | 1 + .../backup/NativeBackupOffering.java | 37 + .../backup/NativeBackupOfferingService.java | 29 + .../backup/NativeBackupProvider.java | 123 + .../backup/NativeBackupService.java | 42 + .../apache/cloudstack/query/QueryService.java | 8 + .../secstorage/heuristics/HeuristicType.java | 4 +- client/pom.xml | 5 + ...ackupsBetweenSecondaryStoragesCommand.java | 41 + ...igrateBetweenSecondaryStoragesCommand.java | 48 + ...BetweenSecondaryStoragesCommandAnswer.java | 41 + 
.../MergeDiskOnlyVmSnapshotCommand.java | 16 +- .../storage/resource/StorageProcessor.java | 4 + .../StorageSubsystemCommandHandlerBase.java | 2 + .../backup/CompressBackupCommand.java | 78 + .../backup/ConsolidateVolumesAnswer.java | 37 + .../backup/ConsolidateVolumesCommand.java | 56 + .../FinalizeBackupCompressionCommand.java | 49 + .../backup/RestoreKnibBackupAnswer.java | 41 + .../backup/RestoreKnibBackupCommand.java | 66 + .../backup/TakeKnibBackupAnswer.java | 59 + .../backup/TakeKnibBackupCommand.java | 92 + .../storage/command/BackupDeleteAnswer.java | 36 + .../storage/command/DeleteCommand.java | 10 + .../command/RevertSnapshotCommand.java | 10 + .../cloudstack/storage/to/BackupDeltaTO.java | 87 + .../storage/to/DeltaMergeTreeTO.java} | 16 +- .../apache/cloudstack/storage/to/KnibTO.java | 67 + .../cloudstack/storage/to/VolumeObjectTO.java | 3 +- ...e-lifecycle-backup-context-inheritable.xml | 5 + .../spring-core-registry-core-context.xml | 4 + .../service/VolumeOrchestrationService.java | 2 +- .../com/cloud/storage/StorageManager.java | 3 + .../java/com/cloud/vm/VmWorkDeleteBackup.java | 38 + .../com/cloud/vm/VmWorkRestoreBackup.java | 45 + .../VmWorkRestoreVolumeBackupAndAttach.java | 55 + .../java/com/cloud/vm/VmWorkTakeBackup.java | 50 + .../cloud/vm/VirtualMachineManagerImpl.java | 9 +- .../orchestration/DataMigrationUtility.java | 89 +- .../orchestration/StorageOrchestrator.java | 148 +- .../orchestration/VolumeOrchestrator.java | 25 +- ...ring-engine-orchestration-core-context.xml | 1 + .../main/java/com/cloud/host/dao/HostDao.java | 4 + .../java/com/cloud/host/dao/HostDaoImpl.java | 18 + .../java/com/cloud/storage/SnapshotVO.java | 4 + .../cloud/storage/dao/SnapshotDaoImpl.java | 98 +- .../backup/BackupCompressionJobType.java | 21 + .../backup/BackupCompressionJobVO.java | 166 ++ .../cloudstack/backup/BackupScheduleVO.java | 16 +- .../apache/cloudstack/backup/BackupVO.java | 46 +- .../backup/NativeBackupDataStoreVO.java | 95 + 
.../cloudstack/backup/NativeBackupJoinVO.java | 190 ++ .../backup/NativeBackupOfferingVO.java | 175 ++ .../backup/NativeBackupStoragePoolVO.java | 101 + .../backup/dao/BackupCompressionJobDao.java | 35 + .../dao/BackupCompressionJobDaoImpl.java | 99 + .../cloudstack/backup/dao/BackupDaoImpl.java | 2 + .../backup/dao/BackupDetailsDao.java | 11 + .../backup/dao/BackupDetailsDaoImpl.java | 26 + .../backup/dao/BackupScheduleDaoImpl.java | 1 + .../backup/dao/NativeBackupDataStoreDao.java | 33 + .../dao/NativeBackupDataStoreDaoImpl.java | 74 + .../backup/dao/NativeBackupJoinDao.java | 40 + .../backup/dao/NativeBackupJoinDaoImpl.java | 129 + .../backup/dao/NativeBackupOfferingDao.java | 23 + .../dao/NativeBackupOfferingDaoImpl.java | 23 + .../dao/NativeBackupStoragePoolDao.java | 33 + .../dao/NativeBackupStoragePoolDaoImpl.java | 70 + .../datastore/db/SnapshotDataStoreDao.java | 2 + .../db/SnapshotDataStoreDaoImpl.java | 33 +- ...spring-engine-schema-core-daos-context.xml | 5 + .../META-INF/db/schema-42210to42300.sql | 65 + .../db/views/cloud.native_backup_view.sql | 47 + .../StorageSystemDataMotionStrategy.java | 5 + .../storage/snapshot/SnapshotServiceImpl.java | 4 + ...KvmFileBasedStorageVmSnapshotStrategy.java | 182 +- .../vmsnapshot/StorageVMSnapshotStrategy.java | 16 + .../vmsnapshot/VMSnapshotStrategyKVMTest.java | 6 + .../storage/backup/BackupObject.java | 198 ++ .../storage/helper/VMSnapshotHelperImpl.java | 43 +- .../storage/vmsnapshot/VMSnapshotHelper.java | 3 + .../storage/volume/VolumeServiceImpl.java | 5 + .../framework/jobs/impl/VmWorkJobVO.java | 8 + .../backup/DummyBackupProvider.java | 9 +- plugins/backup/knib/pom.xml | 50 + .../cloudstack/backup/KnibBackupProvider.java | 2331 +++++++++++++++++ .../cloudstack/knib/module.properties | 18 + .../knib/spring-backup-knib-context.xml | 26 + .../backup/KnibBackupProviderTest.java | 609 +++++ .../cloudstack/backup/NASBackupProvider.java | 10 +- .../backup/NASBackupProviderTest.java | 2 +- 
.../backup/NetworkerBackupProvider.java | 9 +- .../backup/VeeamBackupProvider.java | 9 +- .../kvm/resource/BlockCommitListener.java | 12 +- .../resource/LibvirtComputingResource.java | 328 ++- ...grateResourceBetweenSecondaryStorages.java | 123 + .../LibvirtCompressBackupCommandWrapper.java | 144 + ...bvirtConsolidateVolumesCommandWrapper.java | 59 + ...reateDiskOnlyVMSnapshotCommandWrapper.java | 165 +- ...nalizeBackupCompressionCommandWrapper.java | 73 + .../LibvirtGetStorageStatsCommandWrapper.java | 2 +- ...virtGetVolumesOnStorageCommandWrapper.java | 2 +- ...MergeDiskOnlyVMSnapshotCommandWrapper.java | 99 +- ...etweenSecondaryStoragesCommandWrapper.java | 132 + ...ibvirtRestoreKnibBackupCommandWrapper.java | 119 + .../LibvirtRevertSnapshotCommandWrapper.java | 12 +- .../LibvirtTakeKnibBackupCommandWrapper.java | 391 +++ .../kvm/storage/KVMStoragePoolManager.java | 8 +- .../kvm/storage/KVMStorageProcessor.java | 22 +- .../kvm/storage/LibvirtStorageAdaptor.java | 5 +- .../apache/cloudstack/utils/qemu/QemuImg.java | 85 +- .../LibvirtComputingResourceTest.java | 46 +- ...GetVolumesOnStorageCommandWrapperTest.java | 2 +- ...bvirtRevertSnapshotCommandWrapperTest.java | 10 +- .../com/cloud/hypervisor/guru/VMwareGuru.java | 7 +- plugins/pom.xml | 1 + .../CloudStackPrimaryDataStoreDriverImpl.java | 14 +- .../java/com/cloud/api/ApiResponseHelper.java | 8 + .../com/cloud/api/query/QueryManagerImpl.java | 117 +- .../consoleproxy/ConsoleProxyManagerImpl.java | 2 +- .../cloud/hypervisor/HypervisorGuruBase.java | 5 +- .../java/com/cloud/hypervisor/KVMGuru.java | 21 +- .../network/as/AutoScaleManagerImpl.java | 2 +- .../com/cloud/storage/StorageManagerImpl.java | 3 +- .../cloud/storage/VolumeApiServiceImpl.java | 55 +- .../main/java/com/cloud/vm/UserVmManager.java | 6 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 36 +- .../backup/BackupCompressionService.java | 388 +++ .../cloudstack/backup/BackupManagerImpl.java | 199 +- .../NativeBackupOfferingServiceImpl.java | 62 + 
.../backup/NativeBackupServiceImpl.java | 276 ++ .../heuristics/HeuristicRuleHelper.java | 24 + .../heuristics/presetvariables/Backup.java | 40 + .../presetvariables/PresetVariables.java | 10 + .../spring-server-core-managers-context.xml | 6 + .../network/as/AutoScaleManagerImplTest.java | 5 +- .../storage/VolumeApiServiceImplTest.java | 34 +- .../template/TemplateManagerImplTest.java | 6 + .../com/cloud/vm/UserVmManagerImplTest.java | 12 +- .../cloudstack/backup/BackupManagerTest.java | 79 +- .../heuristics/HeuristicRuleHelperTest.java | 19 + .../resource/NfsSecondaryStorageResource.java | 38 + ui/public/locales/en.json | 2 + ui/public/locales/pt_BR.json | 2 + ui/src/components/view/ListView.vue | 3 + ui/src/components/widgets/Status.vue | 4 + ui/src/config/section/storage.js | 12 +- ui/src/views/AutogenView.vue | 4 + ui/src/views/compute/InstanceTab.vue | 2 +- ui/src/views/compute/StartBackup.vue | 13 +- .../views/compute/backup/BackupSchedule.vue | 8 + ui/src/views/compute/backup/FormSchedule.vue | 13 +- ui/src/views/storage/CreateVMFromBackup.vue | 12 +- .../storage/RestoreAttachBackupVolume.vue | 74 +- .../main/java/com/cloud/utils/DateUtil.java | 7 + .../utils/exception/BackupException.java | 42 + .../exception/BackupProviderException.java | 33 + 183 files changed, 10425 insertions(+), 720 deletions(-) create mode 100644 api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupCompressionJobsCmd.java create mode 100644 api/src/main/java/org/apache/cloudstack/api/command/user/backup/nativeoffering/CreateNativeBackupOfferingCmd.java create mode 100644 api/src/main/java/org/apache/cloudstack/api/command/user/backup/nativeoffering/DeleteNativeBackupOfferingCmd.java create mode 100644 api/src/main/java/org/apache/cloudstack/api/command/user/backup/nativeoffering/ListNativeBackupOfferingsCmd.java create mode 100644 api/src/main/java/org/apache/cloudstack/api/response/BackupCompressionJobResponse.java create mode 100644 
api/src/main/java/org/apache/cloudstack/api/response/NativeBackupOfferingResponse.java create mode 100644 api/src/main/java/org/apache/cloudstack/backup/NativeBackupOffering.java create mode 100644 api/src/main/java/org/apache/cloudstack/backup/NativeBackupOfferingService.java create mode 100644 api/src/main/java/org/apache/cloudstack/backup/NativeBackupProvider.java create mode 100644 api/src/main/java/org/apache/cloudstack/backup/NativeBackupService.java create mode 100644 core/src/main/java/com/cloud/agent/api/MigrateBackupsBetweenSecondaryStoragesCommand.java create mode 100644 core/src/main/java/com/cloud/agent/api/MigrateBetweenSecondaryStoragesCommand.java create mode 100644 core/src/main/java/com/cloud/agent/api/MigrateBetweenSecondaryStoragesCommandAnswer.java create mode 100644 core/src/main/java/org/apache/cloudstack/backup/CompressBackupCommand.java create mode 100644 core/src/main/java/org/apache/cloudstack/backup/ConsolidateVolumesAnswer.java create mode 100644 core/src/main/java/org/apache/cloudstack/backup/ConsolidateVolumesCommand.java create mode 100644 core/src/main/java/org/apache/cloudstack/backup/FinalizeBackupCompressionCommand.java create mode 100644 core/src/main/java/org/apache/cloudstack/backup/RestoreKnibBackupAnswer.java create mode 100644 core/src/main/java/org/apache/cloudstack/backup/RestoreKnibBackupCommand.java create mode 100644 core/src/main/java/org/apache/cloudstack/backup/TakeKnibBackupAnswer.java create mode 100644 core/src/main/java/org/apache/cloudstack/backup/TakeKnibBackupCommand.java create mode 100644 core/src/main/java/org/apache/cloudstack/storage/command/BackupDeleteAnswer.java create mode 100644 core/src/main/java/org/apache/cloudstack/storage/to/BackupDeltaTO.java rename core/src/main/java/{com/cloud/agent/api/storage/SnapshotMergeTreeTO.java => org/apache/cloudstack/storage/to/DeltaMergeTreeTO.java} (74%) create mode 100644 core/src/main/java/org/apache/cloudstack/storage/to/KnibTO.java create mode 100644 
engine/components-api/src/main/java/com/cloud/vm/VmWorkDeleteBackup.java create mode 100644 engine/components-api/src/main/java/com/cloud/vm/VmWorkRestoreBackup.java create mode 100644 engine/components-api/src/main/java/com/cloud/vm/VmWorkRestoreVolumeBackupAndAttach.java create mode 100644 engine/components-api/src/main/java/com/cloud/vm/VmWorkTakeBackup.java create mode 100644 engine/schema/src/main/java/org/apache/cloudstack/backup/BackupCompressionJobType.java create mode 100644 engine/schema/src/main/java/org/apache/cloudstack/backup/BackupCompressionJobVO.java create mode 100644 engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupDataStoreVO.java create mode 100644 engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupJoinVO.java create mode 100644 engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupOfferingVO.java create mode 100644 engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupStoragePoolVO.java create mode 100644 engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupCompressionJobDao.java create mode 100644 engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupCompressionJobDaoImpl.java create mode 100644 engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupDataStoreDao.java create mode 100644 engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupDataStoreDaoImpl.java create mode 100644 engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupJoinDao.java create mode 100644 engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupJoinDaoImpl.java create mode 100644 engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupOfferingDao.java create mode 100644 engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupOfferingDaoImpl.java create mode 100644 engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupStoragePoolDao.java create mode 
100644 engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupStoragePoolDaoImpl.java create mode 100644 engine/schema/src/main/resources/META-INF/db/views/cloud.native_backup_view.sql create mode 100644 engine/storage/src/main/java/org/apache/cloudstack/storage/backup/BackupObject.java create mode 100644 plugins/backup/knib/pom.xml create mode 100644 plugins/backup/knib/src/main/java/org/apache/cloudstack/backup/KnibBackupProvider.java create mode 100644 plugins/backup/knib/src/main/resources/META-INF/cloudstack/knib/module.properties create mode 100644 plugins/backup/knib/src/main/resources/META-INF/cloudstack/knib/spring-backup-knib-context.xml create mode 100644 plugins/backup/knib/src/test/java/org/apache/cloudstack/backup/KnibBackupProviderTest.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtMigrateResourceBetweenSecondaryStorages.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCompressBackupCommandWrapper.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConsolidateVolumesCommandWrapper.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtFinalizeBackupCompressionCommandWrapper.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateBackupsBetweenSecondaryStoragesCommandWrapper.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRestoreKnibBackupCommandWrapper.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtTakeKnibBackupCommandWrapper.java create mode 100644 server/src/main/java/org/apache/cloudstack/backup/BackupCompressionService.java create mode 100644 
server/src/main/java/org/apache/cloudstack/backup/NativeBackupOfferingServiceImpl.java create mode 100644 server/src/main/java/org/apache/cloudstack/backup/NativeBackupServiceImpl.java create mode 100644 server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/Backup.java create mode 100644 utils/src/main/java/com/cloud/utils/exception/BackupException.java create mode 100644 utils/src/main/java/com/cloud/utils/exception/BackupProviderException.java diff --git a/agent/conf/agent.properties b/agent/conf/agent.properties index 0dc5b8211e0d..51fc046af0aa 100644 --- a/agent/conf/agent.properties +++ b/agent/conf/agent.properties @@ -457,3 +457,7 @@ iscsi.session.cleanup.enabled=false # Instance conversion VIRT_V2V_TMPDIR env var #convert.instance.env.virtv2v.tmpdir= + +# Timeout (in seconds) for QCOW2 delta merge operations, mainly used for classic volume snapshots, disk-only VM snapshots on file-based storage, and the KNIB plugin. +# If a value of 0 or less is provided, the default will be used. +# qcow2.delta.merge.timeout=259200 diff --git a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java index 3364f9708cf5..0c40021ca929 100644 --- a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java +++ b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java @@ -156,7 +156,8 @@ public class AgentProperties{ public static final Property CMDS_TIMEOUT = new Property<>("cmds.timeout", 7200); /** - * The timeout (in seconds) for the snapshot merge operation, mainly used for classic volume snapshots and disk-only VM snapshots on file-based storage.
+ * The timeout (in seconds) for QCOW2 delta merge operations, mainly used for classic volume snapshots, disk-only VM snapshots on file-based storage, and the KNIB plugin. + * If a value of 0 or less is provided, the default will be used.
* This configuration is only considered if libvirt.events.enabled is also true.
* Data type: Integer.
* Default value: 259200 diff --git a/api/src/main/java/com/cloud/agent/api/to/DataObjectType.java b/api/src/main/java/com/cloud/agent/api/to/DataObjectType.java index 26294cfbb223..76a75e03ba55 100644 --- a/api/src/main/java/com/cloud/agent/api/to/DataObjectType.java +++ b/api/src/main/java/com/cloud/agent/api/to/DataObjectType.java @@ -19,5 +19,5 @@ package com.cloud.agent.api.to; public enum DataObjectType { - VOLUME, SNAPSHOT, TEMPLATE, ARCHIVE + VOLUME, SNAPSHOT, TEMPLATE, ARCHIVE, BACKUP } diff --git a/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java b/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java index 0c821b4e36c0..67db19b7cc54 100644 --- a/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java +++ b/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java @@ -20,6 +20,7 @@ import java.util.Map; import org.apache.cloudstack.backup.Backup; +import org.apache.cloudstack.backup.BackupProvider; import org.apache.cloudstack.framework.config.ConfigKey; import com.cloud.agent.api.Command; @@ -94,10 +95,10 @@ public interface HypervisorGuru extends Adapter { Map getClusterSettings(long vmId); VirtualMachine importVirtualMachineFromBackup(long zoneId, long domainId, long accountId, long userId, - String vmInternalName, Backup backup) throws Exception; + String vmInternalName, Backup backup, BackupProvider backupProvider) throws Exception; boolean attachRestoredVolumeToVirtualMachine(long zoneId, String location, Backup.VolumeInfo volumeInfo, - VirtualMachine vm, long poolId, Backup backup) throws Exception; + VirtualMachine vm, long poolId, Backup backup, BackupProvider backupProvider) throws Exception; /** * Will generate commands to migrate a vm to a pool. For now this will only work for stopped VMs on Vmware. 
* diff --git a/api/src/main/java/com/cloud/storage/Volume.java b/api/src/main/java/com/cloud/storage/Volume.java index c7fbdb0a5445..b9c2c73585a1 100644 --- a/api/src/main/java/com/cloud/storage/Volume.java +++ b/api/src/main/java/com/cloud/storage/Volume.java @@ -60,7 +60,9 @@ enum State { UploadError(false, "Volume upload encountered some error"), UploadAbandoned(false, "Volume upload is abandoned since the upload was never initiated within a specified time"), Attaching(true, "The volume is attaching to a VM from Ready state."), - Restoring(true, "The volume is being restored from backup."); + Restoring(true, "The volume is being restored from backup."), + Consolidating(true, "The volume is being flattened."), + RestoreError(false, "The volume restore encountered an error."); boolean _transitional; @@ -153,6 +155,10 @@ public String getDescription() { s_fsm.addTransition(new StateMachine2.Transition(Destroy, Event.RestoreRequested, Restoring, null)); s_fsm.addTransition(new StateMachine2.Transition(Restoring, Event.RestoreSucceeded, Ready, null)); s_fsm.addTransition(new StateMachine2.Transition(Restoring, Event.RestoreFailed, Ready, null)); + s_fsm.addTransition(new StateMachine2.Transition<>(Ready, Event.ConsolidationRequested, Consolidating, null)); + s_fsm.addTransition(new StateMachine2.Transition<>(Consolidating, Event.OperationSucceeded, Ready, null)); + s_fsm.addTransition(new StateMachine2.Transition<>(Consolidating, Event.OperationFailed, RestoreError, null)); + s_fsm.addTransition(new StateMachine2.Transition<>(RestoreError, Event.RestoreFailed, RestoreError, null)); } } @@ -179,7 +185,8 @@ enum Event { OperationTimeout, RestoreRequested, RestoreSucceeded, - RestoreFailed; + RestoreFailed, + ConsolidationRequested } /** diff --git a/api/src/main/java/com/cloud/storage/VolumeApiService.java b/api/src/main/java/com/cloud/storage/VolumeApiService.java index 1a9bcc6ee98b..e7116617ec89 100644 --- a/api/src/main/java/com/cloud/storage/VolumeApiService.java 
+++ b/api/src/main/java/com/cloud/storage/VolumeApiService.java @@ -107,7 +107,7 @@ public interface VolumeApiService { Volume attachVolumeToVM(AttachVolumeCmd command); - Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId, Boolean allowAttachForSharedFS); + Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId, Boolean allowAttachForSharedFS, boolean allowAttachOnRestoring); Volume detachVolumeViaDestroyVM(long vmId, long volumeId); diff --git a/api/src/main/java/com/cloud/vm/VirtualMachine.java b/api/src/main/java/com/cloud/vm/VirtualMachine.java index d244de7115e8..976c0e6c4627 100644 --- a/api/src/main/java/com/cloud/vm/VirtualMachine.java +++ b/api/src/main/java/com/cloud/vm/VirtualMachine.java @@ -58,7 +58,10 @@ public enum State { Error(false, "VM is in error"), Unknown(false, "VM state is unknown."), Shutdown(false, "VM state is shutdown from inside"), - Restoring(true, "VM is being restored from backup"); + Restoring(true, "VM is being restored from backup"), + BackingUp(true, "VM is being backed up"), + BackupError(false, "VM backup is in an inconsistent state. Operator should analyse the logs and restore the VM"), + RestoreError(false, "VM restore left the VM in an inconsistent state. 
Operator should analyse the logs and restore the VM"); private final boolean _transitional; String _description; @@ -131,6 +134,14 @@ public static StateMachine2 getStat s_fsm.addTransition(new Transition(State.Destroyed, Event.RestoringRequested, State.Restoring, null)); s_fsm.addTransition(new Transition(State.Restoring, Event.RestoringSuccess, State.Stopped, null)); s_fsm.addTransition(new Transition(State.Restoring, Event.RestoringFailed, State.Stopped, null)); + s_fsm.addTransition(new Transition<>(State.Running, Event.BackupRequested, State.BackingUp, null)); + s_fsm.addTransition(new Transition<>(State.Stopped, Event.BackupRequested, State.BackingUp, null)); + s_fsm.addTransition(new Transition<>(State.BackingUp, Event.BackupSucceededRunning, State.Running, null)); + s_fsm.addTransition(new Transition<>(State.BackingUp, Event.BackupSucceededStopped, State.Stopped, null)); + s_fsm.addTransition(new Transition<>(State.BackingUp, Event.OperationFailedToError, State.BackupError, null)); + s_fsm.addTransition(new Transition<>(State.BackingUp, Event.OperationFailedToRunning, State.Running, null)); + s_fsm.addTransition(new Transition<>(State.BackingUp, Event.OperationFailedToStopped, State.Stopped, null)); + s_fsm.addTransition(new Transition(State.RestoreError, Event.RestoringFailed, State.RestoreError, null)); s_fsm.addTransition(new Transition(State.Starting, VirtualMachine.Event.FollowAgentPowerOnReport, State.Running, Arrays.asList(new Impact[]{Impact.USAGE}))); s_fsm.addTransition(new Transition(State.Stopping, VirtualMachine.Event.FollowAgentPowerOnReport, State.Running, null)); @@ -209,6 +220,8 @@ public enum Event { ExpungeOperation, OperationSucceeded, OperationFailed, + OperationFailedToRunning, + OperationFailedToStopped, OperationFailedToError, OperationRetry, AgentReportShutdowned, @@ -218,6 +231,9 @@ public enum Event { RestoringRequested, RestoringFailed, RestoringSuccess, + BackupRequested, + BackupSucceededStopped, + BackupSucceededRunning, // 
added for new VMSync logic FollowAgentPowerOnReport, diff --git a/api/src/main/java/com/cloud/vm/VmDetailConstants.java b/api/src/main/java/com/cloud/vm/VmDetailConstants.java index db7665724973..a08d494d6b7f 100644 --- a/api/src/main/java/com/cloud/vm/VmDetailConstants.java +++ b/api/src/main/java/com/cloud/vm/VmDetailConstants.java @@ -129,4 +129,7 @@ public interface VmDetailConstants { String EXTERNAL_DETAIL_PREFIX = "External:"; String CLOUDSTACK_VM_DETAILS = "cloudstack.vm.details"; String CLOUDSTACK_VLAN = "cloudstack.vlan"; + + // KNIB specific + String LINKED_VOLUMES_SECONDARY_STORAGE_UUIDS = "linkedVolumesSecondaryStorageUuids"; } diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 9a8913da5b04..a2366ac161ac 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -518,6 +518,7 @@ public class ApiConstants { public static final String QUALIFIERS = "qualifiers"; public static final String QUERY_FILTER = "queryfilter"; public static final String QUIESCE_VM = "quiescevm"; + public static final String QUICK_RESTORE = "quickrestore"; public static final String SCHEDULE = "schedule"; public static final String SCHEDULE_ID = "scheduleid"; public static final String SCOPE = "scope"; @@ -566,6 +567,7 @@ public class ApiConstants { public static final String STATE = "state"; public static final String STATS = "stats"; public static final String STATUS = "status"; + public static final String COMPRESSION_STATUS = "compressionstatus"; public static final String STORAGE_TYPE = "storagetype"; public static final String STORAGE_POLICY = "storagepolicy"; public static final String STORAGE_MOTION_ENABLED = "storagemotionenabled"; @@ -657,6 +659,7 @@ public class ApiConstants { public static final String ETCD_SERVICE_OFFERING_NAME = "etcdofferingname"; public static final String REMOVE_VLAN = 
"removevlan"; public static final String VLAN_ID = "vlanid"; + public static final String ISOLATED = "isolated"; public static final String ISOLATED_PVLAN = "isolatedpvlan"; public static final String ISOLATED_PVLAN_TYPE = "isolatedpvlantype"; public static final String ISOLATION_URI = "isolationuri"; @@ -1171,6 +1174,7 @@ public class ApiConstants { public static final String CLEAN_UP_EXTRA_CONFIG = "cleanupextraconfig"; public static final String CLEAN_UP_PARAMETERS = "cleanupparameters"; public static final String VIRTUAL_SIZE = "virtualsize"; + public static final String UNCOMPRESSED_SIZE = "uncompressedsize"; public static final String NETSCALER_CONTROLCENTER_ID = "netscalercontrolcenterid"; public static final String NETSCALER_SERVICEPACKAGE_ID = "netscalerservicepackageid"; public static final String FETCH_ROUTER_HEALTH_CHECK_RESULTS = "fetchhealthcheckresults"; @@ -1353,6 +1357,10 @@ public class ApiConstants { public static final String VMWARE_DC = "vmwaredc"; + public static final String PARAMETER_DESCRIPTION_ISOLATED_BACKUPS = "Whether the backup will be isolated, defaults to false. " + + "Isolated backups are always created as full backups in independent chains. Therefore, they will never depend on any existing backup chain " + + "and no backup chain will depend on them. 
Currently only supported for the KNIB provider."; + public static final String CSS = "css"; public static final String JSON_CONFIGURATION = "jsonconfiguration"; @@ -1373,6 +1381,24 @@ public class ApiConstants { public static final String OBSOLETE_PARAMETERS = "obsoleteparameters"; public static final String EXCLUDED_PARAMETERS = "excludedparameters"; + public static final String COMPRESS = "compress"; + + public static final String VALIDATE = "validate"; + + public static final String ALLOW_QUICK_RESTORE = "allowquickrestore"; + + public static final String ALLOW_EXTRACT_FILE = "allowextractfile"; + + public static final String BACKUP_CHAIN_SIZE = "backupchainsize"; + + public static final String COMPRESSION_LIBRARY = "compressionlibrary"; + public static final String ATTEMPTS = "attempts"; + + public static final String EXECUTING = "executing"; + + public static final String SCHEDULED = "scheduled"; + public static final String SCHEDULED_DATE = "scheduleddate"; + /** * This enum specifies IO Drivers, each option controls specific policies on I/O. * Qemu guests support "threads" and "native" options Since 0.8.8 ; "io_uring" is supported Since 6.3.0 (QEMU 5.0). 
diff --git a/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java b/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java index 8e92e877f5ca..a4fa0d961b64 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java +++ b/api/src/main/java/org/apache/cloudstack/api/ResponseGenerator.java @@ -22,6 +22,8 @@ import java.util.Map; import java.util.Set; +import org.apache.cloudstack.api.response.NativeBackupOfferingResponse; +import org.apache.cloudstack.backup.NativeBackupOffering; import org.apache.cloudstack.api.response.ConsoleSessionResponse; import org.apache.cloudstack.consoleproxy.ConsoleSession; import org.apache.cloudstack.affinity.AffinityGroup; @@ -583,4 +585,6 @@ List createTemplateResponses(ResponseView view, VirtualMachine GuiThemeResponse createGuiThemeResponse(GuiThemeJoin guiThemeJoin); ConsoleSessionResponse createConsoleSessionResponse(ConsoleSession consoleSession, ResponseView responseView); + + NativeBackupOfferingResponse createNativeBackupOfferingResponse(NativeBackupOffering offering); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java index ca60ea674fe3..34524f2c26d7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java @@ -81,6 +81,12 @@ public class CreateBackupCmd extends BaseAsyncCreateCmd { since = "4.21.0") private Boolean quiesceVM; + @Parameter(name = ApiConstants.ISOLATED, + type = CommandType.BOOLEAN, + description = ApiConstants.PARAMETER_DESCRIPTION_ISOLATED_BACKUPS, + since = "4.23.0") + private boolean isolated; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -101,6 +107,10 @@ public Boolean 
getQuiesceVM() { return quiesceVM; } + public boolean isIsolated() { + return isolated; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupScheduleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupScheduleCmd.java index 67ad7c71503f..a1fbcdf6c6cb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupScheduleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupScheduleCmd.java @@ -87,6 +87,12 @@ public class CreateBackupScheduleCmd extends BaseCmd { since = "4.21.0") private Boolean quiesceVM; + @Parameter(name = ApiConstants.ISOLATED, + type = CommandType.BOOLEAN, + description = ApiConstants.PARAMETER_DESCRIPTION_ISOLATED_BACKUPS, + since = "4.23.0") + private boolean isolated; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -115,6 +121,10 @@ public Boolean getQuiesceVM() { return quiesceVM; } + public boolean isIsolated() { + return isolated; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupCompressionJobsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupCompressionJobsCmd.java new file mode 100644 index 000000000000..d0f57a6869d4 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupCompressionJobsCmd.java @@ -0,0 +1,104 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.backup; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.BackupCompressionJobResponse; +import org.apache.cloudstack.api.response.BackupResponse; +import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.ZoneResponse; + +@APICommand(name = "listBackupCompressionJobs", description = "List backup compression jobs", responseObject = BackupCompressionJobResponse.class, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin}, since = "4.23.0") +public class ListBackupCompressionJobsCmd extends BaseListCmd { + + @Parameter(name = 
ApiConstants.ID, type = CommandType.LONG, entityType = BackupCompressionJobResponse.class, description = "List only job with given ID.") + private Long id; + + @Parameter(name = ApiConstants.BACKUP_ID, type = CommandType.UUID, entityType = BackupResponse.class, description = "List jobs for the given backup.") + private Long backupId; + + @Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, description = "List jobs in the given host, implies executing.") + private Long hostId; + + @Parameter(name = ApiConstants.ZONE_ID, type = CommandType.UUID, entityType = ZoneResponse.class, description = "List jobs in the given zone.") + private Long zoneId; + + @Parameter(name = ApiConstants.TYPE, type = CommandType.STRING, description = "List jobs with the given type. Accepted values are Start and Finalize.") + private String type; + + @Parameter(name = ApiConstants.EXECUTING, type = CommandType.BOOLEAN, description = "List executing jobs.") + private Boolean executing; + + @Parameter(name = ApiConstants.SCHEDULED, type = CommandType.BOOLEAN, description = "List scheduled jobs.") + private Boolean scheduled; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public Long getBackupId() { + return backupId; + } + + public Long getHostId() { + return hostId; + } + + public Long getZoneId() { + return zoneId; + } + + public String getType() { + return type; + } + + public boolean getExecuting() { + return Boolean.TRUE.equals(executing); + } + + public boolean getScheduled() { + return Boolean.TRUE.equals(scheduled); + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() throws ResourceUnavailableException, 
InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, + NetworkRuleConflictException { + ListResponse<BackupCompressionJobResponse> response = _queryService.listBackupCompressionJobs(this); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/RestoreBackupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/RestoreBackupCmd.java index c29d117161f2..5830c0a8d62d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/RestoreBackupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/RestoreBackupCmd.java @@ -26,6 +26,7 @@ import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.HostResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.BackupResponse; import org.apache.cloudstack.backup.BackupManager; @@ -38,6 +39,7 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.commons.lang3.BooleanUtils; @APICommand(name = "restoreBackup", description = "Restores an existing stopped or deleted Instance using an Instance backup", @@ -59,6 +61,14 @@ public class RestoreBackupCmd extends BaseAsyncCmd { description = "ID of the backup") private Long backupId; + @Parameter(name = ApiConstants.QUICK_RESTORE, type = CommandType.BOOLEAN, description = "Whether to use the quick restore process or not. 
" + + "Currently this parameter is only supported by the KNIB provider.", since = "4.23.0") + private Boolean quickRestore; + + @Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, description = "If quickrestore is true, which host to start the VM on;" + + " otherwise, ignored. Currently this parameter is only supported by the KNIB provider.", since = "4.23.0") + private Long hostId; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -67,6 +77,14 @@ public Long getBackupId() { return backupId; } + public boolean isQuickRestore() { + return BooleanUtils.isTrue(quickRestore); + } + + public Long getHostId() { + return hostId; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -74,7 +92,7 @@ public Long getBackupId() { @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { try { - boolean result = backupManager.restoreBackup(backupId); + boolean result = backupManager.restoreBackup(backupId, isQuickRestore(), getHostId()); if (result) { SuccessResponse response = new SuccessResponse(getCommandName()); response.setResponseName(getCommandName()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/RestoreVolumeFromBackupAndAttachToVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/RestoreVolumeFromBackupAndAttachToVMCmd.java index 703a1b2e8802..f30d07a6546a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/RestoreVolumeFromBackupAndAttachToVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/RestoreVolumeFromBackupAndAttachToVMCmd.java @@ 
-26,6 +26,7 @@ import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.HostResponse; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.BackupResponse; @@ -73,6 +74,14 @@ public class RestoreVolumeFromBackupAndAttachToVMCmd extends BaseAsyncCmd { description = "ID of the Instance where to attach the restored volume") private Long vmId; + @Parameter(name = ApiConstants.QUICK_RESTORE, type = CommandType.BOOLEAN, description = "Whether to use the quick restore process or not. " + + "Currently this parameter is only supported by the KNIB provider.", since = "4.23.0") + private Boolean quickRestore; + + @Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, description = "If quickrestore is true, which host to start the VM on;" + + " otherwise, ignored. 
Currently this parameter is only supported by the KNIB provider.", since = "4.23.0") + private Long hostId; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -89,6 +98,14 @@ public Long getBackupId() { return backupId; } + public boolean isQuickRestore() { + return org.apache.commons.lang3.BooleanUtils.isTrue(quickRestore); + } + + public Long getHostId() { + return hostId; + } + @Override public long getEntityOwnerId() { return CallContext.current().getCallingAccount().getId(); @@ -101,7 +118,7 @@ public long getEntityOwnerId() { @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { try { - boolean result = backupManager.restoreBackupVolumeAndAttachToVM(volumeUuid, backupId, vmId); + boolean result = backupManager.restoreBackupVolumeAndAttachToVM(volumeUuid, backupId, vmId, isQuickRestore(), getHostId()); if (result) { SuccessResponse response = new SuccessResponse(getCommandName()); response.setResponseName(getCommandName()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/nativeoffering/CreateNativeBackupOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/nativeoffering/CreateNativeBackupOfferingCmd.java new file mode 100644 index 000000000000..199264ea81de --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/nativeoffering/CreateNativeBackupOfferingCmd.java @@ -0,0 +1,126 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.user.backup.nativeoffering; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.NativeBackupOfferingResponse; +import org.apache.cloudstack.backup.Backup; +import org.apache.cloudstack.backup.NativeBackupOffering; +import org.apache.cloudstack.backup.NativeBackupOfferingService; + +import javax.inject.Inject; + +@APICommand(name = "createNativeBackupOffering", description = "Creates a native backup offering", responseObject = NativeBackupOfferingResponse.class, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin}, since = "4.23.0") +public class CreateNativeBackupOfferingCmd extends BaseCmd { + + @Inject + private NativeBackupOfferingService nativeBackupOfferingService; + + 
///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "Backup offering name.", required = true) + private String name; + + @Parameter(name = ApiConstants.COMPRESS, type = CommandType.BOOLEAN, description = "Whether the backups should be compressed or not.") + private Boolean compress; + + @Parameter(name = ApiConstants.VALIDATE, type = CommandType.BOOLEAN, description = "Whether the backups should be validated or not.") + private Boolean validate; + + @Parameter(name = ApiConstants.ALLOW_QUICK_RESTORE, type = CommandType.BOOLEAN, description = "Whether the backups are allowed to be quick restored or not.") + private Boolean allowQuickRestore; + + @Parameter(name = ApiConstants.ALLOW_EXTRACT_FILE, type = CommandType.BOOLEAN, description = "Whether files may be extracted from backups or not.") + private Boolean allowExtractFile; + + @Parameter(name = ApiConstants.BACKUP_CHAIN_SIZE, type = CommandType.INTEGER, description = "Backup chain size for backups created with this offering.") + private Integer backupChainSize; + + @Parameter(name = ApiConstants.COMPRESSION_LIBRARY, type = CommandType.STRING, description = "Compression library, for offerings that support compression. Accepted values " + + "are zstd and zlib. By default, zstd is used for images that support it. 
If the image only supports zlib, it will be used regardless of this parameter.") + private String compressionLibrary; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public String getName() { + return name; + } + + public boolean isCompress() { + return Boolean.TRUE.equals(compress); + } + + public boolean isValidate() { + return Boolean.TRUE.equals(validate); + } + + public boolean isAllowQuickRestore() { + return Boolean.TRUE.equals(allowQuickRestore); + } + + public boolean isAllowExtractFile() { + return Boolean.TRUE.equals(allowExtractFile); + } + + public Integer getBackupChainSize() { + return backupChainSize; + } + + public Backup.CompressionLibrary getCompressionLibrary() { + if (compressionLibrary == null) { + return null; + } + try { + return Backup.CompressionLibrary.valueOf(compressionLibrary); + } catch (IllegalArgumentException e) { + throw new InvalidParameterValueException(String.format("Invalid compression library, accepted values are zstd and zlib, received [%s].", compressionLibrary)); + } + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, + NetworkRuleConflictException { + NativeBackupOffering offering = nativeBackupOfferingService.createNativeBackupOffering(this); + NativeBackupOfferingResponse response = _responseGenerator.createNativeBackupOfferingResponse(offering); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } + + @Override + public long getEntityOwnerId() { + return 0; + } +} diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/nativeoffering/DeleteNativeBackupOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/nativeoffering/DeleteNativeBackupOfferingCmd.java new file mode 100644 index 000000000000..c8f81566cf5b --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/nativeoffering/DeleteNativeBackupOfferingCmd.java @@ -0,0 +1,74 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.backup.nativeoffering; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.NativeBackupOfferingResponse; +import org.apache.cloudstack.backup.NativeBackupOffering; +import org.apache.cloudstack.backup.NativeBackupOfferingService; + +import javax.inject.Inject; + +@APICommand(name = "deleteNativeBackupOffering", description = "Deletes a native backup offering", responseObject = NativeBackupOfferingResponse.class, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin}, since = "4.23.0") +public class DeleteNativeBackupOfferingCmd extends BaseCmd { + @Inject + private NativeBackupOfferingService nativeBackupOfferingService; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = NativeBackupOfferingResponse.class, description = "Backup offering ID.") + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + 
///////////////////////////////////////////////////// + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, + NetworkRuleConflictException { + NativeBackupOffering offering = nativeBackupOfferingService.deleteNativeBackupOffering(this); + NativeBackupOfferingResponse response = _responseGenerator.createNativeBackupOfferingResponse(offering); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } + + @Override + public long getEntityOwnerId() { + return 0; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/nativeoffering/ListNativeBackupOfferingsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/nativeoffering/ListNativeBackupOfferingsCmd.java new file mode 100644 index 000000000000..2d6d0039987e --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/nativeoffering/ListNativeBackupOfferingsCmd.java @@ -0,0 +1,105 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.user.backup.nativeoffering; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.api.response.NativeBackupOfferingResponse; + +@APICommand(name = "listNativeBackupOfferings", description = "List native backup offerings", responseObject = NativeBackupOfferingResponse.class, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, authorized = {RoleType.Admin}, since = "4.23.0") +public class ListNativeBackupOfferingsCmd extends BaseListCmd { + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = NativeBackupOfferingResponse.class, description = "Backup offering ID.") + private Long id; + + @Parameter(name = ApiConstants.NAME, type = CommandType.STRING, description = "Backup offering name.") + private String name; + + @Parameter(name = ApiConstants.COMPRESS, type = CommandType.BOOLEAN, description = "Whether the backups should be compressed or not.") + private Boolean compress; + + @Parameter(name = ApiConstants.VALIDATE, type = CommandType.BOOLEAN, description = "Whether the backups should be validated or not.") + private Boolean validate; + + @Parameter(name = ApiConstants.ALLOW_QUICK_RESTORE, type = CommandType.BOOLEAN, 
description = "Whether the backups are allowed to be restored or not.") + private Boolean allowQuickRestore; + + @Parameter(name = ApiConstants.ALLOW_EXTRACT_FILE, type = CommandType.BOOLEAN, description = "Whether files may be extracted from backups or not.") + private Boolean allowExtractFile; + + @Parameter(name = ApiConstants.SHOW_REMOVED, type = CommandType.BOOLEAN, description = "Show removed offerings.") + private boolean showRemoved = false; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public String getName() { + return name; + } + + public Boolean isCompress() { + return compress; + } + + public Boolean isValidate() { + return validate; + } + + public Boolean isAllowQuickRestore() { + return allowQuickRestore; + } + + public Boolean isAllowExtractFile() { + return allowExtractFile; + } + + public boolean isShowRemoved() { + return showRemoved; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, + NetworkRuleConflictException { + ListResponse offeringResponseList = _queryService.listNativeBackupOfferings(this); + offeringResponseList.setResponseName(getCommandName()); + this.setResponseObject(offeringResponseList); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CreateVMFromBackupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CreateVMFromBackupCmd.java index e17ba9c2d705..242a367dc764 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CreateVMFromBackupCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CreateVMFromBackupCmd.java @@ -36,6 +36,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.uservm.UserVm; import com.cloud.vm.VirtualMachine; +import org.apache.commons.lang3.ObjectUtils; @APICommand(name = "createVMFromBackup", description = "Creates and automatically starts a VM from a backup.", @@ -69,6 +70,10 @@ public class CreateVMFromBackupCmd extends BaseDeployVMCmd { @Parameter(name = ApiConstants.PRESERVE_IP, type = CommandType.BOOLEAN, description = "Use the same IP/MAC addresses as stored in the backup metadata. Works only if the original Instance is deleted and the IP/MAC address is available.") private Boolean preserveIp; + @Parameter(name = ApiConstants.QUICK_RESTORE, type = CommandType.BOOLEAN, description = "Whether to use the quick restore process or not. " + + "Currently this parameter is only supported by the KNIB provider.", since = "4.23.0") + private Boolean quickRestore; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -89,6 +94,10 @@ public boolean getPreserveIp() { return (preserveIp != null) ? preserveIp : false; } + public Boolean getQuickRestore() { + return ObjectUtils.defaultIfNull(this.quickRestore, false); + } + @Override public void create() { UserVm vm; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/BackupCompressionJobResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/BackupCompressionJobResponse.java new file mode 100644 index 000000000000..6df5678a9174 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/BackupCompressionJobResponse.java @@ -0,0 +1,79 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.response; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; + +import java.util.Date; + +public class BackupCompressionJobResponse extends BaseResponse { + + @SerializedName(ApiConstants.ID) + @Param(description = "Compression job ID.") + private Long id; + + @SerializedName(ApiConstants.BACKUP_ID) + @Param(description = "Backup ID.") + private String backupId; + + @SerializedName(ApiConstants.HOST_ID) + @Param(description = "Host where the job is being executed.") + private String hostId; + + @SerializedName(ApiConstants.ZONE_ID) + @Param(description = "Zone where the job is being executed.") + private String zoneId; + + @SerializedName(ApiConstants.ATTEMPTS) + @Param(description = "Number of attempts already made to complete this job.") + private Integer attempts; + + @SerializedName(ApiConstants.TYPE) + @Param(description = "Compression job type.") + private String type; + + @SerializedName(ApiConstants.START_DATE) + @Param(description = "Compression job start date.") + private Date startDate; + + @SerializedName(ApiConstants.SCHEDULED_DATE) + @Param(description = "Compression job scheduled start date.") 
+ private Date scheduledDate; + + @SerializedName(ApiConstants.REMOVED) + @Param(description = "Compression job scheduled removed date.") + private Date removed; + + public BackupCompressionJobResponse(Long id, String backupId, String zoneId, Integer attempts, String type, Date startDate, Date scheduledDate, Date removed) { + super("backupcompressionjob"); + this.id = id; + this.backupId = backupId; + this.zoneId = zoneId; + this.attempts = attempts; + this.type = type; + this.startDate = startDate; + this.scheduledDate = scheduledDate; + this.removed = removed; + } + + public void setHostId(String hostId) { + this.hostId = hostId; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/BackupResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/BackupResponse.java index b855bfe40b8d..3ce559340113 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/BackupResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/BackupResponse.java @@ -71,10 +71,18 @@ public class BackupResponse extends BaseResponse { @Param(description = "Backup protected (virtual) size in bytes") private Long protectedSize; + @SerializedName(ApiConstants.UNCOMPRESSED_SIZE) + @Param(description = "backup uncompressed size in bytes. 
Only set if backup is compressed") + private Long uncompressedSize; + @SerializedName(ApiConstants.STATUS) @Param(description = "Backup status") private Backup.Status status; + @SerializedName(ApiConstants.COMPRESSION_STATUS) + @Param(description = "backup compression status") + private Backup.CompressionStatus compressionStatus; + @SerializedName(ApiConstants.VOLUMES) @Param(description = "Backed up volumes") private String volumes; @@ -207,6 +215,14 @@ public void setProtectedSize(Long protectedSize) { this.protectedSize = protectedSize; } + public Long getUncompressedSize() { + return uncompressedSize; + } + + public void setUncompressedSize(Long uncompressedSize) { + this.uncompressedSize = uncompressedSize; + } + public Backup.Status getStatus() { return status; } @@ -215,6 +231,14 @@ public void setStatus(Backup.Status status) { this.status = status; } + public Backup.CompressionStatus getCompressionStatus() { + return compressionStatus; + } + + public void setCompressionStatus(Backup.CompressionStatus compressionStatus) { + this.compressionStatus = compressionStatus; + } + public String getVolumes() { return volumes; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/BackupScheduleResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/BackupScheduleResponse.java index 13d0c5d8c562..5da07864603a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/BackupScheduleResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/BackupScheduleResponse.java @@ -56,6 +56,10 @@ public class BackupScheduleResponse extends BaseResponse { @Param(description = "maximum number of backups retained") private Integer maxBackups; + @SerializedName(ApiConstants.ISOLATED) + @Param(description = ApiConstants.PARAMETER_DESCRIPTION_ISOLATED_BACKUPS) + private boolean isolated; + public void setId(String id) { this.id = id; } @@ -111,4 +115,8 @@ public void setMaxBackups(Integer maxBackups) { public void setQuiesceVM(Boolean 
quiesceVM) { this.quiesceVM = quiesceVM; } + + public void setIsolated(boolean isolated) { + this.isolated = isolated; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/NativeBackupOfferingResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/NativeBackupOfferingResponse.java new file mode 100644 index 000000000000..cbea02edc609 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/NativeBackupOfferingResponse.java @@ -0,0 +1,80 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.response; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; +import org.apache.cloudstack.backup.NativeBackupOffering; + +import java.util.Date; + +@EntityReference(value = NativeBackupOffering.class) +public class NativeBackupOfferingResponse extends BaseResponse { + + @SerializedName(ApiConstants.ID) + @Param(description = "ID of the offering.") + private String id; + + @SerializedName(ApiConstants.NAME) + @Param(description = "Name of the offering.") + private String name; + + @SerializedName(ApiConstants.COMPRESS) + @Param(description = "Whether the backups should be compressed or not.") + private Boolean compress; + + @SerializedName(ApiConstants.VALIDATE) + @Param(description = "Whether the backups should be validated or not.") + private Boolean validate; + + @SerializedName(ApiConstants.ALLOW_QUICK_RESTORE) + @Param(description = "Whether quick restore of the backups is allowed or not.") + private Boolean allowQuickRestore; + + @SerializedName(ApiConstants.ALLOW_EXTRACT_FILE) + @Param(description = "Whether files may be extracted from backups or not.") + private Boolean allowExtractFile; + + @SerializedName(ApiConstants.BACKUP_CHAIN_SIZE) + @Param(description = "Backup chain size for backups created with this offering.") + private Integer backupChainSize; + + @SerializedName(ApiConstants.CREATED) + @Param(description = "When the offering was created.") + private Date created; + + @SerializedName(ApiConstants.REMOVED) + @Param(description = "When the offering was removed.") + private Date removed; + + public NativeBackupOfferingResponse(String id, String name, Boolean compress, Boolean validate, Boolean allowQuickRestore, Boolean allowExtractFile, Integer chainSize, Date created, + Date removed) { + super("nativebackupoffering"); + this.id = 
id; + this.name = name; + this.compress = compress; + this.validate = validate; + this.allowQuickRestore = allowQuickRestore; + this.allowExtractFile = allowExtractFile; + this.backupChainSize = chainSize; + this.created = created; + this.removed = removed; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/backup/Backup.java b/api/src/main/java/org/apache/cloudstack/backup/Backup.java index 951af9180e7f..cc626cb1dad9 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/Backup.java +++ b/api/src/main/java/org/apache/cloudstack/backup/Backup.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.backup; +import java.io.Serializable; import java.util.Date; import java.util.List; import java.util.Map; @@ -34,6 +35,14 @@ enum Status { Allocated, Queued, BackingUp, BackedUp, Error, Failed, Restoring, Removed, Expunged } + enum CompressionStatus { + Uncompressed, Compressing, FinalizingCompression, Compressed, CompressionError + } + + enum CompressionLibrary { + zstd, zlib + } + class Metric { private Long backupSize = 0L; private Long dataSize = 0L; @@ -120,7 +129,7 @@ public void setDataSize(Long dataSize) { } } - class VolumeInfo { + class VolumeInfo implements Serializable { private String uuid; private Volume.Type type; private Long size; @@ -189,11 +198,13 @@ public String toString() { String getType(); Date getDate(); Backup.Status getStatus(); + Backup.CompressionStatus getCompressionStatus(); Long getSize(); Long getProtectedSize(); void setName(String name); String getDescription(); void setDescription(String description); + Long getUncompressedSize(); List getBackedUpVolumes(); long getZoneId(); Map getDetails(); diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java index e83db3a25895..a73a763b9541 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java @@ -20,6 
+20,8 @@ import java.util.List; import java.util.Map; +import com.cloud.storage.Volume; +import com.cloud.vm.VirtualMachine; import com.cloud.capacity.Capacity; import com.cloud.exception.ResourceAllocationException; import org.apache.cloudstack.api.command.admin.backup.ImportBackupOfferingCmd; @@ -37,11 +39,9 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.Network; -import com.cloud.storage.Volume; import com.cloud.utils.Pair; import com.cloud.utils.component.Manager; import com.cloud.utils.component.PluggableService; -import com.cloud.vm.VirtualMachine; import com.cloud.vm.VmDiskInfo; /** @@ -57,7 +57,7 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer ConfigKey BackupProviderPlugin = new ValidatedConfigKey<>("Advanced", String.class, "backup.framework.provider.plugin", "dummy", - "The backup and recovery provider plugin. Valid plugin values: dummy, veeam, networker and nas", + "The backup and recovery provider plugin. 
Valid plugin values: dummy, veeam, networker, nas and knib", true, ConfigKey.Scope.Zone, BackupFrameworkEnabled.key(), value -> validateBackupProviderConfig((String)value)); ConfigKey BackupSyncPollingInterval = new ConfigKey<>("Advanced", Long.class, @@ -204,7 +204,7 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer /** * Restore a full VM from backup */ - boolean restoreBackup(final Long backupId); + boolean restoreBackup(final Long backupId, boolean quickRestore, Long hostId); Map getIpToNetworkMapFromBackup(Backup backup, boolean preserveIps, List networkIds); @@ -215,12 +215,12 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer /** * Restore a backup to a new Instance */ - boolean restoreBackupToVM(Long backupId, Long vmId) throws ResourceUnavailableException; + boolean restoreBackupToVM(Long backupId, Long vmId, boolean quickrestore) throws ResourceUnavailableException; /** * Restore a backed up volume and attach it to a VM */ - boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, final Long backupId, final Long vmId) throws Exception; + boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, final Long backupId, final Long vmId, boolean isQuickRestore, Long hostId) throws Exception; /** * Deletes a backup @@ -256,7 +256,7 @@ static void validateBackupProviderConfig(String value) { if (value != null && (value.contains(",") || value.trim().contains(" "))) { throw new IllegalArgumentException("Multiple backup provider plugins are not supported. Please provide a single plugin value."); } - List validPlugins = List.of("dummy", "veeam", "networker", "nas"); + List validPlugins = List.of("dummy", "veeam", "networker", "nas", "knib"); if (value != null && !validPlugins.contains(value)) { throw new IllegalArgumentException("Invalid backup provider plugin: " + value + ". 
Valid plugin values are: " + String.join(", ", validPlugins)); } diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java b/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java index 23b8092425d9..598bd861bbd8 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java @@ -73,11 +73,14 @@ public interface BackupProvider { * Starts and creates an adhoc backup process * for a previously registered VM backup * - * @param vm the machine to make a backup of - * @param quiesceVM instance will be quiesced for checkpointing for backup. Applicable only to NAS plugin. + * @param vm + * the machine to make a backup of + * @param quiesceVM + * instance will be quiesced for checkpointing for backup. Applicable only to NAS plugin. + * @param isolated * @return the result and {code}Backup{code} {code}Object{code} */ - Pair takeBackup(VirtualMachine vm, Boolean quiesceVM); + Pair takeBackup(VirtualMachine vm, Boolean quiesceVM, boolean isolated); /** * Delete an existing backup @@ -87,17 +90,18 @@ public interface BackupProvider { */ boolean deleteBackup(Backup backup, boolean forced); - Pair restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid); + Pair restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid, boolean quickrestore); /** * Restore VM from backup */ - boolean restoreVMFromBackup(VirtualMachine vm, Backup backup); + boolean restoreVMFromBackup(VirtualMachine vm, Backup backup, boolean quickRestore, Long hostId); /** * Restore a volume from a backup */ - Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, Pair vmNameAndState); + Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, + Pair vmNameAndState, VirtualMachine vm, boolean quickRestore); /** * 
Syncs backup metrics (backup size, protected size) from the plugin and stores it within the provider @@ -140,5 +144,4 @@ default boolean supportsMemoryVmSnapshot() { * @param zoneId the zone for which to return metrics */ void syncBackupStorageStats(Long zoneId); - } diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupSchedule.java b/api/src/main/java/org/apache/cloudstack/backup/BackupSchedule.java index 44fdf70c4c15..ddc823a14ff5 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupSchedule.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupSchedule.java @@ -34,4 +34,5 @@ public interface BackupSchedule extends ControlledEntity, InternalIdentity { Boolean getQuiesceVM(); int getMaxBackups(); String getUuid(); + boolean isIsolated(); } diff --git a/api/src/main/java/org/apache/cloudstack/backup/NativeBackupOffering.java b/api/src/main/java/org/apache/cloudstack/backup/NativeBackupOffering.java new file mode 100644 index 000000000000..e5878c69052d --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/backup/NativeBackupOffering.java @@ -0,0 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.backup; + +import java.util.Date; + +public interface NativeBackupOffering extends BackupOffering { + + boolean isCompress(); + + boolean isValidate(); + + boolean isAllowQuickRestore(); + + boolean isAllowExtractFile(); + + Integer getBackupChainSize(); + + Date getCreated(); + + Date getRemoved(); +} diff --git a/api/src/main/java/org/apache/cloudstack/backup/NativeBackupOfferingService.java b/api/src/main/java/org/apache/cloudstack/backup/NativeBackupOfferingService.java new file mode 100644 index 000000000000..04aaf8d83397 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/backup/NativeBackupOfferingService.java @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.backup; + +import org.apache.cloudstack.api.command.user.backup.nativeoffering.CreateNativeBackupOfferingCmd; +import org.apache.cloudstack.api.command.user.backup.nativeoffering.DeleteNativeBackupOfferingCmd; + + +public interface NativeBackupOfferingService { + + NativeBackupOffering createNativeBackupOffering(CreateNativeBackupOfferingCmd cmd); + + NativeBackupOffering deleteNativeBackupOffering(DeleteNativeBackupOfferingCmd cmd); +} diff --git a/api/src/main/java/org/apache/cloudstack/backup/NativeBackupProvider.java b/api/src/main/java/org/apache/cloudstack/backup/NativeBackupProvider.java new file mode 100644 index 000000000000..0874f14a5e7b --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/backup/NativeBackupProvider.java @@ -0,0 +1,123 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.backup; + +import com.cloud.storage.Volume; +import com.cloud.utils.Pair; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.snapshot.VMSnapshot; +import org.apache.cloudstack.framework.config.ConfigKey; + +public interface NativeBackupProvider extends BackupProvider { + String VM_WORK_JOB_HANDLER = NativeBackupService.class.getSimpleName(); + + ConfigKey backupCompressionTimeout = new ConfigKey<>("Advanced", Integer.class, "backup.compression.timeout", "28800", "Backup compression timeout (in " + + "seconds). Will only start counting once the backup compression async job actually starts.", true, ConfigKey.Scope.Cluster); + + ConfigKey backupCompressionMinimumFreeStorage = new ConfigKey<>("Advanced", Double.class, "backup.compression.minimum.free.storage", "1", "The minimum " + + "amount of free storage that should be available to start the compression. This configuration uses a multiplier on the backup size, by default, it needs the same " + + "amount of free storage as the backup uses while uncompressed.", true, ConfigKey.Scope.Zone); + + ConfigKey backupCompressionCoroutines = new ConfigKey<>("Advanced", Integer.class, "backup.compression.coroutines", "1", "Number of parallel coroutines " + + "for the compression process. This is translated to qemu-img '-m' parameter.", true, ConfigKey.Scope.Cluster); + + ConfigKey backupCompressionRateLimit = new ConfigKey<>("Advanced", Integer.class, "backup.compression.rate.limit", "0", "Limit the compression rate to " + + "this configuration's value (in MB/s). Values lower than 1 disable the limit.", true, ConfigKey.Scope.Cluster); + + /** + * Actually execute the backup after being queued. + * */ + default Pair orchestrateTakeBackup(Backup backup, boolean quiesceVm, boolean isolated) { + return null; + } + + /** + * Actually delete the backup after being queued. 
+ * */ + default Boolean orchestrateDeleteBackup(Backup backup, boolean forced) { + return null; + } + + /** + * Actually restore the backup after being queued. + * */ + default Boolean orchestrateRestoreVMFromBackup(Backup backup, VirtualMachine vm, boolean quickRestore, Long hostId, boolean sameVmAsBackup) { + return null; + } + + /** + * This method should be overwritten by any backup providers that want to schedule their backup restore jobs in the same queue as the VM jobs. + * Otherwise, just use the restoreBackedUpVolume method. + * */ + default Pair orchestrateRestoreBackedUpVolume(Backup backup, VirtualMachine vm, Backup.VolumeInfo backupVolumeInfo, String hostIp, boolean quickRestore) { + return null; + } + + /** + * This method should be overwritten by any native backup providers that want to allow backup compression through ACS.
+ * The compression is done in two steps:
+ * 1) Compress the backup to a different file;
+ * 2) Switch the old file for the newly compressed one.
+ *
+ * This method is supposed to execute step 1. + * + * @return + */ + default boolean startBackupCompression(long backupId, long hostId) { + return false; + } + + /** + * This method should be overwritten by any native backup providers that want to allow backup compression through ACS.
+ * The compression is done in two steps:
+ * 1) Compress the backup to a different file;
+ * 2) Switch the old file for the newly compressed one.
+ *
+ * This method is supposed to execute step 2. + * + * @return + */ + default boolean finalizeBackupCompression(long backupId, long hostId) { + return false; + } + + /** + * This method should be overwritten by any native backup providers that allow volume detach but need to prepare it beforehand. + * */ + default void prepareVolumeForDetach(Volume volume, VirtualMachine virtualMachine) { + } + + /** + * This method should be overwritten by any native backup providers that allow volume migration but need to prepare it beforehand. + * */ + default void prepareVolumeForMigration(Volume volume, VirtualMachine virtualMachine) { + } + + /** + * This method should be overwritten by any native backup providers that must update metadata regarding a volume after certain operations (such as after a volume migration). + * */ + default void updateVolumeId(VirtualMachine virtualMachine, long oldVolumeId, long newVolumeId) { + } + + /** + * This method should be overwritten by any native backup providers that are compatible with VM Snapshots but need to prepare the VM to be reverted. + * Currently, the only strategy that calls this method is the {@code KvmFileBasedStorageVmSnapshotStrategy}. + * */ + default void prepareVmForSnapshotRevert(VMSnapshot vmSnapshot, VirtualMachine virtualMachine) { + } +} \ No newline at end of file diff --git a/api/src/main/java/org/apache/cloudstack/backup/NativeBackupService.java b/api/src/main/java/org/apache/cloudstack/backup/NativeBackupService.java new file mode 100644 index 000000000000..92a5a368193e --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/backup/NativeBackupService.java @@ -0,0 +1,42 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.to.DataTO; +import com.cloud.storage.Volume; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.snapshot.VMSnapshot; + +public interface NativeBackupService { + + void configureChainInfo(DataTO volumeTo, Command cmd); + + void cleanupBackupMetadata(long volumeId); + + void prepareVolumeForDetach(Volume volume, VirtualMachine virtualMachine); + + void prepareVolumeForMigration(Volume volume); + + void prepareVmForSnapshotRevert(VMSnapshot vmSnapshot); + + void updateVolumeId(long oldVolumeId, long newVolumeId); + + boolean startBackupCompression(long backupId, long hostId, long zoneId); + + boolean finalizeBackupCompression(long backupId, long hostId, long zoneId); +} diff --git a/api/src/main/java/org/apache/cloudstack/query/QueryService.java b/api/src/main/java/org/apache/cloudstack/query/QueryService.java index 5cd67ffe9bad..80f0251f0d23 100644 --- a/api/src/main/java/org/apache/cloudstack/query/QueryService.java +++ b/api/src/main/java/org/apache/cloudstack/query/QueryService.java @@ -41,6 +41,8 @@ import org.apache.cloudstack.api.command.user.account.ListProjectAccountsCmd; import org.apache.cloudstack.api.command.user.address.ListQuarantinedIpsCmd; import org.apache.cloudstack.api.command.user.affinitygroup.ListAffinityGroupsCmd; +import 
org.apache.cloudstack.api.command.user.backup.ListBackupCompressionJobsCmd; +import org.apache.cloudstack.api.command.user.backup.nativeoffering.ListNativeBackupOfferingsCmd; import org.apache.cloudstack.api.command.user.bucket.ListBucketsCmd; import org.apache.cloudstack.api.command.user.event.ListEventsCmd; import org.apache.cloudstack.api.command.user.iso.ListIsosCmd; @@ -63,6 +65,7 @@ import org.apache.cloudstack.api.command.user.zone.ListZonesCmd; import org.apache.cloudstack.api.response.AccountResponse; import org.apache.cloudstack.api.response.AsyncJobResponse; +import org.apache.cloudstack.api.response.BackupCompressionJobResponse; import org.apache.cloudstack.api.response.BucketResponse; import org.apache.cloudstack.api.response.DetailOptionsResponse; import org.apache.cloudstack.api.response.DiskOfferingResponse; @@ -76,6 +79,7 @@ import org.apache.cloudstack.api.response.IpQuarantineResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ManagementServerResponse; +import org.apache.cloudstack.api.response.NativeBackupOfferingResponse; import org.apache.cloudstack.api.response.ObjectStoreResponse; import org.apache.cloudstack.api.response.ProjectAccountResponse; import org.apache.cloudstack.api.response.ProjectInvitationResponse; @@ -222,4 +226,8 @@ public interface QueryService { ListResponse searchForObjectStores(ListObjectStoragePoolsCmd listObjectStoragePoolsCmd); ListResponse searchForBuckets(ListBucketsCmd listBucketsCmd); + + ListResponse listNativeBackupOfferings(ListNativeBackupOfferingsCmd cmd); + + ListResponse listBackupCompressionJobs(ListBackupCompressionJobsCmd cmd); } diff --git a/api/src/main/java/org/apache/cloudstack/secstorage/heuristics/HeuristicType.java b/api/src/main/java/org/apache/cloudstack/secstorage/heuristics/HeuristicType.java index f23e4b0b633b..489e9bf54f23 100644 --- a/api/src/main/java/org/apache/cloudstack/secstorage/heuristics/HeuristicType.java +++ 
b/api/src/main/java/org/apache/cloudstack/secstorage/heuristics/HeuristicType.java @@ -18,8 +18,8 @@ /** * The type of the heuristic used in the allocation process of secondary storage resources. - * Valid options are: {@link #ISO}, {@link #SNAPSHOT}, {@link #TEMPLATE} and {@link #VOLUME} + * Valid options are: {@link #ISO}, {@link #SNAPSHOT}, {@link #TEMPLATE}, {@link #VOLUME} and {@link #BACKUP} */ public enum HeuristicType { - ISO, SNAPSHOT, TEMPLATE, VOLUME + ISO, SNAPSHOT, TEMPLATE, VOLUME, BACKUP } diff --git a/client/pom.xml b/client/pom.xml index b8dffe65d4fb..43052ee6ecc1 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -612,6 +612,11 @@ cloud-plugin-backup-nas ${project.version} + + org.apache.cloudstack + cloud-plugin-backup-kvm-native-incremental-backup + ${project.version} + org.apache.cloudstack cloud-plugin-integrations-kubernetes-service diff --git a/core/src/main/java/com/cloud/agent/api/MigrateBackupsBetweenSecondaryStoragesCommand.java b/core/src/main/java/com/cloud/agent/api/MigrateBackupsBetweenSecondaryStoragesCommand.java new file mode 100644 index 000000000000..ed9c7c526f12 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/MigrateBackupsBetweenSecondaryStoragesCommand.java @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.agent.api; + +import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.agent.api.to.DataTO; + +import java.util.List; + +public class MigrateBackupsBetweenSecondaryStoragesCommand extends MigrateBetweenSecondaryStoragesCommand { + + List> backupChain; + + public MigrateBackupsBetweenSecondaryStoragesCommand() { + } + + public MigrateBackupsBetweenSecondaryStoragesCommand(List> backupChain, DataStoreTO srcDataStore, DataStoreTO destDataStore) { + super(srcDataStore, destDataStore); + this.backupChain = backupChain; + } + + public List> getBackupChain() { + return backupChain; + } +} \ No newline at end of file diff --git a/core/src/main/java/com/cloud/agent/api/MigrateBetweenSecondaryStoragesCommand.java b/core/src/main/java/com/cloud/agent/api/MigrateBetweenSecondaryStoragesCommand.java new file mode 100644 index 000000000000..e2bbd15bd986 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/MigrateBetweenSecondaryStoragesCommand.java @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.agent.api; + +import com.cloud.agent.api.to.DataStoreTO; + +public abstract class MigrateBetweenSecondaryStoragesCommand extends Command { + + DataStoreTO srcDataStore; + DataStoreTO destDataStore; + + public MigrateBetweenSecondaryStoragesCommand() { + } + + public MigrateBetweenSecondaryStoragesCommand(DataStoreTO srcDataStore, DataStoreTO destDataStore) { + this.srcDataStore = srcDataStore; + this.destDataStore = destDataStore; + } + + @Override + public boolean executeInSequence() { + return false; + } + + public DataStoreTO getSrcDataStore() { + return srcDataStore; + } + + public DataStoreTO getDestDataStore() { + return destDataStore; + } +} \ No newline at end of file diff --git a/core/src/main/java/com/cloud/agent/api/MigrateBetweenSecondaryStoragesCommandAnswer.java b/core/src/main/java/com/cloud/agent/api/MigrateBetweenSecondaryStoragesCommandAnswer.java new file mode 100644 index 000000000000..dda8a52df5ad --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/MigrateBetweenSecondaryStoragesCommandAnswer.java @@ -0,0 +1,41 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.agent.api; + +import com.cloud.utils.Pair; + +import java.util.List; + +public class MigrateBetweenSecondaryStoragesCommandAnswer extends Answer { + + List> migratedResourcesIdAndCheckpointPath; + + public MigrateBetweenSecondaryStoragesCommandAnswer() { + } + + public MigrateBetweenSecondaryStoragesCommandAnswer(MigrateBetweenSecondaryStoragesCommand cmd, boolean success, String result, List> migratedResourcesIdAndCheckpointPath) { + super(cmd, success, result); + this.migratedResourcesIdAndCheckpointPath = migratedResourcesIdAndCheckpointPath; + } + + public List> getMigratedResources() { + return migratedResourcesIdAndCheckpointPath; + } +} \ No newline at end of file diff --git a/core/src/main/java/com/cloud/agent/api/storage/MergeDiskOnlyVmSnapshotCommand.java b/core/src/main/java/com/cloud/agent/api/storage/MergeDiskOnlyVmSnapshotCommand.java index b6396c24d10a..1a47d97d5e25 100644 --- a/core/src/main/java/com/cloud/agent/api/storage/MergeDiskOnlyVmSnapshotCommand.java +++ b/core/src/main/java/com/cloud/agent/api/storage/MergeDiskOnlyVmSnapshotCommand.java @@ -19,28 +19,28 @@ package com.cloud.agent.api.storage; import com.cloud.agent.api.Command; -import com.cloud.vm.VirtualMachine; +import org.apache.cloudstack.storage.to.DeltaMergeTreeTO; import java.util.List; public class MergeDiskOnlyVmSnapshotCommand extends Command { - private List snapshotMergeTreeToList; - private VirtualMachine.State vmState; + private List snapshotMergeTreeToList; + private boolean isVmRunning; private String vmName; - public MergeDiskOnlyVmSnapshotCommand(List snapshotMergeTreeToList, VirtualMachine.State vmState, String vmName) { + public MergeDiskOnlyVmSnapshotCommand(List snapshotMergeTreeToList, boolean isVmRunning, String vmName) { this.snapshotMergeTreeToList = snapshotMergeTreeToList; - this.vmState = vmState; + this.isVmRunning = isVmRunning; this.vmName = vmName; } - public List getSnapshotMergeTreeToList() { + public List 
getDeltaMergeTreeToList() { return snapshotMergeTreeToList; } - public VirtualMachine.State getVmState() { - return vmState; + public boolean isVmRunning() { + return isVmRunning; } public String getVmName() { diff --git a/core/src/main/java/com/cloud/storage/resource/StorageProcessor.java b/core/src/main/java/com/cloud/storage/resource/StorageProcessor.java index dd8e2abcd643..31c384eab3a0 100644 --- a/core/src/main/java/com/cloud/storage/resource/StorageProcessor.java +++ b/core/src/main/java/com/cloud/storage/resource/StorageProcessor.java @@ -85,4 +85,8 @@ public interface StorageProcessor { public Answer checkDataStoreStoragePolicyCompliance(CheckDataStoreStoragePolicyComplianceCommand cmd); public Answer syncVolumePath(SyncVolumePathCommand cmd); + + default Answer deleteBackup(DeleteCommand cmd) { + return new Answer(cmd, false, "Operation not implemented"); + } } diff --git a/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java b/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java index 318c069b0b0b..3d2608c0c4b8 100644 --- a/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java +++ b/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java @@ -154,6 +154,8 @@ protected Answer execute(DeleteCommand cmd) { answer = processor.deleteVolume(cmd); } else if (data.getObjectType() == DataObjectType.SNAPSHOT) { answer = processor.deleteSnapshot(cmd); + } else if (data.getObjectType() == DataObjectType.BACKUP) { + answer = processor.deleteBackup(cmd); } else { answer = new Answer(cmd, false, "unsupported type"); } diff --git a/core/src/main/java/org/apache/cloudstack/backup/CompressBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/CompressBackupCommand.java new file mode 100644 index 000000000000..a551f246a0bb --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/CompressBackupCommand.java @@ -0,0 +1,78 @@ 
+// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import org.apache.cloudstack.storage.to.DeltaMergeTreeTO; + +import java.util.List; + +public class CompressBackupCommand extends Command { + + private List backupDeltasToCompress; + + private List backupChainImageStoreUrls; + + private long minFreeStorage; + + private Backup.CompressionLibrary compressionLib; + + private int coroutines; + + private int rateLimit; + + public CompressBackupCommand(List backupDeltasToCompress, List backupChainImageStoreUrls, long minFreeStorage, Backup.CompressionLibrary compressionLib, int coroutines, int rateLimit) { + this.backupChainImageStoreUrls = backupChainImageStoreUrls; + this.backupDeltasToCompress = backupDeltasToCompress; + this.minFreeStorage = minFreeStorage; + this.compressionLib = compressionLib; + this.coroutines = coroutines; + this.rateLimit = rateLimit; + } + + public List getBackupDeltasToCompress() { + return backupDeltasToCompress; + } + + public List getBackupChainImageStoreUrls() { + return backupChainImageStoreUrls; + } + + public long getMinFreeStorage() { + return minFreeStorage; + } + + public Backup.CompressionLibrary 
getCompressionLib() { + return compressionLib; + } + + public int getCoroutines() { + return coroutines; + } + + public int getRateLimit() { + return rateLimit; + } + + @Override + public boolean executeInSequence() { + return false; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/ConsolidateVolumesAnswer.java b/core/src/main/java/org/apache/cloudstack/backup/ConsolidateVolumesAnswer.java new file mode 100644 index 000000000000..9f230deb8d75 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/ConsolidateVolumesAnswer.java @@ -0,0 +1,37 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; +import org.apache.cloudstack.storage.to.VolumeObjectTO; + +import java.util.List; + +public class ConsolidateVolumesAnswer extends Answer { + + private List successfullyConsolidatedVolumes; + + public ConsolidateVolumesAnswer(Command command, boolean success, String details, List successfullyConsolidatedVolumes) { + super(command, success, details); + this.successfullyConsolidatedVolumes = successfullyConsolidatedVolumes; + } + + public List getSuccessfullyConsolidatedVolumes() { + return successfullyConsolidatedVolumes; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/ConsolidateVolumesCommand.java b/core/src/main/java/org/apache/cloudstack/backup/ConsolidateVolumesCommand.java new file mode 100644 index 000000000000..7b2bc2245939 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/ConsolidateVolumesCommand.java @@ -0,0 +1,56 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.to.VolumeObjectTO; + +import java.util.List; +import java.util.stream.Collectors; + +public class ConsolidateVolumesCommand extends Command { + + private List volumesToConsolidate; + + private List secondaryStorageUuids; + + private String vmName; + + public ConsolidateVolumesCommand(List volumesToConsolidate, List secondaryStorageUuids, String vmName) { + this.volumesToConsolidate = volumesToConsolidate.stream().map(vol -> (VolumeObjectTO)vol.getTO()).collect(Collectors.toList()); + this.secondaryStorageUuids = secondaryStorageUuids; + this.vmName = vmName; + } + + public List getVolumesToConsolidate() { + return volumesToConsolidate; + } + + public List getSecondaryStorageUuids() { + return secondaryStorageUuids; + } + + public String getVmName() { + return vmName; + } + + @Override + public boolean executeInSequence() { + return false; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/FinalizeBackupCompressionCommand.java b/core/src/main/java/org/apache/cloudstack/backup/FinalizeBackupCompressionCommand.java new file mode 100644 index 000000000000..a4cbb69611f7 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/FinalizeBackupCompressionCommand.java @@ -0,0 +1,49 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import org.apache.cloudstack.storage.to.BackupDeltaTO; + +import java.util.List; + +public class FinalizeBackupCompressionCommand extends Command { + private boolean cleanup; + + private List backupDeltaTO; + + public FinalizeBackupCompressionCommand(boolean cleanup, List backupDeltaTO) { + this.cleanup = cleanup; + this.backupDeltaTO = backupDeltaTO; + } + + public boolean isCleanup() { + return cleanup; + } + + public List getBackupDeltaTOList() { + return backupDeltaTO; + } + + @Override + public boolean executeInSequence() { + return false; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/RestoreKnibBackupAnswer.java b/core/src/main/java/org/apache/cloudstack/backup/RestoreKnibBackupAnswer.java new file mode 100644 index 000000000000..889d7c2d5260 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/RestoreKnibBackupAnswer.java @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; + +import java.util.Set; + +public class RestoreKnibBackupAnswer extends Answer { + + private Set secondaryStorageUuids; + + public RestoreKnibBackupAnswer(Command command, Set secondaryStorageUuids) { + super(command); + this.secondaryStorageUuids = secondaryStorageUuids; + } + + public RestoreKnibBackupAnswer(Command command, Exception e, Set secondaryStorageUuids) { + super(command, e); + this.secondaryStorageUuids = secondaryStorageUuids; + } + + public Set getSecondaryStorageUuids() { + return secondaryStorageUuids; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/RestoreKnibBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/RestoreKnibBackupCommand.java new file mode 100644 index 000000000000..19fc64a2ecc0 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/RestoreKnibBackupCommand.java @@ -0,0 +1,66 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import com.cloud.utils.Pair; +import org.apache.cloudstack.storage.to.BackupDeltaTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; + +import java.util.Set; + +public class RestoreKnibBackupCommand extends Command { + + private Set deltasToRemove; + + private Set> backupAndVolumePairs; + + private Set secondaryStorageUrls; + + private boolean quickRestore; + + public RestoreKnibBackupCommand(Set deltasToRemove, Set> backupAndVolumePairs, Set secondaryStorageUrls, + boolean quickRestore) { + this.deltasToRemove = deltasToRemove; + this.backupAndVolumePairs = backupAndVolumePairs; + this.secondaryStorageUrls = secondaryStorageUrls; + this.quickRestore = quickRestore; + } + + @Override + public boolean executeInSequence() { + return false; + } + + public Set getDeltasToRemove() { + return deltasToRemove; + } + + public Set> getBackupAndVolumePairs() { + return backupAndVolumePairs; + } + + public Set getSecondaryStorageUrls() { + return secondaryStorageUrls; + } + + public boolean isQuickRestore() { + return quickRestore; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/backup/TakeKnibBackupAnswer.java b/core/src/main/java/org/apache/cloudstack/backup/TakeKnibBackupAnswer.java new file mode 100644 index 000000000000..e413814b75ac --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/TakeKnibBackupAnswer.java @@ -0,0 +1,59 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.BackupException; + +import java.util.Map; + +public class TakeKnibBackupAnswer extends Answer { + + private Map mapVolumeUuidToNewVolumePath; + private Map> mapVolumeUuidToDeltaPathOnSecondaryAndSize; + private boolean isVmConsistent = true; + + public TakeKnibBackupAnswer(Command command, boolean success, Map mapVolumeUuidToNewVolumePath, + Map> mapVolumeUuidToDeltaPathOnSecondaryAndSize) { + super(command, success, null); + this.mapVolumeUuidToNewVolumePath = mapVolumeUuidToNewVolumePath; + this.mapVolumeUuidToDeltaPathOnSecondaryAndSize = mapVolumeUuidToDeltaPathOnSecondaryAndSize; + } + + public TakeKnibBackupAnswer(Command command, Exception e) { + super(command, e); + if (e instanceof BackupException) { + this.isVmConsistent = ((BackupException)e).isVmConsistent(); + } + } + + public Map getMapVolumeUuidToNewVolumePath() { + return mapVolumeUuidToNewVolumePath; + } + + public Map> getMapVolumeUuidToDeltaPathOnSecondaryAndSize() { + return mapVolumeUuidToDeltaPathOnSecondaryAndSize; + } + + public boolean isVmConsistent() { + return isVmConsistent; + } +} diff --git 
a/core/src/main/java/org/apache/cloudstack/backup/TakeKnibBackupCommand.java b/core/src/main/java/org/apache/cloudstack/backup/TakeKnibBackupCommand.java new file mode 100644 index 000000000000..136799057eba --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/backup/TakeKnibBackupCommand.java @@ -0,0 +1,92 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import org.apache.cloudstack.storage.to.KnibTO; + +import java.util.List; + +public class TakeKnibBackupCommand extends Command { + + private boolean quiesceVm; + + private boolean runningVM; + + private boolean endChain; + + private String vmName; + + private String imageStoreUrl; + + private List backupChainImageStoreUrls; + + private List knibTOs; + + private boolean isolated; + + public TakeKnibBackupCommand(boolean quiesceVm, boolean runningVM, boolean endChain, String vmName, String imageStoreUrl, List backupChainImageStoreUrls, List knibTOs, boolean isolated) { + this.quiesceVm = quiesceVm; + this.runningVM = runningVM; + this.endChain = endChain; + this.vmName = vmName; + this.imageStoreUrl = imageStoreUrl; + this.backupChainImageStoreUrls = backupChainImageStoreUrls; + this.knibTOs = knibTOs; + this.isolated = isolated; + } + + public boolean isQuiesceVm() { + return quiesceVm; + } + + public boolean isRunningVM() { + return runningVM; + } + + public boolean isEndChain() { + return endChain; + } + + public String getVmName() { + return vmName; + } + + public String getImageStoreUrl() { + return imageStoreUrl; + } + + public List getBackupChainImageStoreUrls() { + return backupChainImageStoreUrls; + } + + public List getKnibTOs() { + return knibTOs; + } + + public boolean isIsolated() { + return isolated; + } + + @Override + public boolean executeInSequence() { + return false; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/storage/command/BackupDeleteAnswer.java b/core/src/main/java/org/apache/cloudstack/storage/command/BackupDeleteAnswer.java new file mode 100644 index 000000000000..6cc48ea296d1 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/storage/command/BackupDeleteAnswer.java @@ -0,0 +1,36 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// +package org.apache.cloudstack.storage.command; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; + +public class BackupDeleteAnswer extends Answer { + + private long backupId; + + public BackupDeleteAnswer(Command command, boolean success, String details) { + super(command, success, details); + backupId = ((DeleteCommand) command).getData().getId(); + } + + public long getBackupId() { + return backupId; + } +} diff --git a/core/src/main/java/org/apache/cloudstack/storage/command/DeleteCommand.java b/core/src/main/java/org/apache/cloudstack/storage/command/DeleteCommand.java index 6f82fa97818d..9aa6f26b5d9c 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/command/DeleteCommand.java +++ b/core/src/main/java/org/apache/cloudstack/storage/command/DeleteCommand.java @@ -24,6 +24,8 @@ public final class DeleteCommand extends StorageSubSystemCommand { private DataTO data; + private boolean deleteChain; + public DeleteCommand(final DataTO data) { super(); this.data = data; @@ -42,6 +44,14 @@ public DataTO getData() { return data; } + public void setDeleteChain(boolean deleteChain) { + this.deleteChain = deleteChain; + } + + public boolean isDeleteChain() { + return deleteChain; + } + @Override public void setExecuteInSequence(final boolean 
inSeq) { diff --git a/core/src/main/java/org/apache/cloudstack/storage/command/RevertSnapshotCommand.java b/core/src/main/java/org/apache/cloudstack/storage/command/RevertSnapshotCommand.java index 174302252a55..42926d37cdfe 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/command/RevertSnapshotCommand.java +++ b/core/src/main/java/org/apache/cloudstack/storage/command/RevertSnapshotCommand.java @@ -25,6 +25,8 @@ public final class RevertSnapshotCommand extends StorageSubSystemCommand { private SnapshotObjectTO dataOnPrimaryStorage; private boolean _executeInSequence = false; + private boolean deleteChain; + public RevertSnapshotCommand(SnapshotObjectTO data, SnapshotObjectTO dataOnPrimaryStorage) { super(); this.data = data; @@ -43,6 +45,14 @@ public SnapshotObjectTO getDataOnPrimaryStorage() { return dataOnPrimaryStorage; } + public boolean isDeleteChain() { + return deleteChain; + } + + public void setDeleteChain(boolean deleteChain) { + this.deleteChain = deleteChain; + } + @Override public void setExecuteInSequence(final boolean executeInSequence) { _executeInSequence = executeInSequence; diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/BackupDeltaTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/BackupDeltaTO.java new file mode 100644 index 000000000000..20f60556fc2e --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/storage/to/BackupDeltaTO.java @@ -0,0 +1,87 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.storage.to; + +import com.cloud.agent.api.to.DataObjectType; +import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.agent.api.to.DataTO; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.storage.Storage; +import org.apache.commons.lang3.builder.ReflectionToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; + +public class BackupDeltaTO implements DataTO { + private DataStoreTO dataStoreTO; + + private Hypervisor.HypervisorType hypervisorType; + + private String path; + + private Storage.ImageFormat format; + + // When set, represents the Backup ID, not the delta ID. 
+ private long id = 0; + + public BackupDeltaTO(DataStoreTO dataStoreTO, Hypervisor.HypervisorType hypervisorType, String path) { + this.dataStoreTO = dataStoreTO; + this.hypervisorType = hypervisorType; + this.path = path; + this.format = Storage.ImageFormat.QCOW2; + } + + public BackupDeltaTO(long id, DataStoreTO dataStoreTO, Hypervisor.HypervisorType hypervisorType, String path) { + this(dataStoreTO, hypervisorType, path); + this.id = id; + } + + @Override + public DataObjectType getObjectType() { + return DataObjectType.BACKUP; + } + + @Override + public DataStoreTO getDataStore() { + return dataStoreTO; + } + + @Override + public Hypervisor.HypervisorType getHypervisorType() { + return hypervisorType; + } + + @Override + public String getPath() { + return path; + } + + @Override + public long getId() { + return id; + } + + public void setId(long id) { + this.id = id; + } + + public Storage.ImageFormat getFormat() { + return this.format; + } + @Override + public String toString() { + return new ReflectionToStringBuilder(this, ToStringStyle.JSON_STYLE).setExcludeFieldNames("id").toString(); + } +} diff --git a/core/src/main/java/com/cloud/agent/api/storage/SnapshotMergeTreeTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/DeltaMergeTreeTO.java similarity index 74% rename from core/src/main/java/com/cloud/agent/api/storage/SnapshotMergeTreeTO.java rename to core/src/main/java/org/apache/cloudstack/storage/to/DeltaMergeTreeTO.java index 78f23105e192..7d316c9f78c3 100644 --- a/core/src/main/java/com/cloud/agent/api/storage/SnapshotMergeTreeTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/DeltaMergeTreeTO.java @@ -16,24 +16,32 @@ * specific language governing permissions and limitations * under the License. 
*/ -package com.cloud.agent.api.storage; +package org.apache.cloudstack.storage.to; import com.cloud.agent.api.to.DataTO; import org.apache.commons.lang3.builder.ReflectionToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; import java.util.List; -public class SnapshotMergeTreeTO { +public class DeltaMergeTreeTO { + + VolumeObjectTO volumeObjectTO; DataTO parent; DataTO child; List grandChildren; - public SnapshotMergeTreeTO(DataTO parent, DataTO child, List grandChildren) { + public DeltaMergeTreeTO(VolumeObjectTO volumeObjectTO, DataTO parent, DataTO child, List grandChildren) { + this.volumeObjectTO = volumeObjectTO; this.parent = parent; this.child = child; this.grandChildren = grandChildren; } + public VolumeObjectTO getVolumeObjectTO() { + return volumeObjectTO; + } + public DataTO getParent() { return parent; } @@ -52,6 +60,6 @@ public void addGrandChild(DataTO grandChild) { @Override public String toString() { - return ReflectionToStringBuilder.toString(this); + return ReflectionToStringBuilder.toString(this, ToStringStyle.JSON_STYLE); } } diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/KnibTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/KnibTO.java new file mode 100644 index 000000000000..a589c1c23037 --- /dev/null +++ b/core/src/main/java/org/apache/cloudstack/storage/to/KnibTO.java @@ -0,0 +1,67 @@ +package org.apache.cloudstack.storage.to; +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.commons.lang3.builder.ReflectionToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; + +import java.util.List; +import java.util.stream.Collectors; + +public class KnibTO { + + private String pathBackupParentOnSecondary; + private VolumeObjectTO volumeObjectTO; + + private DeltaMergeTreeTO deltaMergeTreeTO; + + List vmSnapshotDeltaPaths; + + public KnibTO(VolumeObjectTO volumeObjectTO, List snapshotDataStoreVOs) { + this.volumeObjectTO = volumeObjectTO; + this.vmSnapshotDeltaPaths = snapshotDataStoreVOs.stream().map(SnapshotDataStoreVO::getInstallPath).collect(Collectors.toList()); + } + + public String getPathBackupParentOnSecondary() { + return pathBackupParentOnSecondary; + } + + public VolumeObjectTO getVolumeObjectTO() { + return volumeObjectTO; + } + + public DeltaMergeTreeTO getDeltaMergeTreeTO() { + return deltaMergeTreeTO; + } + + public List getVmSnapshotDeltaPaths() { + return vmSnapshotDeltaPaths; + } + + public void setPathBackupParentOnSecondary(String pathBackupParentOnSecondary) { + this.pathBackupParentOnSecondary = pathBackupParentOnSecondary; + } + + public void setDeltaMergeTreeTO(DeltaMergeTreeTO deltaMergeTreeTO) { + this.deltaMergeTreeTO = deltaMergeTreeTO; + } + + @Override + public String toString() { + return ReflectionToStringBuilder.toString(this, ToStringStyle.JSON_STYLE); + } +} diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java 
b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java index 827403ac5ef8..df98149faab6 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java @@ -32,11 +32,12 @@ import com.cloud.storage.Volume; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; +import java.io.Serializable; import java.util.Arrays; import java.util.List; import java.util.Set; -public class VolumeObjectTO extends DownloadableObjectTO implements DataTO { +public class VolumeObjectTO extends DownloadableObjectTO implements DataTO, Serializable { private String uuid; private Volume.Type volumeType; private DataStoreTO dataStore; diff --git a/core/src/main/resources/META-INF/cloudstack/backup/spring-core-lifecycle-backup-context-inheritable.xml b/core/src/main/resources/META-INF/cloudstack/backup/spring-core-lifecycle-backup-context-inheritable.xml index 175d45e26752..790bbd83e805 100644 --- a/core/src/main/resources/META-INF/cloudstack/backup/spring-core-lifecycle-backup-context-inheritable.xml +++ b/core/src/main/resources/META-INF/cloudstack/backup/spring-core-lifecycle-backup-context-inheritable.xml @@ -29,4 +29,9 @@ + + + + + diff --git a/core/src/main/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml b/core/src/main/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml index 01c568d78916..be5aeeffcf19 100644 --- a/core/src/main/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml +++ b/core/src/main/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml @@ -339,6 +339,10 @@ class="org.apache.cloudstack.spring.lifecycle.registry.ExtensionRegistry"> + + + diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java 
b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java index 6f8c46304567..54918b2a1944 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java @@ -122,7 +122,7 @@ VolumeInfo moveVolume(VolumeInfo volume, long destPoolDcId, Long destPoolPodId, DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Long size, Long minIops, Long maxIops, VirtualMachine vm, VirtualMachineTemplate template, Account owner, Long deviceId); - VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo volume, HypervisorType rootDiskHyperType, StoragePool storagePool) throws NoTransitionException; + VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo volume, HypervisorType rootDiskHyperType, StoragePool storagePool, Long clusterId, Long podId) throws NoTransitionException; void release(VirtualMachineProfile profile); diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 3c62738f9ed5..080373b59f59 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -233,6 +233,9 @@ public interface StorageManager extends StorageService { "while adding a new Secondary Storage. 
If the copy operation fails, the system falls back to downloading the template from the source URL.", true, ConfigKey.Scope.Zone, null); + ConfigKey AgentMaxDataMigrationWaitTime = new ConfigKey<>("Advanced", Integer.class, "agent.max.data.migration.wait.time", "3600", + "The maximum time (in seconds) that the secondary storage data migration command sent to the KVM Agent will be executed before a timeout occurs.", true, ConfigKey.Scope.Cluster); + /** * should we execute in sequence not involving any storages? * @return true if commands should execute in sequence diff --git a/engine/components-api/src/main/java/com/cloud/vm/VmWorkDeleteBackup.java b/engine/components-api/src/main/java/com/cloud/vm/VmWorkDeleteBackup.java new file mode 100644 index 000000000000..b9d2907ef780 --- /dev/null +++ b/engine/components-api/src/main/java/com/cloud/vm/VmWorkDeleteBackup.java @@ -0,0 +1,38 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.vm; + +public class VmWorkDeleteBackup extends VmWork { + + private long backupId; + + private boolean forced; + + public VmWorkDeleteBackup(long userId, long accountId, long vmId, String handlerName, long backupId, boolean forced) { + super(userId, accountId, vmId, handlerName); + this.backupId = backupId; + this.forced = forced; + } + + public long getBackupId() { + return backupId; + } + + public boolean isForced() { + return forced; + } +} diff --git a/engine/components-api/src/main/java/com/cloud/vm/VmWorkRestoreBackup.java b/engine/components-api/src/main/java/com/cloud/vm/VmWorkRestoreBackup.java new file mode 100644 index 000000000000..421430cfbe9d --- /dev/null +++ b/engine/components-api/src/main/java/com/cloud/vm/VmWorkRestoreBackup.java @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.vm; + +public class VmWorkRestoreBackup extends VmWork { + + private long backupId; + + private boolean quickRestore; + + private Long hostId; + + public VmWorkRestoreBackup(long userId, long accountId, long vmId, String handlerName, long backupId, boolean quickRestore, Long hostId) { + super(userId, accountId, vmId, handlerName); + this.backupId = backupId; + this.quickRestore = quickRestore; + this.hostId = hostId; + } + + public long getBackupId() { + return backupId; + } + + public boolean isQuickRestore() { + return quickRestore; + } + + public Long getHostId() { + return hostId; + } +} diff --git a/engine/components-api/src/main/java/com/cloud/vm/VmWorkRestoreVolumeBackupAndAttach.java b/engine/components-api/src/main/java/com/cloud/vm/VmWorkRestoreVolumeBackupAndAttach.java new file mode 100644 index 000000000000..f50d94abaa89 --- /dev/null +++ b/engine/components-api/src/main/java/com/cloud/vm/VmWorkRestoreVolumeBackupAndAttach.java @@ -0,0 +1,55 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.vm; + +import org.apache.cloudstack.backup.Backup; + +public class VmWorkRestoreVolumeBackupAndAttach extends VmWork { + + private long backupId; + + private Backup.VolumeInfo backupVolumeInfo; + + private String hostIp; + + private boolean quickRestore; + + public VmWorkRestoreVolumeBackupAndAttach(long userId, long accountId, long vmId, String handlerName, long backupId, Backup.VolumeInfo backupVolumeInfo, + String hostIp, boolean quickRestore) { + super(userId, accountId, vmId, handlerName); + this.backupId = backupId; + this.backupVolumeInfo = backupVolumeInfo; + this.hostIp = hostIp; + this.quickRestore = quickRestore; + } + + public long getBackupId() { + return backupId; + } + + public Backup.VolumeInfo getBackupVolumeInfo() { + return backupVolumeInfo; + } + + public String getHostIp() { + return hostIp; + } + + public boolean isQuickRestore() { + return quickRestore; + } +} \ No newline at end of file diff --git a/engine/components-api/src/main/java/com/cloud/vm/VmWorkTakeBackup.java b/engine/components-api/src/main/java/com/cloud/vm/VmWorkTakeBackup.java new file mode 100644 index 000000000000..57367d368b86 --- /dev/null +++ b/engine/components-api/src/main/java/com/cloud/vm/VmWorkTakeBackup.java @@ -0,0 +1,50 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.vm; + +public class VmWorkTakeBackup extends VmWork { + + private long backupId; + + private boolean quiesceVm; + + private boolean isolated; + + public VmWorkTakeBackup(long userId, long accountId, long vmId, long backupId, String handlerName, boolean quiesceVm, boolean isolated) { + super(userId, accountId, vmId, handlerName); + this.quiesceVm = quiesceVm; + this.backupId = backupId; + this.isolated = isolated; + } + + public boolean isQuiesceVm() { + return quiesceVm; + } + + public long getBackupId() { + return backupId; + } + + public boolean isIsolated() { + return isolated; + } + + @Override + public String toString() { + return super.toStringAfterRemoveParams(null, null); + } +} diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index e8796fb02529..c1684267b583 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -489,7 +489,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac static final ConfigKey ClusterVMMetaDataSyncInterval = new ConfigKey("Advanced", Integer.class, "vmmetadata.sync.interval", "180", "Cluster VM metadata sync interval in seconds", false); - static final ConfigKey VmJobCheckInterval = new ConfigKey("Advanced", + public static final ConfigKey VmJobCheckInterval = new ConfigKey("Advanced", Long.class, "vm.job.check.interval", "3000", "Interval in milliseconds to check if the job is complete", false); static final ConfigKey VmJobTimeout = new ConfigKey("Advanced", @@ -1673,6 +1673,7 @@ public void orchestrateStart(final String vmUuid, final Map templates, List snapshots, List volumes) { + public boolean filesReadyToMigrate(Long 
srcDataStoreId, List templates, List snapshots, List volumes, List backups) { State[] validStates = {State.Ready, State.Allocated, State.Destroying, State.Destroyed, State.Failed}; boolean isReady = true; for (TemplateDataStoreVO template : templates) { @@ -109,14 +117,48 @@ public boolean filesReadyToMigrate(Long srcDataStoreId, List backups) { + List invalidBackupStates = Arrays.asList(Backup.Status.BackingUp, Backup.Status.Restoring); + List invalidBackupCompressionStatus = Arrays.asList(Backup.CompressionStatus.Compressing, Backup.CompressionStatus.FinalizingCompression); + + List> backupChains; + Set backupIdsAlreadyInChain = new HashSet<>(); + + for (NativeBackupJoinVO backup : backups) { + if (backup.getStatus() == Backup.Status.BackedUp && !backupIdsAlreadyInChain.contains(backup.getId())) { + backupChains = createBackupChain(backup); + backupChains.forEach(list -> backupIdsAlreadyInChain.add(list.stream().map(BackupObject::getId).findFirst().get())); + + for (List backupVolumeChain : backupChains) { + BackupObject backupObject = backupVolumeChain.get(0); + + if (invalidBackupStates.contains(backupObject.getStatus())) { + logger.debug("Migration is not possible because backup {} is in {} state.", backupObject.getUuid(), backupObject.getStatus()); + return false; + } + + if (invalidBackupCompressionStatus.contains(backupObject.getCompressionStatus())) { + logger.debug("Migration is not possible because backup {} is currently being compressed. 
Current compression status: {}.", backupObject.getUuid(), backupObject.getCompressionStatus()); + return false; + } + } + } + } + + return true; + } + private boolean filesReadyToMigrate(Long srcDataStoreId) { List templates = templateDataStoreDao.listByStoreId(srcDataStoreId); List snapshots = snapshotDataStoreDao.listByStoreId(srcDataStoreId, DataStoreRole.Image); List volumes = volumeDataStoreDao.listByStoreId(srcDataStoreId); - return filesReadyToMigrate(srcDataStoreId, templates, snapshots, volumes); + List backups = nativeBackupJoinDao.listByImageStoreId(srcDataStoreId); + + return filesReadyToMigrate(srcDataStoreId, templates, snapshots, volumes, backups); } protected void checkIfCompleteMigrationPossible(ImageStoreService.MigrationPolicy policy, Long srcDataStoreId) { @@ -175,19 +217,58 @@ protected List getSortedValidSourcesList(DataStore srcDataStore, Map return files; } - protected List getSortedValidSourcesList(DataStore srcDataStore, Map, Long>> snapshotChains, - Map, Long>> childTemplates) { + Map, Long>> childTemplates, Map>, Long>> backupChains) { List files = new ArrayList<>(); files.addAll(getAllReadyTemplates(srcDataStore, childTemplates)); files.addAll(getAllReadySnapshotsAndChains(srcDataStore, snapshotChains)); files.addAll(getAllReadyVolumes(srcDataStore)); + files.addAll(getAllReadyBackupsAndChains(srcDataStore, backupChains)); files = sortFilesOnSize(files, snapshotChains); return files; } + protected List getAllReadyBackupsAndChains(DataStore srcDataStore, Map>, Long>> backupChains) { + List backups = nativeBackupJoinDao.listByImageStoreId(srcDataStore.getId()); + return getAllReadyBackupsAndChains(backupChains, backups); + } + + private List getAllReadyBackupsAndChains(Map>, Long>> backupsChains, List backups) { + Set backupIdsToMigrate = backups.stream().map(NativeBackupJoinVO::getId).collect(Collectors.toSet()); + List> backupChains; + Set backupIdsAlreadyInChain = new HashSet<>(); + List files = new LinkedList<>(); + + for 
(NativeBackupJoinVO backup : backups) { + long backupId = backup.getId(); + + if (backup.getStatus() == Backup.Status.BackedUp && !backupIdsAlreadyInChain.contains(backupId)) { + backupChains = createBackupChain(backup); + backupChains.forEach(list -> backupIdsAlreadyInChain.add(list.stream().map(BackupObject::getId).findFirst().get())); + BackupObject parent = backupChains.get(0).get(0); + files.add(parent); + backupsChains.put(parent, new Pair<>(backupChains, backupChains.stream().map(list -> getTotalChainSize(list.stream() + .filter(back -> backupIdsToMigrate.contains(back.getId())).collect(Collectors.toList())) + ).reduce(Long::sum).get())); + } + } + + return (List) (List) files; + } + + private List> createBackupChain(NativeBackupJoinVO backup) { + List> chain = new LinkedList<>(); + BackupObject backupObject = BackupObject.getBackupObject(backup); + + chain.addAll(backupObject.getParents(backup.getParentId())); + chain.add(nativeBackupJoinDao.listById(backup.getId()).stream().map(BackupObject::getBackupObject).collect(Collectors.toList())); + chain.addAll(backupObject.getChildren()); + + return chain; + } + protected List sortFilesOnSize(List files, Map, Long>> snapshotChains) { Collections.sort(files, new Comparator() { @Override diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index 933b4e0c5ce6..f5a27313dd3c 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -25,9 +25,13 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Random; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; import
java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -36,10 +40,22 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.MigrateBackupsBetweenSecondaryStoragesCommand; +import com.cloud.agent.api.MigrateBetweenSecondaryStoragesCommandAnswer; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.template.TemplateManager; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; import org.apache.cloudstack.api.response.MigrationResponse; +import org.apache.cloudstack.backup.BackupDetailVO; +import org.apache.cloudstack.backup.dao.BackupDetailsDao; import org.apache.cloudstack.engine.orchestration.service.StorageOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -57,6 +73,7 @@ import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.ImageStoreService.MigrationPolicy; +import org.apache.cloudstack.storage.backup.BackupObject; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; @@ -115,6 +132,12 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra TemplateDataFactory templateDataFactory; 
@Inject DataCenterDao dcDao; + @Inject + AgentManager agentManager; + @Inject + HostDao hostDao; + @Inject + BackupDetailsDao backupDetailDao; ConfigKey ImageStoreImbalanceThreshold = new ConfigKey<>("Advanced", Double.class, @@ -128,6 +151,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra private final Map zoneExecutorMap = new HashMap<>(); private final Map zonePendingWorkCountMap = new HashMap<>(); + private final Map zoneKvmIncrementalExecutorMap = new ConcurrentHashMap<>(); @Override public String getConfigComponentName() { @@ -171,7 +195,9 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto DataStore srcDatastore = dataStoreManager.getDataStore(srcDataStoreId, DataStoreRole.Image); Map, Long>> snapshotChains = new HashMap<>(); Map, Long>> childTemplates = new HashMap<>(); - files = migrationHelper.getSortedValidSourcesList(srcDatastore, snapshotChains, childTemplates); + Map>, Long>> backupChains = new HashMap<>(); + files = migrationHelper.getSortedValidSourcesList(srcDatastore, snapshotChains, childTemplates, backupChains); + if (files.isEmpty()) { return new MigrationResponse(String.format("No files in Image store: %s to migrate", srcDatastore), migrationPolicy.toString(), true); @@ -227,7 +253,7 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto } if (shouldMigrate(chosenFileForMigration, srcDatastore.getId(), destDatastoreId, storageCapacities, snapshotChains, childTemplates, migrationPolicy)) { - storageCapacities = migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, childTemplates, srcDatastore, destDatastoreId, futures); + storageCapacities = migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, childTemplates, backupChains, srcDatastore, destDatastoreId, futures); } else { if (migrationPolicy == MigrationPolicy.BALANCE) { continue; @@ -256,7 +282,7 @@ public MigrationResponse migrateResources(Long srcImgStoreId, Long 
destImgStoreI List templates = templateDataStoreDao.listByStoreIdAndTemplateIds(srcImgStoreId, templateIdList); List snapshots = snapshotDataStoreDao.listByStoreAndSnapshotIds(srcImgStoreId, DataStoreRole.Image, snapshotIdList); - if (!migrationHelper.filesReadyToMigrate(srcImgStoreId, templates, snapshots, Collections.emptyList())) { + if (!migrationHelper.filesReadyToMigrate(srcImgStoreId, templates, snapshots, Collections.emptyList(), Collections.emptyList())) { throw new CloudRuntimeException("Migration failed as there are data objects which are not Ready - i.e, they may be in Migrating, creating, copying, etc. states"); } files = migrationHelper.getSortedValidSourcesList(srcDatastore, snapshotChains, childTemplates, templates, snapshots); @@ -291,7 +317,7 @@ public MigrationResponse migrateResources(Long srcImgStoreId, Long destImgStoreI } if (storageCapacityBelowThreshold(storageCapacities, destImgStoreId)) { - storageCapacities = migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, childTemplates, srcDatastore, destImgStoreId, futures); + storageCapacities = migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, childTemplates, null, srcDatastore, destImgStoreId, futures); } else { message = "Migration failed. 
Destination store doesn't have enough capacity for migration"; success = false; @@ -355,15 +381,89 @@ protected Map> migrateAway( Map, Long>> snapshotChains, Map, Long>> templateChains, + Map>, Long>> backupChains, DataStore srcDatastore, Long destDatastoreId, List> futures) { Long fileSize = migrationHelper.getFileSize(chosenFileForMigration, snapshotChains, templateChains); storageCapacities = assumeMigrate(storageCapacities, srcDatastore.getId(), destDatastoreId, fileSize); + DataStore destDataStore = dataStoreManager.getDataStore(destDatastoreId, DataStoreRole.Image); + + boolean isKvmIncrementalBackup = backupChains != null && chosenFileForMigration instanceof BackupObject && backupChains.containsKey(chosenFileForMigration); + + if (isKvmIncrementalBackup) { + MigrateKvmIncrementalBackupTask task = new MigrateKvmIncrementalBackupTask(chosenFileForMigration, backupChains, srcDatastore, destDataStore); + futures.add(submitKvmIncrementalMigration(srcDatastore.getScope().getScopeId(), task)); + logger.debug("Incremental backup migration {} submitted to incremental pool.", chosenFileForMigration.getUuid()); + } else { + createMigrateDataTask(chosenFileForMigration, snapshotChains, templateChains, srcDatastore, destDataStore, futures); + } + + return storageCapacities; + } + + private void migrateKvmIncrementalBackupChain(DataObject chosenFileForMigration, Map>, Long>> backupChains, DataStore srcDatastore, DataStore destDataStore) { + Transaction.execute((TransactionCallback) status -> { + MigrateBetweenSecondaryStoragesCommandAnswer answer = null; + + try { + List> backupChain = backupChains.get(chosenFileForMigration).first(); + MigrateBackupsBetweenSecondaryStoragesCommand migrateBetweenSecondaryStoragesCmd = new MigrateBackupsBetweenSecondaryStoragesCommand(backupChain.stream().map(list -> list.stream().map(BackupObject::getTO).collect(Collectors.toList())) + .collect(Collectors.toList()), srcDatastore.getTO(), destDataStore.getTO()); + + HostVO host = 
getAvailableHost(((BackupObject) chosenFileForMigration).getZoneId()); + if (host == null) { + throw new CloudRuntimeException("No hosts found to send migrate command."); + } + + migrateBetweenSecondaryStoragesCmd.setWait(StorageManager.AgentMaxDataMigrationWaitTime.valueIn(host.getClusterId())); + answer = (MigrateBetweenSecondaryStoragesCommandAnswer) agentManager.send(host.getId(), migrateBetweenSecondaryStoragesCmd); + if (answer == null || !answer.getResult()) { + logger.warn("Unable to migrate backups [{}].", backupChain); + throw new CloudRuntimeException("Unable to migrate KVM incremental backups to another secondary storage"); + } + + } catch (final OperationTimedoutException | AgentUnavailableException e) { + throw new CloudRuntimeException("Error while migrating KVM incremental backup chain. Check the logs for more information.", e); + } finally { + if (answer != null) { + updateBackupsReference(destDataStore, answer); + } + } + return answer.getResult(); + }); + } + + private void updateBackupsReference(DataStore destDataStore, MigrateBetweenSecondaryStoragesCommandAnswer answer) { + for (Pair backupIdAndUpdatedCheckpointPath : answer.getMigratedResources()) { + Long backupId = backupIdAndUpdatedCheckpointPath.first(); + BackupDetailVO backupDetail = backupDetailDao.findDetail(backupId, BackupDetailsDao.IMAGE_STORE_ID); + String destDataStoreId = String.valueOf(destDataStore.getId()); + + if (backupDetail == null) { + logger.warn("No details found for backup [{}]. 
Creating new entry with image store ID [{}].", backupId, destDataStoreId); + backupDetailDao.addDetail(backupId, BackupDetailsDao.IMAGE_STORE_ID, destDataStoreId, false); + continue; + } + + backupDetail.setValue(destDataStoreId); + backupDetailDao.update(backupDetail.getId(), backupDetail); + } + } - MigrateDataTask task = new MigrateDataTask(chosenFileForMigration, srcDatastore, dataStoreManager.getDataStore(destDatastoreId, DataStoreRole.Image)); - if (chosenFileForMigration instanceof SnapshotInfo ) { + private HostVO getAvailableHost(long zoneId) throws AgentUnavailableException, OperationTimedoutException { + List hosts = hostDao.listByDataCenterIdAndHypervisorType(zoneId, Hypervisor.HypervisorType.KVM); + if (CollectionUtils.isNotEmpty(hosts)) { + return hosts.get(new Random().nextInt(hosts.size())); + } + + return null; + } + + private void createMigrateDataTask(DataObject chosenFileForMigration, Map, Long>> snapshotChains, Map, Long>> templateChains, DataStore srcDatastore, DataStore destDataStore, List> futures) { + MigrateDataTask task = new MigrateDataTask(chosenFileForMigration, srcDatastore, destDataStore); + if (chosenFileForMigration instanceof SnapshotInfo) { task.setSnapshotChains(snapshotChains); } if (chosenFileForMigration instanceof TemplateInfo) { @@ -371,7 +471,6 @@ protected Map> migrateAway( } futures.add(submit(srcDatastore.getScope().getScopeId(), task)); logger.debug("Migration of {}: {} is initiated.", chosenFileForMigration.getType().name(), chosenFileForMigration.getUuid()); - return storageCapacities; } protected Future submit(Long zoneId, Callable task) { @@ -390,6 +489,13 @@ protected Future submit(Long zoneId, Callable task) { } + protected Future submitKvmIncrementalMigration(Long zoneId, Callable task) { + if (!zoneKvmIncrementalExecutorMap.containsKey(zoneId)) { + zoneKvmIncrementalExecutorMap.put(zoneId, Executors.newSingleThreadExecutor()); + } + return zoneKvmIncrementalExecutorMap.get(zoneId).submit(task); + } + protected 
void scaleExecutorIfNecessary(Long zoneId) { long activeSsvms = migrationHelper.activeSSVMCount(zoneId); long totalJobs = activeSsvms * numConcurrentCopyTasksPerSSVM; @@ -666,4 +772,32 @@ public TemplateApiResult call() { return result; } } + + private class MigrateKvmIncrementalBackupTask implements Callable { + private final DataObject chosenFile; + private final Map>, Long>> backupChains; + private final DataStore srcDataStore; + private final DataStore destDataStore; + + public MigrateKvmIncrementalBackupTask(DataObject chosenFile, Map>, Long>> backupChains, DataStore srcDataStore, DataStore destDataStore) { + this.chosenFile = chosenFile; + this.backupChains = backupChains; + this.srcDataStore = srcDataStore; + this.destDataStore = destDataStore; + } + + @Override + public DataObjectResult call() { + try { + migrateKvmIncrementalBackupChain(chosenFile, backupChains, srcDataStore, destDataStore); + return new DataObjectResult(chosenFile); + } catch (Exception e) { + logger.warn("Failed migrating incremental backup {}.", chosenFile.getUuid(), e); + DataObjectResult result = new DataObjectResult(chosenFile); + result.setResult(e.toString()); + return result; + } + } + } + } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index e8c75afa81c5..21247ac725e8 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -50,6 +50,7 @@ import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd; import org.apache.cloudstack.api.command.admin.volume.MigrateVolumeCmdByAdmin; import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; +import org.apache.cloudstack.backup.NativeBackupService; import
org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; @@ -280,6 +281,9 @@ public enum UserVmCloneType { @Inject private DataStoreProviderManager dataStoreProviderMgr; + @Inject + private NativeBackupService nativeBackupService; + private final StateMachine2 _volStateMachine; protected List _storagePoolAllocators; @@ -1190,21 +1194,27 @@ private VolumeInfo copyVolume(StoragePool rootDiskPool, VolumeInfo volumeInfo, V } @Override - public VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo volumeInfo, HypervisorType rootDiskHyperType, StoragePool storagePool) throws NoTransitionException { + public VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo volumeInfo, HypervisorType rootDiskHyperType, StoragePool storagePool, Long clusterId, Long podId) + throws NoTransitionException { String volumeToString = getReflectOnlySelectedFields(volumeInfo.getVolume()); VirtualMachineTemplate rootDiskTmplt = _entityMgr.findById(VirtualMachineTemplate.class, vm.getTemplateId()); DataCenter dcVO = _entityMgr.findById(DataCenter.class, vm.getDataCenterId()); - logger.trace("storage-pool {}/{} is associated with pod {}",storagePool.getName(), storagePool.getUuid(), storagePool.getPodId()); - Long podId = storagePool.getPodId() != null ? storagePool.getPodId() : vm.getPodIdToDeployIn(); + + if (storagePool != null) { + logger.trace("storage-pool {}/{} is associated with pod {}", storagePool.getName(), storagePool.getUuid(), storagePool.getPodId()); + podId = storagePool.getPodId() != null ? 
storagePool.getPodId() : vm.getPodIdToDeployIn(); + clusterId = storagePool.getClusterId(); + logger.trace("storage-pool {}/{} is associated with cluster {}",storagePool.getName(), storagePool.getUuid(), clusterId); + } + Pod pod = _entityMgr.findById(Pod.class, podId); ServiceOffering svo = _entityMgr.findById(ServiceOffering.class, vm.getServiceOfferingId()); DiskOffering diskVO = _entityMgr.findById(DiskOffering.class, volumeInfo.getDiskOfferingId()); - Long clusterId = storagePool.getClusterId(); - logger.trace("storage-pool {}/{} is associated with cluster {}",storagePool.getName(), storagePool.getUuid(), clusterId); + Long hostId = vm.getHostId(); - if (hostId == null && storagePool.isLocal()) { + if (hostId == null && storagePool != null && storagePool.isLocal()) { List poolHosts = storagePoolHostDao.listByPoolId(storagePool.getId()); if (poolHosts.size() > 0) { hostId = poolHosts.get(0).getHostId(); @@ -1452,6 +1462,7 @@ public Volume migrateVolume(Volume volume, StoragePool destPool) throws StorageU _snapshotDao.updateVolumeIds(vol.getId(), result.getVolume().getId()); _snapshotDataStoreDao.updateVolumeIds(vol.getId(), result.getVolume().getId()); } + nativeBackupService.updateVolumeId(vol.getId(), result.getVolume().getId()); } return result.getVolume(); } catch (InterruptedException | ExecutionException e) { @@ -1508,6 +1519,8 @@ public void migrateVolumes(VirtualMachine vm, VirtualMachineTO vmTo, Host srcHos throw new CloudRuntimeException(String.format("Failed to find the destination storage pool [%s] to migrate the volume [%s] to.", storagePoolToString, volumeToString)); } + nativeBackupService.prepareVolumeForMigration(volume); + volumeMap.put(volFactory.getVolume(volume.getId()), (DataStore)destPool); } diff --git a/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml b/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml index 
17c5002c718b..87d362aef7a2 100644 --- a/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml +++ b/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml @@ -88,6 +88,7 @@ + diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java index 090b019334f4..a38cdb58ebd1 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java @@ -116,6 +116,8 @@ public interface HostDao extends GenericDao, StateDao listIdsForUpEnabledByZoneAndHypervisor(Long zoneId, HypervisorType hypervisorType); + List findRoutingByClusterId(Long clusterId); + List findByClusterIdAndEncryptionSupport(Long clusterId); /** @@ -134,6 +136,8 @@ public interface HostDao extends GenericDao, StateDao listAllHostsByZoneAndHypervisorType(long zoneId, HypervisorType hypervisorType); + List listAllRoutingHostsByZoneAndHypervisorType(long zoneId, HypervisorType hypervisorType); + List listAllHostsThatHaveNoRuleTag(Host.Type type, Long clusterId, Long podId, Long dcId); HostVO findByPublicIp(String publicIp); diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java index 2d8fcca6cdb7..17ca401a2dd8 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java @@ -1333,6 +1333,14 @@ public List listIdsForUpEnabledByZoneAndHypervisor(Long zoneId, Hypervisor return listIdsBy(null, Status.Up, ResourceState.Enabled, hypervisorType, zoneId, null, null); } + @Override + public List findRoutingByClusterId(Long clusterId) { + SearchCriteria sc = ClusterSearch.create(); + sc.setParameters("clusterId", clusterId); + sc.setParameters("type", Type.Routing); + return listBy(sc); + } + @Override 
public List findByClusterIdAndEncryptionSupport(Long clusterId) { SearchBuilder hostCapabilitySearch = _detailsDao.createSearchBuilder(); @@ -1448,6 +1456,16 @@ public List listAllHostsByZoneAndHypervisorType(long zoneId, HypervisorT return listBy(sc); } + @Override + public List listAllRoutingHostsByZoneAndHypervisorType(long zoneId, HypervisorType hypervisorType) { + SearchCriteria sc = DcSearch.create(); + sc.setParameters("dc", zoneId); + sc.setParameters("hypervisorType", hypervisorType.toString()); + sc.setParameters("type", Type.Routing); + + return listBy(sc); + } + @Override public List listAllHostsThatHaveNoRuleTag(Type type, Long clusterId, Long podId, Long dcId) { SearchCriteria sc = searchBuilderFindByIdTypeClusterIdPodIdDcIdAndWithoutRuleTag.create(); diff --git a/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java b/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java index 4a504333344f..be7ba2843321 100644 --- a/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java +++ b/engine/schema/src/main/java/com/cloud/storage/SnapshotVO.java @@ -248,6 +248,10 @@ public Date getRemoved() { return removed; } + public void setRemoved(Date removed) { + this.removed = removed; + } + @Override public State getState() { return state; diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java index 80e1b7d4d4be..336c13301383 100755 --- a/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/SnapshotDaoImpl.java @@ -56,10 +56,18 @@ public class SnapshotDaoImpl extends GenericDaoBase implements // TODO: we should remove these direct sqls private static final String GET_LAST_SNAPSHOT = "SELECT snapshots.id FROM snapshot_store_ref, snapshots where snapshots.id = snapshot_store_ref.snapshot_id AND snapshosts.volume_id = ? AND snapshot_store_ref.role = ? 
ORDER BY created DESC"; - private static final String VOLUME_ID = "volumeId"; - private static final String NOT_TYPE = "notType"; + private static final String TYPE = "type"; private static final String STATUS = "status"; + private static final String VERSION = "version"; + private static final String ACCOUNT_ID = "accountId"; + private static final String REMOVED = "removed"; + private static final String NOT_TYPE = "notType"; + private static final String ID = "id"; + private static final String INSTANCE_ID = "instanceId"; + private static final String STATE = "state"; + private static final String INSTANCE_VOLUMES = "instanceVolumes"; + private static final String INSTANCE_SNAPSHOTS = "instanceSnapshots"; private SearchBuilder snapshotIdsSearch; private SearchBuilder VolumeIdSearch; @@ -83,9 +91,9 @@ public class SnapshotDaoImpl extends GenericDaoBase implements @Override public List listByVolumeIdTypeNotDestroyed(long volumeId, Type type) { SearchCriteria sc = VolumeIdTypeNotDestroyedSearch.create(); - sc.setParameters("volumeId", volumeId); - sc.setParameters("type", type.ordinal()); - sc.setParameters("status", State.Destroyed); + sc.setParameters(VOLUME_ID, volumeId); + sc.setParameters(TYPE, type.ordinal()); + sc.setParameters(STATUS, State.Destroyed); return listBy(sc, null); } @@ -102,28 +110,28 @@ public List listByVolumeId(long volumeId) { @Override public List listByVolumeId(Filter filter, long volumeId) { SearchCriteria sc = VolumeIdSearch.create(); - sc.setParameters("volumeId", volumeId); + sc.setParameters(VOLUME_ID, volumeId); return listBy(sc, filter); } @Override public List listByVolumeIdIncludingRemoved(long volumeId) { SearchCriteria sc = VolumeIdSearch.create(); - sc.setParameters("volumeId", volumeId); + sc.setParameters(VOLUME_ID, volumeId); return listIncludingRemovedBy(sc, null); } public List listByVolumeIdType(Filter filter, long volumeId, Type type) { SearchCriteria sc = VolumeIdTypeSearch.create(); - sc.setParameters("volumeId", 
volumeId); - sc.setParameters("type", type.ordinal()); + sc.setParameters(VOLUME_ID, volumeId); + sc.setParameters(TYPE, type.ordinal()); return listBy(sc, filter); } public List listByVolumeIdVersion(Filter filter, long volumeId, String version) { SearchCriteria sc = VolumeIdVersionSearch.create(); - sc.setParameters("volumeId", volumeId); - sc.setParameters("version", version); + sc.setParameters(VOLUME_ID, volumeId); + sc.setParameters(VERSION, version); return listBy(sc, filter); } @@ -133,60 +141,60 @@ public SnapshotDaoImpl() { @PostConstruct protected void init() { VolumeIdSearch = createSearchBuilder(); - VolumeIdSearch.and("volumeId", VolumeIdSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); + VolumeIdSearch.and(VOLUME_ID, VolumeIdSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); VolumeIdSearch.done(); VolumeIdTypeSearch = createSearchBuilder(); - VolumeIdTypeSearch.and("volumeId", VolumeIdTypeSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); - VolumeIdTypeSearch.and("type", VolumeIdTypeSearch.entity().getSnapshotType(), SearchCriteria.Op.EQ); + VolumeIdTypeSearch.and(VOLUME_ID, VolumeIdTypeSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); + VolumeIdTypeSearch.and(TYPE, VolumeIdTypeSearch.entity().getSnapshotType(), SearchCriteria.Op.EQ); VolumeIdTypeSearch.done(); VolumeIdTypeNotDestroyedSearch = createSearchBuilder(); - VolumeIdTypeNotDestroyedSearch.and("volumeId", VolumeIdTypeNotDestroyedSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); - VolumeIdTypeNotDestroyedSearch.and("type", VolumeIdTypeNotDestroyedSearch.entity().getSnapshotType(), SearchCriteria.Op.EQ); - VolumeIdTypeNotDestroyedSearch.and("status", VolumeIdTypeNotDestroyedSearch.entity().getState(), SearchCriteria.Op.NEQ); + VolumeIdTypeNotDestroyedSearch.and(VOLUME_ID, VolumeIdTypeNotDestroyedSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); + VolumeIdTypeNotDestroyedSearch.and(TYPE, VolumeIdTypeNotDestroyedSearch.entity().getSnapshotType(), SearchCriteria.Op.EQ); + 
VolumeIdTypeNotDestroyedSearch.and(STATUS, VolumeIdTypeNotDestroyedSearch.entity().getState(), SearchCriteria.Op.NEQ); VolumeIdTypeNotDestroyedSearch.done(); VolumeIdVersionSearch = createSearchBuilder(); - VolumeIdVersionSearch.and("volumeId", VolumeIdVersionSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); - VolumeIdVersionSearch.and("version", VolumeIdVersionSearch.entity().getVersion(), SearchCriteria.Op.EQ); + VolumeIdVersionSearch.and(VOLUME_ID, VolumeIdVersionSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); + VolumeIdVersionSearch.and(VERSION, VolumeIdVersionSearch.entity().getVersion(), SearchCriteria.Op.EQ); VolumeIdVersionSearch.done(); AccountIdSearch = createSearchBuilder(); - AccountIdSearch.and("accountId", AccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + AccountIdSearch.and(ACCOUNT_ID, AccountIdSearch.entity().getAccountId(), SearchCriteria.Op.EQ); AccountIdSearch.done(); StatusSearch = createSearchBuilder(); - StatusSearch.and("volumeId", StatusSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); - StatusSearch.and("status", StatusSearch.entity().getState(), SearchCriteria.Op.IN); + StatusSearch.and(VOLUME_ID, StatusSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); + StatusSearch.and(STATUS, StatusSearch.entity().getState(), SearchCriteria.Op.IN); StatusSearch.done(); notInStatusSearch = createSearchBuilder(); - notInStatusSearch.and("volumeId", notInStatusSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); - notInStatusSearch.and("status", notInStatusSearch.entity().getState(), SearchCriteria.Op.NOTIN); + notInStatusSearch.and(VOLUME_ID, notInStatusSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); + notInStatusSearch.and(STATUS, notInStatusSearch.entity().getState(), SearchCriteria.Op.NOTIN); notInStatusSearch.done(); CountSnapshotsByAccount = createSearchBuilder(Long.class); CountSnapshotsByAccount.select(null, Func.COUNT, null); - CountSnapshotsByAccount.and("account", 
CountSnapshotsByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); - CountSnapshotsByAccount.and("status", CountSnapshotsByAccount.entity().getState(), SearchCriteria.Op.NIN); - CountSnapshotsByAccount.and("removed", CountSnapshotsByAccount.entity().getRemoved(), SearchCriteria.Op.NULL); + CountSnapshotsByAccount.and(ACCOUNT_ID, CountSnapshotsByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); + CountSnapshotsByAccount.and(STATUS, CountSnapshotsByAccount.entity().getState(), SearchCriteria.Op.NIN); + CountSnapshotsByAccount.and(REMOVED, CountSnapshotsByAccount.entity().getRemoved(), SearchCriteria.Op.NULL); CountSnapshotsByAccount.done(); InstanceIdSearch = createSearchBuilder(); - InstanceIdSearch.and("status", InstanceIdSearch.entity().getState(), SearchCriteria.Op.IN); + InstanceIdSearch.and(STATUS, InstanceIdSearch.entity().getState(), SearchCriteria.Op.IN); snapshotIdsSearch = createSearchBuilder(); - snapshotIdsSearch.and("id", snapshotIdsSearch.entity().getId(), SearchCriteria.Op.IN); + snapshotIdsSearch.and(ID, snapshotIdsSearch.entity().getId(), SearchCriteria.Op.IN); SearchBuilder instanceSearch = _instanceDao.createSearchBuilder(); - instanceSearch.and("instanceId", instanceSearch.entity().getId(), SearchCriteria.Op.EQ); + instanceSearch.and(INSTANCE_ID, instanceSearch.entity().getId(), SearchCriteria.Op.EQ); SearchBuilder volumeSearch = _volumeDao.createSearchBuilder(); - volumeSearch.and("state", volumeSearch.entity().getState(), SearchCriteria.Op.EQ); - volumeSearch.join("instanceVolumes", instanceSearch, instanceSearch.entity().getId(), volumeSearch.entity().getInstanceId(), JoinType.INNER); + volumeSearch.and(STATE, volumeSearch.entity().getState(), SearchCriteria.Op.EQ); + volumeSearch.join(INSTANCE_VOLUMES, instanceSearch, instanceSearch.entity().getId(), volumeSearch.entity().getInstanceId(), JoinType.INNER); - InstanceIdSearch.join("instanceSnapshots", volumeSearch, volumeSearch.entity().getId(), 
InstanceIdSearch.entity().getVolumeId(), JoinType.INNER); + InstanceIdSearch.join(INSTANCE_SNAPSHOTS, volumeSearch, volumeSearch.entity().getId(), InstanceIdSearch.entity().getVolumeId(), JoinType.INNER); InstanceIdSearch.done(); volumeIdAndTypeNotInSearch = createSearchBuilder(); @@ -218,8 +226,8 @@ public long getLastSnapshot(long volumeId, DataStoreRole role) { @Override public Long countSnapshotsForAccount(long accountId) { SearchCriteria sc = CountSnapshotsByAccount.create(); - sc.setParameters("account", accountId); - sc.setParameters("status", State.Error, State.Destroyed); + sc.setParameters(ACCOUNT_ID, accountId); + sc.setParameters(STATUS, State.Error, State.Destroyed); return customSearch(sc, null).get(0); } @@ -228,19 +236,19 @@ public List listByInstanceId(long instanceId, Snapshot.State... stat SearchCriteria sc = InstanceIdSearch.create(); if (status != null && status.length != 0) { - sc.setParameters("status", (Object[])status); + sc.setParameters(STATUS, (Object[])status); } - sc.setJoinParameters("instanceSnapshots", "state", Volume.State.Ready); - sc.setJoinParameters("instanceVolumes", "instanceId", instanceId); + sc.setJoinParameters(INSTANCE_SNAPSHOTS, STATE, Volume.State.Ready); + sc.setJoinParameters(INSTANCE_VOLUMES, INSTANCE_ID, instanceId); return listBy(sc, null); } @Override public List listByStatus(long volumeId, Snapshot.State... status) { SearchCriteria sc = StatusSearch.create(); - sc.setParameters("volumeId", volumeId); - sc.setParameters("status", (Object[])status); + sc.setParameters(VOLUME_ID, volumeId); + sc.setParameters(STATUS, (Object[])status); return listBy(sc, null); } @@ -261,7 +269,7 @@ public boolean remove(Long id) { @Override public List listAllByStatus(Snapshot.State... 
status) { SearchCriteria sc = StatusSearch.create(); - sc.setParameters("status", (Object[])status); + sc.setParameters(STATUS, (Object[])status); return listBy(sc, null); } @@ -275,7 +283,7 @@ public List listAllByStatusIncludingRemoved(Snapshot.State... status @Override public List listByIds(Object... ids) { SearchCriteria sc = snapshotIdsSearch.create(); - sc.setParameters("id", ids); + sc.setParameters(ID, ids); return listBy(sc, null); } @@ -293,7 +301,7 @@ public boolean updateState(State currentState, Event event, State nextState, Sna @Override public void updateVolumeIds(long oldVolId, long newVolId) { SearchCriteria sc = VolumeIdSearch.create(); - sc.setParameters("volumeId", oldVolId); + sc.setParameters(VOLUME_ID, oldVolId); SnapshotVO snapshot = createForUpdate(); snapshot.setVolumeId(newVolId); UpdateBuilder ub = getUpdateBuilder(snapshot); @@ -303,8 +311,8 @@ public void updateVolumeIds(long oldVolId, long newVolId) { @Override public List listByStatusNotIn(long volumeId, Snapshot.State... status) { SearchCriteria sc = this.notInStatusSearch.create(); - sc.setParameters("volumeId", volumeId); - sc.setParameters("status", (Object[]) status); + sc.setParameters(VOLUME_ID, volumeId); + sc.setParameters(STATUS, (Object[]) status); return listBy(sc, null); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupCompressionJobType.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupCompressionJobType.java new file mode 100644 index 000000000000..2101e27f1c9b --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupCompressionJobType.java @@ -0,0 +1,21 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.backup; + +public enum BackupCompressionJobType { + StartCompression, FinalizeCompression +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupCompressionJobVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupCompressionJobVO.java new file mode 100644 index 000000000000..39e1d62acf5b --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupCompressionJobVO.java @@ -0,0 +1,166 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup; + +import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.api.InternalIdentity; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.Temporal; +import javax.persistence.TemporalType; +import java.util.Date; + +@Entity +@Table(name = "backup_compression_job") +public class BackupCompressionJobVO implements InternalIdentity, Comparable { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "backup_id") + private long backupId; + + @Column(name = "instance_id") + private long instanceId; + + @Column(name = "host_id") + private Long hostId; + + @Column(name = "zone_id") + private long zoneId; + + @Column(name = "attempts") + private int attempts; + + @Column (name = "type") + private BackupCompressionJobType type; + + @Column(name = GenericDao.CREATED_COLUMN) + private Date created; + + @Column(name = "scheduled_start_time") + @Temporal(value = TemporalType.TIMESTAMP) + private Date scheduledStartTime; + + @Column(name = "start_time") + @Temporal(value = TemporalType.TIMESTAMP) + private Date startTime; + + @Column(name = GenericDao.REMOVED_COLUMN) + @Temporal(value = TemporalType.TIMESTAMP) + private Date removed; + + public BackupCompressionJobVO() { + } + + public BackupCompressionJobVO(long backupId, long zoneId, long instanceId, BackupCompressionJobType type) { + this.created = new Date(); + this.backupId = backupId; + this.zoneId = zoneId; + this.instanceId = instanceId; + this.type = type; + this.scheduledStartTime = this.created; + } + + @Override + public long getId() { + return id; + } + + public long getBackupId() { + return backupId; + } + + public long 
getInstanceId() { + return instanceId; + } + + public Long getHostId() { + return hostId; + } + + public void setHostId(Long hostId) { + this.hostId = hostId; + } + + public long getZoneId() { + return zoneId; + } + + public void setZoneId(Long zoneId) { + this.zoneId = zoneId; + } + + public int getAttempts() { + return attempts; + } + + public void setAttempts(int attempts) { + this.attempts = attempts; + } + + public BackupCompressionJobType getType() { + return type; + } + + public Date getCreated() { + return created; + } + + public Date getScheduledStartTime() { + return scheduledStartTime; + } + + public void setScheduledStartTime(Date scheduledStartTime) { + this.scheduledStartTime = scheduledStartTime; + } + + public Date getStartTime() { + return startTime; + } + + public void setStartTime(Date startTime) { + this.startTime = startTime; + } + + public Date getRemoved() { + return removed; + } + + public void setRemoved(Date removed) { + this.removed = removed; + } + + @Override + public int compareTo(BackupCompressionJobVO that) { + return this.created.compareTo(that.created); + } + + @Override + public String toString() { + return ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "backupId", "zoneId", "hostId", "created", "scheduledStartTime", "startTime", "attempts", + "type"); + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java index 1ee2cff78b65..aa02bb077163 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupScheduleVO.java @@ -74,10 +74,14 @@ public class BackupScheduleVO implements BackupSchedule { @Column(name = "domain_id") Long domainId; + @Column(name = "isolated") + private boolean isolated; + public BackupScheduleVO() { } - public BackupScheduleVO(Long vmId, DateUtil.IntervalType 
scheduleType, String schedule, String timezone, Date scheduledTimestamp, int maxBackups, Boolean quiesceVM, Long accountId, Long domainId) { + public BackupScheduleVO(Long vmId, DateUtil.IntervalType scheduleType, String schedule, String timezone, Date scheduledTimestamp, int maxBackups, Boolean quiesceVM, + Long accountId, Long domainId, boolean isolated) { this.vmId = vmId; this.scheduleType = (short) scheduleType.ordinal(); this.schedule = schedule; @@ -87,6 +91,7 @@ public BackupScheduleVO(Long vmId, DateUtil.IntervalType scheduleType, String sc this.quiesceVM = quiesceVM; this.accountId = accountId; this.domainId = domainId; + this.isolated = isolated; } @Override @@ -197,4 +202,13 @@ public void setAccountId(Long accountId) { public void setDomainId(Long domainId) { this.domainId = domainId; } + + @Override + public boolean isIsolated() { + return isolated; + } + + public void setIsolated(boolean isolated) { + this.isolated = isolated; + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java index 0f8a10fb7be6..f8f3d05ebf7a 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupVO.java @@ -19,7 +19,6 @@ import com.cloud.utils.db.GenericDao; import com.google.gson.Gson; - import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; import org.apache.commons.lang3.StringUtils; @@ -66,7 +65,7 @@ public class BackupVO implements Backup { private String externalId; @Column(name = "type") - private String backupType; + private String type; @Column(name = "date") @Temporal(value = TemporalType.DATE) @@ -81,6 +80,9 @@ public class BackupVO implements Backup { @Column(name = "protected_size") private Long protectedSize; + @Column(name = "uncompressed_size") + private Long uncompressedSize; + @Enumerated(value = EnumType.STRING) 
@Column(name = "status") private Backup.Status status; @@ -103,11 +105,30 @@ public class BackupVO implements Backup { @Column(name = "backup_schedule_id") private Long backupScheduleId; + @Column(name = "compression_status") + private CompressionStatus compressionStatus; + @Transient Map details; public BackupVO() { this.uuid = UUID.randomUUID().toString(); + this.compressionStatus = CompressionStatus.Uncompressed; + } + + public BackupVO(String name, long vmId, long backupOfferingId, long accountId, long domainId, long zoneId, long virtualSize, Status status, Long backupScheduleId) { + this.name = name; + this.vmId = vmId; + this.backupOfferingId = backupOfferingId; + this.accountId = accountId; + this.domainId = domainId; + this.zoneId = zoneId; + this.protectedSize = virtualSize; + this.status = status; + this.setType("FULL"); + this.uuid = UUID.randomUUID().toString(); + this.backupScheduleId = backupScheduleId; + this.compressionStatus = CompressionStatus.Uncompressed; } @Override @@ -144,12 +165,13 @@ public void setExternalId(String externalId) { this.externalId = externalId; } + @Override public String getType() { - return backupType; + return type; } public void setType(String type) { - this.backupType = type; + this.type = type; } @Override @@ -288,4 +310,20 @@ public Long getBackupScheduleId() { public void setBackupScheduleId(Long backupScheduleId) { this.backupScheduleId = backupScheduleId; } + + public CompressionStatus getCompressionStatus() { + return compressionStatus; + } + + public void setCompressionStatus(CompressionStatus compressionStatus) { + this.compressionStatus = compressionStatus; + } + + public Long getUncompressedSize() { + return uncompressedSize; + } + + public void setUncompressedSize(Long uncompressedSize) { + this.uncompressedSize = uncompressedSize; + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupDataStoreVO.java 
b/engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupDataStoreVO.java new file mode 100644 index 000000000000..abff22aa8cbb --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupDataStoreVO.java @@ -0,0 +1,95 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License.
+package org.apache.cloudstack.backup; + +import org.apache.cloudstack.api.InternalIdentity; +import org.apache.commons.lang3.builder.ReflectionToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; + +import javax.persistence.Column; +import javax.persistence.Entity; + +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + + +@Entity +@Table(name = "native_backup_store_ref") +public class NativeBackupDataStoreVO implements InternalIdentity { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "backup_id") + private long backupId; + + @Column(name = "volume_id") + private long volumeId; + + @Column (name = "device_id") + private long deviceId; + + @Column(name = "path") + private String backupPath; + + public NativeBackupDataStoreVO() { + } + + public NativeBackupDataStoreVO(long backupId, long volumeId, long deviceId, String backupPath) { + this.backupId = backupId; + this.volumeId = volumeId; + this.deviceId = deviceId; + this.backupPath = backupPath; + } + + @Override + public long getId() { + return id; + } + + public long getBackupId() { + return backupId; + } + + public long getVolumeId() { + return volumeId; + } + + public long getDeviceId() { + return deviceId; + } + + public String getBackupPath() { + return backupPath; + } + + public void setVolumeId(long volumeId) { + this.volumeId = volumeId; + } + + public void setBackupPath(String backupPath) { + this.backupPath = backupPath; + } + + @Override + public String toString() { + return ReflectionToStringBuilder.toString(this, ToStringStyle.JSON_STYLE); + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupJoinVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupJoinVO.java new file mode 100644 index 000000000000..1a0bef23a8b7 --- /dev/null +++ 
b/engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupJoinVO.java @@ -0,0 +1,190 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.backup; + +import com.google.gson.Gson; +import org.apache.commons.lang3.BooleanUtils; +import org.apache.commons.lang3.builder.ReflectionToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; +import org.apache.commons.lang3.StringUtils; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.Temporal; +import javax.persistence.TemporalType; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.List; + +@Entity +@Table(name = "native_backup_view") +public class NativeBackupJoinVO { + + @Id + @Column(name="id") + private long id; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "vm_id") + private long vmId; + + @Column(name = "backed_volumes", length = 65535) + private String backedUpVolumes; + + @Column(name = "backup_offering_id") + private long backupOfferingId; + + @Column(name = 
"image_store_id") + private long imageStoreId; + + @Column(name = "parent_id") + private long parentId; + + @Column(name = "type") + private String type; + + @Column(name = "date") + @Temporal(value = TemporalType.DATE) + private Date date; + + @Enumerated(value = EnumType.STRING) + @Column(name = "status") + private Backup.Status status; + + @Enumerated(value = EnumType.STRING) + @Column(name = "compression_status") + private Backup.CompressionStatus compressionStatus; + + @Column(name = "end_of_chain") + private Boolean endOfChain; + + @Column(name = "current") + private Boolean current; + + @Column(name = "image_store_path") + private String imageStorePath; + + @Column(name = "zone_id") + private long zoneId; + + @Column(name = "size") + private long size; + + @Column(name = "protected_size") + private long protectedSize; + + @Column(name = "volume_id") + private long volumeId; + + @Column(name = "isolated") + private Boolean isolated; + + public NativeBackupJoinVO() { + } + + public long getId() { + return id; + } + + public String getUuid() { + return uuid; + } + + public long getVmId() { + return vmId; + } + + public List getBackedUpVolumes() { + if (StringUtils.isEmpty(this.backedUpVolumes)) { + return Collections.emptyList(); + } + return Arrays.asList(new Gson().fromJson(this.backedUpVolumes, Backup.VolumeInfo[].class)); + } + + public long getBackupOfferingId() { + return backupOfferingId; + } + + public long getImageStoreId() { + return imageStoreId; + } + + public long getParentId() { + return parentId; + } + + public String getType() { + return type; + } + + public Date getDate() { + return date; + } + + public Backup.Status getStatus() { + return status; + } + + public Boolean getEndOfChain() { + return BooleanUtils.isTrue(endOfChain); + } + + public Boolean getCurrent() { + return BooleanUtils.isTrue(current); + } + + public String getImageStorePath() { + return imageStorePath; + } + + public long getZoneId() { + return zoneId; + } + + public long 
getSize() { + return size; + } + + public long getProtectedSize() { + return protectedSize; + } + + public long getVolumeId() { + return volumeId; + } + + public Boolean getIsolated() { + return BooleanUtils.isTrue(isolated); + } + + public Backup.CompressionStatus getCompressionStatus() { + return compressionStatus; + } + + @Override + public String toString() { + return ReflectionToStringBuilder.toString(this, ToStringStyle.JSON_STYLE); + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupOfferingVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupOfferingVO.java new file mode 100644 index 000000000000..91f26c1b0f8f --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupOfferingVO.java @@ -0,0 +1,175 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup; + +import org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.builder.ReflectionToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.Temporal; +import javax.persistence.TemporalType; +import java.util.Date; +import java.util.UUID; + +@Entity +@Table(name = "native_backup_offering") +public class NativeBackupOfferingVO implements NativeBackupOffering { + + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "uuid") + private String uuid; + + @Column(name = "name") + private String name; + + @Column(name = "compress") + private boolean compress; + + @Column(name = "compression_library") + private Backup.CompressionLibrary compressionLibrary; + + @Column (name = "validate") + private boolean validate; + + @Column(name = "allow_quick_restore") + private boolean allowQuickRestore; + + @Column(name = "allow_extract_file") + private boolean allowExtractFile; + + @Column(name = "backup_chain_size") + private Integer backupChainSize; + + @Column(name = "created") + @Temporal(value = TemporalType.TIMESTAMP) + private Date created; + + @Column(name = "removed") + @Temporal(value = TemporalType.TIMESTAMP) + private Date removed; + + public NativeBackupOfferingVO() { + this.uuid = UUID.randomUUID().toString(); + this.created = new Date(); + this.compressionLibrary = Backup.CompressionLibrary.zstd; + } + + public NativeBackupOfferingVO(String name, boolean compress, boolean validate, boolean allowQuickRestore, boolean allowExtractFile, Integer backupChainSize, Backup.CompressionLibrary compressionLibrary) { + this(); + this.name = name; + this.compress = compress; + this.validate = validate; + 
this.allowQuickRestore = allowQuickRestore; + this.allowExtractFile = allowExtractFile; + this.backupChainSize = backupChainSize; + this.compressionLibrary = ObjectUtils.defaultIfNull(compressionLibrary, Backup.CompressionLibrary.zstd); + } + + @Override + public long getId() { + return id; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public String getExternalId() { + return uuid; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getDescription() { + return name; + } + + @Override + public long getZoneId() { + return -1; + } + + @Override + public boolean isUserDrivenBackupAllowed() { + return true; + } + + @Override + public String getProvider() { + return "knib"; + } + + @Override + public boolean isCompress() { + return compress; + } + + public Backup.CompressionLibrary getCompressionLibrary() { + return compressionLibrary; + } + + @Override + public boolean isValidate() { + return validate; + } + + @Override + public boolean isAllowQuickRestore() { + return allowQuickRestore; + } + + @Override + public boolean isAllowExtractFile() { + return allowExtractFile; + } + + @Override + public Integer getBackupChainSize() { + return backupChainSize; + } + + @Override + public Date getCreated() { + return created; + } + + @Override + public Date getRemoved() { + return removed; + } + + @Override + public String toString() { + return ReflectionToStringBuilder.toString(this, ToStringStyle.JSON_STYLE); + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupStoragePoolVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupStoragePoolVO.java new file mode 100644 index 000000000000..625fd9afbe43 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/NativeBackupStoragePoolVO.java @@ -0,0 +1,101 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. 
See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. +package org.apache.cloudstack.backup; + +import org.apache.cloudstack.api.InternalIdentity; +import org.apache.commons.lang3.builder.ReflectionToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +@Entity +@Table(name = "native_backup_pool_ref") +public class NativeBackupStoragePoolVO implements InternalIdentity { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "backup_id") + private long backupId; + + @Column(name = "storage_pool_id") + private long storagePoolId; + + @Column(name = "volume_id") + private long volumeId; + + @Column(name = "backup_delta_path") + private String backupDeltaPath; + + @Column(name = "backup_parent_path") + private String backupDeltaParentPath; + + public NativeBackupStoragePoolVO() { + } + + public NativeBackupStoragePoolVO(long backupId, long storagePoolId, long volumeId, String backupDeltaPath, String backupDeltaParentPath) { + this.backupId = backupId; + this.storagePoolId = storagePoolId; + this.volumeId = volumeId; + this.backupDeltaPath = 
backupDeltaPath; + this.backupDeltaParentPath = backupDeltaParentPath; + } + + @Override + public long getId() { + return id; + } + + public long getBackupId() { + return backupId; + } + + public long getStoragePoolId() { + return storagePoolId; + } + + public long getVolumeId() { + return volumeId; + } + + public String getBackupDeltaPath() { + return backupDeltaPath; + } + + public String getBackupDeltaParentPath() { + return backupDeltaParentPath; + } + + public void setBackupDeltaPath(String backupDeltaPath) { + this.backupDeltaPath = backupDeltaPath; + } + + public void setBackupDeltaParentPath(String backupDeltaParentPath) { + this.backupDeltaParentPath = backupDeltaParentPath; + } + + @Override + public String toString() { + return ReflectionToStringBuilder.toString(this, ToStringStyle.JSON_STYLE); + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupCompressionJobDao.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupCompressionJobDao.java new file mode 100644 index 000000000000..d289ce64957f --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupCompressionJobDao.java @@ -0,0 +1,35 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+package org.apache.cloudstack.backup.dao; + +import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.backup.BackupCompressionJobType; +import org.apache.cloudstack.backup.BackupCompressionJobVO; + +import java.util.Date; +import java.util.List; + +public interface BackupCompressionJobDao extends GenericDao<BackupCompressionJobVO, Long> { + + List<BackupCompressionJobVO> listExecutingJobsByZoneIdAndJobType(long zoneId, BackupCompressionJobType type); + + List<BackupCompressionJobVO> listWaitingJobsAndScheduledToBeforeNow(long zoneId); + + List<BackupCompressionJobVO> listExecutingJobsByHostsAndStartTimeBefore(Object[] hostIds, Date date); + + void update(BackupCompressionJobVO job); +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupCompressionJobDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupCompressionJobDaoImpl.java new file mode 100644 index 000000000000..fd4e4bdabf32 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupCompressionJobDaoImpl.java @@ -0,0 +1,99 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+package org.apache.cloudstack.backup.dao; + +import com.cloud.utils.DateUtil; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import org.apache.cloudstack.backup.BackupCompressionJobType; +import org.apache.cloudstack.backup.BackupCompressionJobVO; + +import javax.annotation.PostConstruct; +import java.util.Date; +import java.util.List; + +public class BackupCompressionJobDaoImpl extends GenericDaoBase implements BackupCompressionJobDao { + private SearchBuilder executingBeforeAndHostInSearch; + private SearchBuilder scheduledAndNotStartedSearch; + + private SearchBuilder executingAndZoneIdAndTypeSearch; + + private static final String HOST_ID = "host_id"; + private static final String TYPE = "type"; + private static final String START_TIME = "start_time"; + private static final String SCHEDULED = "scheduled"; + private static final String ZONE_ID = "zone_id"; + + @PostConstruct + protected void init() { + executingBeforeAndHostInSearch = createSearchBuilder(); + executingBeforeAndHostInSearch.and(HOST_ID, executingBeforeAndHostInSearch.entity().getHostId(), SearchCriteria.Op.IN); + executingBeforeAndHostInSearch.and(START_TIME, executingBeforeAndHostInSearch.entity().getStartTime(), SearchCriteria.Op.LTEQ); + executingBeforeAndHostInSearch.done(); + + scheduledAndNotStartedSearch = createSearchBuilder(); + scheduledAndNotStartedSearch.and(SCHEDULED, scheduledAndNotStartedSearch.entity().getScheduledStartTime(), SearchCriteria.Op.LTEQ); + scheduledAndNotStartedSearch.and(START_TIME, scheduledAndNotStartedSearch.entity().getStartTime(), SearchCriteria.Op.NULL); + scheduledAndNotStartedSearch.and(ZONE_ID, scheduledAndNotStartedSearch.entity().getZoneId(), SearchCriteria.Op.EQ); + scheduledAndNotStartedSearch.done(); + + executingAndZoneIdAndTypeSearch = createSearchBuilder(); + 
executingAndZoneIdAndTypeSearch.and(START_TIME, executingAndZoneIdAndTypeSearch.entity().getStartTime(), SearchCriteria.Op.NNULL); + executingAndZoneIdAndTypeSearch.and(ZONE_ID, executingAndZoneIdAndTypeSearch.entity().getZoneId(), SearchCriteria.Op.EQ); + executingAndZoneIdAndTypeSearch.and(TYPE, executingAndZoneIdAndTypeSearch.entity().getType(), SearchCriteria.Op.EQ); + executingAndZoneIdAndTypeSearch.done(); + } + + @Override + public List listExecutingJobsByZoneIdAndJobType(long zoneId, BackupCompressionJobType type) { + SearchCriteria sc = executingAndZoneIdAndTypeSearch.create(); + sc.setParameters(TYPE, type); + sc.setParameters(ZONE_ID, zoneId); + + return listBy(sc); + } + + @Override + public List listWaitingJobsAndScheduledToBeforeNow(long zoneId) { + SearchCriteria sc = scheduledAndNotStartedSearch.create(); + + sc.setParameters(SCHEDULED, DateUtil.now()); + sc.setParameters(ZONE_ID, zoneId); + + Filter filter = new Filter(BackupCompressionJobVO.class, "scheduledStartTime", true); + return listBy(sc, filter); + } + + @Override + public List listExecutingJobsByHostsAndStartTimeBefore(Object[] hostIds, Date date) { + SearchCriteria sc = executingBeforeAndHostInSearch.create(); + sc.setParameters(HOST_ID, hostIds); + sc.setParameters(START_TIME, date); + + return listBy(sc); + } + + + @Override + @DB + public void update(BackupCompressionJobVO job) { + super.update(job.getId(), job); + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java index fd29da72c718..756eef66c735 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDaoImpl.java @@ -90,6 +90,8 @@ protected void init() { backupSearch.and("external_id", backupSearch.entity().getExternalId(), SearchCriteria.Op.EQ); backupSearch.and("backup_offering_id", 
backupSearch.entity().getBackupOfferingId(), SearchCriteria.Op.EQ); backupSearch.and("zone_id", backupSearch.entity().getZoneId(), SearchCriteria.Op.EQ); + backupSearch.and("status", backupSearch.entity().getStatus(), SearchCriteria.Op.IN); + backupSearch.and("created_before", backupSearch.entity().getDate(), SearchCriteria.Op.LT); backupSearch.done(); backupVmSearchInZone = createSearchBuilder(Long.class); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDetailsDao.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDetailsDao.java index 664650074bce..a94baad4278d 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDetailsDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDetailsDao.java @@ -23,4 +23,15 @@ public interface BackupDetailsDao extends GenericDao, ResourceDetailsDao { + String END_OF_CHAIN = "end_of_chain"; + + String CURRENT = "current"; + + String IMAGE_STORE_ID = "image_store_id"; + + String PARENT_ID = "parent_id"; + + String ISOLATED = "isolated"; + + void removeDetailsExcept(long backupId, String exception); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDetailsDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDetailsDaoImpl.java index 08c7192af909..5f257c23892c 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDetailsDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupDetailsDaoImpl.java @@ -17,13 +17,39 @@ package org.apache.cloudstack.backup.dao; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; import org.apache.cloudstack.backup.BackupDetailVO; import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; import org.springframework.stereotype.Component; +import javax.annotation.PostConstruct; + @Component public class BackupDetailsDaoImpl extends 
ResourceDetailsDaoBase implements BackupDetailsDao { + private SearchBuilder backupDetailSearch; + + private static final String BACKUP_ID = "backup_id"; + + private static final String KEY = "key"; + + @PostConstruct + protected void init() { + backupDetailSearch = createSearchBuilder(); + backupDetailSearch.and(BACKUP_ID, backupDetailSearch.entity().getResourceId(), SearchCriteria.Op.EQ); + backupDetailSearch.and(KEY, backupDetailSearch.entity().getName(), SearchCriteria.Op.NEQ); + backupDetailSearch.done(); + } + + @Override + public void removeDetailsExcept(long backupId, String exception) { + SearchCriteria sc = backupDetailSearch.create(); + sc.setParameters(BACKUP_ID, backupId); + sc.setParameters(KEY, exception); + super.expunge(sc); + } + @Override public void addDetail(long resourceId, String key, String value, boolean display) { super.addDetail(new BackupDetailVO(resourceId, key, value, display)); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDaoImpl.java index d9cf7b63680b..f43fc3308ca4 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupScheduleDaoImpl.java @@ -99,6 +99,7 @@ public BackupScheduleResponse newBackupScheduleResponse(BackupSchedule schedule) response.setSchedule(schedule.getSchedule()); response.setTimezone(schedule.getTimezone()); response.setMaxBackups(schedule.getMaxBackups()); + response.setIsolated(schedule.isIsolated()); if (schedule.getQuiesceVM() != null) { response.setQuiesceVM(schedule.getQuiesceVM()); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupDataStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupDataStoreDao.java new file mode 100644 index 000000000000..3948b273ab49 --- /dev/null +++ 
b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupDataStoreDao.java @@ -0,0 +1,33 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. +package org.apache.cloudstack.backup.dao; + +import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.backup.NativeBackupDataStoreVO; + +import java.util.List; + +public interface NativeBackupDataStoreDao extends GenericDao<NativeBackupDataStoreVO, Long> { + + List<NativeBackupDataStoreVO> listByBackupId(long backupId); + + NativeBackupDataStoreVO findByBackupIdAndVolumeId(long backupId, long volumeId); + + void expungeByBackupId(long backupId); + + void updateVolumeId(long oldVolumeId, long newVolumeId); +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupDataStoreDaoImpl.java new file mode 100644 index 000000000000..9f8fd9a0befa --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupDataStoreDaoImpl.java @@ -0,0 +1,74 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. 
The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. +package org.apache.cloudstack.backup.dao; + +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.UpdateBuilder; +import org.apache.cloudstack.backup.NativeBackupDataStoreVO; + +import javax.annotation.PostConstruct; +import java.util.List; + +public class NativeBackupDataStoreDaoImpl extends GenericDaoBase implements NativeBackupDataStoreDao { + + private SearchBuilder backupSearch; + + private static final String BACKUP_ID = "backup_id"; + private static final String VOLUME_ID = "volume_id"; + + @PostConstruct + protected void init() { + backupSearch = createSearchBuilder(); + backupSearch.and(BACKUP_ID, backupSearch.entity().getBackupId(), SearchCriteria.Op.EQ); + backupSearch.and(VOLUME_ID, backupSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); + backupSearch.done(); + } + + @Override + public List listByBackupId(long backupId) { + SearchCriteria sc = backupSearch.create(); + sc.setParameters(BACKUP_ID, backupId); + return listBy(sc); + } + + @Override + public NativeBackupDataStoreVO findByBackupIdAndVolumeId(long backupId, long volumeId) { + SearchCriteria sc = backupSearch.create(); + sc.setParameters(BACKUP_ID, backupId); + sc.setParameters(VOLUME_ID, volumeId); + return findOneBy(sc); + } + + @Override + public void expungeByBackupId(long backupId) { + SearchCriteria sc = 
backupSearch.create(); + sc.setParameters(BACKUP_ID, backupId); + expunge(sc); + } + + @Override + public void updateVolumeId(long oldVolumeId, long newVolumeId) { + SearchCriteria sc = backupSearch.create(); + sc.setParameters(VOLUME_ID, oldVolumeId); + NativeBackupDataStoreVO delta = createForUpdate(); + delta.setVolumeId(newVolumeId); + UpdateBuilder ub = getUpdateBuilder(delta); + update(ub, sc, null); + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupJoinDao.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupJoinDao.java new file mode 100644 index 000000000000..bf7395ee5aad --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupJoinDao.java @@ -0,0 +1,40 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+package org.apache.cloudstack.backup.dao; + +import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.backup.NativeBackupJoinVO; + +import java.util.Date; +import java.util.List; + +public interface NativeBackupJoinDao extends GenericDao { + + List listByBackedUpAndVmIdAndDateBeforeOrAfterOrderBy(long vmId, Date date, boolean before, boolean ascending); + + List listIncludingRemovedByVmIdAndBeforeDateOrderByCreatedDesc(long vmId, Date beforeDate); + + NativeBackupJoinVO findCurrent(long vmId); + + NativeBackupJoinVO findByParentId(long parentId); + + List listByImageStoreId(long imageStoreId); + + List listById(long id); + + List listByParentId(long parentId); +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupJoinDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupJoinDaoImpl.java new file mode 100644 index 000000000000..6165fb21f055 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupJoinDaoImpl.java @@ -0,0 +1,129 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup.dao; + +import com.cloud.utils.db.Filter; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import org.apache.cloudstack.backup.Backup; +import org.apache.cloudstack.backup.NativeBackupJoinVO; + +import javax.annotation.PostConstruct; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +public class NativeBackupJoinDaoImpl extends GenericDaoBase implements NativeBackupJoinDao { + + private static final String ID = "id"; + private static final String VM_ID = "vm_id"; + private static final String STATUS = "status"; + private static final String CREATED_BEFORE = "created_before"; + private static final String CREATED_AFTER = "created_after"; + private static final String CURRENT = "current"; + private static final String ISOLATED = "isolated"; + private static final String PARENT_ID = "parent_id"; + private static final String IMAGE_STORE_ID = "image_store_id"; + private SearchBuilder backupSearch; + private SearchBuilder allBackupsSearch; + + @PostConstruct + protected void init() { + backupSearch = createSearchBuilder(); + backupSearch.and(VM_ID, backupSearch.entity().getVmId(), SearchCriteria.Op.EQ); + backupSearch.and(STATUS, backupSearch.entity().getStatus(), SearchCriteria.Op.IN); + backupSearch.and(CREATED_BEFORE, backupSearch.entity().getDate(), SearchCriteria.Op.LT); + backupSearch.and(CREATED_AFTER, backupSearch.entity().getDate(), SearchCriteria.Op.GT); + backupSearch.and(CURRENT, backupSearch.entity().getCurrent(), SearchCriteria.Op.EQ); + backupSearch.and(PARENT_ID, backupSearch.entity().getParentId(), SearchCriteria.Op.EQ); + backupSearch.and(ISOLATED, backupSearch.entity().getIsolated(), SearchCriteria.Op.EQ); + backupSearch.groupBy(backupSearch.entity().getId()); + backupSearch.done(); + + allBackupsSearch = createSearchBuilder(); + allBackupsSearch.and(ID, allBackupsSearch.entity().getId(), 
SearchCriteria.Op.EQ); + allBackupsSearch.and(STATUS, allBackupsSearch.entity().getStatus(), SearchCriteria.Op.IN); + allBackupsSearch.and(PARENT_ID, allBackupsSearch.entity().getParentId(), SearchCriteria.Op.EQ); + allBackupsSearch.and(IMAGE_STORE_ID, allBackupsSearch.entity().getImageStoreId(), SearchCriteria.Op.EQ); + allBackupsSearch.done(); + } + @Override + public List listByBackedUpAndVmIdAndDateBeforeOrAfterOrderBy(long vmId, Date date, boolean before, boolean ascending) { + SearchCriteria sc = backupSearch.create(); + sc.setParameters(VM_ID, vmId); + sc.setParameters(STATUS, Backup.Status.BackedUp); + if (before) { + sc.setParameters(CREATED_BEFORE, date); + } else { + sc.setParameters(CREATED_AFTER, date); + } + sc.setParameters(ISOLATED, Boolean.FALSE.toString()); + Filter filter = new Filter(NativeBackupJoinVO.class, "date", ascending); + return new ArrayList<>(listBy(sc, filter)); + } + + @Override + public List listIncludingRemovedByVmIdAndBeforeDateOrderByCreatedDesc(long vmId, Date beforeDate) { + SearchCriteria sc = backupSearch.create(); + sc.setParameters(VM_ID, vmId); + sc.setParameters(STATUS, Backup.Status.BackedUp, Backup.Status.Removed); + sc.setParameters(CREATED_BEFORE, beforeDate); + sc.setParameters(ISOLATED, Boolean.FALSE.toString()); + Filter filter = new Filter(NativeBackupJoinVO.class, "date", false); + return new ArrayList<>(listIncludingRemovedBy(sc, filter)); + } + + @Override + public NativeBackupJoinVO findCurrent(long vmId) { + SearchCriteria sc = backupSearch.create(); + sc.setParameters(VM_ID, vmId); + sc.setParameters(CURRENT, Boolean.TRUE.toString()); + return findOneBy(sc); + } + + @Override + public NativeBackupJoinVO findByParentId(long parentId) { + SearchCriteria sc = backupSearch.create(); + sc.setParameters(PARENT_ID, parentId); + return findOneIncludingRemovedBy(sc); + } + + @Override + public List listByImageStoreId(long imageStoreId) { + SearchCriteria sc = allBackupsSearch.create(); + 
sc.setParameters(IMAGE_STORE_ID, imageStoreId); + sc.setParameters(STATUS, Backup.Status.BackedUp); + return listBy(sc); + } + + @Override + public List listById(long id) { + SearchCriteria sc = allBackupsSearch.create(); + sc.setParameters(ID, id); + sc.setParameters(STATUS, Backup.Status.BackedUp); + return listBy(sc); + } + + @Override + public List listByParentId(long parentId) { + SearchCriteria sc = allBackupsSearch.create(); + sc.setParameters(PARENT_ID, parentId); + sc.setParameters(STATUS, Backup.Status.BackedUp); + return listBy(sc); + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupOfferingDao.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupOfferingDao.java new file mode 100644 index 000000000000..0003dd5b81c8 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupOfferingDao.java @@ -0,0 +1,23 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup.dao; + +import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.backup.NativeBackupOfferingVO; + +public interface NativeBackupOfferingDao extends GenericDao { +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupOfferingDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupOfferingDaoImpl.java new file mode 100644 index 000000000000..6a459996e738 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupOfferingDaoImpl.java @@ -0,0 +1,23 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup.dao; + +import com.cloud.utils.db.GenericDaoBase; +import org.apache.cloudstack.backup.NativeBackupOfferingVO; + +public class NativeBackupOfferingDaoImpl extends GenericDaoBase<NativeBackupOfferingVO, Long> implements NativeBackupOfferingDao { +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupStoragePoolDao.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupStoragePoolDao.java new file mode 100644 index 000000000000..9bd662f1ed52 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupStoragePoolDao.java @@ -0,0 +1,33 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
+package org.apache.cloudstack.backup.dao; + +import com.cloud.utils.db.GenericDao; +import org.apache.cloudstack.backup.NativeBackupStoragePoolVO; + +import java.util.List; + +public interface NativeBackupStoragePoolDao extends GenericDao<NativeBackupStoragePoolVO, Long> { + + List<NativeBackupStoragePoolVO> listByBackupId(long backupId); + + NativeBackupStoragePoolVO findOneByVolumeId(long volumeId); + + void expungeByBackupId(long backupId); + + void expungeByVolumeId(long volumeId); +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupStoragePoolDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupStoragePoolDaoImpl.java new file mode 100644 index 000000000000..3eb023a73ad0 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/NativeBackupStoragePoolDaoImpl.java @@ -0,0 +1,70 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. 
package org.apache.cloudstack.backup.dao;

import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import org.apache.cloudstack.backup.NativeBackupStoragePoolVO;

import javax.annotation.PostConstruct;
import java.util.List;

/**
 * Default DAO implementation for {@link NativeBackupStoragePoolVO} rows
 * (table {@code native_backup_pool_ref}).
 *
 * A single reusable {@link SearchBuilder} carries both the backup-id and
 * volume-id conditions; each query sets only the parameter it needs, so the
 * unused condition is simply left unbound.
 */
public class NativeBackupStoragePoolDaoImpl extends GenericDaoBase<NativeBackupStoragePoolVO, Long> implements NativeBackupStoragePoolDao {

    private SearchBuilder<NativeBackupStoragePoolVO> backupSearch;

    private static final String BACKUP_ID = "backup_id";

    private static final String VOLUME_ID = "volume_id";

    /**
     * Builds the shared search once the bean is constructed. Both conditions
     * use {@link SearchCriteria.Op#EQ}; callers bind only one of them.
     */
    @PostConstruct
    protected void init() {
        backupSearch = createSearchBuilder();
        backupSearch.and(BACKUP_ID, backupSearch.entity().getBackupId(), SearchCriteria.Op.EQ);
        backupSearch.and(VOLUME_ID, backupSearch.entity().getVolumeId(), SearchCriteria.Op.EQ);
        backupSearch.done();
    }

    /**
     * Lists every delta reference belonging to the given backup.
     */
    @Override
    public List<NativeBackupStoragePoolVO> listByBackupId(long backupId) {
        SearchCriteria<NativeBackupStoragePoolVO> sc = backupSearch.create();
        sc.setParameters(BACKUP_ID, backupId);
        return listBy(sc);
    }

    /**
     * Returns one delta reference for the given volume, or {@code null} when
     * the volume has none.
     */
    @Override
    public NativeBackupStoragePoolVO findOneByVolumeId(long volumeId) {
        SearchCriteria<NativeBackupStoragePoolVO> sc = backupSearch.create();
        sc.setParameters(VOLUME_ID, volumeId);
        return findOneBy(sc);
    }

    /**
     * Hard-deletes every delta reference of the given backup.
     */
    @Override
    public void expungeByBackupId(long backupId) {
        SearchCriteria<NativeBackupStoragePoolVO> sc = backupSearch.create();
        sc.setParameters(BACKUP_ID, backupId);
        expunge(sc);
    }

    /**
     * Hard-deletes every delta reference of the given volume.
     */
    @Override
    public void expungeByVolumeId(long volumeId) {
        SearchCriteria<NativeBackupStoragePoolVO> sc = backupSearch.create();
        sc.setParameters(VOLUME_ID, volumeId);
        expunge(sc);
    }
}
b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java @@ -97,6 +97,8 @@ public interface SnapshotDataStoreDao extends GenericDao findByVolume(long snapshotId, long volumeId, DataStoreRole role); + void expungeBySnapshotIdAndStoreRole(long snapshotId, DataStoreRole role); + /** * List all snapshots in 'snapshot_store_ref' by volume and data store role. Therefore, it is possible to list all snapshots that are in the primary storage or in the secondary storage. */ diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java index cdf903407c17..47759561b8cb 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java @@ -63,7 +63,7 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase searchFilteringStoreIdEqStoreRoleEqStateNeqRefCntNeq; protected SearchBuilder searchFilteringStoreIdEqStateEqStoreRoleEqIdEqUpdateCountEqSnapshotIdEqVolumeIdEq; private SearchBuilder stateSearch; @@ -75,7 +75,9 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase storeSnapshotDownloadStatusSearch; private SearchBuilder searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull; private SearchBuilder searchFilterStateAndDownloadUrlNotNullAndDownloadUrlCreatedBefore; - private SearchBuilder searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq; + private SearchBuilder searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqPathEq; + + private SearchBuilder searchFilteringVolumeIdAndStateAndCreatedAfter; private SearchBuilder searchBySnapshotId; @@ -148,7 +150,7 @@ public boolean configure(String name, Map params) throws Configu idStateNeqSearch = createSearchBuilder(); idStateNeqSearch.and(SNAPSHOT_ID, 
idStateNeqSearch.entity().getSnapshotId(), SearchCriteria.Op.EQ); - idStateNeqSearch.and(STATE, idStateNeqSearch.entity().getState(), SearchCriteria.Op.NEQ); + idStateNeqSearch.and(STATE, idStateNeqSearch.entity().getState(), SearchCriteria.Op.NIN); idStateNeqSearch.done(); snapshotVOSearch = snapshotDao.createSearchBuilder(); @@ -192,17 +194,24 @@ public boolean configure(String name, Map params) throws Configu searchFilterStateAndDownloadUrlNotNullAndDownloadUrlCreatedBefore.and(URL_CREATED_BEFORE, searchFilterStateAndDownloadUrlNotNullAndDownloadUrlCreatedBefore.entity().getExtractUrlCreated(), SearchCriteria.Op.LT); searchFilterStateAndDownloadUrlNotNullAndDownloadUrlCreatedBefore.done(); - searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq = createSearchBuilder(); - searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.and(STATE, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.entity().getState(), SearchCriteria.Op.EQ); - searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.and(VOLUME_ID, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.entity().getVolumeId(), SearchCriteria.Op.EQ); - searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.and(STORE_ROLE, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.entity().getRole(), SearchCriteria.Op.EQ); - searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.and(STORE_ID, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.entity().getDataStoreId(), SearchCriteria.Op.IN); + searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqPathEq = createSearchBuilder(); + searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqPathEq.and(STATE, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqPathEq.entity().getState(), SearchCriteria.Op.EQ); + searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqPathEq.and(VOLUME_ID, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqPathEq.entity().getVolumeId(), SearchCriteria.Op.EQ); + searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqPathEq.and(STORE_ROLE, 
searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqPathEq.entity().getRole(), SearchCriteria.Op.EQ); + searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqPathEq.and(STORE_ID, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqPathEq.entity().getDataStoreId(), SearchCriteria.Op.IN); + searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqPathEq.and(INSTALL_PATH, searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqPathEq.entity().getInstallPath(), SearchCriteria.Op.EQ); + searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqPathEq.done(); searchBySnapshotId = createSearchBuilder(); searchBySnapshotId.and(SNAPSHOT_ID, searchBySnapshotId.entity().getSnapshotId(), SearchCriteria.Op.EQ); searchBySnapshotId.and(STATE, searchBySnapshotId.entity().getState(), SearchCriteria.Op.EQ); searchBySnapshotId.done(); + searchFilteringVolumeIdAndStateAndCreatedAfter = createSearchBuilder(); + searchFilteringVolumeIdAndStateAndCreatedAfter.and(STATE, searchFilteringVolumeIdAndStateAndCreatedAfter.entity().getState(), SearchCriteria.Op.EQ); + searchFilteringVolumeIdAndStateAndCreatedAfter.and(VOLUME_ID, searchFilteringVolumeIdAndStateAndCreatedAfter.entity().getVolumeId(), SearchCriteria.Op.EQ); + searchFilteringVolumeIdAndStateAndCreatedAfter.and(CREATED, searchFilteringVolumeIdAndStateAndCreatedAfter.entity().getCreated(), SearchCriteria.Op.GT); + searchFilteringVolumeIdAndStateAndCreatedAfter.done(); return true; } @@ -350,7 +359,7 @@ public SnapshotDataStoreVO findParent(DataStoreRole role, Long storeId, Long zon if (kvmIncrementalSnapshot && Hypervisor.HypervisorType.KVM.equals(hypervisorType)) { sc = searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqKVMCheckpointNotNull.create(); } else { - sc = searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEq.create(); + sc = searchFilteringStoreIdInVolumeIdEqStoreRoleEqStateEqPathEq.create(); } sc.setParameters(VOLUME_ID, volumeId); @@ -454,6 +463,12 @@ public SnapshotDataStoreVO findBySnapshotIdInAnyState(long snapshotId, DataStore 
return findOneBy(sc); } + @Override + public void expungeBySnapshotIdAndStoreRole(long snapshotId, DataStoreRole role) { + SearchCriteria sc = createSearchCriteriaBySnapshotIdAndStoreRole(snapshotId, role); + expunge(sc); + } + @Override public List listAllByVolumeAndDataStore(long volumeId, DataStoreRole role) { SearchCriteria sc = searchFilteringStoreIdEqStateEqStoreRoleEqIdEqUpdateCountEqSnapshotIdEqVolumeIdEq.create(); diff --git a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml index 0656d5e3c440..9714b6adfb64 100644 --- a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml +++ b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-daos-context.xml @@ -272,6 +272,11 @@ + + + + + diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42210to42300.sql b/engine/schema/src/main/resources/META-INF/db/schema-42210to42300.sql index d330ecd0c0d5..fb0635671650 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42210to42300.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42210to42300.sql @@ -49,3 +49,68 @@ CREATE TABLE IF NOT EXISTS `cloud`.`webhook_filter` ( INDEX `i_webhook_filter__webhook_id`(`webhook_id`), CONSTRAINT `fk_webhook_filter__webhook_id` FOREIGN KEY(`webhook_id`) REFERENCES `webhook`(`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; + +-- KNIB + +CREATE TABLE IF NOT EXISTS `cloud`.`native_backup_pool_ref` ( + `id` bigint NOT NULL UNIQUE AUTO_INCREMENT, + `backup_id` bigint unsigned NOT NULL COMMENT 'The backup ID. Foreign key that points to the backups table.', + `storage_pool_id` bigint unsigned NOT NULL COMMENT 'The storage ID. Foreign key that points to the storage_pool table.', + `volume_id` bigint unsigned NOT NULL COMMENT 'The volumes ID. 
Foreign key that points to the volumes table.', + `backup_delta_path` varchar(255) COMMENT 'Path of the created delta.', + `backup_parent_path` varchar(255) COMMENT 'Path of the created delta parent.', + PRIMARY KEY (`id`), + CONSTRAINT `fk_native_backup_pool_ref__backup_id` FOREIGN KEY (`backup_id`) REFERENCES `backups`(`id`), + CONSTRAINT `fk_native_backup_pool_ref__storage_pool_id` FOREIGN KEY (`storage_pool_id`) REFERENCES `storage_pool`(`id`), + CONSTRAINT `fk_native_backup_pool_ref__volume_id` FOREIGN KEY (`volume_id`) REFERENCES `volumes`(`id`) + ); + +CREATE TABLE IF NOT EXISTS `cloud`.`native_backup_store_ref` ( + `id` bigint NOT NULL UNIQUE AUTO_INCREMENT, + `backup_id` bigint unsigned NOT NULL COMMENT 'The backup ID. Foreign key that points to the backups table.', + `volume_id` bigint unsigned NOT NULL COMMENT 'The volume ID. Foreign key that points to the volumes table.', + `device_id` bigint unsigned COMMENT 'device ID of the volume', + `path` varchar(255) COMMENT 'Path of the backup.', + PRIMARY KEY (`id`), + CONSTRAINT `fk_native_backup_store_ref__backup_id` FOREIGN KEY (`backup_id`) REFERENCES `backups`(`id`), + CONSTRAINT `fk_native_backup_store_ref__volume_id` FOREIGN KEY (`volume_id`) REFERENCES `volumes`(`id`) + ); + + +CREATE TABLE IF NOT EXISTS `cloud`.`native_backup_offering` ( + `id` bigint NOT NULL UNIQUE AUTO_INCREMENT, + `uuid` varchar(40) NOT NULL, + `name` varchar(255) NOT NULL, + `compress` tinyint(1) UNSIGNED NOT NULL, + `validate` tinyint(1) UNSIGNED NOT NULL, + `allow_quick_restore` tinyint(1) UNSIGNED NOT NULL, + `allow_extract_file` tinyint(1) UNSIGNED NOT NULL, + `backup_chain_size` INT, + `compression_library` varchar(55) NOT NULL DEFAULT 'zstd', + `created` datetime NOT NULL, + `removed` datetime, + PRIMARY KEY (`id`) + ); + +CREATE TABLE IF NOT EXISTS `cloud`.`backup_compression_job` ( + `id` bigint NOT NULL UNIQUE AUTO_INCREMENT, + `backup_id` bigint unsigned NOT NULL COMMENT 'The backup ID. 
Foreign key that points to the backups table.', + `instance_id` bigint unsigned NOT NULL COMMENT 'The instance ID. Foreign key that points to the vm_instance table.', + `host_id` bigint unsigned COMMENT 'The host ID that is executing the compression. Foreign key that points to the host table.', + `zone_id` bigint unsigned NOT NULL COMMENT 'The zone ID of the where the VM is. Foreign key that points to the data_center table', + `attempts` int(32) unsigned NOT NULL DEFAULT 0, + `type` varchar(55) NOT NULL, + `created` datetime NOT NULL, + `scheduled_start_time` datetime NOT NULL, + `start_time` datetime, + `removed` datetime, + PRIMARY KEY (`id`), + CONSTRAINT `fk_backup_compression_job__backup_id` FOREIGN KEY (`backup_id`) REFERENCES `backups`(`id`), + CONSTRAINT `fk_backup_compression_job__instance_id` FOREIGN KEY (`instance_id`) REFERENCES `vm_instance`(`id`), + CONSTRAINT `fk_backup_compression_job__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`), + CONSTRAINT `fk_backup_compression_job__zone_id` FOREIGN KEY (`zone_id`) REFERENCES `data_center`(`id`) + ); + +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backups', 'uncompressed_size', 'bigint unsigned'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backups', 'compression_status', 'varchar(55)'); +CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backup_schedule', 'isolated', 'TINYINT(1) NOT NULL DEFAULT 0 COMMENT "Whether the scheduled backups will be isolated or not."'); diff --git a/engine/schema/src/main/resources/META-INF/db/views/cloud.native_backup_view.sql b/engine/schema/src/main/resources/META-INF/db/views/cloud.native_backup_view.sql new file mode 100644 index 000000000000..8ed51e24c129 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/views/cloud.native_backup_view.sql @@ -0,0 +1,47 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. 
See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +-- VIEW `cloud`.`native_backup_view`; + +DROP VIEW IF EXISTS `cloud`.`native_backup_view`; +CREATE VIEW `cloud`.`native_backup_view` AS +SELECT b.id, + b.uuid, + b.vm_id, + b.backed_volumes, + b.type, + b.date, + b.status, + b.compression_status, + b.backup_offering_id, + b.size, + b.protected_size, + b.zone_id, + MAX(CASE WHEN bd.name = 'image_store_id' THEN bd.value END) image_store_id, + MAX(CASE WHEN bd.name = 'parent_id' THEN bd.value END) parent_id, + MAX(CASE WHEN bd.name = 'end_of_chain' THEN bd.value END) end_of_chain, + MAX(CASE WHEN bd.name = 'current' THEN bd.value END) current, + COALESCE(MAX(CASE WHEN bd.name = 'isolated' THEN bd.value END), 'false') isolated, + nbpr.volume_id, + nbsr.path image_store_path +FROM backups b +LEFT JOIN backup_details bd ON b.id = bd.backup_id +LEFT JOIN backup_offering bo ON b.backup_offering_id = bo.id +LEFT JOIN native_backup_store_ref nbsr ON b.id = nbsr.backup_id +LEFT JOIN native_backup_pool_ref nbpr ON nbpr.volume_id = nbsr.volume_id +WHERE bo.provider='knib' +GROUP BY b.id, nbsr.volume_id; diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java 
b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 61d2caa0e06f..0b38c62a992a 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -36,6 +36,7 @@ import com.cloud.agent.api.CheckVirtualMachineCommand; import com.cloud.agent.api.PrepareForMigrationAnswer; import com.cloud.resource.ResourceManager; +import org.apache.cloudstack.backup.NativeBackupService; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; @@ -207,6 +208,9 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { @Inject ResourceManager resourceManager; + @Inject + private NativeBackupService nativeBackupService; + @Override public StrategyPriority canHandle(DataObject srcData, DataObject destData) { if (srcData instanceof SnapshotInfo) { @@ -2373,6 +2377,7 @@ private void handlePostMigration(boolean success, Map sr _snapshotDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId()); _snapshotDataStoreDao.updateVolumeIds(srcVolumeInfo.getId(), destVolumeInfo.getId()); } + nativeBackupService.updateVolumeId(srcVolumeInfo.getId(), destVolumeInfo.getId()); } else { try { diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java index 95345bdf9e0e..c7d369cf408f 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java @@ -30,6 
+30,7 @@ import com.cloud.storage.Volume; import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.vm.VirtualMachine; +import org.apache.cloudstack.backup.NativeBackupService; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService; @@ -116,6 +117,8 @@ public class SnapshotServiceImpl implements SnapshotService { ConfigurationDao _configDao; @Inject HostDao hostDao; + @Inject + private NativeBackupService nativeBackupService; @Inject private HeuristicRuleHelper heuristicRuleHelper; @@ -603,6 +606,7 @@ protected Void revertSnapshotCallback(AsyncCallbackDispatcher volumeInfoToSnapshotObjectMap = new HashMap<>(); @@ -137,11 +152,13 @@ public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { mergeOldSiblingWithOldParentIfOldParentIsDead(vmSnapshotDao.findByIdIncludingRemoved(vmSnapshotBeingDeleted.getParent()), userVm, hostId, volumeTOs); } else if (!isCurrent && numberOfChildren == 1) { VMSnapshotVO childSnapshot = snapshotChildren.get(0); - volumeSnapshotVos = mergeSnapshots(vmSnapshotBeingDeleted, childSnapshot, userVm, volumeTOs, hostId); + volumeSnapshotVos = mergeSnapshots(vmSnapshotBeingDeleted, childSnapshot, userVm, hostId); } + Date removedDate = DateUtil.now(); for (SnapshotVO snapshotVO : volumeSnapshotVos) { snapshotVO.setState(Snapshot.State.Destroyed); + snapshotVO.setRemoved(removedDate); snapshotDao.update(snapshotVO.getId(), snapshotVO); } @@ -179,7 +196,9 @@ public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { transitStateWithoutThrow(vmSnapshotBeingReverted, VMSnapshot.Event.RevertRequested); - List volumeSnapshots = getVolumeSnapshotsAssociatedWithVmSnapshot(vmSnapshotBeingReverted); + nativeBackupService.prepareVmForSnapshotRevert(vmSnapshot); + + List volumeSnapshots = 
vmSnapshotHelper.getVolumeSnapshotsAssociatedWithKvmDiskOnlyVmSnapshot(vmSnapshotBeingReverted.getId()); List volumeSnapshotTos = volumeSnapshots.stream() .map(snapshot -> (SnapshotObjectTO) snapshotDataFactory.getSnapshot(snapshot.getSnapshotId(), snapshot.getDataStoreId(), DataStoreRole.Primary).getTO()) .collect(Collectors.toList()); @@ -268,7 +287,7 @@ private void mergeOldSiblingWithOldParentIfOldParentIsDead(VMSnapshotVO oldParen VMSnapshotVO oldSibling = oldSiblings.get(0); logger.debug("Merging VM snapshot [{}] with [{}] as the former was hidden and only the latter depends on it.", oldParent.getUuid(), oldSibling.getUuid()); - snapshotVos = mergeSnapshots(oldParent, oldSibling, userVm, volumeTOs, hostId); + snapshotVos = mergeSnapshots(oldParent, oldSibling, userVm, hostId); } for (SnapshotVO snapshotVO : snapshotVos) { @@ -338,8 +357,8 @@ public StrategyPriority canHandle(Long vmId, Long rootPoolId, boolean snapshotMe } BackupOfferingVO backupOffering = backupOfferingDao.findById(vm.getBackupOfferingId()); - if (backupOffering != null) { - logger.debug("{} as the VM has a backup offering. This strategy does not support snapshots on VMs with current backup providers.", cantHandleLog); + if (backupOffering != null && !backupOffering.getProvider().equals("knib")) { + logger.debug("{} as the VM has a backup offering for a provider that is not supported. 
This strategy only supports the knib backup provider.", cantHandleLog); return StrategyPriority.CANT_HANDLE; } @@ -347,7 +366,7 @@ public StrategyPriority canHandle(Long vmId, Long rootPoolId, boolean snapshotMe } private List deleteSnapshot(VMSnapshotVO vmSnapshotVO, Long hostId) { - List volumeSnapshots = getVolumeSnapshotsAssociatedWithVmSnapshot(vmSnapshotVO); + List volumeSnapshots = vmSnapshotHelper.getVolumeSnapshotsAssociatedWithKvmDiskOnlyVmSnapshot((vmSnapshotVO.getId())); List volumeSnapshotTOList = volumeSnapshots.stream() .map(snapshotDataStoreVO -> snapshotDataFactory.getSnapshot(snapshotDataStoreVO.getSnapshotId(), snapshotDataStoreVO.getDataStoreId(), DataStoreRole.Primary).getTO()) .collect(Collectors.toList()); @@ -368,7 +387,7 @@ private List deleteSnapshot(VMSnapshotVO vmSnapshotVO, Long hostId) return snapshotVOList; } - private List mergeSnapshots(VMSnapshotVO vmSnapshotVO, VMSnapshotVO childSnapshot, UserVmVO userVm, List volumeObjectTOS, Long hostId) { + private List mergeSnapshots(VMSnapshotVO vmSnapshotVO, VMSnapshotVO childSnapshot, UserVmVO userVm, Long hostId) { logger.debug("Merging VM snapshot [{}] with its child [{}].", vmSnapshotVO.getUuid(), childSnapshot.getUuid()); List snapshotGrandChildren = vmSnapshotDao.listByParentAndStateIn(childSnapshot.getId(), VMSnapshot.State.Ready, VMSnapshot.State.Hidden); @@ -378,18 +397,10 @@ private List mergeSnapshots(VMSnapshotVO vmSnapshotVO, VMSnapshotVO removeCurrentBackingChainSnapshotFromVmSnapshotList(snapshotGrandChildren, userVm); } - List snapshotMergeTreeToList = generateSnapshotMergeTrees(vmSnapshotVO, childSnapshot, snapshotGrandChildren); + List deltaMergeTreeTOs = generateDeltaMergeTrees(vmSnapshotVO, childSnapshot, snapshotGrandChildren, + !userVm.getState().equals(VirtualMachine.State.Running)); - if (childSnapshot.getCurrent() && !VirtualMachine.State.Running.equals(userVm.getState())) { - for (VolumeObjectTO volumeObjectTO : volumeObjectTOS) { - 
snapshotMergeTreeToList.stream().filter(snapshotTree -> Objects.equals(((SnapshotObjectTO) snapshotTree.getParent()).getVolume().getId(), volumeObjectTO.getId())) - .findFirst() - .orElseThrow(() -> new CloudRuntimeException(String.format("Failed to find volume snapshot for volume [%s].", volumeObjectTO.getUuid()))) - .addGrandChild(volumeObjectTO); - } - } - - MergeDiskOnlyVmSnapshotCommand mergeDiskOnlyVMSnapshotCommand = new MergeDiskOnlyVmSnapshotCommand(snapshotMergeTreeToList, userVm.getState(), userVm.getName()); + MergeDiskOnlyVmSnapshotCommand mergeDiskOnlyVMSnapshotCommand = new MergeDiskOnlyVmSnapshotCommand(deltaMergeTreeTOs, userVm.getState().equals(VirtualMachine.State.Running), userVm.getName()); Answer answer = agentMgr.easySend(hostId, mergeDiskOnlyVMSnapshotCommand); if (answer == null || !answer.getResult()) { throw new CloudRuntimeException(String.format("Failed to merge VM snapshot [%s] due to %s.", vmSnapshotVO.getUuid(), answer != null ? answer.getDetails() : "Communication failure")); @@ -397,15 +408,23 @@ private List mergeSnapshots(VMSnapshotVO vmSnapshotVO, VMSnapshotVO logger.debug("Updating metadata of VM snapshot [{}] and its child [{}].", vmSnapshotVO.getUuid(), childSnapshot.getUuid()); List snapshotVOList = new ArrayList<>(); - for (SnapshotMergeTreeTO snapshotMergeTreeTO : snapshotMergeTreeToList) { - SnapshotObjectTO childTO = (SnapshotObjectTO) snapshotMergeTreeTO.getChild(); - SnapshotObjectTO parentTO = (SnapshotObjectTO) snapshotMergeTreeTO.getParent(); - - SnapshotDataStoreVO childSnapshotDataStoreVO = snapshotDataStoreDao.findBySnapshotIdInAnyState(childTO.getId(), DataStoreRole.Primary); - childSnapshotDataStoreVO.setInstallPath(parentTO.getPath()); - snapshotDataStoreDao.update(childSnapshotDataStoreVO.getId(), childSnapshotDataStoreVO); + for (DeltaMergeTreeTO deltaMergeTreeTO : deltaMergeTreeTOs) { + DataTO childTO = deltaMergeTreeTO.getChild(); + SnapshotObjectTO parentTO = (SnapshotObjectTO) 
deltaMergeTreeTO.getParent(); + + if (childTO instanceof BackupDeltaTO) { + NativeBackupStoragePoolVO backupDelta = nativeBackupStoragePoolDao.findOneByVolumeId(parentTO.getVolume().getVolumeId()); + backupDelta.setBackupDeltaParentPath(parentTO.getPath()); + logger.debug("The child was also a KNIB backup delta, will update the backup delta metadata. Updating backupDeltaParentPath of backupDelta [{}] to [{}].", backupDelta.getId(), parentTO.getPath()); + nativeBackupStoragePoolDao.update(backupDelta.getId(), backupDelta); + } else { + SnapshotDataStoreVO childSnapshotDataStoreVO = snapshotDataStoreDao.findBySnapshotIdInAnyState(childTO.getId(), DataStoreRole.Primary); + childSnapshotDataStoreVO.setInstallPath(parentTO.getPath()); + logger.debug("Updating the child path [{}] to [{}].", childSnapshotDataStoreVO.getId(), parentTO.getPath()); + snapshotDataStoreDao.update(childSnapshotDataStoreVO.getId(), childSnapshotDataStoreVO); + } - snapshotDataStoreDao.expungeReferenceBySnapshotIdAndDataStoreRole(parentTO.getId(), childSnapshotDataStoreVO.getDataStoreId(), DataStoreRole.Primary); + snapshotDataStoreDao.expungeBySnapshotIdAndStoreRole(parentTO.getId(), DataStoreRole.Primary); snapshotVOList.add(snapshotDao.findById(parentTO.getId())); } @@ -417,18 +436,31 @@ private List mergeSnapshots(VMSnapshotVO vmSnapshotVO, VMSnapshotVO private List mergeCurrentDeltaOnSnapshot(VMSnapshotVO vmSnapshotVo, UserVmVO userVmVO, Long hostId, List volumeObjectTOS) { logger.debug("Merging VM snapshot [{}] with the current volume delta.", vmSnapshotVo.getUuid()); - List snapshotMergeTreeTOList = new ArrayList<>(); - List volumeSnapshots = getVolumeSnapshotsAssociatedWithVmSnapshot(vmSnapshotVo); + List deltaMergeTreeTOs = new ArrayList<>(); + List volumeSnapshots = vmSnapshotHelper.getVolumeSnapshotsAssociatedWithKvmDiskOnlyVmSnapshot(vmSnapshotVo.getId()); for (VolumeObjectTO volumeObjectTO : volumeObjectTOS) { SnapshotDataStoreVO volumeParentSnapshot = 
volumeSnapshots.stream().filter(snapshot -> Objects.equals(snapshot.getVolumeId(), volumeObjectTO.getId())) .findFirst() .orElseThrow(() -> new CloudRuntimeException(String.format("Failed to find volume snapshot for volume [%s].", volumeObjectTO.getUuid()))); DataTO parentSnapshot = snapshotDataFactory.getSnapshot(volumeParentSnapshot.getSnapshotId(), volumeParentSnapshot.getDataStoreId(), DataStoreRole.Primary).getTO(); - snapshotMergeTreeTOList.add(new SnapshotMergeTreeTO(parentSnapshot, volumeObjectTO, new ArrayList<>())); + NativeBackupStoragePoolVO backupDelta = nativeBackupStoragePoolDao.findOneByVolumeId(volumeObjectTO.getVolumeId()); + + if (backupDelta != null && backupDelta.getBackupDeltaPath().equals(volumeObjectTO.getPath())) { + logger.debug("The current volume delta is also a KNIB backup delta. Will merge the snapshot delta of volume [{}] with the parent backup delta at [{}].", + volumeObjectTO.getUuid(), backupDelta.getBackupDeltaParentPath()); + BackupDeltaTO childTo = new BackupDeltaTO(volumeObjectTO.getDataStore(), Hypervisor.HypervisorType.KVM, backupDelta.getBackupDeltaParentPath()); + ArrayList grandChildren = new ArrayList<>(); + if (userVmVO.getState().equals(VirtualMachine.State.Stopped)) { + grandChildren.add(new BackupDeltaTO(volumeObjectTO.getDataStore(), Hypervisor.HypervisorType.KVM, backupDelta.getBackupDeltaPath())); + } + deltaMergeTreeTOs.add(new DeltaMergeTreeTO(volumeObjectTO, parentSnapshot, childTo, grandChildren)); + } else { + deltaMergeTreeTOs.add(new DeltaMergeTreeTO(volumeObjectTO, parentSnapshot, volumeObjectTO, new ArrayList<>())); + } } - MergeDiskOnlyVmSnapshotCommand mergeDiskOnlyVMSnapshotCommand = new MergeDiskOnlyVmSnapshotCommand(snapshotMergeTreeTOList, userVmVO.getState(), userVmVO.getName()); + MergeDiskOnlyVmSnapshotCommand mergeDiskOnlyVMSnapshotCommand = new MergeDiskOnlyVmSnapshotCommand(deltaMergeTreeTOs, userVmVO.getState().equals(VirtualMachine.State.Running), userVmVO.getName()); Answer answer = 
agentMgr.easySend(hostId, mergeDiskOnlyVMSnapshotCommand); if (answer == null || !answer.getResult()) { @@ -437,13 +469,20 @@ private List mergeCurrentDeltaOnSnapshot(VMSnapshotVO vmSnapshotVo, logger.debug("Updating metadata of VM snapshot [{}].", vmSnapshotVo.getUuid()); List snapshotVOList = new ArrayList<>(); - for (SnapshotMergeTreeTO snapshotMergeTreeTO : snapshotMergeTreeTOList) { - VolumeObjectTO volumeObjectTO = (VolumeObjectTO) snapshotMergeTreeTO.getChild(); - SnapshotObjectTO parentTO = (SnapshotObjectTO) snapshotMergeTreeTO.getParent(); - - VolumeVO volumeVO = volumeDao.findById(volumeObjectTO.getId()); - volumeVO.setPath(parentTO.getPath()); - volumeDao.update(volumeVO.getId(), volumeVO); + for (DeltaMergeTreeTO deltaMergeTreeTO : deltaMergeTreeTOs) { + DataTO dataTO = deltaMergeTreeTO.getChild(); + SnapshotObjectTO parentTO = (SnapshotObjectTO) deltaMergeTreeTO.getParent(); + VolumeVO volumeVO = volumeDao.findById(parentTO.getVolume().getId()); + + if (dataTO instanceof BackupDeltaTO) { + logger.debug("The child of deltaMergeTree [{}] is a backupDeltaTO, thus, we will update the backup delta metadata.", deltaMergeTreeTO); + NativeBackupStoragePoolVO backupDelta = nativeBackupStoragePoolDao.findOneByVolumeId(parentTO.getVolume().getVolumeId()); + backupDelta.setBackupDeltaParentPath(parentTO.getPath()); + nativeBackupStoragePoolDao.update(backupDelta.getId(), backupDelta); + } else { + volumeVO.setPath(parentTO.getPath()); + volumeDao.update(volumeVO.getId(), volumeVO); + } snapshotDataStoreDao.expungeReferenceBySnapshotIdAndDataStoreRole(parentTO.getId(), volumeVO.getPoolId(), DataStoreRole.Primary); snapshotVOList.add(snapshotDao.findById(parentTO.getId())); @@ -583,55 +622,68 @@ private long createVolumeSnapshotMetadataAndCalculateVirtualSize(VMSnapshot vmSn return virtualSize; } - private List generateSnapshotMergeTrees(VMSnapshotVO parent, VMSnapshotVO child, List grandChildren) throws NoSuchElementException { + /** + * Generates the delta merge 
trees, taking native backups into account. + * */ + private List generateDeltaMergeTrees(VMSnapshotVO parent, VMSnapshotVO child, List grandChildren, boolean stoppedVm) throws NoSuchElementException { logger.debug("Generating list of Snapshot Merge Trees for the merge process of VM Snapshot [{}].", parent.getUuid()); - List snapshotMergeTrees = new ArrayList<>(); - List parentVolumeSnapshots = getVolumeSnapshotsAssociatedWithVmSnapshot(parent); - List childVolumeSnapshots = getVolumeSnapshotsAssociatedWithVmSnapshot(child); + List snapshotMergeTrees = new ArrayList<>(); + List parentVolumeSnapshots = vmSnapshotHelper.getVolumeSnapshotsAssociatedWithKvmDiskOnlyVmSnapshot(parent.getId()); + List childVolumeSnapshots = vmSnapshotHelper.getVolumeSnapshotsAssociatedWithKvmDiskOnlyVmSnapshot(child.getId()); List grandChildrenVolumeSnapshots = new ArrayList<>(); for (VMSnapshotVO grandChild : grandChildren) { - grandChildrenVolumeSnapshots.addAll(getVolumeSnapshotsAssociatedWithVmSnapshot(grandChild)); + grandChildrenVolumeSnapshots.addAll(vmSnapshotHelper.getVolumeSnapshotsAssociatedWithKvmDiskOnlyVmSnapshot(grandChild.getId())); } for (SnapshotDataStoreVO parentSnapshotDataStoreVO : parentVolumeSnapshots) { - DataTO parentTO = snapshotDataFactory.getSnapshot(parentSnapshotDataStoreVO.getSnapshotId(), parentSnapshotDataStoreVO.getDataStoreId(), DataStoreRole.Primary).getTO(); + SnapshotObjectTO parentTO = (SnapshotObjectTO) snapshotDataFactory.getSnapshot(parentSnapshotDataStoreVO.getSnapshotId(), parentSnapshotDataStoreVO.getDataStoreId(), DataStoreRole.Primary).getTO(); + VolumeObjectTO volumeObjectTO = parentTO.getVolume(); - DataTO childTO = childVolumeSnapshots.stream() + SnapshotDataStoreVO childVO = childVolumeSnapshots.stream() .filter(childSnapshot -> Objects.equals(parentSnapshotDataStoreVO.getVolumeId(), childSnapshot.getVolumeId())) - .map(snapshotDataStoreVO -> snapshotDataFactory.getSnapshot(snapshotDataStoreVO.getSnapshotId(), 
snapshotDataStoreVO.getDataStoreId(), DataStoreRole.Primary).getTO()) .findFirst().orElseThrow(() -> new CloudRuntimeException(String.format("Could not find child snapshot of parent [%s].", parentSnapshotDataStoreVO.getSnapshotId()))); - List grandChildrenTOList = grandChildrenVolumeSnapshots.stream() - .filter(grandChildSnapshot -> Objects.equals(parentSnapshotDataStoreVO.getVolumeId(), grandChildSnapshot.getVolumeId())) - .map(snapshotDataStoreVO -> snapshotDataFactory.getSnapshot(snapshotDataStoreVO.getSnapshotId(), snapshotDataStoreVO.getDataStoreId(), DataStoreRole.Primary).getTO()) - .collect(Collectors.toList()); + NativeBackupStoragePoolVO backupDelta = nativeBackupStoragePoolDao.findOneByVolumeId(childVO.getVolumeId()); + List grandChildrenTOList = new ArrayList<>(); + DataTO childTO = getChildAndGrandChildren(child, stoppedVm, parentSnapshotDataStoreVO, backupDelta, childVO, volumeObjectTO, grandChildrenTOList, + grandChildrenVolumeSnapshots); - snapshotMergeTrees.add(new SnapshotMergeTreeTO(parentTO, childTO, grandChildrenTOList)); + snapshotMergeTrees.add(new DeltaMergeTreeTO(volumeObjectTO, parentTO, childTO, grandChildrenTOList)); } - logger.debug("Generated the following list of Snapshot Merge Trees for the VM snapshot [{}]: [{}].", parent.getUuid(), snapshotMergeTrees); + logger.debug(String.format("Generated the following list of Snapshot Merge Trees for the VM snapshot [%s]: [%s].", parent.getUuid(), snapshotMergeTrees)); return snapshotMergeTrees; } /** - * For a given {@code VMSnapshotVO}, populates the {@code associatedVolumeSnapshots} list with all the volume snapshots that are - * part of the VMSnapshot. - * @param vmSnapshot the VMSnapshotVO that will have its size calculated - * @return the list that will be populated with the volume snapshots associated with the VM snapshot. + * Gets the correct children and grandchildren, taking Native backups into account. 
* */ - private List getVolumeSnapshotsAssociatedWithVmSnapshot(VMSnapshotVO vmSnapshot) { - List associatedVolumeSnapshots = new ArrayList<>(); - List snapshotDetailList = vmSnapshotDetailsDao.findDetails(vmSnapshot.getId(), KVM_FILE_BASED_STORAGE_SNAPSHOT); - for (VMSnapshotDetailsVO vmSnapshotDetailsVO : snapshotDetailList) { - SnapshotDataStoreVO snapshot = snapshotDataStoreDao.findOneBySnapshotAndDatastoreRole(Long.parseLong(vmSnapshotDetailsVO.getValue()), DataStoreRole.Primary); - if (snapshot == null) { - throw new CloudRuntimeException(String.format("Could not find snapshot for VM snapshot [%s].", vmSnapshot.getUuid())); + private DataTO getChildAndGrandChildren(VMSnapshotVO child, boolean stoppedVm, SnapshotDataStoreVO parentSnapshotDataStoreVO, NativeBackupStoragePoolVO backupDelta, + SnapshotDataStoreVO childVO, VolumeObjectTO volumeObjectTO, List grandChildrenTOList, List grandChildrenVolumeSnapshots) { + + DataTO childTO; + if (backupDelta != null && backupDelta.getBackupDeltaPath().equals(childVO.getInstallPath())) { + logger.debug("The child snapshot delta is also a backup delta. 
We will set the backup delta parent path [{}] as the child and the backup delta path [{}] " + + "as the grand-child.", backupDelta.getBackupDeltaParentPath(), backupDelta.getBackupDeltaPath()); + childTO = new BackupDeltaTO(volumeObjectTO.getDataStore(), Hypervisor.HypervisorType.KVM, backupDelta.getBackupDeltaParentPath()); + if (!child.getCurrent() && stoppedVm) { + grandChildrenTOList.add(new BackupDeltaTO(volumeObjectTO.getDataStore(), Hypervisor.HypervisorType.KVM, backupDelta.getBackupDeltaPath())); } - associatedVolumeSnapshots.add(snapshot); + } else { + childTO = snapshotDataFactory.getSnapshot(childVO.getSnapshotId(), childVO.getDataStoreId(), DataStoreRole.Primary).getTO(); + grandChildrenTOList.addAll(grandChildrenVolumeSnapshots.stream() + .filter(grandChildSnapshot -> Objects.equals(parentSnapshotDataStoreVO.getVolumeId(), grandChildSnapshot.getVolumeId())) + .map(snapshotDataStoreVO -> snapshotDataFactory.getSnapshot(snapshotDataStoreVO.getSnapshotId(), snapshotDataStoreVO.getDataStoreId(), DataStoreRole.Primary).getTO()) + .collect(Collectors.toList())); + } + + if (child.getCurrent() && stoppedVm) { + grandChildrenTOList.add(volumeObjectTO); } - return associatedVolumeSnapshots; + + return childTO; } /** diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java index e3f28a7012c2..7a2046333cb2 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java @@ -37,6 +37,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import 
org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.commons.collections.CollectionUtils; @@ -95,6 +97,9 @@ public class StorageVMSnapshotStrategy extends DefaultVMSnapshotStrategy { @Inject VMSnapshotDetailsDao vmSnapshotDetailsDao; + @Inject + private SnapshotDataStoreDao snapshotDataStoreDao; + @Override public boolean configure(String name, Map params) throws ConfigurationException { return super.configure(name, params); @@ -367,6 +372,17 @@ public StrategyPriority canHandle(Long vmId, Long rootPoolId, boolean snapshotMe return StrategyPriority.CANT_HANDLE; } + for (VolumeVO volume : volumeDao.findByInstance(vmId)) { + List snapshots = snapshotDataStoreDao.listReadyByVolumeIdAndCheckpointPathNotNull(volume.getId()); + if (CollectionUtils.isNotEmpty(snapshots)) { + logger.debug( + "{} as VM has a volume with incremental snapshots {}. 
Incremental volume snapshots and StorageVmSnapshotStrategy are not compatible," + + " as restoring VM snapshots will erase the bitmaps and destroy snapshot chains.", + cantHandleLog, snapshots); + return StrategyPriority.CANT_HANDLE; + } + } + if (SnapshotManager.VmStorageSnapshotKvm.value() && !snapshotMemory) { return StrategyPriority.HYPERVISOR; } diff --git a/engine/storage/snapshot/src/test/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotStrategyKVMTest.java b/engine/storage/snapshot/src/test/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotStrategyKVMTest.java index 050c0246abaf..e997641db5f6 100644 --- a/engine/storage/snapshot/src/test/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotStrategyKVMTest.java +++ b/engine/storage/snapshot/src/test/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotStrategyKVMTest.java @@ -44,6 +44,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.test.utils.SpringUtils; @@ -442,5 +443,10 @@ public BackupOfferingDao backupOfferingDao() { public BackupManager backupManager() { return Mockito.mock(BackupManager.class); } + + @Bean + public SnapshotDataStoreDao snapshotDataStoreDao() { + return Mockito.mock(SnapshotDataStoreDao.class); + } } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/backup/BackupObject.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/backup/BackupObject.java new file mode 100644 index 000000000000..5f15bdbc8c14 --- /dev/null +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/backup/BackupObject.java @@ -0,0 +1,198 @@ +// +// Licensed to the Apache 
Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.storage.backup; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.DataObjectType; +import com.cloud.agent.api.to.DataTO; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.storage.DataStoreRole; +import com.cloud.utils.component.ComponentContext; +import org.apache.cloudstack.backup.Backup; +import org.apache.cloudstack.backup.NativeBackupJoinVO; +import org.apache.cloudstack.backup.dao.NativeBackupJoinDao; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.storage.to.BackupDeltaTO; +import org.apache.commons.collections.CollectionUtils; + +import javax.inject.Inject; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.stream.Collectors; + +public class BackupObject implements DataObject { + + private long id; + private String uuid; + private Long zoneId; + private Long size; + private long physicalSize; 
+ private DataStore dataStore; + private String imageStorePath; + private Backup.Status status; + private Backup.CompressionStatus compressionStatus; + + @Inject + NativeBackupJoinDao nativeBackupJoinDao; + @Inject + DataStoreManager storeManager; + + public BackupObject() { + + } + + public static BackupObject getBackupObject(NativeBackupJoinVO nativeBackupJoinVO) { + BackupObject backupObject = ComponentContext.inject(BackupObject.class); + backupObject.configure(nativeBackupJoinVO); + return backupObject; + } + + private void configure(NativeBackupJoinVO nativeBackupJoin) { + this.id = nativeBackupJoin.getId(); + this.uuid = nativeBackupJoin.getUuid(); + this.zoneId = nativeBackupJoin.getZoneId(); + this.size = nativeBackupJoin.getProtectedSize(); + this.physicalSize = nativeBackupJoin.getSize(); + this.imageStorePath = nativeBackupJoin.getImageStorePath(); + this.status = nativeBackupJoin.getStatus(); + this.compressionStatus = nativeBackupJoin.getCompressionStatus(); + this.dataStore = storeManager.getDataStore(nativeBackupJoin.getImageStoreId(), DataStoreRole.Image); + } + + public List> getChildren() { + List> children = new ArrayList<>(); + + List backups = nativeBackupJoinDao.listByParentId(id); + while (CollectionUtils.isNotEmpty(backups)) { + children.add(backups.stream().map(BackupObject::getBackupObject).collect(Collectors.toList())); + backups = nativeBackupJoinDao.listByParentId(backups.get(0).getId()); + } + + return children; + } + + public List> getParents(long parentId) { + LinkedList> parents = new LinkedList<>(); + + List backups = nativeBackupJoinDao.listById(parentId); + while (CollectionUtils.isNotEmpty(backups)) { + parents.addFirst(backups.stream().map(BackupObject::getBackupObject).collect(Collectors.toList())); + backups = nativeBackupJoinDao.listById(backups.get(0).getParentId()); + } + + return parents; + } + + @Override + public long getId() { + return id; + } + + @Override + public String getUri() { + return ""; + } + + @Override + 
public DataTO getTO() { + DataTO to = dataStore.getDriver().getTO(this); + if (to == null) { + return new BackupDeltaTO(id, dataStore.getTO(), Hypervisor.HypervisorType.KVM, imageStorePath); + } + return to; + } + + @Override + public DataStore getDataStore() { + return dataStore; + } + + @Override + public Long getSize() { + return size; + } + + @Override + public long getPhysicalSize() { + return physicalSize; + } + + @Override + public DataObjectType getType() { + return DataObjectType.BACKUP; + } + + @Override + public String getUuid() { + return uuid; + } + + @Override + public boolean delete() { + return false; + } + + @Override + public void processEvent(ObjectInDataStoreStateMachine.Event event) { + } + + @Override + public void processEvent(ObjectInDataStoreStateMachine.Event event, Answer answer) { + } + + @Override + public void incRefCount() { + } + + @Override + public void decRefCount() { + } + + @Override + public Long getRefCount() { + return 0L; + } + + @Override + public String getName() { + return ""; + } + + public Long getZoneId() { + return zoneId; + } + + @Override + public String toString() { + return uuid; + } + + public Backup.CompressionStatus getCompressionStatus() { + return compressionStatus; + } + + public Backup.Status getStatus() { + return status; + } +} diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java index 55551772a08a..e7270ce7b52b 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java @@ -26,9 +26,16 @@ import javax.inject.Inject; import com.cloud.uservm.UserVm; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.VolumeApiServiceImpl; +import com.cloud.utils.exception.CloudRuntimeException; +import 
com.cloud.vm.snapshot.VMSnapshotDetailsVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.storage.vmsnapshot.VMSnapshotHelper; @@ -64,6 +71,12 @@ public class VMSnapshotHelperImpl implements VMSnapshotHelper { @Inject VolumeDataFactory volumeDataFactory; + @Inject + private VMSnapshotDetailsDao vmSnapshotDetailsDao; + + @Inject + private SnapshotDataStoreDao snapshotDataStoreDao; + StateMachine2 _vmSnapshottateMachine; public VMSnapshotHelperImpl() { @@ -115,10 +128,14 @@ public List getVolumeTOList(Long vmId) { List volumeTOs = new ArrayList(); List volumeVos = volumeDao.findByInstance(vmId); VolumeInfo volumeInfo = null; - for (VolumeVO volume : volumeVos) { - volumeInfo = volumeDataFactory.getVolume(volume.getId()); + try { + for (VolumeVO volume : volumeVos) { + volumeInfo = volumeDataFactory.getVolume(volume.getId()); - volumeTOs.add((VolumeObjectTO)volumeInfo.getTO()); + volumeTOs.add((VolumeObjectTO)volumeInfo.getTO()); + } + } catch (NullPointerException npe) { + throw new CloudRuntimeException(String.format("Unable to get list of volumeTOs for VM [%s]. Are the volumes created on storage already?", vmId), npe); } return volumeTOs; } @@ -150,6 +167,26 @@ public VMSnapshotTO getSnapshotWithParents(VMSnapshotVO snapshot) { return result; } + /** + * For a given {@code vmSnapshotId}, gets the list with all the volume snapshots that are part of the VMSnapshot. 
+ * + * @param vmSnapshotId the id of the VM snapshot; + * @return the list that will be populated with the volume snapshots associated with the VM snapshot. + */ + @Override + public List getVolumeSnapshotsAssociatedWithKvmDiskOnlyVmSnapshot(long vmSnapshotId) { + List associatedVolumeSnapshots = new ArrayList<>(); + List snapshotDetailList = vmSnapshotDetailsDao.findDetails(vmSnapshotId, VolumeApiServiceImpl.KVM_FILE_BASED_STORAGE_SNAPSHOT); + for (VMSnapshotDetailsVO vmSnapshotDetailsVO : snapshotDetailList) { + SnapshotDataStoreVO snapshot = snapshotDataStoreDao.findOneBySnapshotAndDatastoreRole(Long.parseLong(vmSnapshotDetailsVO.getValue()), DataStoreRole.Primary); + if (snapshot == null) { + throw new CloudRuntimeException(String.format("Could not find snapshot for VM snapshot [%s].", vmSnapshotId)); + } + associatedVolumeSnapshots.add(snapshot); + } + return associatedVolumeSnapshots; + } + @Override public StoragePoolVO getStoragePoolForVM(UserVm vm) { List rootVolumes = volumeDao.findReadyRootVolumesByInstance(vm.getId()); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java index 6d6cb7b70a93..868d634a30a6 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java @@ -22,6 +22,7 @@ import com.cloud.uservm.UserVm; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import com.cloud.agent.api.VMSnapshotTO; @@ -39,6 +40,8 @@ public interface VMSnapshotHelper { VMSnapshotTO getSnapshotWithParents(VMSnapshotVO snapshot); + List getVolumeSnapshotsAssociatedWithKvmDiskOnlyVmSnapshot(long vmSnapshotId); + StoragePoolVO 
getStoragePoolForVM(UserVm vm); Storage.StoragePoolType getStoragePoolType(Long poolId); diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 436f991afbd1..29800d590d8a 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -36,6 +36,7 @@ import org.apache.cloudstack.annotation.AnnotationService; import org.apache.cloudstack.annotation.dao.AnnotationDao; import org.apache.cloudstack.api.command.user.volume.CheckAndRepairVolumeCmd; +import org.apache.cloudstack.backup.NativeBackupService; import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; @@ -221,6 +222,8 @@ public class VolumeServiceImpl implements VolumeService { private PassphraseDao passphraseDao; @Inject protected DiskOfferingDao diskOfferingDao; + @Inject + private NativeBackupService nativeBackupService; public VolumeServiceImpl() { } @@ -502,6 +505,8 @@ public Void deleteVolumeCallback(AsyncCallbackDispatcher snapStoreVOs = _snapshotStoreDao.listAllByVolumeAndDataStore(vo.getId(), DataStoreRole.Primary); for (SnapshotDataStoreVO snapStoreVo : snapStoreVOs) { diff --git a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/VmWorkJobVO.java b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/VmWorkJobVO.java index 41eaac598bf3..050fe4e5215c 100644 --- a/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/VmWorkJobVO.java +++ b/framework/jobs/src/main/java/org/apache/cloudstack/framework/jobs/impl/VmWorkJobVO.java @@ -69,6 +69,14 @@ public VmWorkJobVO(String related) { 
setRelated(related); } + public VmWorkJobVO(String related, long userId, long accountId, String cmd, Long instanceId, VirtualMachine.Type vmType, Step step) { + super(null, userId, accountId, cmd, null, instanceId, null, null); + setRelated(related); + this.vmType = vmType; + this.step = step; + this.vmInstanceId = instanceId; + } + public Step getStep() { return step; } diff --git a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java index b228a9f8ce05..00bb353788f9 100644 --- a/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java +++ b/plugins/backup/dummy/src/main/java/org/apache/cloudstack/backup/DummyBackupProvider.java @@ -90,13 +90,14 @@ public boolean assignVMToBackupOffering(VirtualMachine vm, BackupOffering backup } @Override - public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { + public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup, boolean quickRestore, Long hostId) { logger.debug("Restoring vm {} from backup {} on the Dummy Backup Provider", vm, backup); return true; } @Override - public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, Pair vmNameAndState) { + public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, + Pair vmNameAndState, VirtualMachine vm, boolean quickRestore) { final VolumeVO volume = volumeDao.findByUuid(backupVolumeInfo.getUuid()); final StoragePoolHostVO dataStore = storagePoolHostDao.findByUuid(dataStoreUuid); final DiskOffering diskOffering = diskOfferingDao.findByUuid(backupVolumeInfo.getDiskOfferingId()); @@ -153,7 +154,7 @@ public boolean willDeleteBackupsOnOfferingRemoval() { } @Override - public Pair takeBackup(VirtualMachine vm, Boolean quiesceVM) { + public Pair takeBackup(VirtualMachine vm, 
Boolean quiesceVM, boolean isolated) { logger.debug("Starting backup for VM {} on Dummy provider", vm); BackupVO backup = new BackupVO(); @@ -204,7 +205,7 @@ public void syncBackupStorageStats(Long zoneId) { } @Override - public Pair restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid) { + public Pair restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid, boolean quickrestore) { return new Pair<>(true, null); } } diff --git a/plugins/backup/knib/pom.xml b/plugins/backup/knib/pom.xml new file mode 100644 index 000000000000..152e3cbdbd82 --- /dev/null +++ b/plugins/backup/knib/pom.xml @@ -0,0 +1,50 @@ + + + 4.0.0 + cloud-plugin-backup-kvm-native-incremental-backup + Apache CloudStack Plugin - KVM Native Incremental Backup Plugin + + cloudstack-plugins + org.apache.cloudstack + 4.23.0.0-SNAPSHOT + ../../pom.xml + + + + org.apache.cloudstack + cloud-plugin-hypervisor-kvm + ${project.version} + + + org.apache.cloudstack + cloud-engine-components-api + ${project.version} + compile + + + org.apache.cloudstack + cloud-engine-orchestration + ${project.version} + compile + + + \ No newline at end of file diff --git a/plugins/backup/knib/src/main/java/org/apache/cloudstack/backup/KnibBackupProvider.java b/plugins/backup/knib/src/main/java/org/apache/cloudstack/backup/KnibBackupProvider.java new file mode 100644 index 000000000000..9d6c5fa86fc7 --- /dev/null +++ b/plugins/backup/knib/src/main/java/org/apache/cloudstack/backup/KnibBackupProvider.java @@ -0,0 +1,2331 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.backup; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.storage.MergeDiskOnlyVmSnapshotCommand; +import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.agent.api.to.DataTO; +import com.cloud.agent.manager.Commands; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.resource.ResourceState; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.Storage; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeApiService; +import com.cloud.storage.VolumeApiServiceImpl; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.DateUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.Predicate; +import com.cloud.utils.component.AdapterBase; +import com.cloud.utils.db.EntityManager; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.exception.BackupException; +import com.cloud.utils.exception.BackupProviderException; +import com.cloud.utils.exception.CloudRuntimeException; +import 
com.cloud.utils.fsm.NoTransitionException; +import com.cloud.vm.UserVmManager; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VMInstanceDetailVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.VirtualMachineManagerImpl; +import com.cloud.vm.VmDetailConstants; +import com.cloud.vm.VmWork; +import com.cloud.vm.VmWorkConstants; +import com.cloud.vm.VmWorkDeleteBackup; +import com.cloud.vm.VmWorkRestoreBackup; +import com.cloud.vm.VmWorkRestoreVolumeBackupAndAttach; +import com.cloud.vm.VmWorkSerializer; +import com.cloud.vm.VmWorkTakeBackup; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDetailsDao; +import com.cloud.vm.snapshot.VMSnapshot; +import com.cloud.vm.snapshot.VMSnapshotDetailsVO; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao; +import org.apache.cloudstack.backup.dao.BackupCompressionJobDao; +import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.backup.dao.BackupDetailsDao; +import org.apache.cloudstack.backup.dao.BackupOfferingDao; +import org.apache.cloudstack.backup.dao.NativeBackupDataStoreDao; +import org.apache.cloudstack.backup.dao.NativeBackupJoinDao; +import org.apache.cloudstack.backup.dao.NativeBackupOfferingDao; +import org.apache.cloudstack.backup.dao.NativeBackupStoragePoolDao; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import 
org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.framework.jobs.AsyncJob; +import org.apache.cloudstack.framework.jobs.AsyncJobExecutionContext; +import org.apache.cloudstack.framework.jobs.AsyncJobManager; +import org.apache.cloudstack.framework.jobs.Outcome; +import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; +import org.apache.cloudstack.framework.jobs.impl.OutcomeImpl; +import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; +import org.apache.cloudstack.jobs.JobInfo; +import org.apache.cloudstack.secstorage.heuristics.HeuristicType; +import org.apache.cloudstack.storage.command.BackupDeleteAnswer; +import org.apache.cloudstack.storage.command.DeleteCommand; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.heuristics.HeuristicRuleHelper; +import org.apache.cloudstack.storage.to.BackupDeltaTO; +import org.apache.cloudstack.storage.to.DeltaMergeTreeTO; +import org.apache.cloudstack.storage.to.KnibTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.storage.vmsnapshot.VMSnapshotHelper; +import org.apache.cloudstack.storage.volume.VolumeObject; +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.lang3.BooleanUtils; +import org.apache.commons.lang3.ObjectUtils; +import org.apache.commons.lang3.StringUtils; + +import javax.inject.Inject; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import 
java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; + +import static org.apache.cloudstack.backup.dao.BackupDetailsDao.CURRENT; +import static org.apache.cloudstack.backup.dao.BackupDetailsDao.END_OF_CHAIN; +import static org.apache.cloudstack.backup.dao.BackupDetailsDao.IMAGE_STORE_ID; +import static org.apache.cloudstack.backup.dao.BackupDetailsDao.ISOLATED; +import static org.apache.cloudstack.backup.dao.BackupDetailsDao.PARENT_ID; + +public class KnibBackupProvider extends AdapterBase implements NativeBackupProvider, Configurable { + protected ConfigKey backupChainSize = new ConfigKey<>("Advanced", Integer.class, "backup.chain.size", "8", "Determines the max size of a backup chain." + + " Currently only used by the KNIB provider. If cloud admins set it to 1 , all the backups will be full backups. With values lower than 1, the backup chain will be " + + "unlimited, unless it is stopped by another process. Please note that unlimited backup chains have a higher chance of getting corrupted, as new backups will be" + + " dependant on all of the older ones.", true, ConfigKey.Scope.Zone); + + protected ConfigKey backupTimeout = new ConfigKey<>("Advanced", Integer.class, "knib.timeout", "43200", "Timeout, in seconds, to execute KNIB commands. 
After the " + + "command times out, the Management Server will still wait for another knib.timeout seconds to receive a response from the Agent.", true, ConfigKey.Scope.Zone); + + @Inject + private AsyncJobManager jobManager; + @Inject + private EntityManager entityManager; + + @Inject + private VirtualMachineManager virtualMachineManager; + + @Inject + private UserVmDao userVmDao; + + @Inject + private VMInstanceDetailsDao userVmDetailsDao; + + @Inject + private VMSnapshotHelper vmSnapshotHelper; + + @Inject + private SnapshotDataStoreDao snapshotDataStoreDao; + + @Inject + private VMSnapshotDao vmSnapshotDao; + + @Inject + private VMSnapshotDetailsDao vmSnapshotDetailsDao; + + @Inject + private BackupDao backupDao; + + @Inject + private NativeBackupJoinDao nativeBackupJoinDao; + + @Inject + private BackupDetailsDao backupDetailDao; + + @Inject + private NativeBackupStoragePoolDao nativeBackupStoragePoolDao; + + @Inject + private NativeBackupDataStoreDao nativeBackupDataStoreDao; + + @Inject + private NativeBackupOfferingDao nativeBackupOfferingDao; + + @Inject + private BackupOfferingDao backupOfferingDao; + + @Inject + private HeuristicRuleHelper heuristicRuleHelper; + + @Inject + private DataStoreManager dataStoreManager; + + @Inject + private ImageStoreDao dataStoreDao; + + @Inject + private AgentManager agentManager; + + @Inject + private EndPointSelector endPointSelector; + + @Inject + private VolumeDao volumeDao; + + @Inject + private ImageStoreDao imageStoreDao; + + @Inject + private VolumeApiService volumeApiService; + + @Inject + private PrimaryDataStoreDao storagePoolDao; + + @Inject + private HostDao hostDao; + + @Inject + private UserVmManager userVmManager; + + @Inject + private VolumeOrchestrationService volumeOrchestrationService; + + @Inject + private VolumeDataFactory volumeDataFactory; + @Inject + private BackupCompressionJobDao backupCompressionJobDao; + + @Inject + private BackupManager backupManager; + + @Inject + private DiskOfferingDao 
// NOTE(review): generic type parameters appear stripped by the patch extraction (raw List/Pair/Outcome);
// restore them from the original patch when applying.

// Child-backup states that no longer block physical removal of their parent backup.
protected final List validChildStatesToRemoveBackup = List.of(Backup.Status.Expunged, Backup.Status.Error, Backup.Status.Failed);

// Primary-storage pool types on which this provider can operate.
private final List supportedStoragePoolTypes = List.of(Storage.StoragePoolType.Filesystem, Storage.StoragePoolType.NetworkFilesystem,
        Storage.StoragePoolType.SharedMountPoint);

// Backup states in which deletion is allowed.
private final List allowedBackupStatesToRemove = List.of(Backup.Status.BackedUp, Backup.Status.Failed, Backup.Status.Error);

// Backup states in which compression may start.
private final List allowedBackupStatesToCompress = List.of(Backup.Status.BackedUp, Backup.Status.Restoring);

// VM states in which backup operations are permitted.
private final List allowedVmStates = Arrays.asList(VirtualMachine.State.Running, VirtualMachine.State.Stopped);

/** Human-readable provider description shown to operators. */
@Override
public String getDescription() {
    return "Native Incremental KVM Backup Plugin";
}

/**
 * Lists all native backup offerings. Note: the zoneId parameter is currently ignored —
 * all offerings are returned regardless of zone.
 */
@Override
public List listBackupOfferings(Long zoneId) {
    return new ArrayList<>(nativeBackupOfferingDao.listAll());
}

/** An offering is valid for this provider iff a native offering with the given UUID exists. */
@Override
public boolean isValidProviderOffering(Long zoneId, String uuid) {
    return nativeBackupOfferingDao.findByUuid(uuid) != null;
}

/**
 * Validates that a VM can join a KNIB offering: it must be a KVM instance, all of its
 * disk-only VM snapshots must use the KVM file-based storage snapshot strategy, and it
 * must have no disk-and-memory VM snapshots.
 *
 * @return true if the VM is eligible, false otherwise (reason is logged).
 */
@Override
public boolean assignVMToBackupOffering(VirtualMachine vm, BackupOffering backupOffering) {
    logger.debug("Assigning VM [{}] to KNIB backup offering with name:[{}], uuid: [{}].", vm.getUuid(), backupOffering.getName(), backupOffering.getUuid());
    if (!Hypervisor.HypervisorType.KVM.equals(vm.getHypervisorType())) {
        logger.error("KVM Native Incremental Backup provider is only supported for KVM.");
        return false;
    }

    for (VMSnapshotVO vmSnapshotVO : vmSnapshotDao.findByVmAndByType(vm.getId(), VMSnapshot.Type.Disk)) {
        List vmSnapshotDetails = vmSnapshotDetailsDao.listDetails(vmSnapshotVO.getId());
        // Every disk-only snapshot detail must mark the file-based storage snapshot strategy.
        if (!vmSnapshotDetails.stream().allMatch(vmSnapshotDetailsVO -> vmSnapshotDetailsVO.getName().equals(VolumeApiServiceImpl.KVM_FILE_BASED_STORAGE_SNAPSHOT))) {
            logger.error("KNIB is only supported with disk-only VM snapshots using [{}] strategy. Found a disk-only VM snapshot using another strategy for the VM.",
                    VolumeApiServiceImpl.KVM_FILE_BASED_STORAGE_SNAPSHOT);
            logger.debug("Found VM snapshot details [{}].", () -> vmSnapshotDetails.stream().map(VMSnapshotDetailsVO::getName).collect(Collectors.toList()));
            return false;
        }
    }

    // Disk-and-memory snapshots are incompatible with KNIB; eligible only if none exist.
    return CollectionUtils.isEmpty(vmSnapshotDao.findByVmAndByType(vm.getId(), VMSnapshot.Type.DiskAndMemory));
}

/**
 * Detaches the VM from the offering: merges the current (open) backup delta back into the
 * volumes and closes the chain. On merge failure the VM is moved to BackupError.
 */
@Override
public boolean removeVMFromBackupOffering(VirtualMachine vm) {
    logger.info("Removing VM [{}] from KNIB backup offering.", vm.getUuid());

    validateVmState(vm, "remove backup offering", VirtualMachine.State.Expunging, VirtualMachine.State.Destroyed);
    NativeBackupJoinVO current = nativeBackupJoinDao.findCurrent(vm.getId());
    if (current == null) {
        logger.debug("There is no current active chain, no need to do anything.");
        return true;
    }

    if (mergeCurrentBackupDeltas(current)) {
        setEndOfChainAndRemoveCurrentForBackup(current);
        return true;
    }
    UserVmVO vmVO = userVmDao.findById(vm.getId());
    logger.error("Failed to merge deltas for VM [{}] during backup offering removal process. Changing its state to [{}].", vm, VirtualMachine.State.BackupError);
    vmVO.setState(VirtualMachine.State.BackupError);
    userVmDao.update(vmVO.getId(), vmVO);

    return false;
}

/** Backups are kept when the offering is removed from a VM. */
@Override
public boolean willDeleteBackupsOnOfferingRemoval() {
    return false;
}

/**
 * Entry point for taking a backup: submits a VmWork job to the VM work queue, waits for the
 * outcome and unmarshalls the result. Rethrows BackupProviderException from the job as-is.
 *
 * @return pair of (success flag, created Backup or null on failure).
 */
@Override
public Pair takeBackup(VirtualMachine vm, Boolean quiesceVm, boolean isolated) {
    logger.debug("Queueing backup on VM [{}].", vm.getUuid());
    Outcome outcome = createBackupThroughJobQueue(vm, ObjectUtils.defaultIfNull(quiesceVm, false), isolated);

    try {
        outcome.get();
    } catch (InterruptedException | ExecutionException e) {
        throw new CloudRuntimeException(String.format("Unable to retrieve result from job takeBackup due to [%s]. VM [%s].", e.getMessage(), vm.getUuid()), e);
    }

    Object jobResult = jobManager.unmarshallResultObject(outcome.getJob());

    if (jobResult instanceof BackupProviderException) {
        throw (BackupProviderException) jobResult;
    } else if (jobResult instanceof Throwable) {
        throw new CloudRuntimeException(String.format("Exception while taking KVM native incremental backup for VM [%s]. Check the logs for more information.", vm.getUuid()));
    }

    // Job returns (success, backupId); translate the id into the persisted Backup row.
    Pair result = (Pair)jobResult;
    Pair returnValue = new Pair<>(result.first(), null);
    if (result.first()) {
        returnValue.second(backupDao.findById(result.second()));
    }
    return returnValue;
}
// NOTE(review): generic type parameters appear stripped by the patch extraction (raw List/Pair/Map/HashMap);
// restore them from the original patch when applying.

/**
 * Executes the actual backup inside the VmWork job: validates host/VM/storage state,
 * resolves the parent backup (or starts a new full/isolated backup), builds the per-volume
 * KnibTO list and delta references, sends a TakeKnibBackupCommand to the host, and on
 * success updates the current-chain pointer and queues async compression if supported.
 *
 * @return pair of (success flag, backup id) — second is null when the agent command fails.
 */
@Override
public Pair orchestrateTakeBackup(Backup backup, boolean quiesceVm, boolean isolated) {
    BackupVO backupVO = (BackupVO) backup;
    long vmId = backup.getVmId();
    VirtualMachine userVm = virtualMachineManager.findById(vmId);
    Long hostId = vmSnapshotHelper.pickRunningHost(vmId);
    HostVO hostVO = hostDao.findById(hostId);

    if (hostVO.getStatus() == Status.Down || hostVO.getStatus() == Status.Disconnected) {
        backupVO.setStatus(Backup.Status.Failed);
        backupDao.update(backupVO.getId(), backupVO);

        logger.error("No available host found to create backup [{}] of VM [{}]. Setting the backup as Failed.", backupVO.getUuid(), userVm.getUuid());
        return new Pair<>(Boolean.FALSE, backup.getId());
    }

    List volumeTOs;
    try {
        validateVmState(userVm, "take backup");
        volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId());
        validateStorages(volumeTOs, userVm.getUuid());
    } catch (Exception e) {
        // Mark the queued backup Failed before propagating the validation error.
        backupVO.setStatus(Backup.Status.Failed);
        backupDao.update(backupVO.getId(), backupVO);
        throw e;
    }

    logger.info("Starting VM backup process for VM [{}].", userVm.getUuid());

    BackupOfferingVO backupOfferingVO = backupOfferingDao.findByIdIncludingRemoved(backup.getBackupOfferingId());
    NativeBackupOfferingVO nativeBackupOfferingVO = nativeBackupOfferingDao.findByUuidIncludingRemoved(backupOfferingVO.getExternalId());

    backupVO.setDate(new Date());
    List backupChain = getBackupJoinParents(backupVO, true);
    NativeBackupJoinVO parentBackup = null;
    if (isolated) {
        // Isolated backups never join the incremental chain.
        setBackupAsIsolated(backupVO);
    } else {
        parentBackup = getParentAndSetEndOfChain(backupVO, backupChain, nativeBackupOfferingVO);
    }
    NativeBackupJoinVO newBackupJoin = nativeBackupJoinDao.findById(backup.getId());
    boolean fullBackup = parentBackup == null;
    List parentBackupDeltasOnPrimary = new ArrayList<>();
    List parentBackupDeltasOnSecondary = new ArrayList<>();
    List chainImageStoreUrls = null;
    List knibTOs = new ArrayList<>();
    HashMap volumeUuidToDeltaPrimaryRef = new HashMap<>();
    HashMap volumeUuidToDeltaSecondaryRef = new HashMap<>();

    if (!fullBackup) {
        // Incremental backup: collect the parent's deltas and the full chain's image store URLs.
        parentBackupDeltasOnPrimary = nativeBackupStoragePoolDao.listByBackupId(parentBackup.getId());
        parentBackupDeltasOnSecondary = nativeBackupDataStoreDao.listByBackupId(parentBackup.getId());

        chainImageStoreUrls = getChainImageStoreUrls(backupChain);
    }

    boolean runningVm = userVm.getState() == VirtualMachine.State.Running;
    transitVmStateWithoutThrow(userVm, VirtualMachine.Event.BackupRequested, hostId);
    updateBackupStatusToBackingUp(volumeTOs, backupVO);

    DataStore imageStore = getImageStoreForBackup(userVm.getDataCenterId(), backupVO);
    // Parent id 0 marks a full backup in the backup details.
    createDetails(imageStore.getId(), fullBackup ? 0L : parentBackup.getId(), backupVO);

    List succeedingVmSnapshotList = getSucceedingVmSnapshotList(parentBackup);
    VMSnapshotVO succeedingVmSnapshot = succeedingVmSnapshotList.isEmpty() ? null : succeedingVmSnapshotList.get(0);

    Map volumeIdToSnapshotDataStoreList = mapVolumesToVmSnapshotReferences(volumeTOs, succeedingVmSnapshotList);
    for (VolumeObjectTO volumeObjectTO : volumeTOs) {
        KnibTO knibTO = new KnibTO(volumeObjectTO, volumeIdToSnapshotDataStoreList.getOrDefault(volumeObjectTO.getId(), new ArrayList<>()));
        knibTOs.add(knibTO);
        createDeltaReferences(fullBackup, newBackupJoin.getEndOfChain(), !succeedingVmSnapshotList.isEmpty(), runningVm, backup, parentBackupDeltasOnSecondary,
                parentBackupDeltasOnPrimary, volumeUuidToDeltaPrimaryRef, volumeUuidToDeltaSecondaryRef, succeedingVmSnapshot, knibTO, isolated);
    }

    TakeKnibBackupCommand command = new TakeKnibBackupCommand(quiesceVm, runningVm, newBackupJoin.getEndOfChain(), userVm.getInstanceName(), imageStore.getUri(),
            chainImageStoreUrls, knibTOs, isolated);

    Answer answer = sendBackupCommand(hostId, command);

    if (answer == null || !answer.getResult()) {
        processBackupFailure(answer, userVm, hostId, runningVm, backupVO);
        // NOTE(review): earlier failure paths return backup.getId() as the second element; this
        // one returns null — confirm callers handle both.
        return new Pair<>(Boolean.FALSE, null);
    }

    processBackupSuccess(runningVm, volumeTOs, volumeUuidToDeltaPrimaryRef, volumeUuidToDeltaSecondaryRef, (TakeKnibBackupAnswer)answer, parentBackupDeltasOnPrimary,
            succeedingVmSnapshotList, backupVO, fullBackup, userVm, hostId);

    if (!isolated) {
        updateCurrentBackup(newBackupJoin);
    }

    compressBackupAsyncIfHasOfferingSupport(newBackupJoin, backup.getZoneId());
    return new Pair<>(Boolean.TRUE, backupVO.getId());
}

/**
 * Entry point for deleting a backup: submits a VmWork job, waits for the outcome and
 * unmarshalls the Boolean result; rethrows BackupProviderException from the job as-is.
 */
@Override
public boolean deleteBackup(Backup backup, boolean forced) {
    logger.debug("Queueing backup [{}] deletion.", backup.getUuid());
    Outcome outcome = deleteBackupThroughJobQueue(backup, forced);

    try {
        outcome.get();
    } catch (InterruptedException | ExecutionException e) {
        throw new CloudRuntimeException(String.format("Unable to retrieve result from job deleteBackup due to [%s]. Backup [%s].", e.getMessage(), backup.getUuid()), e);
    }

    Object jobResult = jobManager.unmarshallResultObject(outcome.getJob());

    if (jobResult instanceof Throwable) {
        if (jobResult instanceof BackupProviderException) {
            throw (BackupProviderException) jobResult;
        }
        throw new CloudRuntimeException(String.format("Exception while deleting KVM native incremental backup [%s]. Check the logs for more information.", backup.getUuid()));
    }

    return BooleanUtils.isTrue((Boolean) jobResult);
}
// NOTE(review): generic type parameters appear stripped by the patch extraction (raw List/Pair);
// restore them from the original patch when applying.

/**
 * Executes backup deletion inside the VmWork job. Handles the special cases in order:
 * failed/error backups are deleted directly; backups with live children are only soft-removed;
 * a "current" backup first gets its open deltas merged; then delete commands for this backup
 * and any expunge-eligible parents are sent to an SSVM endpoint and the DB rows are cleaned up.
 *
 * @return true when every delete command succeeded (or forced cleanup applied).
 */
@Override
public Boolean orchestrateDeleteBackup(Backup backup, boolean forced) {
    BackupVO backupVO = (BackupVO) backup;

    VirtualMachine virtualMachine = virtualMachineManager.findById(backup.getVmId());

    if (virtualMachine != null) {
        validateVmState(virtualMachine, "delete backup", VirtualMachine.State.Destroyed);
    }

    logger.info("Starting delete process for backup [{}].", backupVO);

    if (!validateBackupStateForRemoval(backupVO.getId())) {
        return false;
    }

    checkErrorBackup(backupVO, virtualMachine);
    if (deleteFailedBackup(backupVO)) {
        // Failed backups have no files on storage; DB-only removal is enough.
        return true;
    }

    NativeBackupJoinVO childBackup = nativeBackupJoinDao.findByParentId(backup.getId());

    if (childBackup != null && !validChildStatesToRemoveBackup.contains(childBackup.getStatus())) {
        // A live child still depends on this delta: soft-remove only; files stay until children expunge.
        logger.debug("Backup [{}] has children that are not in one of the following states [{}]; will mark it as removed on the database but the files will not be deleted " +
                "from secondary storage until the children are also expunged.", backup.getUuid(), validChildStatesToRemoveBackup);
        backupVO.setStatus(Backup.Status.Removed);
        backupDao.update(backupVO.getId(), backupVO);
        return true;
    }

    NativeBackupJoinVO backupJoinVO = nativeBackupJoinDao.findById(backup.getId());
    if (backupJoinVO.getCurrent()) {
        // The open delta must be merged into the volumes before the backup can be deleted.
        if (!mergeCurrentBackupDeltas(backupJoinVO)) {
            return false;
        }
        NativeBackupJoinVO parent = nativeBackupJoinDao.findById(backupJoinVO.getParentId());
        if (parent != null && parent.getStatus() == Backup.Status.BackedUp) {
            // The parent becomes the new tip of the chain.
            backupDetailDao.persist(new BackupDetailVO(parent.getId(), END_OF_CHAIN, Boolean.TRUE.toString(), false));
        }
    }

    // Continue on individual delete errors; failures are collected and processed below.
    Commands deleteCommands = new Commands(Command.OnError.Continue);

    DataStore dataStore = addBackupDeltasToDeleteCommand(backup.getId(), deleteCommands);
    Pair backupParentsToBeRemovedAndLastAliveBackup = getParentsToBeExpungedWithBackupAndAddThemToListOfDeleteCommands(backupVO,
            deleteCommands);

    EndPoint endPoint = endPointSelector.select(dataStore);
    if (endPoint == null) {
        logger.error("Unable to find SSVM to delete backup [{}]. Check if SSVM is up for the zone.", backup);
        throw new CloudRuntimeException(String.format("Unable to delete backup [%s]. Please check the logs.", backup.getUuid()));
    }
    Answer[] deleteAnswers;
    try {
        deleteAnswers = sendBackupCommands(endPoint.getId(), deleteCommands);
    } catch (AgentUnavailableException | OperationTimedoutException e) {
        throw new CloudRuntimeException(e);
    }

    List removedBackupIds = backupParentsToBeRemovedAndLastAliveBackup.first().stream().map(NativeBackupJoinVO::getId).collect(Collectors.toList());
    removedBackupIds.add(backup.getId());

    boolean isFailedSetEmpty = processRemoveBackupFailures(forced, deleteAnswers, removedBackupIds, backupJoinVO);

    processRemovedBackups(removedBackupIds);

    if (backupParentsToBeRemovedAndLastAliveBackup.second() != null) {
        // The last surviving ancestor becomes the new end of the chain.
        backupDetailDao.persist(new BackupDetailVO(backupParentsToBeRemovedAndLastAliveBackup.second().getId(), END_OF_CHAIN, Boolean.TRUE.toString(), false));
    }

    return isFailedSetEmpty;
}

/**
 * Entry point for restoring a VM from a backup: submits a VmWork job and waits for the result.
 * The backup status is reset to BackedUp in the finally block.
 * NOTE(review): the finally block resets status to BackedUp even when the job failed — confirm
 * this is the intended behavior for failed restores.
 */
@Override
public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup, boolean quickRestore, Long hostId) {
    logger.debug("Queueing backup [{}] restore for VM [{}].", backup.getUuid(), vm.getUuid());
    validateQuickRestore(backup, quickRestore);

    Outcome outcome = restoreVMFromBackupThroughJobQueue(vm, backup, quickRestore, hostId);

    try {
        outcome.get();
    } catch (InterruptedException | ExecutionException e) {
        throw new CloudRuntimeException(String.format("Unable to retrieve result from job restoreVMFromBackup due to [%s]. Backup [%s].", e.getMessage(), backup.getUuid()), e);
    } finally {
        BackupVO backupVO = backupDao.findById(backup.getId());
        backupVO.setStatus(Backup.Status.BackedUp);
        backupDao.update(backupVO.getId(), backupVO);
    }

    Object jobResult = jobManager.unmarshallResultObject(outcome.getJob());

    handleRestoreException(backup, vm, jobResult);

    return BooleanUtils.isTrue((Boolean) jobResult);
}
// NOTE(review): generic type parameters appear stripped by the patch extraction (raw List/Pair/Set);
// restore them from the original patch when applying.

/**
 * Executes the restore inside the VmWork job. When restoring onto the same VM that owns the
 * backup (sameVmAsBackup), missing volumes are re-created and attached and stale current-chain
 * deltas are merged/removed; then a RestoreKnibBackupCommand plus a delta-merge command are sent
 * to the chosen host. Quick restore finishes by starting the VM and consolidating volumes.
 *
 * @return true on success; false when validation or the agent commands fail.
 */
@Override
public Boolean orchestrateRestoreVMFromBackup(Backup backup, VirtualMachine vm, boolean quickRestore, Long hostId, boolean sameVmAsBackup) {
    logger.info("Starting restore backup process for VM [{}] and backup [{}].", vm.getUuid(), backup);
    validateNoVmSnapshots(vm);
    long backupId = backup.getId();
    Pair isValidStateAndBackupVo = validateCompressionStateForRestoreAndGetBackup(backupId);

    if (!isValidStateAndBackupVo.first()) {
        return false;
    }

    NativeBackupJoinVO backupJoinVO = nativeBackupJoinDao.findById(backupId);
    // The open (current) chain is only relevant when restoring onto the backup's own VM.
    NativeBackupJoinVO currentBackup = sameVmAsBackup ? nativeBackupJoinDao.findCurrent(vm.getId()) : null;
    List deltasOnPrimary = new ArrayList<>();
    if (currentBackup != null) {
        deltasOnPrimary = nativeBackupStoragePoolDao.listByBackupId(currentBackup.getId());
    }
    List deltasOnSecondary = nativeBackupDataStoreDao.listByBackupId(backupId);
    List volumeTOs = vmSnapshotHelper.getVolumeTOList(vm.getId());

    Set deltasToRemove = new HashSet<>();

    List backupsWithoutVolumes = sameVmAsBackup ? getBackupsWithoutVolumes(deltasOnSecondary, volumeTOs) : List.of();

    HostVO host;
    try {
        host = getHostToRestore(vm, quickRestore, hostId); // review for quick restore.
    } catch (AgentUnavailableException e) {
        throw new CloudRuntimeException(e);
    }

    BackupVO backupVO = isValidStateAndBackupVo.second();
    List volumeInfos = backupVO.getBackedUpVolumes();
    if (sameVmAsBackup) {
        // Recreate volumes that existed at backup time but are gone now, then refresh the TO list.
        createAndAttachVolumes(volumeInfos, backupsWithoutVolumes, vm, host);
        // Get new volume references
        volumeTOs = vmSnapshotHelper.getVolumeTOList(vm.getId());
    }

    Set backupAndVolumePairs = generateBackupAndVolumePairsToRestore(deltasOnSecondary, volumeTOs, backupJoinVO, sameVmAsBackup);

    List deltasToBeMerged = List.of();
    if (sameVmAsBackup) {
        List volumesNotPartOfTheBackup = getVolumesThatAreNotPartOfTheBackup(volumeTOs, deltasOnSecondary);
        deltasToBeMerged = populateDeltasToRemoveAndToMergeAndUpdateVolumePaths(deltasOnPrimary, deltasToRemove, volumeTOs, volumesNotPartOfTheBackup,
                vm.getUuid());
    }
    Set secondaryStorageUrls = getParentSecondaryStorageUrls(backupVO);

    // Stop on first error: the merge must not run if the restore command failed.
    Commands commands = new Commands(Command.OnError.Stop);
    commands.addCommand(new RestoreKnibBackupCommand(deltasToRemove, backupAndVolumePairs, secondaryStorageUrls, quickRestore));
    commands.addCommand(new MergeDiskOnlyVmSnapshotCommand(deltasToBeMerged, vm.getState().equals(VirtualMachine.State.Running), vm.getInstanceName()));

    Answer[] answers;

    try {
        answers = sendBackupCommands(host.getId(), commands);
    } catch (OperationTimedoutException | AgentUnavailableException e) {
        throw new CloudRuntimeException(e);
    }

    if (answers == null) {
        logger.error("Failed to restore backup [{}] due to no answer from host.", backup);
        return false;
    }

    if (!processRestoreAnswers(vm, answers)) {
        return false;
    }

    updateVolumePathsAndSizeIfNeeded(vm, volumeTOs, volumeInfos, deltasToBeMerged, sameVmAsBackup);

    if (currentBackup != null) {
        // The old current chain was consumed by the restore; drop its primary refs and close it.
        nativeBackupStoragePoolDao.expungeByBackupId(currentBackup.getId());
        setEndOfChainAndRemoveCurrentForBackup(currentBackup);
    }

    if (quickRestore) {
        List volumesToConsolidate = getVolumesToConsolidate(vm, deltasOnSecondary, volumeTOs, host.getId(), sameVmAsBackup);
        return finalizeQuickRestore(vm, volumesToConsolidate, host.getId());
    }

    return true;
}

/**
 * Entry point for restoring a single backed-up volume and attaching it to a VM: submits a
 * VmWork job and returns the (success, attached-volume-uuid) pair produced by the job.
 * NOTE(review): as in restoreVMFromBackup, the finally block resets the backup status to
 * BackedUp even on failure — confirm intended.
 */
@Override
public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid,
        Pair vmNameAndState, VirtualMachine vm, boolean quickRestore) {
    logger.debug("Queueing backup [{}] volume [{}] restore for VM [{}].", backup.getUuid(), backupVolumeInfo, vm.getUuid());
    validateQuickRestore(backup, quickRestore);
    Outcome outcome = restoreBackedUpVolumeThroughJobQueue(vm, backup, backupVolumeInfo, hostIp, quickRestore);

    try {
        outcome.get();
    } catch (InterruptedException | ExecutionException e) {
        throw new CloudRuntimeException(String.format("Unable to retrieve result from job restoreBackedUpVolume due to [%s]. Backup [%s].", e.getMessage(), backup.getUuid()), e);
    } finally {
        BackupVO backupVO = backupDao.findById(backup.getId());
        backupVO.setStatus(Backup.Status.BackedUp);
        backupDao.update(backupVO.getId(), backupVO);
    }

    Object jobResult = jobManager.unmarshallResultObject(outcome.getJob());

    handleRestoreException(backup, vm, jobResult);

    if (!(jobResult instanceof Pair)) {
        throw new CloudRuntimeException(String.format("Unexpected answer from restoreBackupVolume job. Got [%s].", jobResult));
    }
    return (Pair) jobResult;
}
// NOTE(review): generic type parameters appear stripped by the patch extraction (raw List/Pair/Set);
// restore them from the original patch when applying.

/**
 * Executes the single-volume restore inside the VmWork job: duplicates the backed-up volume
 * into a new volume on the target host, restores the delta chain into it via a
 * RestoreKnibBackupCommand, attaches it to the VM, and (for quick restore) consolidates it.
 *
 * @return pair of (true, uuid of the newly attached volume); throws on agent failure.
 */
@Override
public Pair orchestrateRestoreBackedUpVolume(Backup backup, VirtualMachine vm, Backup.VolumeInfo backupVolumeInfo, String hostIp, boolean quickRestore) {
    BackupVO backupVO = (BackupVO) backup;
    Pair isValidStateAndBackupVo = validateCompressionStateForRestoreAndGetBackup(backup.getId());

    if (!isValidStateAndBackupVo.first()) {
        return new Pair<>(false, null);
    }

    VolumeVO backedUpVolume = volumeDao.findByUuidIncludingRemoved(backupVolumeInfo.getUuid());
    HostVO hostVo = hostDao.findByIp(hostIp);
    // The restore target is a fresh copy of the original volume, not the original itself.
    VolumeInfo volumeInfo = duplicateAndCreateVolume(vm, hostVo, backupVolumeInfo);

    VolumeObjectTO volumeObjectTO = (VolumeObjectTO) volumeInfo.getTO();
    NativeBackupDataStoreVO deltaOnSecondary = nativeBackupDataStoreDao.findByBackupIdAndVolumeId(backup.getId(), backedUpVolume.getId());
    NativeBackupJoinVO nativeBackupJoinVO = nativeBackupJoinDao.findById(backup.getId());
    Pair backupAndVolumePair = generateBackupAndVolumePairForSingleNewVolume(deltaOnSecondary, volumeObjectTO, backedUpVolume.getId(), nativeBackupJoinVO);
    Set secondaryStorageUrls = getParentSecondaryStorageUrls(backupVO);

    // No deltas to remove — this is a brand-new volume.
    RestoreKnibBackupCommand cmd = new RestoreKnibBackupCommand(Set.of(), Set.of(backupAndVolumePair), secondaryStorageUrls, quickRestore);

    Answer answer = sendBackupCommand(hostVo.getId(), cmd);

    if (!processRestoreAnswers(vm, new Answer[] {answer})) {
        throw new CloudRuntimeException("Bad answer from agent");
    }

    VolumeVO newVolume = (VolumeVO)volumeInfo.getVolume();
    volumeDao.update(newVolume.getId(), newVolume);

    Volume attachedVolume = volumeApiService.attachVolumeToVM(vm.getId(), newVolume.getId(), null, false, false);

    if (quickRestore) {
        ArrayList volumeToConsolidate = new ArrayList<>();
        volumeToConsolidate.add(volumeDataFactory.getVolume(attachedVolume.getId()));
        finalizeQuickRestore(vm, volumeToConsolidate, hostVo.getId());
    }

    return new Pair<>(true, attachedVolume.getUuid());
}

/**
 * First phase of backup compression: builds the delta/parent merge tree for every volume of
 * the backup and sends a CompressBackupCommand to the host. On success a FinalizeCompression
 * job is queued; on failure the backup's compression status is set to CompressionError.
 */
@Override
public boolean startBackupCompression(long backupId, long hostId) {
    Pair validCompressAndBackupVO = validateBackupStateForStartCompressionAndUpdateCompressionStatus(backupId);

    if (!validCompressAndBackupVO.first()) {
        return false;
    }

    NativeBackupJoinVO backup = nativeBackupJoinDao.findById(backupId);
    NativeBackupJoinVO parentBackup = nativeBackupJoinDao.findById(backup.getParentId());

    List backupDeltas = nativeBackupDataStoreDao.listByBackupId(backupId);
    List parentBackupDeltas = parentBackup != null ? nativeBackupDataStoreDao.listByBackupId(backup.getParentId()) : List.of();

    DataStoreTO imageStoreTo = dataStoreManager.getDataStore(backup.getImageStoreId(), DataStoreRole.Image).getTO();
    DataStoreTO parentStoreTo = parentBackup != null ? dataStoreManager.getDataStore(parentBackup.getImageStoreId(), DataStoreRole.Image).getTO() : null;

    List deltasToCompressAndParents = new ArrayList<>();
    for (NativeBackupDataStoreVO delta : backupDeltas) {
        BackupDeltaTO backupDeltaTO = new BackupDeltaTO(imageStoreTo, Hypervisor.HypervisorType.KVM, delta.getBackupPath());
        // Match this delta with the parent backup's delta for the same volume, if any.
        NativeBackupDataStoreVO parentDataStore = parentBackupDeltas.stream().filter(parent -> parent.getVolumeId() == delta.getVolumeId()).findFirst().orElse(null);
        BackupDeltaTO parentDeltaTO = parentDataStore != null ? new BackupDeltaTO(parentStoreTo, Hypervisor.HypervisorType.KVM, parentDataStore.getBackupPath()) : null;
        deltasToCompressAndParents.add(new DeltaMergeTreeTO(null, parentDeltaTO, backupDeltaTO, null));
    }

    HostVO hostVO = hostDao.findById(hostId);
    BackupVO backupVO = validCompressAndBackupVO.second();

    // Require this much free space on the host before compressing (factor of the backup size).
    long minFreeStorage = Math.round(backupVO.getSize() * backupCompressionMinimumFreeStorage.valueIn(hostVO.getDataCenterId()));

    BackupOfferingVO backupOfferingVO = backupOfferingDao.findByIdIncludingRemoved(backupVO.getBackupOfferingId());
    NativeBackupOfferingVO nativeBackupOfferingVO = nativeBackupOfferingDao.findByUuid(backupOfferingVO.getExternalId());
    List backupChain = getBackupJoinParents(backupVO, true);
    List chainImageStoreUrls = getChainImageStoreUrls(backupChain);
    CompressBackupCommand cmd = new CompressBackupCommand(deltasToCompressAndParents, chainImageStoreUrls, minFreeStorage, nativeBackupOfferingVO.getCompressionLibrary(),
            backupCompressionCoroutines.valueIn(hostVO.getClusterId()), backupCompressionRateLimit.valueIn(hostVO.getClusterId()));
    cmd.setWait(backupCompressionTimeout.valueIn(hostVO.getClusterId()));
    Answer answer = agentManager.easySend(hostId, cmd);

    if (answer == null || !answer.getResult()) {
        logger.error("Failed to compress backup [{}] due to {}.", backup.getUuid(), answer == null ? "no answer" : answer.getDetails());
        backupVO.setCompressionStatus(Backup.CompressionStatus.CompressionError);
        backupDao.update(backupId, backupVO);
        return false;
    }

    logger.info("Successfully completed the first step of the backup compression process for backup [{}]. Will launch a new compression job to finalize the compression.",
            backup.getUuid());

    backupCompressionJobDao.persist(new BackupCompressionJobVO(backupVO.getId(), backupVO.getZoneId(), backupVO.getVmId(), BackupCompressionJobType.FinalizeCompression));

    return true;
}
// NOTE(review): generic type parameters appear stripped by the patch extraction (raw List/Pair);
// restore them from the original patch when applying.

/**
 * Second phase of backup compression: sends a FinalizeBackupCompressionCommand (cleanup mode
 * when the backup is no longer BackedUp) and, on success, records the compressed size —
 * the old size is preserved in uncompressedSize and the new size is parsed from the
 * answer's details string.
 */
@Override
public boolean finalizeBackupCompression(long backupId, long hostId) {
    Pair shouldContinueProcessAndBackupVo = validateBackupStateForFinalizeCompression(backupId);
    if (!shouldContinueProcessAndBackupVo.first()) {
        return false;
    }
    BackupVO backupVO = shouldContinueProcessAndBackupVo.second();

    NativeBackupJoinVO backupJoinVO = nativeBackupJoinDao.findById(backupId);
    DataStoreTO imageStoreTo = dataStoreManager.getDataStore(backupJoinVO.getImageStoreId(), DataStoreRole.Image).getTO();
    List deltas = nativeBackupDataStoreDao.listByBackupId(backupId);
    List deltaTOs = deltas.stream()
            .map(delta -> new BackupDeltaTO(imageStoreTo, Hypervisor.HypervisorType.KVM, delta.getBackupPath()))
            .collect(Collectors.toList());

    // Cleanup mode when the backup left the BackedUp state mid-compression.
    FinalizeBackupCompressionCommand cmd = new FinalizeBackupCompressionCommand(backupVO.getStatus() != Backup.Status.BackedUp, deltaTOs);
    HostVO hostVO = hostDao.findById(hostId);
    cmd.setWait(backupCompressionTimeout.valueIn(hostVO.getClusterId()));
    Answer answer = agentManager.easySend(hostId, cmd);

    if (answer == null || !answer.getResult()) {
        logger.error("Failed to finish compression of backup [{}] due to {}.", backupVO.getUuid(), answer == null ? "no answer" : answer.getDetails());
        backupVO.setCompressionStatus(Backup.CompressionStatus.CompressionError);
        backupDao.update(backupId, backupVO);
        return false;
    }

    if (cmd.isCleanup()) {
        logger.info("Successfully cleaned up backup compression of backup [{}].", backupVO);
        return true;
    }

    backupVO.setCompressionStatus(Backup.CompressionStatus.Compressed);
    // Preserve the pre-compression size before overwriting it with the compressed size
    // reported in the answer's details. NOTE(review): relies on details being a plain long.
    backupVO.setUncompressedSize(backupVO.getSize());
    backupVO.setSize(Long.parseLong(answer.getDetails()));
    backupDao.update(backupVO.getId(), backupVO);

    logger.info("Finalized compression for backup [{}], old size was [{}], compressed size is [{}].", backupVO.getUuid(), backupVO.getUncompressedSize(), backupVO.getSize());
    return true;
}

/**
 * Restores a backup onto a different VM (not the backup's owner). Runs the restore
 * synchronously and always restores the backup's previous status in a DB transaction.
 *
 * @return pair of (success flag, error message or null).
 */
@Override
public Pair restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid, boolean quickRestore) {
    Pair shouldRestoreAndOldStatus = validateBackupStateForRestoreBackupToVM(backup.getId());
    if (!shouldRestoreAndOldStatus.first()) {
        return new Pair<>(false, "Backup is not in the right state.");
    }

    boolean result = false;
    try {
        // sameVmAsBackup=false: target VM is not the backup owner.
        result = orchestrateRestoreVMFromBackup(backup, vm, quickRestore, null, false);
    } catch (Exception exception) {
        handleRestoreException(backup, vm, exception);
    } finally {
        // Always put the backup back into its pre-restore status.
        Transaction.execute(TransactionLegacy.CLOUD_DB, (TransactionCallback) transactionStatus -> {
            BackupVO backupVO = backupDao.findById(backup.getId());
            backupVO.setStatus(shouldRestoreAndOldStatus.second());
            backupDao.update(backupVO.getId(), backupVO);
            return true;
        });
    }

    return new Pair<>(result, null);
}

/** No-op: metrics are tracked internally by this provider. */
@Override
public void syncBackupMetrics(Long zoneId) {
}

/** Restore points are not used by this provider; always returns null. */
@Override
public Backup createNewBackupEntryForRestorePoint(Backup.RestorePoint rp, VirtualMachine vm) {
    return null;
}

/** No external backup storage to report; returns (0, 0). */
@Override
public Pair getBackupStorageStats(Long zoneId) {
    return new Pair<>(0L, 0L);
}

/** No-op: storage stats need no external sync for this provider. */
@Override
public void syncBackupStorageStats(Long zoneId) {
}

/** Creating a new instance directly from a backup is supported. */
@Override
public boolean supportsInstanceFromBackup() {
    return true;
}
// NOTE(review): generic type parameters appear stripped by the patch extraction (raw List);
// restore them from the original patch when applying.

/** Memory VM snapshots are incompatible with KNIB. */
@Override
public boolean supportsMemoryVmSnapshot() {
    return false;
}

/** Merges the volume's open backup delta before the volume is detached from the VM. */
@Override
public void prepareVolumeForDetach(Volume volume, VirtualMachine virtualMachine) {
    logger.info("Preparing volume [{}] for detach.", volume.getUuid());
    mergeCurrentDeltaIntoVolume(volume, virtualMachine, "detach", virtualMachine.getState().equals(VirtualMachine.State.Running));
}

/** Merges the volume's open backup delta before a live migration (no-op for offline moves). */
@Override
public void prepareVolumeForMigration(Volume volume, VirtualMachine vm) {
    if (VirtualMachine.State.Migrating.equals(vm.getState())) {
        logger.info("Preparing volume [{}] for live migration.", volume.getUuid());
        mergeCurrentDeltaIntoVolume(volume, vm, "live migration", true);
    }
}

/** Re-points secondary-storage delta references after a volume's DB id changes. */
@Override
public void updateVolumeId(VirtualMachine virtualMachine, long oldVolumeId, long newVolumeId) {
    nativeBackupDataStoreDao.updateVolumeId(oldVolumeId, newVolumeId);
}

/**
 * Before reverting a VM snapshot: if the current (open) backup delta is newer than the
 * snapshot, merges/deletes the affected deltas on the host so the revert does not orphan
 * them, then updates the DB references accordingly.
 *
 * @throws CloudRuntimeException when the host is unreachable or any agent command fails.
 */
@Override
public void prepareVmForSnapshotRevert(VMSnapshot vmSnapshot, VirtualMachine virtualMachine) {
    NativeBackupJoinVO currentBackup = nativeBackupJoinDao.findCurrent(virtualMachine.getId());

    if (currentBackup == null) {
        logger.debug("There is no current backup delta, the VM [{}] is already prepared for VM snapshot revert.", virtualMachine.getUuid());
        return;
    }
    if (currentBackup.getDate().before(vmSnapshot.getCreated())) {
        // Delta predates the snapshot; the revert cannot invalidate it.
        logger.debug("The current backup delta was taken before [{}] the VM snapshot being reverted [{}], no need to prepare the VM.", currentBackup.getDate(),
                vmSnapshot.getCreated());
        return;
    }

    logger.debug("Preparing VM [{}] for VM snapshot reversion.", virtualMachine.getUuid());

    List volumeObjectTOs = vmSnapshotHelper.getVolumeTOList(virtualMachine.getId());

    VMSnapshotVO vmSnapshotSucceedingCurrentBackup = getSucceedingVmSnapshot(currentBackup);

    List deltaMergeTreeTOList = new ArrayList<>();
    Commands commands = new Commands(Command.OnError.Stop);
    List deletedDeltas = new ArrayList<>();

    createDeleteCommandsAndMergeTrees(volumeObjectTOs, commands, deletedDeltas, vmSnapshotSucceedingCurrentBackup, deltaMergeTreeTOList);

    if (!deltaMergeTreeTOList.isEmpty()) {
        commands.addCommand(new MergeDiskOnlyVmSnapshotCommand(deltaMergeTreeTOList, false, virtualMachine.getInstanceName()));
    }

    Long hostId = vmSnapshotHelper.pickRunningHost(virtualMachine.getId());

    Answer[] answers;
    try {
        answers = sendBackupCommands(hostId, commands);
    } catch (AgentUnavailableException | OperationTimedoutException e) {
        throw new CloudRuntimeException(e);
    }

    if (answers == null || Arrays.stream(answers).anyMatch(answer -> !answer.getResult())) {
        // NOTE(review): the second log argument is a Stream, which logs its object reference
        // rather than the failing details — consider collecting to a List before logging.
        logger.error("Error while trying to prepare VM [{}] for VM snapshot reversion. Got [{}] as answers from host.", virtualMachine.getUuid(),
                answers != null ? Arrays.stream(answers).filter(answer -> !answer.getResult()).map(Answer::getDetails) : null);
        throw new CloudRuntimeException(String.format("Unable to prepare VM [%s] for VM snapshot reversion.", virtualMachine.getUuid()));
    }

    List snapRefsSucceedingCurrentBackup = new ArrayList<>();

    if (vmSnapshotSucceedingCurrentBackup != null) {
        snapRefsSucceedingCurrentBackup = vmSnapshotHelper.getVolumeSnapshotsAssociatedWithKvmDiskOnlyVmSnapshot(vmSnapshotSucceedingCurrentBackup.getId());
    }

    updateReferencesAfterPrepareForSnapshotRevert(deltaMergeTreeTOList, snapRefsSucceedingCurrentBackup, deletedDeltas, currentBackup);
}

/** Cross-zone instance creation from backups is not supported. */
@Override
public Boolean crossZoneInstanceCreationEnabled(BackupOffering backupOffering) {
    return false;
}

/** Restore points are not used by this provider; always returns null. */
@Override
public List listRestorePoints(VirtualMachine vm) {
    return null;
}

/** Config keys are registered under the generic BackupService component name. */
@Override
public String getConfigComponentName() {
    return BackupService.class.getSimpleName();
}

/**
 * Exposes this provider's configuration keys.
 * NOTE(review): the compression-related keys referenced here are declared outside this view —
 * confirm their declarations in the original patch.
 */
@Override
public ConfigKey[] getConfigKeys() {
    return new ConfigKey[] {backupChainSize, backupTimeout, backupCompressionTimeout, backupCompressionMinimumFreeStorage, backupCompressionRateLimit,
            backupCompressionCoroutines};
}
// NOTE(review): generic type parameters appear stripped by the patch extraction (raw Outcome/Pair);
// restore them from the original patch when applying.

/**
 * Persists a Queued BackupVO and submits a VmWorkTakeBackup job to the VM work queue.
 * The returned Outcome completes when the async job leaves IN_PROGRESS.
 * NOTE(review): duplicates the dispatch/submit/join logic of submitWorkJob (which is
 * Boolean-typed); kept separate because this outcome carries a Pair.
 */
private Outcome createBackupThroughJobQueue(VirtualMachine vm, boolean quiesceVm, boolean isolated) {
    final CallContext context = CallContext.current();
    long userId = context.getCallingUser().getId();
    long accountId = context.getCallingAccount().getAccountId();
    long vmId = vm.getId();

    BackupVO backup = new BackupVO(String.format("%s-%s", vm.getHostName(), DateUtil.getDateInSystemTimeZone()), vmId, vm.getBackupOfferingId(), accountId,
            vm.getDomainId(), vm.getDataCenterId(), 0, Backup.Status.Queued, null);

    VmWorkJobVO workJob = new VmWorkJobVO(AsyncJobExecutionContext.getOriginJobId(), userId, accountId, VmWorkTakeBackup.class.getName(), vmId, VirtualMachine.Type.Instance,
            VmWorkJobVO.Step.Starting);
    // Persisting the backup first so the work payload can carry its id.
    VmWorkTakeBackup workInfo = new VmWorkTakeBackup(userId, accountId, vmId, backupDao.persist(backup).getId(), VM_WORK_JOB_HANDLER, quiesceVm, isolated);

    workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER);
    workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo));

    jobManager.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vmId);
    AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId());

    return new OutcomeImpl<>(Pair.class, workJob, VirtualMachineManagerImpl.VmJobCheckInterval.value(), new Predicate() {
        @Override
        public boolean checkCondition() {
            AsyncJobVO jobVo = entityManager.findById(AsyncJobVO.class, workJob.getId());
            return jobVo == null || jobVo.getStatus() != JobInfo.Status.IN_PROGRESS;
        }
    }, AsyncJob.Topics.JOB_STATE);
}

/** Builds and submits a VmWorkDeleteBackup job for the backup's (possibly removed) owner VM. */
private Outcome deleteBackupThroughJobQueue(Backup backup, boolean forced) {
    final CallContext context = CallContext.current();
    long userId = context.getCallingUser().getId();
    long accountId = context.getCallingAccount().getAccountId();
    // Include removed VMs: backups may outlive their owner instance.
    VirtualMachine userVm = userVmDao.findByIdIncludingRemoved(backup.getVmId());
    long vmId = userVm.getId();

    VmWorkJobVO workJob = new VmWorkJobVO(AsyncJobExecutionContext.getOriginJobId(), userId, accountId, VmWorkDeleteBackup.class.getName(), vmId, VirtualMachine.Type.Instance,
            VmWorkJobVO.Step.Starting);
    VmWorkDeleteBackup workInfo = new VmWorkDeleteBackup(userId, accountId, vmId, VM_WORK_JOB_HANDLER, backup.getId(), forced);

    return submitWorkJob(workJob, workInfo, vmId);
}

/** Builds and submits a VmWorkRestoreBackup job for the given VM/backup pair. */
private Outcome restoreVMFromBackupThroughJobQueue(VirtualMachine vm, Backup backup, boolean quickRestore, Long hostId) {
    final CallContext context = CallContext.current();
    long userId = context.getCallingUser().getId();
    long accountId = context.getCallingAccount().getAccountId();
    long vmId = vm.getId();

    VmWorkJobVO workJob = new VmWorkJobVO(AsyncJobExecutionContext.getOriginJobId(), userId, accountId, VmWorkRestoreBackup.class.getName(), vmId, VirtualMachine.Type.Instance,
            VmWorkJobVO.Step.Starting);
    VmWorkRestoreBackup workInfo = new VmWorkRestoreBackup(userId, accountId, vmId, VM_WORK_JOB_HANDLER, backup.getId(), quickRestore, hostId);

    return submitWorkJob(workJob, workInfo, vmId);
}

/** Builds and submits a VmWorkRestoreVolumeBackupAndAttach job for a single backed-up volume. */
private Outcome restoreBackedUpVolumeThroughJobQueue(VirtualMachine vm, Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, boolean quickRestore) {
    final CallContext context = CallContext.current();
    long userId = context.getCallingUser().getId();
    long accountId = context.getCallingAccount().getAccountId();
    long vmId = vm.getId();

    VmWorkJobVO workJob = new VmWorkJobVO(AsyncJobExecutionContext.getOriginJobId(), userId, accountId, VmWorkRestoreVolumeBackupAndAttach.class.getName(), vmId,
            VirtualMachine.Type.Instance, VmWorkJobVO.Step.Starting);
    VmWorkRestoreVolumeBackupAndAttach workInfo = new VmWorkRestoreVolumeBackupAndAttach(userId, accountId, vmId, VM_WORK_JOB_HANDLER, backup.getId(),
            backupVolumeInfo, hostIp, quickRestore);

    return submitWorkJob(workJob, workInfo, vmId);
}

/**
 * Shared submit helper: serializes the work payload, enqueues the job on the VM work queue,
 * joins the current execution context to it, and returns a Boolean-typed Outcome that
 * completes when the async job leaves IN_PROGRESS.
 */
private OutcomeImpl submitWorkJob(VmWorkJobVO workJob, VmWork workInfo, long vmId) {
    workJob.setDispatcher(VmWorkConstants.VM_WORK_JOB_DISPATCHER);
    workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo));

    jobManager.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vmId);
    AsyncJobExecutionContext.getCurrentExecutionContext().joinJob(workJob.getId());

    return new OutcomeImpl<>(Boolean.class, workJob, VirtualMachineManagerImpl.VmJobCheckInterval.value(), new Predicate() {
        @Override
        public boolean checkCondition() {
            AsyncJobVO jobVo = entityManager.findById(AsyncJobVO.class, workJob.getId());
            return jobVo == null || jobVo.getStatus() != JobInfo.Status.IN_PROGRESS;
        }
    }, AsyncJob.Topics.JOB_STATE);
}

/** Queues an async StartCompression job for the backup when its offering enables compression. */
protected void compressBackupAsyncIfHasOfferingSupport(NativeBackupJoinVO backupJoinVO, long zoneId) {
    if (!offeringSupportsCompression(backupJoinVO)) {
        return;
    }

    logger.info("Queuing backup compression job for backup [{}].", backupJoinVO.getUuid());
    backupCompressionJobDao.persist(new BackupCompressionJobVO(backupJoinVO.getId(), zoneId, backupJoinVO.getVmId(), BackupCompressionJobType.StartCompression));
}
Throwing BackupException.", e, vm); + throw new BackupException(String.format("Exception while trying to start VM [%s] as part of the quick restore process.", userVmVO.getUuid()), e, false); + } + } + + return consolidateVolumes(vm, hostId, volumesToConsolidate); + } + + private List getVolumesToConsolidate(VirtualMachine vm, List deltasOnSecondary, List volumeObjectTOS, long hostId, + boolean sameVmAsBackup) { + List volumesToConsolidate = new ArrayList<>(); + + transitVmStateWithoutThrow(vm, VirtualMachine.Event.RestoringSuccess, hostId); + for (VolumeObjectTO volume : volumeObjectTOS) { + VolumeInfo volumeInfo = volumeDataFactory.getVolume(volume.getVolumeId()); + transitVolumeStateWithoutThrow(volumeInfo.getVolume(), Volume.Event.RestoreSucceeded); + + if (!sameVmAsBackup || deltasOnSecondary.stream().anyMatch(delta -> delta.getVolumeId() == volume.getVolumeId())) { + volumesToConsolidate.add(volumeInfo); + } + } + return volumesToConsolidate; + } + + private boolean consolidateVolumes(VirtualMachine vm, long hostId, List volumesToConsolidate) { + for (VolumeInfo volumeInfo : volumesToConsolidate) { + transitVolumeStateWithoutThrow(volumeInfo.getVolume(), Volume.Event.ConsolidationRequested); + } + + VMInstanceDetailVO uuids = userVmDetailsDao.findDetail(vm.getId(), VmDetailConstants.LINKED_VOLUMES_SECONDARY_STORAGE_UUIDS); + List secondaryStorageUuids = uuids != null ? List.of(uuids.getValue().split(",")) : List.of(); + ConsolidateVolumesCommand cmd = new ConsolidateVolumesCommand(volumesToConsolidate, secondaryStorageUuids, vm.getInstanceName()); + Answer answer = sendBackupCommand(hostId, cmd); + + String logError = String.format("Failed to consolidate volumes [%s] of VM [%s]. Answer details: [%s].", + volumesToConsolidate, vm.getName(), answer != null ? 
answer.getDetails() : "null"); + if (!(answer instanceof ConsolidateVolumesAnswer)) { + logger.error(logError); + throw new BackupException(logError, false); + } + ConsolidateVolumesAnswer cAnswer = (ConsolidateVolumesAnswer)answer; + processConsolidateAnswer(cAnswer, volumesToConsolidate, vm); + + logger.info("Volume consolidation answer: [{}].", cAnswer.getResult()); + return cAnswer.getResult(); + } + + /** + * Validates the Backup status:
+ * - If it is Error and The VM is in BackupError, will throw an exception;
+ * - If it is in Error but the VM is not in BackupError, will set the backup as Failed so that it may be removed with {@code deleteFailedBackup(BackupVO backupVO)};
+ * - If it is not in Error, does nothing. + * */ + private void checkErrorBackup(BackupVO backupVO, VirtualMachine virtualMachine) { + if (backupVO.getStatus() != Backup.Status.Error) { + return; + } + if (virtualMachine != null && virtualMachine.getState() == VirtualMachine.State.BackupError) { + logger.error("Unable to delete backup [{}] as it is in Error state and the associated VM [{}] is in BackupError state. You must read the backup creation logs," + + " normalize the VM's volumes in the hypervisor/storage and update the VM state in the database before trying to delete the backup. Try again when the VM is not " + + "in this state.", backupVO, virtualMachine.getUuid()); + throw new InvalidParameterValueException(String.format("Unable to delete backup [%s]. Please check the logs.", backupVO.getUuid())); + } + logger.debug("Assuming VM and storage are normalized and setting backup [{}] as failed so its metadata is deleted."); + backupVO.setStatus(Backup.Status.Failed); + } + + /** + * Deletes a Failed backup metadata and sets the backup as Expunged. + * */ + private boolean deleteFailedBackup(BackupVO backupVO) { + if (backupVO.getStatus() == Backup.Status.Failed) { + long backupId = backupVO.getId(); + + backupVO.setStatus(Backup.Status.Expunged); + backupDao.update(backupId, backupVO); + nativeBackupStoragePoolDao.expungeByBackupId(backupId); + nativeBackupDataStoreDao.expungeByBackupId(backupId); + backupDetailDao.removeDetails(backupId); + return true; + } + return false; + } + + /** + * Merges the current delta on primary storage, if any, into the given volume. If the backup has no more deltas on primary storage, will set the backup as end_of_chain. 
+ * */ + protected void mergeCurrentDeltaIntoVolume(Volume volume, VirtualMachine virtualMachine, String operation, boolean isVmRunning) { + NativeBackupStoragePoolVO delta = nativeBackupStoragePoolDao.findOneByVolumeId(volume.getId()); + if (delta == null) { + logger.debug("Volume [{}] has no deltas to merge, doing nothing.", volume.getUuid()); + return; + } + NativeBackupJoinVO nativeBackupJoinVO = nativeBackupJoinDao.findById(delta.getBackupId()); + VMSnapshotVO succeedingVmSnapshotVO = getSucceedingVmSnapshot(nativeBackupJoinVO); + + DataStore store = dataStoreManager.getDataStore(volume.getPoolId(), DataStoreRole.Primary); + VolumeObject volumeObject = VolumeObject.getVolumeObject(store, (VolumeVO)volume); + + DeltaMergeTreeTO deltaMergeTreeTO = createDeltaMergeTree(succeedingVmSnapshotVO == null, isVmRunning, delta, (VolumeObjectTO)volumeObject.getTO(), succeedingVmSnapshotVO); + MergeDiskOnlyVmSnapshotCommand cmd = new MergeDiskOnlyVmSnapshotCommand(List.of(deltaMergeTreeTO), isVmRunning, virtualMachine.getInstanceName()); + + Answer answer = sendBackupCommand(vmSnapshotHelper.pickRunningHost(virtualMachine.getId()), cmd); + + if (answer == null || !answer.getResult()) { + logger.error("Error while trying to prepare volume [{}] for {}. Got [{}] as answer from host.", volume.getUuid(), operation, answer != null ? 
answer.getDetails() : null); + throw new CloudRuntimeException(String.format("Unable to prepare volume [%s] for [%s].", volume.getUuid(), operation)); + } + + if (succeedingVmSnapshotVO == null) { + VolumeVO volumeVO = volumeDao.findById(volumeObject.getId()); + volumeVO.setPath(deltaMergeTreeTO.getParent().getPath()); + volumeDao.update(volumeVO.getId(), volumeVO); + } + + expungeOldDeltasAndUpdateVmSnapshotIfNeeded(List.of(delta), succeedingVmSnapshotVO); + + List backupDeltas = nativeBackupStoragePoolDao.listByBackupId(delta.getBackupId()); + if (backupDeltas.isEmpty()) { + logger.debug("Backup [{}] has no more deltas on primary storage due to prepare volume [{}] for {} operation. Will set it as end of chain and not current.", + nativeBackupJoinVO.getUuid(), volume.getUuid(), operation); + setEndOfChainAndRemoveCurrentForBackup(nativeBackupJoinVO); + } + } + + /** + * Creates the necessary delta references on both primary and secondary storage. Also maps the volume to the parent delta backup and create the delta merge tree. 
+ * */ + protected void createDeltaReferences(boolean fullBackup, boolean endOfChain, boolean hasVmSnapshotSucceedingLastBackup, boolean runningVm, Backup backup, + List parentBackupDeltasOnSecondary, List parentBackupDeltasOnPrimary, + HashMap volumeUuidToDeltaPrimaryRef, HashMap volumeUuidToDeltaSecondaryRef, + VMSnapshotVO succeedingVmSnapshot, KnibTO knibTO, boolean isolated) { + VolumeObjectTO volumeObjectTO = knibTO.getVolumeObjectTO(); + logger.debug("Creating delta references for backup [{}] of volume [{}].", backup.getUuid(), volumeObjectTO.getUuid()); + + NativeBackupDataStoreVO deltaSecondaryRef = new NativeBackupDataStoreVO(backup.getId(), volumeObjectTO.getVolumeId(), volumeObjectTO.getDeviceId(), null); + + if (!fullBackup) { + NativeBackupStoragePoolVO parentDeltaOnPrimary = createDeltaMergeTreeForVolume(false, runningVm, parentBackupDeltasOnPrimary, succeedingVmSnapshot, knibTO); + findAndSetParentBackupPath(parentBackupDeltasOnSecondary, parentDeltaOnPrimary, knibTO); + } + + NativeBackupDataStoreVO referenceOnSecondary = nativeBackupDataStoreDao.persist(deltaSecondaryRef); + logger.trace("Created reference [{}] for backup [{}] of volume [{}].", referenceOnSecondary, backup, volumeObjectTO); + volumeUuidToDeltaSecondaryRef.put(volumeObjectTO.getUuid(), referenceOnSecondary); + + if (endOfChain || isolated) { + logger.trace("Backup [{}] is [{}] and, thus, not creating a storage pool reference for its delta.", backup, endOfChain ? 
"end of chain" : "isolated"); + return; + } + + NativeBackupStoragePoolVO deltaPrimaryRef = new NativeBackupStoragePoolVO(backup.getId(), volumeObjectTO.getPoolId(), volumeObjectTO.getVolumeId(), null, + volumeObjectTO.getPath()); + + if (knibTO.getDeltaMergeTreeTO() != null && !hasVmSnapshotSucceedingLastBackup) { + deltaPrimaryRef.setBackupDeltaParentPath(knibTO.getDeltaMergeTreeTO().getParent().getPath()); + } else if (hasVmSnapshotSucceedingLastBackup) { + deltaPrimaryRef.setBackupDeltaParentPath(volumeObjectTO.getPath()); + } + + NativeBackupStoragePoolVO referenceOnPrimary = nativeBackupStoragePoolDao.persist(deltaPrimaryRef); + logger.trace("Created reference [{}] for backup [{}] of volume [{}].", referenceOnPrimary, backup, volumeObjectTO); + volumeUuidToDeltaPrimaryRef.put(volumeObjectTO.getUuid(), referenceOnPrimary); + } + + private HostVO getHostToRestore(VirtualMachine vm, boolean quickRestore, Long hostId) throws AgentUnavailableException { + HostVO host; + if (quickRestore) { + if (hostId == null) { + hostId = vm.getLastHostId(); + } + if (hostId == null) { + logger.error("Cannot quick restore if the VM has no last host and no hostId was informed. You may try to start it in an available host and stop it before quick" + + " restoring. Otherwise, use the normal restore."); + throw new AgentUnavailableException(String.format("No host found to quick restore VM [%s]. Please check the logs.", vm.getUuid()), -1); + } + host = hostDao.findByIdIncludingRemoved(hostId); + if (host.getStatus() != Status.Up || host.isInMaintenanceStates() || host.getResourceState() != ResourceState.Enabled) { + logger.error("Cannot quick restore if the VM's last host is in maintenance, not Up, or disabled. You may try to start it in an available host and stop it before quick" + + " restoring. Otherwise, use the normal restore."); + throw new AgentUnavailableException(String.format("No host found to quick restore VM [%s]. 
Please check the logs.", vm.getUuid()), -1); + } + } else { + hostId = vmSnapshotHelper.pickRunningHost(vm.getId()); + host = hostDao.findByIdIncludingRemoved(hostId); + } + return host; + } + + /** + * Returns ordered list of disk-only VM snapshots taken after the last backup. The list is ordered from oldest to newest. + * */ + protected List getSucceedingVmSnapshotList(NativeBackupJoinVO backup) { + List vmSnapshotVOs = new ArrayList<>(); + if (backup == null) { + return vmSnapshotVOs; + } + + VMSnapshotVO currentSnapshotVO = vmSnapshotDao.findCurrentSnapshotByVmId(backup.getVmId()); + if (currentSnapshotVO == null || currentSnapshotVO.getCreated().before(backup.getDate())) { + return vmSnapshotVOs; + } + vmSnapshotVOs.add(0, currentSnapshotVO); + + while (currentSnapshotVO.getParent() != null && currentSnapshotVO.getParent() != 0) { + VMSnapshotVO parentSnap = vmSnapshotDao.findById(currentSnapshotVO.getParent()); + if (parentSnap.getCreated().before(backup.getDate())){ + break; + } + currentSnapshotVO = parentSnap; + vmSnapshotVOs.add(0, currentSnapshotVO); + } + + logger.debug("Found the following VM snapshots that succeed the backup [{}]: [{}].", backup.getUuid(), vmSnapshotVOs); + + return vmSnapshotVOs; + } + + /** + * Returns the disk-only VM snapshot taken after the last backup, if any. + * */ + private VMSnapshotVO getSucceedingVmSnapshot(NativeBackupJoinVO backup) { + List snaps = getSucceedingVmSnapshotList(backup); + if (snaps.isEmpty()) { + return null; + } + return snaps.get(0); + } + + /** + * Given a VM snapshot, returns a map of volume id to list of snapshot references of the children of the VM snapshot. 
+ * */ + private Map> gatherSnapshotReferencesOfChildrenSnapshot(List volumeObjectTOs, VMSnapshot vmSnapshotVO) { + Map> volumeToSnapshotRefs = new HashMap<>(); + + if (vmSnapshotVO == null) { + return volumeToSnapshotRefs; + } + + List snapshotChildren = vmSnapshotDao.listByParent(vmSnapshotVO.getId()); + + if (CollectionUtils.isEmpty(snapshotChildren)) { + return volumeToSnapshotRefs; + } + + List snapshotDataStoreVOS = new ArrayList<>(); + snapshotChildren.stream() + .map(snapshotVo -> vmSnapshotHelper.getVolumeSnapshotsAssociatedWithKvmDiskOnlyVmSnapshot(snapshotVo.getId())) + .forEach(snapshotDataStoreVOS::addAll); + + mapVolumesToSnapshotReferences(volumeObjectTOs, snapshotDataStoreVOS, volumeToSnapshotRefs); + + if (logger.isDebugEnabled()) { + StringBuilder log = new StringBuilder(String.format("Found the following snapshot references that succeed the VM snapshot [%s].", vmSnapshotVO.getUuid())); + for (VolumeObjectTO volumeObjectTO : volumeObjectTOs) { + log.append(String.format(" Volume [%s]; Snapshot references [%s].", volumeObjectTO.getUuid(), volumeToSnapshotRefs.get(volumeObjectTO.getId()))); + } + logger.debug(log.toString()); + } + + return volumeToSnapshotRefs; + } + + /** + * Given a list of volumes and VM snapshots, maps the volumes to the snapshot references of the VM snapshots. 
+ * */ + protected Map> mapVolumesToVmSnapshotReferences(List volumeObjectTOs, List vmSnapshotVOList) { + Map> volumeToSnapshotRefs = new HashMap<>(); + if (vmSnapshotVOList.isEmpty()) { + logger.trace("No VM snapshot to map to any volume, returning."); + return volumeToSnapshotRefs; + } + + ArrayList allRefs = new ArrayList<>(); + for (VMSnapshotVO vmSnapshotVO : vmSnapshotVOList) { + allRefs.addAll(vmSnapshotHelper.getVolumeSnapshotsAssociatedWithKvmDiskOnlyVmSnapshot(vmSnapshotVO.getId())); + } + mapVolumesToSnapshotReferences(volumeObjectTOs, allRefs, volumeToSnapshotRefs); + logger.trace("Given volume objects [{}] and VM snapshots [{}], created the following map [{}].", volumeObjectTOs, vmSnapshotVOList, volumeToSnapshotRefs); + return volumeToSnapshotRefs; + } + + protected void mapVolumesToSnapshotReferences(List volumeObjectTOs, List snapshotDataStoreVOS, Map> volumeToSnapshotRefs) { + for (VolumeObjectTO volumeObjectTO : volumeObjectTOs) { + List associatedSnapshots = snapshotDataStoreVOS.stream() + .filter(snapRef -> Objects.equals(snapRef.getVolumeId(), volumeObjectTO.getVolumeId())) + .collect(Collectors.toList()); + volumeToSnapshotRefs.put(volumeObjectTO.getId(), associatedSnapshots); + } + } + + /** + * Updates the necessary references on the database. Also calculates the backup's physical size. 
+ * */ + private long updateDeltaReferencesAndCalculateBackupPhysicalSize(VolumeObjectTO volumeObjectTO, HashMap volumeUuidToDeltaPrimaryRef, + HashMap volumeUuidToDeltaSecondaryRef, TakeKnibBackupAnswer answer, long physicalBackupSize) { + String volumeUuid = volumeObjectTO.getUuid(); + NativeBackupStoragePoolVO deltaPrimaryRef = volumeUuidToDeltaPrimaryRef.get(volumeUuid); + NativeBackupDataStoreVO deltaSecondaryRef = volumeUuidToDeltaSecondaryRef.get(volumeUuid); + + String newVolumePath = answer.getMapVolumeUuidToNewVolumePath().get(volumeUuid); + + if (deltaPrimaryRef != null) { + logger.trace("Updating delta reference on primary [{}] path to [{}].", deltaPrimaryRef, newVolumePath); + deltaPrimaryRef.setBackupDeltaPath(newVolumePath); + nativeBackupStoragePoolDao.update(deltaPrimaryRef.getId(), deltaPrimaryRef); + } + + VolumeVO volumeVO = volumeDao.findById(volumeObjectTO.getId()); + volumeVO.setPath(newVolumePath); + logger.trace("Updating volume [{}] path to [{}].", volumeVO.getUuid(), newVolumePath); + volumeDao.update(volumeVO.getId(), volumeVO); + + Pair deltaPathOnSecondaryAndSize = answer.getMapVolumeUuidToDeltaPathOnSecondaryAndSize().get(volumeUuid); + logger.trace("Updating delta reference on secondary [{}] path to [{}].", deltaSecondaryRef, deltaPathOnSecondaryAndSize.first()); + deltaSecondaryRef.setBackupPath(deltaPathOnSecondaryAndSize.first()); + nativeBackupDataStoreDao.update(deltaSecondaryRef.getId(), deltaSecondaryRef); + + physicalBackupSize += deltaPathOnSecondaryAndSize.second(); + return physicalBackupSize; + } + + /** + * Expunge the old backup deltas and if there were disk-only VM snapshot deltas after the last backup, update their paths. + * */ + private void expungeOldDeltasAndUpdateVmSnapshotIfNeeded(List oldDeltasOnPrimary, VMSnapshot vmSnapshot) { + List snapshotRefs = vmSnapshot == null ? 
List.of() : vmSnapshotHelper.getVolumeSnapshotsAssociatedWithKvmDiskOnlyVmSnapshot(vmSnapshot.getId()); + for (NativeBackupStoragePoolVO oldBackupDelta : oldDeltasOnPrimary) { + logger.trace("Expunging old backup delta [{}].", oldBackupDelta); + nativeBackupStoragePoolDao.expunge(oldBackupDelta.getId()); + SnapshotDataStoreVO snapshotDataStoreVO = snapshotRefs.stream().filter(ref -> ref.getVolumeId() == oldBackupDelta.getVolumeId()).findFirst().orElse(null); + if (snapshotDataStoreVO == null) { + continue; + } + snapshotDataStoreVO.setInstallPath(oldBackupDelta.getBackupDeltaParentPath()); + logger.debug("Updating snapshot delta [{}] path to [{}].", snapshotDataStoreVO.getId(), oldBackupDelta.getBackupDeltaParentPath()); + snapshotDataStoreDao.update(snapshotDataStoreVO.getId(), snapshotDataStoreVO); + } + } + + /** + * Create a {@link DeltaMergeTreeTO} for the volume if it has a delta on primary and add it to the list. + * + * @return the delta on primary of the volume. Null if no delta. 
+ * */ + protected NativeBackupStoragePoolVO createDeltaMergeTreeForVolume(boolean childIsVolume, boolean runningVm, List deltasOnPrimary, VMSnapshotVO succeedingVmSnapshot, + KnibTO knibTO) { + VolumeObjectTO volumeObjectTO = knibTO.getVolumeObjectTO(); + + NativeBackupStoragePoolVO deltaOnPrimary = deltasOnPrimary.stream() + .filter(delta -> delta.getVolumeId() == volumeObjectTO.getVolumeId()) + .findFirst() + .orElse(null); + if (deltaOnPrimary == null) { + logger.debug("Volume [{}] has no delta on primary storage.", volumeObjectTO); + return null; + } + + logger.debug("Volume [{}] has a backup delta on primary storage [{}].", volumeObjectTO.getUuid(), deltaOnPrimary); + + knibTO.setDeltaMergeTreeTO(createDeltaMergeTree(childIsVolume, runningVm, deltaOnPrimary, volumeObjectTO, succeedingVmSnapshot)); + return deltaOnPrimary; + } + + private DeltaMergeTreeTO createDeltaMergeTree(boolean childIsVolume, boolean runningVm, NativeBackupStoragePoolVO deltaOnPrimary, + VolumeObjectTO volumeObjectTO, VMSnapshotVO succeedingVmSnapshot) { + DataStore store = dataStoreManager.getDataStore(deltaOnPrimary.getStoragePoolId(), DataStoreRole.Primary); + DataTO deltaChild; + if (childIsVolume) { + deltaChild = volumeObjectTO; + } else { + deltaChild = new BackupDeltaTO(store.getTO(), Hypervisor.HypervisorType.KVM, deltaOnPrimary.getBackupDeltaPath()); + } + + BackupDeltaTO deltaParent = new BackupDeltaTO(store.getTO(), Hypervisor.HypervisorType.KVM, deltaOnPrimary.getBackupDeltaParentPath()); + + List succeedingDeltaPaths = new ArrayList<>(); + if (succeedingVmSnapshot != null) { + succeedingDeltaPaths = gatherSnapshotReferencesOfChildrenSnapshot(List.of(volumeObjectTO), succeedingVmSnapshot).getOrDefault(volumeObjectTO.getVolumeId(), List.of()) + .stream().map(SnapshotDataStoreVO::getInstallPath).collect(Collectors.toList()); + + if (!childIsVolume && !runningVm && succeedingDeltaPaths.isEmpty()) { + succeedingDeltaPaths = List.of(volumeObjectTO.getPath()); + 
logger.debug("Since the last backup delta of volume [{}] is succeeded by a snapshot and the delta created by this snapshot is also the volume, it will have to be" + + " rebased. Setting it as the grand-child.", volumeObjectTO.getUuid()); + } + } + + + + List deltaGrandchildren = succeedingDeltaPaths.stream() + .map(deltaPath -> new BackupDeltaTO(store.getTO(), Hypervisor.HypervisorType.KVM, deltaPath)) + .collect(Collectors.toList()); + + DeltaMergeTreeTO deltaMergeTreeTO = new DeltaMergeTreeTO(volumeObjectTO, deltaParent, deltaChild, deltaGrandchildren); + + logger.debug("Mapped the following delta merge tree for volume [{}]: [{}].", volumeObjectTO.getUuid(), deltaMergeTreeTO); + return deltaMergeTreeTO; + } + + /** + * Sets on the {@code knibTO} the backupParentOnSecondary path based on the list of NativeBackupDataStoreVO. + * + * @param parentBackupDeltasOnSecondary + * List of deltas on secondary; + * @param parentDeltaOnPrimary + * @param knibTO + * KnibTO to be configured; + */ + protected void findAndSetParentBackupPath(List parentBackupDeltasOnSecondary, NativeBackupStoragePoolVO parentDeltaOnPrimary, KnibTO knibTO) { + VolumeObjectTO volumeObjectTO = knibTO.getVolumeObjectTO(); + if (parentDeltaOnPrimary == null) { + logger.debug("Volume [{}] has no parent on primary, thus its backup cannot be incremental.", volumeObjectTO); + return; + } + + NativeBackupDataStoreVO parentOnSecondary = parentBackupDeltasOnSecondary.stream() + .filter(backupDataStoreVo -> volumeObjectTO.getVolumeId() == backupDataStoreVo.getVolumeId()) + .findFirst() + .orElse(null); + + if (parentOnSecondary == null) { + return; + } + + logger.debug("Volume [{}] already has a backup [{}].", volumeObjectTO.getUuid(), parentOnSecondary.getBackupId()); + + knibTO.setPathBackupParentOnSecondary(parentOnSecondary.getBackupPath()); + } + + /** + * Verify if the data center has heuristic rules for allocating backups; if there is then returns the {@link DataStore} returned by the JS script. 
+ * Otherwise, returns a {@link DataStore} with free capacity. + */ + protected DataStore getImageStoreForBackup(Long dataCenterId, BackupVO backupVO) { + DataStore imageStore = heuristicRuleHelper.getImageStoreIfThereIsHeuristicRule(dataCenterId, HeuristicType.BACKUP, backupVO); + + if (imageStore == null) { + imageStore = dataStoreManager.getImageStoreWithFreeCapacity(dataCenterId); + } + + if (imageStore == null) { + backupVO.setStatus(Backup.Status.Failed); + backupDao.update(backupVO.getId(), backupVO); + throw new CloudRuntimeException(String.format("Unable to find secondary storage for backup [%s].", backupVO)); + } + + logger.debug("Backup [{}] will use secondary storage [{}].", backupVO.getUuid(), imageStore.getUuid()); + return imageStore; + } + + protected void setBackupAsIsolated(BackupVO backup) { + logger.debug("Setting backup [{}] as isolated.", backup.getUuid()); + backupDetailDao.persist(new BackupDetailVO(backup.getId(), ISOLATED, Boolean.TRUE.toString(), true)); + } + + /** + * Gets the parent for newBackup. Will set the newBackup as the end of chain if needed.
+ * - If no backups are found, returns null.
+ * - If the last backup was the end of the chain, returns null.
+ * + * @param newBackup the new backup being created. + * @param backupChain newBackup's ancestors. + * */ + protected NativeBackupJoinVO getParentAndSetEndOfChain(BackupVO newBackup, List backupChain, NativeBackupOfferingVO offering) { + int chainSize = getChainSizeForBackup(offering, newBackup.getZoneId()); + if (CollectionUtils.isEmpty(backupChain)) { + setEndOfChainTrueIfRemainingChainSizeIsOneOrLess(chainSize, chainSize, newBackup.getId(), newBackup.getUuid()); + return null; + } + + int remainingChainSize = chainSize - backupChain.size(); + setEndOfChainTrueIfRemainingChainSizeIsOneOrLess(remainingChainSize, chainSize, newBackup.getId(), newBackup.getUuid()); + + NativeBackupJoinVO parent = backupChain.get(0); + return parent.getStatus().equals(Backup.Status.BackedUp) ? parent : null; + } + + /** + * For every restore point, maps a volume to it. + * @throws CloudRuntimeException If cannot map restore point to any volume. + * */ + private Set> generateBackupAndVolumePairsToRestore(List backupDeltas, List volumeTOs, + NativeBackupJoinVO backupJoinVO, boolean sameVmAsBackup) { + Set> backupAndVolumePairs = new HashSet<>(); + DataStore dataStore = dataStoreManager.getDataStore(backupJoinVO.getImageStoreId(), DataStoreRole.Image); + for (NativeBackupDataStoreVO backupDataStoreVO : backupDeltas) { + VolumeObjectTO volumeObjectTO = volumeTOs.stream() + .filter(volumeTO -> sameVmAsBackup ? volumeTO.getVolumeId() == backupDataStoreVO.getVolumeId() : volumeTO.getDeviceId() == backupDataStoreVO.getDeviceId()) + .findFirst() + .orElse(null); + + if (volumeObjectTO == null) { + logger.error("All backups should have a corresponding volume at this point, however, backup delta [{}] does not.", backupDataStoreVO.getId()); + throw new CloudRuntimeException("Error while restoring backup. 
Please check the logs."); + } + + backupAndVolumePairs.add(new Pair<>(new BackupDeltaTO(dataStore.getTO(), Hypervisor.HypervisorType.KVM, backupDataStoreVO.getBackupPath()), volumeObjectTO)); + } + logger.debug("Generated the following list of pairs of backup deltas and volumes: [{}].", backupAndVolumePairs); + return backupAndVolumePairs; + } + + protected Pair generateBackupAndVolumePairForSingleNewVolume(NativeBackupDataStoreVO backupDeltaVo, VolumeObjectTO volumeTO, long oldVolumeId, + NativeBackupJoinVO backupJoinVO) { + DataStore dataStore = dataStoreManager.getDataStore(backupJoinVO.getImageStoreId(), DataStoreRole.Image); + Pair backupAndVolumePair = new Pair<>(new BackupDeltaTO(dataStore.getTO(), Hypervisor.HypervisorType.KVM, backupDeltaVo.getBackupPath()), volumeTO); + + logger.debug("Paired volume [{}] with backup delta [{}].", volumeTO, backupAndVolumePair.first()); + return backupAndVolumePair; + } + + /** + * For every volume, maps deltas that should be deleted, if there are any. If a volume has a delta but is not part of backup being restored, it will be mapped to be merged. + * + * @return List of deltas to be merged. + * */ + private List populateDeltasToRemoveAndToMergeAndUpdateVolumePaths(List deltasOnPrimary, Set deltasToRemove, List volumeTOs, + List volumesNotPartOfTheBackupBeingRestored, String vmUuid) { + List deltasToBeMerged = new ArrayList<>(); + for (NativeBackupStoragePoolVO deltaOnPrimary : deltasOnPrimary) { + Optional optional = volumeTOs.stream().filter(volumeTO -> volumeTO.getVolumeId() == deltaOnPrimary.getVolumeId()).findFirst(); + if (optional.isEmpty()) { + logger.error("Failed to find volume that matches delta [{}] with path [{}]. Please check for inconsistencies on the database or if there are leftover" + + " deltas on storage.", deltaOnPrimary.getId(), deltaOnPrimary.getBackupDeltaPath()); + throw new CloudRuntimeException(String.format("Failed to restore VM [%s]. 
Please check the logs.", vmUuid)); + } + VolumeObjectTO volumeObjectTO = optional.get(); + + if (volumesNotPartOfTheBackupBeingRestored.contains(volumeObjectTO)) { + deltasToBeMerged.add(createDeltaMergeTree(true, false, deltaOnPrimary, volumeObjectTO, null)); + continue; + } + + DataStore dataStore = dataStoreManager.getDataStore(deltaOnPrimary.getStoragePoolId(), DataStoreRole.Primary); + BackupDeltaTO backupDeltaTO = new BackupDeltaTO(dataStore.getTO(), Hypervisor.HypervisorType.KVM, deltaOnPrimary.getBackupDeltaPath()); + logger.debug("Mapped the following backup delta on primary to be removed since the volume [{}] is not part of the backup being restored [{}].", + volumeObjectTO.getUuid(), backupDeltaTO); + deltasToRemove.add(backupDeltaTO); + volumeObjectTO.setPath(deltaOnPrimary.getBackupDeltaParentPath()); + } + if (!deltasToBeMerged.isEmpty()) { + logger.debug("The following deltaMergeTrees [{}] were created to merge volumes [{}] that have no backups.", deltasToBeMerged, volumesNotPartOfTheBackupBeingRestored); + } + return deltasToBeMerged; + } + + private void updateVolumePathsAndSizeIfNeeded(VirtualMachine vm, List volumeTOs, List volumeInfos, + List deltaMergeTreeTOList, boolean sameVmAsBackup) { + List volumeVOs = volumeDao.findByInstance(vm.getId()); + + for (VolumeVO volumeVO : volumeVOs) { + VolumeObjectTO volumeTO = volumeTOs.stream().filter(volumeObjectTO -> volumeObjectTO.getVolumeId() == volumeVO.getId()).findFirst().get(); + + String log = "Volume [%s] path was updated as part of the backup restore process. 
New path: [%s]."; + DeltaMergeTreeTO deltaMergeTreeTO = deltaMergeTreeTOList.stream().filter(delta -> delta.getChild().getId() == volumeTO.getId()).findFirst().orElse(null); + if (!volumeVO.getPath().equals(volumeTO.getPath())) { + volumeVO.setPath(volumeTO.getPath()); + logger.debug(() -> String.format(log, volumeVO.getUuid(), volumeVO.getPath())); + } else if (deltaMergeTreeTO != null) { + volumeVO.setPath(deltaMergeTreeTO.getParent().getPath()); + logger.debug(() -> String.format(log, volumeVO.getUuid(), volumeVO.getPath())); + } + + Backup.VolumeInfo volumeInfo = volumeInfos.stream() + .filter(info -> sameVmAsBackup ? volumeVO.getUuid().equals(info.getUuid()) : volumeVO.getDeviceId().equals(info.getDeviceId())) + .findFirst().orElse(null); + if (volumeInfo != null && !Objects.equals(volumeInfo.getSize(), volumeVO.getSize())) { + logger.debug("Volume [{}] size was restored as part of the backup restore process. Old size is [{}] new size is [{}].", volumeVO.getUuid(), + volumeVO.getSize(), volumeInfo.getSize()); + volumeVO.setSize(volumeInfo.getSize()); + } + + volumeDao.update(volumeVO.getId(), volumeVO); + } + } + + protected void createAndAttachVolumes(List volumeInfos, List backupDeltas, VirtualMachine vm, HostVO host) { + logger.info("Found the following backup deltas that have no volume correspondence [{}]. 
Will create new volumes and attach them to VM [{}].", backupDeltas.stream() + .map(NativeBackupDataStoreVO::getId).collect(Collectors.toList()), vm.getUuid()); + for (NativeBackupDataStoreVO delta : backupDeltas) { + VolumeVO volumeVO = volumeDao.findByIdIncludingRemoved(delta.getVolumeId()); + Backup.VolumeInfo backupVolumeInfo = volumeInfos.stream().filter(info -> volumeVO.getUuid().equals(info.getUuid())).findFirst().orElseThrow(); + VolumeInfo volumeInfo = duplicateAndCreateVolume(vm, host, backupVolumeInfo); + Volume volume = volumeApiService.attachVolumeToVM(vm.getId(), volumeInfo.getId(), null, false, true); + transitVolumeStateWithoutThrow(volume, Volume.Event.RestoreRequested); + delta.setVolumeId(volume.getId()); + } + } + + protected VolumeInfo duplicateAndCreateVolume(VirtualMachine vm, HostVO hostVo, Backup.VolumeInfo backupVolumeInfo) { + VolumeVO newVolume = duplicateVolume(backupVolumeInfo); + VolumeInfo volumeInfo = volumeDataFactory.getVolume(newVolume.getId()); + + try { + volumeInfo = volumeOrchestrationService.createVolumeOnPrimaryStorage(vm, volumeInfo, Hypervisor.HypervisorType.KVM, null, hostVo.getClusterId(), hostVo.getPodId()); + } catch (NoTransitionException ex) { + logger.error("Exception while creating volume to restore.", ex); + throw new CloudRuntimeException(ex); + } + + return volumeInfo; + } + + private VolumeVO duplicateVolume(Backup.VolumeInfo backupVolumeInfo) { + VolumeVO volumeVO = volumeDao.findByUuidIncludingRemoved(backupVolumeInfo.getUuid()); + VolumeVO duplicateVO = new VolumeVO(volumeVO); + DiskOfferingVO diskOfferingVO = diskOfferingDao.findByUuidIncludingRemoved(backupVolumeInfo.getDiskOfferingId()); + duplicateVO.setDiskOfferingId(diskOfferingVO.getId()); + duplicateVO.setSize(backupVolumeInfo.getSize()); + duplicateVO.setMinIops(backupVolumeInfo.getMinIops()); + duplicateVO.setMaxIops(backupVolumeInfo.getMaxIops()); + duplicateVO.setAttached(null); + duplicateVO.setVolumeType(Volume.Type.DATADISK); + 
duplicateVO.setInstanceId(null); + duplicateVO.setPoolId(null); + duplicateVO.setPath(null); + return volumeDao.persist(duplicateVO); + } + + protected List getBackupsWithoutVolumes(List backups, List volumes) { + List deltasOnSecondaryWithNoVolumes = new ArrayList<>(); + for (NativeBackupDataStoreVO backup : backups) { + VolumeObjectTO volumeObjectTO = volumes.stream().filter(volumeTO -> volumeTO.getVolumeId() == backup.getVolumeId()) + .findFirst() + .orElse(null); + + if (volumeObjectTO == null) { + deltasOnSecondaryWithNoVolumes.add(backup); + } + } + return deltasOnSecondaryWithNoVolumes; + } + + protected List getVolumesThatAreNotPartOfTheBackup(List volumeObjectTOS, List deltasOnSecondary) { + List volumesWithNoBackups = new ArrayList<>(); + for (VolumeObjectTO volume : volumeObjectTOS) { + if (deltasOnSecondary.stream().noneMatch(delta -> delta.getVolumeId() == volume.getVolumeId())) { + volumesWithNoBackups.add(volume); + } + } + logger.debug("Found the following volumes that are not part of the backup being restored [{}].", volumesWithNoBackups); + return volumesWithNoBackups; + } + + private void processBackupSuccess(boolean runningVm, List volumeTOs, HashMap volumeUuidToDeltaPrimaryRef, + HashMap volumeUuidToDeltaSecondaryRef, TakeKnibBackupAnswer answer, List parentBackupDeltasOnPrimary, + List succeedingVmSnapshots, BackupVO backupVO, boolean fullBackup, VirtualMachine userVm, Long hostId) { + long physicalBackupSize = 0; + logger.debug("Processing backup [{}] success.", backupVO.getUuid()); + for (VolumeObjectTO volumeObjectTO : volumeTOs) { + physicalBackupSize = updateDeltaReferencesAndCalculateBackupPhysicalSize(volumeObjectTO, volumeUuidToDeltaPrimaryRef, volumeUuidToDeltaSecondaryRef, answer, physicalBackupSize); + } + + expungeOldDeltasAndUpdateVmSnapshotIfNeeded(parentBackupDeltasOnPrimary, succeedingVmSnapshots.isEmpty() ? 
null : succeedingVmSnapshots.get(0)); + + backupVO.setSize(physicalBackupSize); + backupVO.setStatus(Backup.Status.BackedUp); + backupVO.setBackedUpVolumes(backupManager.createVolumeInfoFromVolumes(new ArrayList<>(volumeDao.findByInstance(userVm.getId())))); + backupDao.loadDetails(backupVO); + backupVO.getDetails().putAll(backupManager.getBackupDetailsFromVM(userVm)); + backupVO.setType(fullBackup ? "FULL" : "INCREMENTAL"); + backupDao.update(backupVO.getId(), backupVO); + + transitVmStateWithoutThrow(userVm, runningVm ? VirtualMachine.Event.BackupSucceededRunning : VirtualMachine.Event.BackupSucceededStopped, hostId); + } + + private void processBackupFailure(Answer answer, VirtualMachine vm, long hostId, boolean runningVm, BackupVO backupVO) { + if (answer instanceof TakeKnibBackupAnswer && ((TakeKnibBackupAnswer) answer).isVmConsistent()) { + logger.info("Backup [{}] of VM [{}] failed. However, the VM is still consistent, so we will roll back its state.", backupVO.getUuid(), vm.getUuid()); + backupVO.setStatus(Backup.Status.Failed); + + transitVmStateWithoutThrow(vm, runningVm ? VirtualMachine.Event.OperationFailedToRunning : VirtualMachine.Event.OperationFailedToStopped, hostId); + } else { + logger.info("Backup [{}] of VM [{}] ended in error. 
We are not sure if the VM is consistent; thus, we will set it as BackupError.", backupVO.getUuid(), vm.getUuid()); + transitVmStateWithoutThrow(vm, VirtualMachine.Event.OperationFailedToError, hostId); + backupVO.setStatus(Backup.Status.Error); + } + + backupDao.update(backupVO.getId(), backupVO); + } + + private void processRemovedBackups(List removedBackupIds) { + for (Long removedBackupId : removedBackupIds) { + BackupVO removedBackupVO = backupDao.findByIdIncludingRemoved(removedBackupId); + removedBackupVO.setStatus(Backup.Status.Expunged); + backupDao.update(removedBackupId, removedBackupVO); + nativeBackupDataStoreDao.expungeByBackupId(removedBackupId); + backupDetailDao.removeDetailsExcept(removedBackupId, END_OF_CHAIN); + } + } + + /** + * For every backup, except for the one which the command was issued, will set them as Expunged regardless and hope operators will look + * at the logs. For the current one, if forced=false, will set it as error, otherwise, will set it as Expunged as well. + * */ + private boolean processRemoveBackupFailures(boolean forced, Answer[] deleteAnswers, List removedBackupIds, NativeBackupJoinVO backupJoinVO) { + List failures = Arrays.stream(deleteAnswers).filter(answer -> !answer.getResult()).collect(Collectors.toList()); + Set failedToRemoveBackupIdSet = new HashSet<>(); + if (CollectionUtils.isNotEmpty(failures)) { + StringBuilder failureStringBuilder = new StringBuilder("Encountered the following failures during backup removal, all will be marked as Expunged and need to be" + + " manually deleted from storage. 
"); + for (Answer answer : failures) { + failedToRemoveBackupIdSet.add(((BackupDeleteAnswer)answer).getBackupId()); + failureStringBuilder.append(answer.getDetails()); + } + logger.error(failureStringBuilder.toString()); + } + + removedBackupIds.removeAll(failedToRemoveBackupIdSet); + + if (!forced && failedToRemoveBackupIdSet.remove(backupJoinVO.getId())) { + BackupVO failedVO = backupDao.findByIdIncludingRemoved(backupJoinVO.getId()); + logger.info("Since backup delete command was not forced, will not set the main backup [{}] as Expunged, will set it as error instead.", failedVO.getUuid()); + failedVO.setStatus(Backup.Status.Error); + backupDao.update(failedVO.getId(), failedVO); + } + + for (Long failedToRemove : failedToRemoveBackupIdSet) { + BackupVO failedVO = backupDao.findByIdIncludingRemoved(failedToRemove); + failedVO.setStatus(Backup.Status.Expunged); + logger.error("Setting backup [{}] as expunged, even though there was an error when deleting it from storage. Please look at the logs and check if it was deleted from" + + " storage.", failedVO.getUuid()); + backupDao.update(failedToRemove, failedVO); + } + + return failedToRemoveBackupIdSet.isEmpty(); + } + + private void processConsolidateAnswer(ConsolidateVolumesAnswer cAnswer, List volumesToConsolidate, VirtualMachine vm) { + for (VolumeObjectTO volumeObjectTO : cAnswer.getSuccessfullyConsolidatedVolumes()) { + VolumeInfo volumeInfo = volumesToConsolidate.stream().filter(vol -> vol.getId() == volumeObjectTO.getVolumeId()).findFirst().orElseThrow(); + transitVolumeStateWithoutThrow(volumeInfo.getVolume(), Volume.Event.OperationSucceeded); + volumesToConsolidate.remove(volumeInfo); + } + volumesToConsolidate.forEach(volumeInfo -> transitVolumeStateWithoutThrow(volumeInfo, Volume.Event.OperationFailed)); + if (cAnswer.getResult()) { + userVmDetailsDao.removeDetail(vm.getId(), VmDetailConstants.LINKED_VOLUMES_SECONDARY_STORAGE_UUIDS); + } else { + throw new BackupException(String.format("Failed to 
consolidate all volumes necessary of VM [%s]. Missing volumes are [%s].", vm.getUuid(), volumesToConsolidate), false); + } + } + + private boolean processRestoreAnswers(VirtualMachine vm, Answer[] answers) { + boolean cmdSucceeded = true; + for (Answer answer : answers) { + if (answer == null || !answer.getResult()) { + cmdSucceeded = false; + logger.error("Failed to restore backup due to: [{}].", answer == null ? "null answer" : answer.getDetails()); + } + if (answer instanceof RestoreKnibBackupAnswer) { + RestoreKnibBackupAnswer restoreAnswer = (RestoreKnibBackupAnswer) answer; + userVmDetailsDao.addDetail(vm.getId(), VmDetailConstants.LINKED_VOLUMES_SECONDARY_STORAGE_UUIDS, StringUtils.join(restoreAnswer.getSecondaryStorageUuids(), ","), false); + } + } + return cmdSucceeded; + } + + private void handleBackupExceptionInRestore(VirtualMachine vm, BackupException jobResult) { + if (!jobResult.isVmConsistent()) { + UserVmVO vmVO = userVmDao.findById(vm.getId()); + vmVO.setState(VirtualMachine.State.RestoreError); + userVmDao.update(vmVO.getId(), vmVO); + for (VolumeVO vol : volumeDao.findByInstance(vmVO.getId())) { + vol.setState(Volume.State.RestoreError); + volumeDao.update(vol.getId(), vol); + } + } + } + + private void handleRestoreException(Backup backup, VirtualMachine vm, Object jobResult) { + if (!(jobResult instanceof Throwable)) { + return; + } + if (jobResult instanceof BackupException) { + handleBackupExceptionInRestore(vm, (BackupException)jobResult); + } else if (jobResult instanceof BackupProviderException) { + throw (BackupProviderException) jobResult; + } + throw new CloudRuntimeException(String.format("Exception while restoring KVM native incremental backup [%s]. Check the logs for more information.", backup.getUuid()), ((Throwable)jobResult).getCause()); + } + + /** + * Merges the backup deltas related to the passed {@code NativeBackupJoinVO}. + * + * @return true if the merge was successful and false otherwise. 
+ * */ + protected boolean mergeCurrentBackupDeltas(NativeBackupJoinVO backupJoinVO) { + VirtualMachine userVm = userVmDao.findById(backupJoinVO.getVmId()); + + VMSnapshotVO succeedingVmSnapshot = getSucceedingVmSnapshot(backupJoinVO); + MergeDiskOnlyVmSnapshotCommand cmd = buildMergeDiskOnlyVmSnapshotCommandForCurrentBackup(backupJoinVO, userVm, succeedingVmSnapshot); + Long hostId = vmSnapshotHelper.pickRunningHost(backupJoinVO.getVmId()); + + Answer answer = sendBackupCommand(hostId, cmd); + if (answer == null || !answer.getResult()) { + logger.error("Failed to remove backup [{}]. Tried to merge the current deltas to cleanup the VM but failed due to [{}].", + backupJoinVO.getUuid(), answer != null ? answer.getDetails() : "no answer"); + return false; + } + + expungeOldDeltasAndUpdateVmSnapshotIfNeeded(nativeBackupStoragePoolDao.listByBackupId(backupJoinVO.getId()), succeedingVmSnapshot); + + if (succeedingVmSnapshot != null) { + return true; + } + + for (DeltaMergeTreeTO deltaMergeTreeTO : cmd.getDeltaMergeTreeToList()) { + VolumeVO volumeVO = volumeDao.findById(deltaMergeTreeTO.getVolumeObjectTO().getVolumeId()); + volumeVO.setPath(deltaMergeTreeTO.getParent().getPath()); + logger.debug("Updating volume [{}] path to [{}] as part of the backup delete cleanup process.", volumeVO.getUuid(), volumeVO.getPath()); + volumeDao.update(volumeVO.getId(), volumeVO); + } + + return true; + } + + private void createDeleteCommandsAndMergeTrees(List volumeObjectTOs, Commands commands, List deletedDeltas, + VMSnapshotVO vmSnapshotSucceedingCurrentBackup, List deltaMergeTreeTOList) { + for (VolumeObjectTO volumeObjectTO : volumeObjectTOs) { + NativeBackupStoragePoolVO delta = nativeBackupStoragePoolDao.findOneByVolumeId(volumeObjectTO.getVolumeId()); + if (delta == null) { + continue; + } + if (delta.getBackupDeltaPath().equals(volumeObjectTO.getPath())) { + commands.addCommand(new DeleteCommand(new BackupDeltaTO(volumeObjectTO.getDataStore(), Hypervisor.HypervisorType.KVM, 
delta.getBackupDeltaParentPath()))); + deletedDeltas.add(delta); + logger.debug("Volume [{}] has a backup delta that will be deleted as part of the preparation to revert a VM snapshot.", volumeObjectTO.getUuid()); + } else { + deltaMergeTreeTOList.add(createDeltaMergeTree(false, false, delta, volumeObjectTO, vmSnapshotSucceedingCurrentBackup)); + } + } + } + + /*** + * Gets the list of parents that should be expunged. Will also create delete commands for them and add them to the list deleteCommands object. + * + * @param backupVO backup being expunged + * @param deleteCommands Commands object that will be appended with the delete commands for the parent backups. + * @return A pair which contains the list of backups that will be expunged, and the reference to the last backup of the chain that is still alive, if it exists. + */ + private Pair, NativeBackupJoinVO> getParentsToBeExpungedWithBackupAndAddThemToListOfDeleteCommands(BackupVO backupVO, Commands deleteCommands) { + logger.debug("Searching for removed parents of [{}] that should be expunged.", backupVO); + List backupParents = getBackupJoinParents(backupVO, true); + List backupParentsToBeExpunged = null; + NativeBackupJoinVO lastAliveBackup = null; + for (int i = 0; i < backupParents.size(); i++) { + NativeBackupJoinVO backupParent = backupParents.get(i); + if (Backup.Status.Removed.equals(backupParent.getStatus())) { + addBackupDeltasToDeleteCommand(backupParent.getId(), deleteCommands); + } else { + backupParentsToBeExpunged = backupParents.subList(0, i); + lastAliveBackup = backupParents.get(i); + break; + } + } + if (backupParentsToBeExpunged == null) { + backupParentsToBeExpunged = backupParents; + } + logger.debug("Found [{}] removed parents of [{}] that should be expunged: [{}].", backupParentsToBeExpunged.size(), backupVO, backupParentsToBeExpunged); + return new Pair<>(backupParentsToBeExpunged, lastAliveBackup); + } + + private MergeDiskOnlyVmSnapshotCommand 
buildMergeDiskOnlyVmSnapshotCommandForCurrentBackup(NativeBackupJoinVO backupJoinVO, VirtualMachine userVm, VMSnapshotVO vmSnapshot) { + List deltaMergeTreeTOs = new ArrayList<>(); + + List volumeTOs = vmSnapshotHelper.getVolumeTOList(backupJoinVO.getVmId()); + Map> volumeIdToSnapshotDataStoreList = gatherSnapshotReferencesOfChildrenSnapshot(volumeTOs, vmSnapshot); + List deltasOnPrimary = nativeBackupStoragePoolDao.listByBackupId(backupJoinVO.getId()); + + for (VolumeObjectTO volumeObjectTO : volumeTOs) { + KnibTO knibTO = new KnibTO(volumeObjectTO, volumeIdToSnapshotDataStoreList.getOrDefault(volumeObjectTO.getId(), new ArrayList<>())); + createDeltaMergeTreeForVolume(vmSnapshot == null, userVm.getState() == VirtualMachine.State.Running, deltasOnPrimary, vmSnapshot, knibTO); + if (knibTO.getDeltaMergeTreeTO() != null) { + deltaMergeTreeTOs.add(knibTO.getDeltaMergeTreeTO()); + } else { + logger.debug("Volume [{}] does not have any deltas to merge as part of the backup delete process.", volumeObjectTO.getUuid()); + } + } + + return new MergeDiskOnlyVmSnapshotCommand(deltaMergeTreeTOs, userVm.getState().equals(VirtualMachine.State.Running), userVm.getInstanceName()); + } + + private DataStore addBackupDeltasToDeleteCommand(long backupId, Commands deleteCommands) { + NativeBackupJoinVO nativeBackupJoinVO = nativeBackupJoinDao.findById(backupId); + List nativeBackupDataStoreVOs = nativeBackupDataStoreDao.listByBackupId(backupId); + DataStore dataStore = dataStoreManager.getDataStore(nativeBackupJoinVO.getImageStoreId(), DataStoreRole.Image); + DataStoreTO dataStoreTO = dataStore.getTO(); + for (NativeBackupDataStoreVO nativeBackupDataStoreVO : nativeBackupDataStoreVOs) { + BackupDeltaTO backupDeltaTO = new BackupDeltaTO(dataStoreTO, Hypervisor.HypervisorType.KVM, nativeBackupDataStoreVO.getBackupPath()); + backupDeltaTO.setId(backupId); + DeleteCommand deleteCommand = new DeleteCommand(backupDeltaTO); + deleteCommands.addCommand(deleteCommand); + } + return dataStore; 
+ } + + private Set getParentSecondaryStorageUrls(BackupVO backupVO) { + List parentBackups = getBackupJoinParents(backupVO, true); + Set secondaryStorageIds = parentBackups.stream().map(NativeBackupJoinVO::getImageStoreId).collect(Collectors.toSet()); + return secondaryStorageIds.stream().map(id -> imageStoreDao.findById(id).getUrl()).collect(Collectors.toSet()); + } + + private List getChainImageStoreUrls(List backupChain) { + List chainImageStoreUrls; + LinkedHashSet imageStoreIdSet = backupChain.stream().map(NativeBackupJoinVO::getImageStoreId).collect(Collectors.toCollection(LinkedHashSet::new)); + chainImageStoreUrls = imageStoreIdSet.stream().map(id -> dataStoreDao.findById(id).getUrl()).collect(Collectors.toList()); + return chainImageStoreUrls; + } + + /** + * Gets the list of backup parents of a given BackupVO. + * @param backupVO the backup in question. + * @param includeRemoved whether to include removed (but not expunged) parents or not. + * @return list of parents, or an empty list if no parents found. 
+ * */ + protected List getBackupJoinParents(BackupVO backupVO, boolean includeRemoved) { + List ancestorBackups; + + if (includeRemoved) { + ancestorBackups = nativeBackupJoinDao.listIncludingRemovedByVmIdAndBeforeDateOrderByCreatedDesc(backupVO.getVmId(), backupVO.getDate()); + } else { + ancestorBackups = nativeBackupJoinDao.listByBackedUpAndVmIdAndDateBeforeOrAfterOrderBy(backupVO.getVmId(), backupVO.getDate(), true, false); + } + + for (int i = 0; i < ancestorBackups.size(); i++) { + if (ancestorBackups.get(i).getEndOfChain()) { + return ancestorBackups.subList(0, i); + } + } + + logger.debug("Found the following backup chain ancestors of backup [{}]: [{}].", backupVO, ancestorBackups); + return ancestorBackups; + } + + protected int getChainSizeForBackup(NativeBackupOfferingVO offering, long zoneId) { + if (offering.getBackupChainSize() != null) { + return offering.getBackupChainSize(); + } + return backupChainSize.valueIn(zoneId); + } + + /** + * Gets the list of backup children of a given backupVO + * + * @return list of children, or and empty list if no children found. + * */ + protected List getBackupJoinChildren(BackupVO backupVO) { + List children = nativeBackupJoinDao.listByBackedUpAndVmIdAndDateBeforeOrAfterOrderBy(backupVO.getVmId(), backupVO.getDate(), false, true); + + long parentId = backupVO.getId(); + for (int i = 0; i < children.size(); i++) { + if (children.get(i).getParentId() != parentId) { + return children.subList(0, i); + } + parentId = children.get(i).getId(); + } + + return children; + } + + /** + * Creates a detail for the given BackupVO if the remaining chain size is one or less and the value of backupChainSize is greater than 0. 
+ * */ + protected void setEndOfChainTrueIfRemainingChainSizeIsOneOrLess(int remainingChainSize, int chainSize, long backupId, String backupUuid) { + if (remainingChainSize <= 1 && chainSize > 0) { + logger.debug("Setting backup [{}] as end of chain.", backupUuid); + backupDetailDao.persist(new BackupDetailVO(backupId, END_OF_CHAIN, Boolean.TRUE.toString(), true)); + } + } + + private void setBackupVirtualSize(List volumeTOs, BackupVO backupVO) { + long virtualSize = 0; + for (VolumeObjectTO volumeObjectTO : volumeTOs) { + virtualSize += volumeObjectTO.getSize(); + } + + backupVO.setProtectedSize(virtualSize); + } + + private void updateBackupStatusToBackingUp(List volumeTOs, BackupVO backupVO) { + setBackupVirtualSize(volumeTOs, backupVO); + backupVO.setStatus(Backup.Status.BackingUp); + backupDao.update(backupVO.getId(), backupVO); + } + + /** + * Retrieves the current backup and removes the CURRENT detail. If the informed backup is not the end of chain, sets is as the new CURRENT + * */ + private void updateCurrentBackup(NativeBackupJoinVO backup) { + NativeBackupJoinVO current = nativeBackupJoinDao.findCurrent(backup.getVmId()); + + if (current != null) { + backupDetailDao.removeDetail(current.getId(), CURRENT); + } + + if (!backup.getEndOfChain()) { + backupDetailDao.persist(new BackupDetailVO(backup.getId(), CURRENT, Boolean.TRUE.toString(), true)); + } + } + + /** + * Given a backup, removes the CURRENT detail, and if the snapshot is not set as END_OF_CHAIN, sets it as END_OF_CHAIN. 
+ * */ + protected void setEndOfChainAndRemoveCurrentForBackup(NativeBackupJoinVO currentBackup) { + backupDetailDao.removeDetail(currentBackup.getId(), CURRENT); + if (!currentBackup.getEndOfChain()) { + backupDetailDao.persist(new BackupDetailVO(currentBackup.getId(), END_OF_CHAIN, Boolean.TRUE.toString(), true)); + } + } + + private void createDetails(Long imageStoreId, Long parentId, BackupVO backupVO) { + backupDetailDao.persist(new BackupDetailVO(backupVO.getId(), IMAGE_STORE_ID, imageStoreId.toString(), false)); + backupDetailDao.persist(new BackupDetailVO(backupVO.getId(), PARENT_ID, parentId.toString(), false)); + } + + private void updateReferencesAfterPrepareForSnapshotRevert(List deltaMergeTreeTOList, List snapRefsSucceedingCurrentBackup, + List deletedDeltas, NativeBackupJoinVO backupVO) { + for (DeltaMergeTreeTO deltaMergeTreeTO : deltaMergeTreeTOList) { + SnapshotDataStoreVO snapshotRef = snapRefsSucceedingCurrentBackup.stream() + .filter(ref -> Objects.equals(ref.getVolumeId(), deltaMergeTreeTO.getVolumeObjectTO().getVolumeId())) + .findFirst() + .orElse(null); + if (snapshotRef != null) { + snapshotRef.setInstallPath(deltaMergeTreeTO.getParent().getPath()); + logger.debug("Updating snapshot reference [{}] path to [{}] as part of the preparation to restore a VM snapshot.", snapshotRef.getId(), snapshotRef.getInstallPath()); + snapshotDataStoreDao.update(snapshotRef.getId(), snapshotRef); + } + nativeBackupStoragePoolDao.expungeByVolumeId(deltaMergeTreeTO.getVolumeObjectTO().getVolumeId()); + } + + for (NativeBackupStoragePoolVO delta : deletedDeltas) { + nativeBackupStoragePoolDao.expungeByVolumeId(delta.getVolumeId()); + } + + setEndOfChainAndRemoveCurrentForBackup(backupVO); + } + + protected Answer sendBackupCommand(long hostId, Command cmd) { + cmd.setWait(backupTimeout.value()); + return agentManager.easySend(hostId, cmd); + } + + protected Answer[] sendBackupCommands(Long hostId, Commands cmds) throws OperationTimedoutException, 
AgentUnavailableException { + for (Command cmd : cmds) { + cmd.setWait(backupTimeout.value()); + } + return agentManager.send(hostId, cmds); + } + + private void validateQuickRestore(Backup backup, boolean quickRestore) { + BackupOfferingVO backupOfferingVO = backupOfferingDao.findByIdIncludingRemoved(backup.getBackupOfferingId()); + NativeBackupOfferingVO nativeBackupOfferingVO = nativeBackupOfferingDao.findByUuidIncludingRemoved(backupOfferingVO.getExternalId()); + if (!nativeBackupOfferingVO.isAllowQuickRestore() && quickRestore) { + throw new BackupProviderException(String.format("Unable to quick restore backup [%s] using offering [%s] as the offering does not support quick restoration.", + backup.getUuid(), backupOfferingVO.getUuid())); + } + } + + private boolean offeringSupportsCompression(NativeBackupJoinVO backup) { + BackupOfferingVO backupOfferingVO = backupOfferingDao.findByIdIncludingRemoved(backup.getBackupOfferingId()); + NativeBackupOfferingVO nativeBackupOfferingVO = nativeBackupOfferingDao.findByUuidIncludingRemoved(backupOfferingVO.getExternalId()); + if (!nativeBackupOfferingVO.isCompress()) { + logger.debug("Backup [{}] will not be compressed as offering [{}] with external id [{}] does not support it.", backup, backupOfferingVO.getUuid(), + nativeBackupOfferingVO.getExternalId()); + return false; + } + return true; + } + + + private void validateVmState(VirtualMachine vm, String operation, VirtualMachine.State... additionalStates) { + List allowedStates = new ArrayList<>(this.allowedVmStates); + allowedStates.addAll(Arrays.asList(additionalStates)); + if (!allowedStates.contains(vm.getState())) { + throw new BackupProviderException(String.format("VM [%s] is not in the right state to %s. 
It must be in one of these states: %s", vm.getUuid(), operation, + allowedStates)); + } + } + + protected Pair validateCompressionStateForRestoreAndGetBackup(long backupId) { + return Transaction.execute(TransactionLegacy.CLOUD_DB, (TransactionCallback>) result -> { + try { + BackupVO backupVO = lockBackup(backupId); + if (backupVO == null) { + logger.warn("Unable to get lock on backup [{}]. Cannot restore it.", backupId); + return new Pair<>(false, null); + } + + if (backupVO.getCompressionStatus() == Backup.CompressionStatus.FinalizingCompression) { + logger.error("We cannot restore backups that are finalizing the compression process. Please wait for the process to end and try again later.", + allowedBackupStatesToCompress, backupVO.getStatus()); + return new Pair<>(false, null); + } + backupVO.setStatus(Backup.Status.Restoring); + backupDao.update(backupId, backupVO); + return new Pair<>(true, backupVO); + } finally { + releaseBackup(backupId); + } + }); + } + + /** + * Validates that the backup is in a valid state. This is synchronized with the backup compression check. We get a new backup reference to make sure the compression has not + * changed the backup compression state. + * */ + private boolean validateBackupStateForRemoval(long backupId) { + return Transaction.execute(TransactionLegacy.CLOUD_DB, (TransactionCallback) result -> { + try { + BackupVO backupVO = lockBackup(backupId); + if (backupVO == null) { + logger.warn("Unable to acquire lock for backup [{}]. Cannot remove it.", backupId); + return false; + } + + if (!allowedBackupStatesToRemove.contains(backupVO.getStatus())) { + logger.error("Backup [{}] is not in a state allowed to be removed. Current state is [{}]; allowed states are [{}]", backupVO, backupVO.getStatus(), + allowedBackupStatesToRemove); + return false; + } + + if (Backup.CompressionStatus.Compressing.equals(backupVO.getCompressionStatus())) { + logger.error("Backup [{}] is being compressed, we cannot delete it. 
Please wait for the compress process to end and try again later.", backupVO.getUuid()); + return false; + } + return true; + } finally { + releaseBackup(backupId); + } + }); + } + + /** + * Validates that the backup is in a valid state to start the compression. This is synchronized with the backup removal check. We get a new backup reference to make sure the + * delete process has not changed the backup state. + * */ + private Pair validateBackupStateForStartCompressionAndUpdateCompressionStatus(long backupId) { + return Transaction.execute(TransactionLegacy.CLOUD_DB, (TransactionCallback>) result -> { + try { + BackupVO backupVO = lockBackup(backupId); + if (backupVO == null) { + logger.warn("Unable to get lock on backup [{}]. Will abort the start of the compression process. We might try again later.", backupId); + return new Pair<>(false, null); + } + + if (!allowedBackupStatesToCompress.contains(backupVO.getStatus())) { + logger.error("We can only compress backups that are on states [{}]. Current backup state is [{}].", allowedBackupStatesToCompress, backupVO.getStatus()); + return new Pair<>(false, null); + } + + logger.info("Compressing backup [{}].", backupVO.getUuid()); + backupVO.setCompressionStatus(Backup.CompressionStatus.Compressing); + backupDao.update(backupVO.getId(), backupVO); + return new Pair<>(true, backupVO); + } finally { + releaseBackup(backupId); + } + }); + } + + /** + * Validates that the backup is in a valid state to finalize the compression. This is synchronized with the backup restore check. We get a new backup reference to make sure + * the restore process has not changed the backup state. + * */ + private Pair validateBackupStateForFinalizeCompression(long backupId) { + return Transaction.execute(TransactionLegacy.CLOUD_DB, (TransactionCallback>) result -> { + try { + BackupVO backupVO = lockBackup(backupId); + if (backupVO == null) { + logger.warn("Unable to get lock on backup [{}]. Will abort the finalize compression process. 
We might try again later.", backupId); + return new Pair<>(false, null); + } + + List children = getBackupJoinChildren(backupVO); + if (Backup.Status.Restoring == backupVO.getStatus() || children.stream().anyMatch(backup -> backup.getStatus() == Backup.Status.Restoring)) { + logger.warn( + "Backup [{}] not in right state to finish compression. We can only finish compression process if backup is in [{}] state and no children are being " + "restored. Will try again later", + backupVO, Backup.Status.BackedUp); + return new Pair<>(false, null); + } + + if (Backup.Status.BackedUp == backupVO.getStatus()) { + logger.info("Backup [{}] is in the right state to finish compression. Will start the process.", backupVO.getUuid()); + backupVO.setCompressionStatus(Backup.CompressionStatus.FinalizingCompression); + backupDao.update(backupId, backupVO); + } else { + logger.warn( + "Backup [{}] is in [{}] state. Aborting compression and cleaning up compressed data. We can only finish compression process if backup is in [{}] " + "state.", + backupVO.getUuid(), backupVO.getStatus(), Backup.Status.BackedUp); + backupVO.setCompressionStatus(Backup.CompressionStatus.CompressionError); + backupDao.update(backupId, backupVO); + } + return new Pair<>(true, backupVO); + } finally { + releaseBackup(backupId); + } + }); + } + + private Pair validateBackupStateForRestoreBackupToVM(long backupId) { + return Transaction.execute(TransactionLegacy.CLOUD_DB, (TransactionCallback>) result -> { + try { + BackupVO backupVO = lockBackup(backupId); + if (backupVO == null) { + logger.warn("Unable to get lock on backup [{}]. Cannot create VM from this backup right now.", backupId); + return new Pair<>(false, null); + } + + if (Backup.Status.BackedUp == backupVO.getStatus() || Backup.Status.Restoring == backupVO.getStatus()) { + logger.debug("Backup [{}] is in the right state to create VM from it. 
Will start the process.", backupVO.getUuid()); + Backup.Status oldStatus = backupVO.getStatus(); + backupVO.setStatus(Backup.Status.Restoring); + backupDao.update(backupId, backupVO); + return new Pair<>(true, oldStatus); + } else { + logger.warn( + "Backup [{}] is in [{}] state. Aborting compression and cleaning up compressed data. We can only finish compression process if backup is in [{}] " + "state.", + backupVO.getUuid(), backupVO.getStatus(), Backup.Status.BackedUp); + return new Pair<>(false, null); + } + } finally { + releaseBackup(backupId); + } + }); + } + + private void validateStorages(List volumeTOs, String vmUuid) { + for (VolumeObjectTO volumeObjectTO : volumeTOs) { + StoragePoolVO storagePoolVO = storagePoolDao.findById(volumeObjectTO.getPoolId()); + if (!supportedStoragePoolTypes.contains(storagePoolVO.getPoolType())) { + logger.error("Only able to take backups of VMs with volumes in the following storage types [{}]. Throwing an exception.", supportedStoragePoolTypes); + throw new BackupProviderException(String.format("Unable to take backup of VM [%s], please check the logs.", vmUuid)); + } + } + } + + private void validateNoVmSnapshots(VirtualMachine vm) { + List vmSnapshotVOs = vmSnapshotDao.findByVm(vm.getId()); + if (!vmSnapshotVOs.isEmpty()) { + throw new BackupProviderException(String.format("Restoring VM [%s] would remove the current VM snapshots it has. 
Please remove the VM snapshots [%s] before" + + " restoring the backup.", vm.getUuid(), vmSnapshotVOs.stream().map(VMSnapshotVO::getUuid).collect(Collectors.toList()))); + } + } + + private BackupVO lockBackup(long backupId) { + return backupDao.acquireInLockTable(backupId, 300); + } + + private void releaseBackup(long backupId) { + backupDao.releaseFromLockTable(backupId); + } + + protected void transitVmStateWithoutThrow(VirtualMachine vm, VirtualMachine.Event event, long hostId) { + try { + virtualMachineManager.stateTransitTo(vm, event, hostId); + } catch (NoTransitionException e) { + String msg = String.format("Failed to change VM [%s] state with event [%s].", vm.getUuid(), event.toString()); + logger.error(msg, e); + throw new CloudRuntimeException(msg, e); + } + } + + private void transitVolumeStateWithoutThrow(Volume volume, Volume.Event event) { + try { + volumeApiService.stateTransitTo(volume, event); + } catch (NoTransitionException e) { + throw new CloudRuntimeException(e); + } + } +} diff --git a/plugins/backup/knib/src/main/resources/META-INF/cloudstack/knib/module.properties b/plugins/backup/knib/src/main/resources/META-INF/cloudstack/knib/module.properties new file mode 100644 index 000000000000..9c48622eafe9 --- /dev/null +++ b/plugins/backup/knib/src/main/resources/META-INF/cloudstack/knib/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=knib +parent=backup diff --git a/plugins/backup/knib/src/main/resources/META-INF/cloudstack/knib/spring-backup-knib-context.xml b/plugins/backup/knib/src/main/resources/META-INF/cloudstack/knib/spring-backup-knib-context.xml new file mode 100644 index 000000000000..9bf3eb462bc5 --- /dev/null +++ b/plugins/backup/knib/src/main/resources/META-INF/cloudstack/knib/spring-backup-knib-context.xml @@ -0,0 +1,26 @@ + + + + + + + diff --git a/plugins/backup/knib/src/test/java/org/apache/cloudstack/backup/KnibBackupProviderTest.java b/plugins/backup/knib/src/test/java/org/apache/cloudstack/backup/KnibBackupProviderTest.java new file mode 100644 index 000000000000..3fbab24589c1 --- /dev/null +++ b/plugins/backup/knib/src/test/java/org/apache/cloudstack/backup/KnibBackupProviderTest.java @@ -0,0 +1,609 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.backup; + +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.storage.VolumeApiServiceImpl; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.DateUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.snapshot.VMSnapshot; +import com.cloud.vm.snapshot.VMSnapshotDetailsVO; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao; +import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.backup.dao.BackupDetailsDao; +import org.apache.cloudstack.backup.dao.NativeBackupDataStoreDao; +import org.apache.cloudstack.backup.dao.NativeBackupJoinDao; +import org.apache.cloudstack.backup.dao.NativeBackupStoragePoolDao; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.secstorage.heuristics.HeuristicType; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.heuristics.HeuristicRuleHelper; +import org.apache.cloudstack.storage.to.KnibTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.storage.vmsnapshot.VMSnapshotHelper; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; 
+import org.mockito.Spy; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +@RunWith(MockitoJUnitRunner.class) +public class KnibBackupProviderTest { + + @Mock + private VirtualMachine virtualMachineMock; + + @Mock + private BackupOffering backupOfferingMock; + + @Mock + private VolumeDao volumeDaoMock; + + @Mock + private VolumeVO volumeVoMock; + + @Mock + private SnapshotDataStoreDao snapshotDataStoreDaoMock; + + @Mock + private SnapshotDataStoreVO snapshotDataStoreVoMock; + + @Mock + private VMSnapshotDao vmSnapshotDaoMock; + + @Mock + private VMSnapshotVO vmSnapshotVoMock; + + @Mock + private VMSnapshotDetailsDao vmSnapshotDetailsDaoMock; + + @Mock + private VMSnapshotDetailsVO vmSnapshotDetailsVoMock; + + @Mock + private NativeBackupJoinDao nativeBackupJoinDaoMock; + + @Mock + private NativeBackupJoinVO nativeBackupJoinVoMock; + + @Mock + private NativeBackupDataStoreDao nativeBackupDataStoreDaoMock; + + @Mock + private NativeBackupDataStoreVO nativeBackupDataStoreVoMock; + + @Mock + private NativeBackupStoragePoolDao nativeBackupStoragePoolDaoMock; + + @Mock + private BackupVO backupVoMock; + + @Mock + private BackupDetailsDao backupDetailDaoMock; + + @Mock + private ConfigKey backupChainSize; + + @Mock + private DataStoreManager dataStoreManagerMock; + + @Mock + private DataStore dataStoreMock; + + @Mock + private HeuristicRuleHelper heuristicRuleHelperMock; + + @Mock + private VMSnapshotHelper vmSnapshotHelperMock; + + @Mock + private BackupDao backupDaoMock; + + @Mock + private VolumeObjectTO 
volumeObjectToMock; + + @Mock + private VirtualMachineManager virtualMachineManagerMock; + + @Mock + private HostDao hostDaoMock; + + @Mock + private HostVO hostVOMock; + + @Spy + @InjectMocks + private KnibBackupProvider knibBackupProviderSpy; + + private long vmId = 319832; + private long volumeId = 41; + + @Before + public void setup() { + doReturn(vmId).when(virtualMachineMock).getId(); + doReturn(vmId).when(backupVoMock).getVmId(); + doReturn(vmId).when(nativeBackupJoinVoMock).getVmId(); + } + + + @Test + public void assignVMToBackupOfferingTestNotKvm() { + doReturn(Hypervisor.HypervisorType.Any).when(virtualMachineMock).getHypervisorType(); + boolean result = knibBackupProviderSpy.assignVMToBackupOffering(virtualMachineMock, backupOfferingMock); + assertFalse(result); + } + + @Test + public void assignVMToBackupOfferingTestKvmWithUnsupportedDiskOnlyVmSnapshot() { + doReturn(Hypervisor.HypervisorType.KVM).when(virtualMachineMock).getHypervisorType(); + doReturn(List.of(vmSnapshotVoMock)).when(vmSnapshotDaoMock).findByVmAndByType(vmId, VMSnapshot.Type.Disk); + long vmSnapId = 921; + doReturn(vmSnapId).when(vmSnapshotVoMock).getId(); + doReturn(List.of(vmSnapshotDetailsVoMock)).when(vmSnapshotDetailsDaoMock).listDetails(vmSnapId); + doReturn("Anything").when(vmSnapshotDetailsVoMock).getName(); + + boolean result = knibBackupProviderSpy.assignVMToBackupOffering(virtualMachineMock, backupOfferingMock); + assertFalse(result); + } + + @Test + public void assignVMToBackupOfferingTestKvmWithSupportedDiskOnlyVmSnapshotAndDiskAndMemoryVmSnapshot() { + doReturn(Hypervisor.HypervisorType.KVM).when(virtualMachineMock).getHypervisorType(); + doReturn(List.of(vmSnapshotVoMock)).when(vmSnapshotDaoMock).findByVmAndByType(vmId, VMSnapshot.Type.Disk); + long vmSnapId = 921; + doReturn(vmSnapId).when(vmSnapshotVoMock).getId(); + doReturn(List.of(vmSnapshotDetailsVoMock)).when(vmSnapshotDetailsDaoMock).listDetails(vmSnapId); + 
doReturn(VolumeApiServiceImpl.KVM_FILE_BASED_STORAGE_SNAPSHOT).when(vmSnapshotDetailsVoMock).getName(); + doReturn(List.of(vmSnapshotVoMock)).when(vmSnapshotDaoMock).findByVmAndByType(vmId, VMSnapshot.Type.DiskAndMemory); + + boolean result = knibBackupProviderSpy.assignVMToBackupOffering(virtualMachineMock, backupOfferingMock); + assertFalse(result); + } + + + @Test + public void assignVMToBackupOfferingTestKvmWithSupportedDiskOnlyVmSnapshotAndNoDiskAndMemoryVmSnapshot() { + doReturn(Hypervisor.HypervisorType.KVM).when(virtualMachineMock).getHypervisorType(); + doReturn(List.of(vmSnapshotVoMock)).when(vmSnapshotDaoMock).findByVmAndByType(vmId, VMSnapshot.Type.Disk); + long vmSnapId = 921; + doReturn(vmSnapId).when(vmSnapshotVoMock).getId(); + doReturn(List.of(vmSnapshotDetailsVoMock)).when(vmSnapshotDetailsDaoMock).listDetails(vmSnapId); + doReturn(VolumeApiServiceImpl.KVM_FILE_BASED_STORAGE_SNAPSHOT).when(vmSnapshotDetailsVoMock).getName(); + + boolean result = knibBackupProviderSpy.assignVMToBackupOffering(virtualMachineMock, backupOfferingMock); + assertTrue(result); + } + + @Test + public void removeVMFromBackupOfferingTestNoActiveChain() { + doReturn(VirtualMachine.State.Running).when(virtualMachineMock).getState(); + + boolean result = knibBackupProviderSpy.removeVMFromBackupOffering(virtualMachineMock); + + verify(knibBackupProviderSpy, Mockito.never()).mergeCurrentBackupDeltas(Mockito.any()); + assertTrue(result); + } + + @Test + public void removeVMFromBackupOfferingTestWithActiveChain() { + doReturn(nativeBackupJoinVoMock).when(nativeBackupJoinDaoMock).findCurrent(vmId); + doReturn(true).when(knibBackupProviderSpy).mergeCurrentBackupDeltas(Mockito.any()); + doReturn(VirtualMachine.State.Stopped).when(virtualMachineMock).getState(); + + boolean result = knibBackupProviderSpy.removeVMFromBackupOffering(virtualMachineMock); + + verify(knibBackupProviderSpy, Mockito.times(1)).mergeCurrentBackupDeltas(Mockito.any()); + assertTrue(result); + } + + @Test + 
public void getBackupJoinParentsTestIncludeRemovedEmptyList() { + Date date = DateUtil.now(); + doReturn(date).when(backupVoMock).getDate(); + doReturn(new ArrayList<>()).when(nativeBackupJoinDaoMock).listIncludingRemovedByVmIdAndBeforeDateOrderByCreatedDesc(vmId, date); + + List result = knibBackupProviderSpy.getBackupJoinParents(backupVoMock, true); + + assertTrue(result.isEmpty()); + } + + @Test + public void getBackupJoinParentsTestIncludeRemovedAncestorIsEndOfChain() { + Date date = DateUtil.now(); + doReturn(date).when(backupVoMock).getDate(); + doReturn(true).when(nativeBackupJoinVoMock).getEndOfChain(); + doReturn(List.of(nativeBackupJoinVoMock)).when(nativeBackupJoinDaoMock).listIncludingRemovedByVmIdAndBeforeDateOrderByCreatedDesc(vmId, date); + + List result = knibBackupProviderSpy.getBackupJoinParents(backupVoMock, true); + + assertTrue(result.isEmpty()); + } + + @Test + public void getBackupJoinParentsTestIncludeRemovedAncestorMultipleAncestors() { + Date date = DateUtil.now(); + doReturn(date).when(backupVoMock).getDate(); + NativeBackupJoinVO nativeBackupJoinVoMock1 = Mockito.mock(NativeBackupJoinVO.class); + doReturn(false).when(nativeBackupJoinVoMock1).getEndOfChain(); + NativeBackupJoinVO nativeBackupJoinVoMock2 = Mockito.mock(NativeBackupJoinVO.class); + doReturn(false).when(nativeBackupJoinVoMock2).getEndOfChain(); + doReturn(true).when(nativeBackupJoinVoMock).getEndOfChain(); + doReturn(List.of(nativeBackupJoinVoMock1, nativeBackupJoinVoMock2, nativeBackupJoinVoMock)).when(nativeBackupJoinDaoMock).listIncludingRemovedByVmIdAndBeforeDateOrderByCreatedDesc(vmId, date); + + List result = knibBackupProviderSpy.getBackupJoinParents(backupVoMock, true); + + assertEquals(List.of(nativeBackupJoinVoMock1, nativeBackupJoinVoMock2), result); + } + + @Test + public void getBackupJoinParentsTestIncludeRemovedAncestorMultipleAncestorsNoEndOfChain() { + Date date = DateUtil.now(); + doReturn(date).when(backupVoMock).getDate(); + NativeBackupJoinVO 
nativeBackupJoinVoMock1 = Mockito.mock(NativeBackupJoinVO.class); + doReturn(false).when(nativeBackupJoinVoMock1).getEndOfChain(); + NativeBackupJoinVO nativeBackupJoinVoMock2 = Mockito.mock(NativeBackupJoinVO.class); + doReturn(false).when(nativeBackupJoinVoMock2).getEndOfChain(); + doReturn(false).when(nativeBackupJoinVoMock).getEndOfChain(); + doReturn(List.of(nativeBackupJoinVoMock1, nativeBackupJoinVoMock2, nativeBackupJoinVoMock)).when(nativeBackupJoinDaoMock).listIncludingRemovedByVmIdAndBeforeDateOrderByCreatedDesc(vmId, date); + + List result = knibBackupProviderSpy.getBackupJoinParents(backupVoMock, true); + + assertEquals(List.of(nativeBackupJoinVoMock1, nativeBackupJoinVoMock2, nativeBackupJoinVoMock), result); + } + + @Test + public void getBackupJoinParentsTestNoRemovedAncestorMultipleAncestorsNoEndOfChain() { + Date date = DateUtil.now(); + doReturn(date).when(backupVoMock).getDate(); + NativeBackupJoinVO nativeBackupJoinVoMock1 = Mockito.mock(NativeBackupJoinVO.class); + doReturn(false).when(nativeBackupJoinVoMock1).getEndOfChain(); + NativeBackupJoinVO nativeBackupJoinVoMock2 = Mockito.mock(NativeBackupJoinVO.class); + doReturn(false).when(nativeBackupJoinVoMock2).getEndOfChain(); + doReturn(false).when(nativeBackupJoinVoMock).getEndOfChain(); + doReturn(List.of(nativeBackupJoinVoMock1, nativeBackupJoinVoMock2, nativeBackupJoinVoMock)).when(nativeBackupJoinDaoMock).listByBackedUpAndVmIdAndDateBeforeOrAfterOrderBy(vmId, date, true, + false); + + List result = knibBackupProviderSpy.getBackupJoinParents(backupVoMock, false); + + assertEquals(List.of(nativeBackupJoinVoMock1, nativeBackupJoinVoMock2, nativeBackupJoinVoMock), result); + } + + @Test + public void setEndOfChainTrueIfRemainingChainSizeIsOneTestChainSizeLowerThanOneAndConfigIsZero() { + int chainSize = 0; + knibBackupProviderSpy.setEndOfChainTrueIfRemainingChainSizeIsOneOrLess(-1, chainSize, 1, "uuid"); + + verify(backupDetailDaoMock, Mockito.never()).persist(Mockito.any()); + } + + @Test + 
public void setEndOfChainTrueIfRemainingChainSizeIsOneTestChainSizeLowerThanOneAndConfigBiggerThanZero() { + int chainSize = 1; + knibBackupProviderSpy.setEndOfChainTrueIfRemainingChainSizeIsOneOrLess(-1, chainSize, 1, "uuid"); + + verify(backupDetailDaoMock, Mockito.times(1)).persist(Mockito.any()); + } + + @Test + public void setEndOfChainTrueIfRemainingChainSizeIsOneTestChainSizeBiggerThanOne() { + knibBackupProviderSpy.setEndOfChainTrueIfRemainingChainSizeIsOneOrLess(2, 0, 1, "uuid"); + + verify(backupDetailDaoMock, Mockito.never()).persist(Mockito.any()); + } + + @Test + public void setEndOfChainTrueIfRemainingChainSizeIsOneTestChainSizeIsOne() { + int chainSize = 2; + knibBackupProviderSpy.setEndOfChainTrueIfRemainingChainSizeIsOneOrLess(1, chainSize, 1, "uuid"); + + verify(backupDetailDaoMock, Mockito.times(1)).persist(Mockito.any()); + } + + @Test + public void getParentAndSetEndOfChainTestBackupChainIsEmpty() { + int chainSize = 2; + doReturn(chainSize).when(knibBackupProviderSpy).getChainSizeForBackup(Mockito.any(), Mockito.anyLong()); + doNothing().when(knibBackupProviderSpy).setEndOfChainTrueIfRemainingChainSizeIsOneOrLess(Mockito.anyInt(), Mockito.anyInt(), Mockito.anyLong(), Mockito.any()); + + NativeBackupJoinVO result = knibBackupProviderSpy.getParentAndSetEndOfChain(backupVoMock, List.of(), null); + + assertNull(result); + verify(knibBackupProviderSpy, Mockito.times(1)).setEndOfChainTrueIfRemainingChainSizeIsOneOrLess(Mockito.anyInt(), Mockito.anyInt(), Mockito.anyLong(), + Mockito.any()); + } + + @Test + public void getParentAndSetEndOfChainTestBackupChainIsBiggerThanChainSize() { + int chainSize = 2; + doReturn(chainSize).when(knibBackupProviderSpy).getChainSizeForBackup(Mockito.any(), Mockito.anyLong()); + doNothing().when(knibBackupProviderSpy).setEndOfChainTrueIfRemainingChainSizeIsOneOrLess(Mockito.anyInt(), Mockito.anyInt(), Mockito.anyLong(), Mockito.any()); + + NativeBackupJoinVO nativeBackupJoinVoMock1 = 
Mockito.mock(NativeBackupJoinVO.class); + doReturn(Backup.Status.BackedUp).when(nativeBackupJoinVoMock1).getStatus(); + NativeBackupJoinVO nativeBackupJoinVoMock2 = Mockito.mock(NativeBackupJoinVO.class); + NativeBackupJoinVO result = knibBackupProviderSpy.getParentAndSetEndOfChain(backupVoMock, List.of(nativeBackupJoinVoMock1, nativeBackupJoinVoMock2), null); + + assertEquals(nativeBackupJoinVoMock1, result); + verify(knibBackupProviderSpy, Mockito.times(1)).setEndOfChainTrueIfRemainingChainSizeIsOneOrLess(Mockito.anyInt(), Mockito.anyInt(), Mockito.anyLong(), Mockito.any()); + } + + @Test + public void getParentAndSetEndOfChainTestBackupChainIsSmallerThanChainSize() { + int chainSize = 3; + doReturn(chainSize).when(knibBackupProviderSpy).getChainSizeForBackup(Mockito.any(), Mockito.anyLong()); + doNothing().when(knibBackupProviderSpy).setEndOfChainTrueIfRemainingChainSizeIsOneOrLess(Mockito.anyInt(), Mockito.anyInt(), Mockito.anyLong(), Mockito.any()); + + NativeBackupJoinVO nativeBackupJoinVoMock1 = Mockito.mock(NativeBackupJoinVO.class); + doReturn(Backup.Status.BackedUp).when(nativeBackupJoinVoMock1).getStatus(); + NativeBackupJoinVO nativeBackupJoinVoMock2 = Mockito.mock(NativeBackupJoinVO.class); + NativeBackupJoinVO result = knibBackupProviderSpy.getParentAndSetEndOfChain(backupVoMock, List.of(nativeBackupJoinVoMock1, nativeBackupJoinVoMock2), null); + + assertEquals(nativeBackupJoinVoMock1, result); + verify(knibBackupProviderSpy, Mockito.times(1)).setEndOfChainTrueIfRemainingChainSizeIsOneOrLess(Mockito.anyInt(), Mockito.anyInt(), Mockito.anyLong(), Mockito.any()); + } + + @Test + public void getParentAndSetEndOfChainTestBackupChainIsNotEmptyParentIsRemoved() { + int chainSize = 2; + doReturn(chainSize).when(knibBackupProviderSpy).getChainSizeForBackup(Mockito.any(), Mockito.anyLong()); + doNothing().when(knibBackupProviderSpy).setEndOfChainTrueIfRemainingChainSizeIsOneOrLess(Mockito.anyInt(), Mockito.anyInt(), Mockito.anyLong(), Mockito.any()); + + 
NativeBackupJoinVO nativeBackupJoinVoMock1 = Mockito.mock(NativeBackupJoinVO.class); + doReturn(Backup.Status.Removed).when(nativeBackupJoinVoMock1).getStatus(); + NativeBackupJoinVO result = knibBackupProviderSpy.getParentAndSetEndOfChain(backupVoMock, List.of(nativeBackupJoinVoMock1), null); + + assertNull(result); + verify(knibBackupProviderSpy, Mockito.times(1)).setEndOfChainTrueIfRemainingChainSizeIsOneOrLess(Mockito.anyInt(), Mockito.anyInt(), Mockito.anyLong(), Mockito.any()); + } + + @Test + public void getImageStoreForBackupTestNoHeuristic() { + long zoneId = 2; + doReturn(null).when(heuristicRuleHelperMock).getImageStoreIfThereIsHeuristicRule(zoneId, HeuristicType.BACKUP, backupVoMock); + doReturn(dataStoreMock).when(dataStoreManagerMock).getImageStoreWithFreeCapacity(zoneId); + + DataStore result = knibBackupProviderSpy.getImageStoreForBackup(zoneId, backupVoMock); + + assertEquals(dataStoreMock, result); + } + + @Test + public void getImageStoreForBackupTestWithHeuristic() { + long zoneId = 2; + doReturn(dataStoreMock).when(heuristicRuleHelperMock).getImageStoreIfThereIsHeuristicRule(zoneId, HeuristicType.BACKUP, backupVoMock); + + DataStore result = knibBackupProviderSpy.getImageStoreForBackup(zoneId, backupVoMock); + + assertEquals(dataStoreMock, result); + verify(dataStoreManagerMock, Mockito.never()).getImageStoreWithFreeCapacity(Mockito.anyLong()); + } + + @Test (expected = CloudRuntimeException.class) + public void getImageStoreForBackupTestNoStorageFound() { + knibBackupProviderSpy.getImageStoreForBackup(0L, backupVoMock); + } + + @Test + public void getSucceedingVmSnapshotListTestBackupIsNull() { + List result = knibBackupProviderSpy.getSucceedingVmSnapshotList(null); + + assertTrue(result.isEmpty()); + } + + @Test + public void getSucceedingVmSnapshotListTestNoCurrentSnapshotVo() { + doReturn(null).when(vmSnapshotDaoMock).findCurrentSnapshotByVmId(vmId); + + List result = 
knibBackupProviderSpy.getSucceedingVmSnapshotList(nativeBackupJoinVoMock); + + assertTrue(result.isEmpty()); + } + + @Test + public void getSucceedingVmSnapshotListTestCurrentCreatedBeforeBackup() { + doReturn(vmSnapshotVoMock).when(vmSnapshotDaoMock).findCurrentSnapshotByVmId(vmId); + Date before = DateUtil.now(); + before.setTime(before.getTime()-10000); + Date now = DateUtil.now(); + doReturn(before).when(vmSnapshotVoMock).getCreated(); + doReturn(now).when(nativeBackupJoinVoMock).getDate(); + + List result = knibBackupProviderSpy.getSucceedingVmSnapshotList(nativeBackupJoinVoMock); + + assertTrue(result.isEmpty()); + } + + @Test + public void getSucceedingVmSnapshotListTestCurrentVmSnapshotHasNoParent() { + doReturn(vmSnapshotVoMock).when(vmSnapshotDaoMock).findCurrentSnapshotByVmId(vmId); + Date before = DateUtil.now(); + before.setTime(before.getTime()-10000); + Date now = DateUtil.now(); + doReturn(now).when(vmSnapshotVoMock).getCreated(); + doReturn(before).when(nativeBackupJoinVoMock).getDate(); + + List result = knibBackupProviderSpy.getSucceedingVmSnapshotList(nativeBackupJoinVoMock); + + assertEquals(1, result.size()); + assertEquals(vmSnapshotVoMock, result.get(0)); + } + + @Test + public void getSucceedingVmSnapshotListTestCurrentVmSnapshotHasParentsCreatedAfter() { + doReturn(vmSnapshotVoMock).when(vmSnapshotDaoMock).findCurrentSnapshotByVmId(vmId); + Date before = DateUtil.now(); + before.setTime(before.getTime()-10000); + Date now = DateUtil.now(); + doReturn(now).when(vmSnapshotVoMock).getCreated(); + doReturn(before).when(nativeBackupJoinVoMock).getDate(); + long snapParentId = 909; + doReturn(snapParentId).when(vmSnapshotVoMock).getParent(); + VMSnapshotVO vmSnapshotVoMock1 = Mockito.mock(VMSnapshotVO.class); + doReturn(now).when(vmSnapshotVoMock1).getCreated(); + doReturn(vmSnapshotVoMock1).when(vmSnapshotDaoMock).findById(snapParentId); + + List result = knibBackupProviderSpy.getSucceedingVmSnapshotList(nativeBackupJoinVoMock); + + 
assertEquals(List.of(vmSnapshotVoMock1, vmSnapshotVoMock), result); + } + + + @Test + public void getSucceedingVmSnapshotListTestCurrentVmSnapshotHasParentsCreatedBefore() { + doReturn(vmSnapshotVoMock).when(vmSnapshotDaoMock).findCurrentSnapshotByVmId(vmId); + Date before = DateUtil.now(); + before.setTime(before.getTime() - 10000); + Date now = DateUtil.now(); + doReturn(now).when(vmSnapshotVoMock).getCreated(); + doReturn(before).when(nativeBackupJoinVoMock).getDate(); + long snapParentId = 909; + doReturn(snapParentId).when(vmSnapshotVoMock).getParent(); + VMSnapshotVO vmSnapshotVoMock1 = Mockito.mock(VMSnapshotVO.class); + Date evenBefore = new Date(before.getTime() - 10000); + doReturn(evenBefore).when(vmSnapshotVoMock1).getCreated(); + doReturn(vmSnapshotVoMock1).when(vmSnapshotDaoMock).findById(snapParentId); + + List result = knibBackupProviderSpy.getSucceedingVmSnapshotList(nativeBackupJoinVoMock); + + assertEquals(List.of(vmSnapshotVoMock), result); + } + + @Test + public void mapVolumesToVmSnapshotReferencesTestVmSnapshotVOListIsEmpty() { + knibBackupProviderSpy.mapVolumesToVmSnapshotReferences(List.of(), List.of()); + + verify(vmSnapshotHelperMock, Mockito.never()).getVolumeSnapshotsAssociatedWithKvmDiskOnlyVmSnapshot(1); + } + + @Test + public void mapVolumesToVmSnapshotReferencesTestVmSnapshotVOListHasTwoElements() { + VMSnapshotVO vmSnapshotVoMock1 = Mockito.mock(VMSnapshotVO.class); + doReturn(1L).when(vmSnapshotVoMock).getId(); + doReturn(2L).when(vmSnapshotVoMock1).getId(); + doNothing().when(knibBackupProviderSpy).mapVolumesToSnapshotReferences(Mockito.anyList(), Mockito.anyList(), Mockito.anyMap()); + + knibBackupProviderSpy.mapVolumesToVmSnapshotReferences(List.of(), List.of(vmSnapshotVoMock, vmSnapshotVoMock1)); + + verify(vmSnapshotHelperMock, times(1)).getVolumeSnapshotsAssociatedWithKvmDiskOnlyVmSnapshot(1); + verify(vmSnapshotHelperMock, times(1)).getVolumeSnapshotsAssociatedWithKvmDiskOnlyVmSnapshot(2); + verify(knibBackupProviderSpy, 
times(1)).mapVolumesToSnapshotReferences(Mockito.anyList(), Mockito.anyList(), Mockito.anyMap()); + } + + @Test + public void createDeltaReferencesTestFullBackupEndOfChain() { + doReturn(nativeBackupDataStoreVoMock).when(nativeBackupDataStoreDaoMock).persist(Mockito.any()); + + knibBackupProviderSpy.createDeltaReferences(true, true, true, true, backupVoMock, List.of(), List.of(), new HashMap<>(), new HashMap<>(), null, new KnibTO(volumeObjectToMock, List.of()), false); + + verify(nativeBackupDataStoreDaoMock, Mockito.times(1)).persist(Mockito.any()); + } + + @Test + public void createDeltaReferencesTestIsolatedBackup() { + doReturn(nativeBackupDataStoreVoMock).when(nativeBackupDataStoreDaoMock).persist(Mockito.any()); + + knibBackupProviderSpy.createDeltaReferences(true, false, true, true, backupVoMock, List.of(), List.of(), new HashMap<>(), new HashMap<>(), null, new KnibTO(volumeObjectToMock, List.of()), true); + + verify(nativeBackupDataStoreDaoMock, Mockito.times(1)).persist(Mockito.any()); + verify(knibBackupProviderSpy, Mockito.times(0)).findAndSetParentBackupPath(Mockito.any(), Mockito.any(), Mockito.any()); + verify(knibBackupProviderSpy, Mockito.times(0)).findAndSetParentBackupPath(Mockito.any(), Mockito.any(), Mockito.any()); + verify(nativeBackupStoragePoolDaoMock, Mockito.times(0)).persist(Mockito.any()); + } + + @Test + public void createDeltaReferencesTestNotFullBackupEndOfChain() { + doReturn(nativeBackupDataStoreVoMock).when(nativeBackupDataStoreDaoMock).persist(Mockito.any()); + KnibTO knibTO = new KnibTO(volumeObjectToMock, List.of()); + doReturn(null).when(knibBackupProviderSpy).createDeltaMergeTreeForVolume(false, true, List.of(), null, knibTO); + doNothing().when(knibBackupProviderSpy).findAndSetParentBackupPath(List.of(), null, knibTO); + + knibBackupProviderSpy.createDeltaReferences(false, true, true, true, backupVoMock, List.of(), List.of(), new HashMap<>(), new HashMap<>(), null, knibTO, false); + + verify(nativeBackupDataStoreDaoMock, 
Mockito.times(1)).persist(Mockito.any()); + verify(knibBackupProviderSpy, Mockito.times(1)).findAndSetParentBackupPath(List.of(), null, knibTO); + } + + @Test + public void createDeltaReferencesTestFullBackupNotEndOfChainDoesNotHaveVmSnapshotSucceedingLastBackup() { + doReturn(nativeBackupDataStoreVoMock).when(nativeBackupDataStoreDaoMock).persist(Mockito.any()); + + knibBackupProviderSpy.createDeltaReferences(true, false, false, true, backupVoMock, List.of(), List.of(), new HashMap<>(), new HashMap<>(), null, new KnibTO(volumeObjectToMock, List.of()), false); + + verify(nativeBackupDataStoreDaoMock, Mockito.times(1)).persist(Mockito.any()); + } + + @Test + public void orchestrateTakeBackupTestHostIsDownReturnFalse() { + + Mockito.when(virtualMachineManagerMock.findById(Mockito.anyLong())).thenReturn(virtualMachineMock); + Mockito.when(vmSnapshotHelperMock.pickRunningHost(Mockito.anyLong())).thenReturn(1L); + Mockito.when(hostDaoMock.findById(Mockito.anyLong())).thenReturn(hostVOMock); + Mockito.when(hostVOMock.getStatus()).thenReturn(Status.Down); + + Pair result = knibBackupProviderSpy.orchestrateTakeBackup(backupVoMock, false, false); + assertFalse(result.first()); + } + + @Test + public void orchestrateTakeBackupTestHostIsDisconnectedReturnFalse() { + + Mockito.when(virtualMachineManagerMock.findById(Mockito.anyLong())).thenReturn(virtualMachineMock); + Mockito.when(vmSnapshotHelperMock.pickRunningHost(Mockito.anyLong())).thenReturn(1L); + Mockito.when(hostDaoMock.findById(Mockito.anyLong())).thenReturn(hostVOMock); + Mockito.when(hostVOMock.getStatus()).thenReturn(Status.Disconnected); + + Pair result = knibBackupProviderSpy.orchestrateTakeBackup(backupVoMock, false, false); + assertFalse(result.first()); + } + + @Test + public void setBackupAsIsolatedTestPersistIsolatedDetail() { + knibBackupProviderSpy.setBackupAsIsolated(backupVoMock); + verify(backupDetailDaoMock, Mockito.times(1)).persist(Mockito.any()); + } +} \ No newline at end of file diff --git 
a/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java index c5b690cfcd40..7658886cc793 100644 --- a/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java +++ b/plugins/backup/nas/src/main/java/org/apache/cloudstack/backup/NASBackupProvider.java @@ -46,7 +46,6 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDao; import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao; - import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.backup.dao.BackupRepositoryDao; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -187,7 +186,7 @@ protected Host getVMHypervisorHostForBackup(VirtualMachine vm) { } @Override - public Pair takeBackup(final VirtualMachine vm, Boolean quiesceVM) { + public Pair takeBackup(final VirtualMachine vm, Boolean quiesceVM, boolean isolated) { final Host host = getVMHypervisorHostForBackup(vm); final BackupRepository backupRepository = backupRepositoryDao.findByBackupOfferingId(vm.getBackupOfferingId()); @@ -286,12 +285,12 @@ private BackupVO createBackupObject(VirtualMachine vm, String backupPath) { } @Override - public Pair restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid) { + public Pair restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid, boolean quickrestore) { return restoreVMBackup(vm, backup); } @Override - public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { + public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup, boolean quickRestore, Long hostId) { return restoreVMBackup(vm, backup).first(); } @@ -376,7 +375,8 @@ private String getVolumePathPrefix(StoragePoolVO storagePool) { } @Override - public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, Pair vmNameAndState) { + public Pair 
restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, + Pair vmNameAndState, VirtualMachine vm, boolean quickRestore) { final VolumeVO volume = volumeDao.findByUuid(backupVolumeInfo.getUuid()); final DiskOffering diskOffering = diskOfferingDao.findByUuid(backupVolumeInfo.getDiskOfferingId()); final StoragePoolVO pool = primaryDataStoreDao.findByUuid(dataStoreUuid); diff --git a/plugins/backup/nas/src/test/java/org/apache/cloudstack/backup/NASBackupProviderTest.java b/plugins/backup/nas/src/test/java/org/apache/cloudstack/backup/NASBackupProviderTest.java index a512292cd28f..41092ea15d19 100644 --- a/plugins/backup/nas/src/test/java/org/apache/cloudstack/backup/NASBackupProviderTest.java +++ b/plugins/backup/nas/src/test/java/org/apache/cloudstack/backup/NASBackupProviderTest.java @@ -221,7 +221,7 @@ public void takeBackupSuccessfully() throws AgentUnavailableException, Operation Mockito.when(backupDao.persist(Mockito.any(BackupVO.class))).thenAnswer(invocation -> invocation.getArgument(0)); Mockito.when(backupDao.update(Mockito.anyLong(), Mockito.any(BackupVO.class))).thenReturn(true); - Pair result = nasBackupProvider.takeBackup(vm, false); + Pair result = nasBackupProvider.takeBackup(vm, false, false); Assert.assertTrue(result.first()); Assert.assertNotNull(result.second()); diff --git a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java index 4cf4bd111ef1..1cf962edae51 100644 --- a/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java +++ b/plugins/backup/networker/src/main/java/org/apache/cloudstack/backup/NetworkerBackupProvider.java @@ -357,7 +357,7 @@ public boolean removeVMFromBackupOffering(VirtualMachine vm) { } @Override - public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { + public boolean 
restoreVMFromBackup(VirtualMachine vm, Backup backup, boolean quickRestore, Long hostId) { String networkerServer; HostVO hostVO; @@ -407,7 +407,8 @@ public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { } @Override - public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, Pair vmNameAndState) { + public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, + Pair vmNameAndState, VirtualMachine vm, boolean quickRestore) { String networkerServer; VolumeVO volume = volumeDao.findByUuid(backupVolumeInfo.getUuid()); final DiskOffering diskOffering = diskOfferingDao.findByUuid(backupVolumeInfo.getDiskOfferingId()); @@ -491,7 +492,7 @@ public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeI } @Override - public Pair takeBackup(VirtualMachine vm, Boolean quiesceVM) { + public Pair takeBackup(VirtualMachine vm, Boolean quiesceVM, boolean isolated) { String networkerServer; String clusterName; @@ -648,7 +649,7 @@ public void syncBackupStorageStats(Long zoneId) { public boolean willDeleteBackupsOnOfferingRemoval() { return false; } @Override - public Pair restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid) { + public Pair restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid, boolean quickrestore) { return new Pair<>(true, null); } } diff --git a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java index 39970dab3427..9b34af2d6f49 100644 --- a/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java +++ b/plugins/backup/veeam/src/main/java/org/apache/cloudstack/backup/VeeamBackupProvider.java @@ -219,7 +219,7 @@ public boolean willDeleteBackupsOnOfferingRemoval() { } @Override - public Pair 
takeBackup(final VirtualMachine vm, Boolean quiesceVM) { + public Pair takeBackup(final VirtualMachine vm, Boolean quiesceVM, boolean isolated) { final VeeamClient client = getClient(vm.getDataCenterId()); Boolean result = client.startBackupJob(vm.getBackupExternalId()); return new Pair<>(result, null); @@ -256,7 +256,7 @@ public boolean deleteBackup(Backup backup, boolean forced) { } @Override - public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup) { + public boolean restoreVMFromBackup(VirtualMachine vm, Backup backup, boolean quickRestore, Long hostId) { final String restorePointId = backup.getExternalId(); try { return getClient(vm.getDataCenterId()).restoreFullVM(vm.getInstanceName(), restorePointId); @@ -291,7 +291,8 @@ private void prepareForBackupRestoration(VirtualMachine vm) { } @Override - public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, Pair vmNameAndState) { + public Pair restoreBackedUpVolume(Backup backup, Backup.VolumeInfo backupVolumeInfo, String hostIp, String dataStoreUuid, + Pair vmNameAndState, VirtualMachine vm, boolean quickRestore) { final Long zoneId = backup.getZoneId(); final String restorePointId = backup.getExternalId(); return getClient(zoneId).restoreVMToDifferentLocation(restorePointId, null, hostIp, dataStoreUuid); @@ -337,7 +338,7 @@ public List listRestorePoints(VirtualMachine vm) { } @Override - public Pair restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid) { + public Pair restoreBackupToVM(VirtualMachine vm, Backup backup, String hostIp, String dataStoreUuid, boolean quickrestore) { final Long zoneId = backup.getZoneId(); final String restorePointId = backup.getExternalId(); final String restoreLocation = vm.getInstanceName(); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BlockCommitListener.java 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BlockCommitListener.java index d360aa481372..ab4513642efa 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BlockCommitListener.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/BlockCommitListener.java @@ -27,18 +27,14 @@ import org.libvirt.event.BlockJobStatus; import org.libvirt.event.BlockJobType; -import java.util.concurrent.Semaphore; - public class BlockCommitListener implements BlockJobListener { - private Semaphore semaphore; private String result; private String vmName; private Logger logger; private String logid; - protected BlockCommitListener(Semaphore semaphore, String vmName, String logid) { - this.semaphore = semaphore; + protected BlockCommitListener(String vmName, String logid) { this.vmName = vmName; this.logid = logid; logger = LogManager.getLogger(getClass()); @@ -54,24 +50,22 @@ public void onEvent(Domain domain, String diskPath, BlockJobType type, BlockJobS return; } + ThreadContext.put("logcontextid", logid); + logger.debug("Received status [{}] on disk [{}] while listening for block commit of VM [{}].", status, diskPath, vmName); switch (status) { case COMPLETED: result = null; - semaphore.release(); return; case READY: try { - ThreadContext.put("logcontextid", logid); logger.debug("Pivoting disk [{}] of VM [{}].", diskPath, vmName); domain.blockJobAbort(diskPath, Domain.BlockJobAbortFlags.PIVOT); } catch (LibvirtException ex) { result = String.format("Failed to pivot disk due to [%s].", ex.getMessage()); - semaphore.release(); } return; default: result = String.format("Failed to block commit disk with status [%s].", status); - semaphore.release(); } } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 0a9e0d2d98e6..7530ff080c53 
100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -32,6 +32,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.nio.file.Files; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; @@ -47,8 +48,6 @@ import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -70,6 +69,9 @@ import javax.xml.xpath.XPathFactory; import com.cloud.agent.api.to.VirtualMachineMetadataTO; +import com.cloud.utils.exception.BackupException; +import org.apache.cloudstack.storage.to.DeltaMergeTreeTO; +import com.cloud.agent.api.to.DataObjectType; import org.apache.cloudstack.api.ApiConstants.IoDriverPolicy; import org.apache.cloudstack.command.CommandInfo; import org.apache.cloudstack.command.ReconcileCommandService; @@ -382,6 +384,20 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv public static final String CHECKPOINT_DELETE_COMMAND = "virsh checkpoint-delete --domain %s --checkpointname %s --metadata"; + private static final String BLOCK_PULL_COMMAND = "virsh blockpull --domain %s --path %s"; + + private static final String SNAPSHOT_XML = "\n" + + "%s\n" + + "\n" + + " \n" + + "%s" + + " \n" + + ""; + + private static final String TAG_DISK_SNAPSHOT = "\n" + + "\n" + + "\n"; + protected int qcow2DeltaMergeTimeout; private String modifyVlanPath; @@ -4967,7 +4983,7 @@ public DiskDef getDiskWithPathOfVolumeObjectTO(List disks, VolumeObject return disks.stream() .filter(diskDef -> diskDef.getDiskPath() != null && diskDef.getDiskPath().contains(vol.getPath())) .findFirst() - 
.orElseThrow(() -> new CloudRuntimeException(String.format("Unable to find volume [%s].", vol.getUuid()))); + .orElseThrow(() -> new CloudRuntimeException(String.format("Unable to find volume [%s] with path [%s].", vol.getUuid(), vol.getPath()))); } protected String getDiskPathFromDiskDef(DiskDef disk) { @@ -6194,7 +6210,7 @@ public static String generateSecretUUIDFromString(String seed) { } /** - * Merges the snapshot into base file. + * Merges the delta into a base file. * * @param vm Domain of the VM; * @param diskLabel Disk label to manage snapshot and base file; @@ -6206,7 +6222,7 @@ public static String generateSecretUUIDFromString(String seed) { * @param conn Libvirt connection; * @throws LibvirtException */ - public void mergeSnapshotIntoBaseFile(Domain vm, String diskLabel, String baseFilePath, String topFilePath, boolean active, String snapshotName, VolumeObjectTO volume, + public void mergeDeltaIntoBaseFile(Domain vm, String diskLabel, String baseFilePath, String topFilePath, boolean active, String snapshotName, VolumeObjectTO volume, Connect conn) throws LibvirtException { if (AgentPropertiesFileHandler.getPropertyValue(AgentProperties.LIBVIRT_EVENTS_ENABLED)) { mergeSnapshotIntoBaseFileWithEventsAndConfigurableTimeout(vm, diskLabel, baseFilePath, topFilePath, active, snapshotName, volume, conn); @@ -6231,40 +6247,26 @@ protected void mergeSnapshotIntoBaseFileWithEventsAndConfigurableTimeout(Domain commitFlags |= Domain.BlockCommitFlags.ACTIVE; } - Semaphore semaphore = getSemaphoreToWaitForMerge(); - BlockCommitListener blockCommitListener = getBlockCommitListener(semaphore, vmName); - vm.addBlockJobListener(blockCommitListener); - - logger.info("Starting block commit of snapshot [{}] of VM [{}]. 
Using parameters: diskLabel [{}]; baseFilePath [{}]; topFilePath [{}]; commitFlags [{}]", snapshotName, - vmName, diskLabel, baseFilePath, topFilePath, commitFlags); + BlockCommitListener blockCommitListener = getBlockCommitListener(vmName); + try { + vm.addBlockJobListener(blockCommitListener); - vm.blockCommit(diskLabel, baseFilePath, topFilePath, 0, commitFlags); + logger.info("Starting block commit of QCOW2 delta [{}] of VM [{}]. Using parameters: diskLabel [{}]; baseFilePath [{}]; topFilePath [{}]; commitFlags [{}]", + snapshotName, + vmName, diskLabel, baseFilePath, topFilePath, commitFlags); - Thread checkProgressThread = new Thread(() -> checkBlockCommitProgress(vm, diskLabel, vmName, snapshotName, topFilePath, baseFilePath)); - checkProgressThread.start(); + vm.blockCommit(diskLabel, baseFilePath, topFilePath, 0, commitFlags); - String errorMessage = String.format("the block commit of top file [%s] into base file [%s] for snapshot [%s] of VM [%s]." + - " The job will be left running to avoid data corruption, but ACS will return an error and volume [%s] will need to be normalized manually. 
If the commit" + - " involved the active image, the pivot will need to be manually done.", topFilePath, baseFilePath, snapshotName, vmName, volume); - try { - if (!semaphore.tryAcquire(qcow2DeltaMergeTimeout, TimeUnit.SECONDS)) { - throw new CloudRuntimeException("Timed out while waiting for " + errorMessage); - } - } catch (InterruptedException e) { - throw new CloudRuntimeException("Interrupted while waiting for " + errorMessage); + checkBlockCommitProgress(vm, diskLabel, vmName, snapshotName, topFilePath, baseFilePath); } finally { vm.removeBlockJobListener(blockCommitListener); } String mergeResult = blockCommitListener.getResult(); - try { - checkProgressThread.join(); - } catch (InterruptedException ex) { - throw new CloudRuntimeException(String.format("Exception while running wait block commit task of snapshot [%s] and VM [%s].", snapshotName, vmName)); - } - if (mergeResult != null) { - String commitError = String.format("Failed %s The failure occurred due to [%s].", errorMessage, mergeResult); + String commitError = String.format("Failed the block commit of top file [%s] into base file [%s] for snapshot [%s] of VM [%s]. The job will be left running to avoid" + + " data corruption, but ACS will return an error and volume [%s] will need to be normalized manually. If the commit involved the active image, the pivot will" + + " need to be manually done. The failure occurred due to [%s].", topFilePath, baseFilePath, snapshotName, vmName, volume, mergeResult); logger.error(commitError); throw new CloudRuntimeException(commitError); } @@ -6321,15 +6323,8 @@ protected String buildMergeCommand(String vmName, String diskLabel, String baseF /** * This was created to facilitate testing. * */ - protected BlockCommitListener getBlockCommitListener(Semaphore semaphore, String vmName) { - return new BlockCommitListener(semaphore, vmName, ThreadContext.get("logcontextid")); - } - - /** - * This was created to facilitate testing. 
- * */ - protected Semaphore getSemaphoreToWaitForMerge() { - return new Semaphore(0); + protected BlockCommitListener getBlockCommitListener(String vmName) { + return new BlockCommitListener(vmName, ThreadContext.get("logcontextid")); } protected void checkBlockCommitProgress(Domain vm, String diskLabel, String vmName, String snapshotName, String topFilePath, String baseFilePath) { @@ -6345,8 +6340,8 @@ protected void checkBlockCommitProgress(Domain vm, String diskLabel, String vmNa try { Thread.sleep(1000); } catch (InterruptedException ex) { - logger.debug("Thread that was tracking the progress {} was interrupted.", partialLog, ex); - return; + logger.trace("Thread that was tracking the progress for the block commit job {} was interrupted. Ignoring.", partialLog, ex); + continue; } try { @@ -6523,4 +6518,255 @@ public String getHypervisorPath() { public String getGuestCpuArch() { return guestCpuArch; } + + public Map> createDiskOnlyVmSnapshotForRunningVm(List volumeObjectTos, String vmName, String snapshotName, boolean quiesceVm) throws BackupException { + logger.info("Taking disk-only VM snapshot of running VM [{}].", vmName); + + Domain dm = null; + try { + LibvirtUtilitiesHelper libvirtUtilitiesHelper = getLibvirtUtilitiesHelper(); + Connect conn = libvirtUtilitiesHelper.getConnection(); + List disks = getDisks(conn, vmName); + + dm = getDomain(conn, vmName); + + if (dm == null) { + throw new BackupException(String.format("Creation of disk-only VM snapshot failed as we could not find the VM [%s].", vmName), true); + } + + Pair>> snapshotXmlAndVolumeToNewPathMap = createSnapshotXmlAndNewVolumePathMap(volumeObjectTos, disks, snapshotName); + + int flagsToUseForRunningVmSnapshotCreation = getFlagsToUseForRunningVmSnapshotCreation(quiesceVm); + String snapshotXml = snapshotXmlAndVolumeToNewPathMap.first(); + + logger.info("Creating disk-only VM snapshot for VM [{}] using parameters: snapshotXml [{}]; flags [{}].", vmName, snapshotXml, 
flagsToUseForRunningVmSnapshotCreation); + + dm.snapshotCreateXML(snapshotXml, flagsToUseForRunningVmSnapshotCreation); + + return snapshotXmlAndVolumeToNewPathMap.second(); + } catch (LibvirtException e) { + String errorMsg = String.format("Creation of disk-only VM snapshot for VM [%s] failed due to %s.", vmName, e.getMessage()); + boolean isVmConsistent = false; + if (e.getMessage().contains("QEMU guest agent is not connected")) { + errorMsg = "QEMU guest agent is not connected. If the VM has been recently started, it might connect soon. Otherwise the VM does not have the" + + " guest agent installed; thus the QuiesceVM parameter is not supported."; + isVmConsistent = true; + } + logger.error(errorMsg, e); + throw new BackupException(errorMsg, isVmConsistent); + } finally { + if (dm != null) { + try { + dm.free(); + } catch (LibvirtException l) { + logger.trace("Ignoring libvirt error.", l); + } + } + } + } + + public Map> createDiskOnlyVMSnapshotOfStoppedVm(List volumeObjectTos, String vmName) { + logger.info("Creating volume deltas for stopped VM [{}].", vmName); + + Map> mapVolumeToSnapshotSizeAndNewVolumePath = new HashMap<>(); + try { + for (VolumeObjectTO volumeObjectTO : volumeObjectTos) { + PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) volumeObjectTO.getDataStore(); + KVMStoragePool kvmStoragePool = getStoragePoolMgr().getStoragePool(primaryDataStoreTO.getPoolType(), primaryDataStoreTO.getUuid()); + + String snapshotPath = UUID.randomUUID().toString(); + String snapshotFullPath = kvmStoragePool.getLocalPathFor(snapshotPath); + QemuImgFile newDelta = new QemuImgFile(snapshotFullPath, QemuImg.PhysicalDiskFormat.QCOW2); + + String currentDeltaFullPath = kvmStoragePool.getLocalPathFor(volumeObjectTO.getPath()); + QemuImgFile currentDelta = new QemuImgFile(currentDeltaFullPath, QemuImg.PhysicalDiskFormat.QCOW2); + + QemuImg qemuImg = new QemuImg(0); + + logger.debug("Creating new delta [{}] for volume [{}] as part of the delta creation process 
for VM [{}].", newDelta, volumeObjectTO.getUuid(), vmName); + qemuImg.create(newDelta, currentDelta); + + mapVolumeToSnapshotSizeAndNewVolumePath.put(volumeObjectTO.getUuid(), new Pair<>(getFileSize(currentDeltaFullPath), snapshotPath)); + } + } catch (LibvirtException | QemuImgException e) { + logger.error("Exception while creating volume delta for VM [{}]. Deleting leftover deltas.", vmName, e); + for (VolumeObjectTO volumeObjectTO : volumeObjectTos) { + Pair volSizeAndNewPath = mapVolumeToSnapshotSizeAndNewVolumePath.get(volumeObjectTO.getUuid()); + PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) volumeObjectTO.getDataStore(); + KVMStoragePool kvmStoragePool = getStoragePoolMgr().getStoragePool(primaryDataStoreTO.getPoolType(), primaryDataStoreTO.getUuid()); + + if (volSizeAndNewPath == null) { + continue; + } + try { + Files.deleteIfExists(Path.of(kvmStoragePool.getLocalPathFor(volSizeAndNewPath.second()))); + } catch (IOException ex) { + logger.warn("Tried to delete leftover delta at [{}]. Failed.", volSizeAndNewPath.second(), ex); + } + } + throw new BackupException(String.format("An exception was caught during the delta creation for VM [%s]. 
The leftover deltas have been deleted.", vmName), true); + } + + return mapVolumeToSnapshotSizeAndNewVolumePath; + } + + public void mergeDeltaForStoppedVm(DeltaMergeTreeTO deltaMergeTreeTO) throws QemuImgException, IOException, LibvirtException { + logger.debug("Merging delta [{}] for stopped VM.", deltaMergeTreeTO); + + QemuImg qemuImg = new QemuImg(qcow2DeltaMergeTimeout * 1000); + DataTO parentTo = deltaMergeTreeTO.getParent(); + PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) parentTo.getDataStore(); + KVMStoragePool storagePool = storagePoolManager.getStoragePool(primaryDataStoreTO.getPoolType(), primaryDataStoreTO.getUuid()); + String childLocalPath = storagePool.getLocalPathFor(deltaMergeTreeTO.getChild().getPath()); + + QemuImgFile parent = new QemuImgFile(storagePool.getLocalPathFor(parentTo.getPath()), QemuImg.PhysicalDiskFormat.QCOW2); + QemuImgFile child = new QemuImgFile(childLocalPath, QemuImg.PhysicalDiskFormat.QCOW2); + + logger.debug("Committing child delta [{}] into parent delta [{}].", parentTo, deltaMergeTreeTO.getChild()); + qemuImg.commit(child, parent, true); + + List grandChildren = deltaMergeTreeTO.getGrandChildren().stream() + .map(deltaTo -> new QemuImgFile(storagePool.getLocalPathFor(deltaTo.getPath()), QemuImg.PhysicalDiskFormat.QCOW2)) + .collect(Collectors.toList()); + + logger.debug("Rebasing grand-children [{}] into parent at [{}].", grandChildren, parent.getFileName()); + for (QemuImgFile grandChild : grandChildren) { + qemuImg.rebase(grandChild, parent, parent.getFormat().toString(), false); + } + + logger.debug("Deleting child at [{}] as it is useless.", childLocalPath); + + Files.deleteIfExists(Path.of(childLocalPath)); + } + + public void mergeDeltaForRunningVm(DeltaMergeTreeTO mergeTreeTO, String vmName, VolumeObjectTO volumeObjectTO) throws LibvirtException, QemuImgException { + logger.debug("Merging delta [{}] for running VM [{}].", mergeTreeTO, vmName); + + QemuImg qemuImg = new QemuImg(qcow2DeltaMergeTimeout 
* 1000); + Connect conn = libvirtUtilitiesHelper.getConnection(); + Domain domain = getDomain(conn, vmName); + List disks = getDisks(conn, vmName); + + DataTO childTO = mergeTreeTO.getChild(); + DataTO parentSnapshotTO = mergeTreeTO.getParent(); + KVMStoragePool storagePool = libvirtUtilitiesHelper.getPrimaryPoolFromDataTo(volumeObjectTO, storagePoolManager); + + boolean active = DataObjectType.VOLUME.equals(childTO.getObjectType()); + String label = getDiskWithPathOfVolumeObjectTO(disks, volumeObjectTO).getDiskLabel(); + String parentSnapshotLocalPath = storagePool.getLocalPathFor(parentSnapshotTO.getPath()); + String childDeltaPath = storagePool.getLocalPathFor(childTO.getPath()); + + logger.debug("Found label [{}] for [{}]. Will merge delta at [{}] into delta at [{}].", label, volumeObjectTO, parentSnapshotLocalPath, childDeltaPath); + + mergeDeltaIntoBaseFile(domain, label, parentSnapshotLocalPath, childDeltaPath, active, childTO.getPath(), volumeObjectTO, conn); + + QemuImgFile parent = new QemuImgFile(parentSnapshotLocalPath, QemuImg.PhysicalDiskFormat.QCOW2); + + logger.debug("Rebasing grand-children [{}] into parent at [{}].", mergeTreeTO.getGrandChildren(), parentSnapshotLocalPath); + for (DataTO grandChildTo : mergeTreeTO.getGrandChildren()) { + if (checkIfFileIsInActiveChainForVm(domain, grandChildTo)) { + logger.debug("Grand-child [{}] is on the active chain of VM [{}], thus libvirt has already rebased it, will ignore it.", grandChildTo, vmName); + continue; + } + QemuImgFile grandChild = new QemuImgFile(storagePool.getLocalPathFor(grandChildTo.getPath()), QemuImg.PhysicalDiskFormat.QCOW2); + qemuImg.rebase(grandChild, parent, parent.getFormat().toString(), false); + } + } + + private boolean checkIfFileIsInActiveChainForVm(Domain vm, DataTO dataTO) throws LibvirtException { + String xml = vm.getXMLDesc(0); + KVMStoragePool storagePool = libvirtUtilitiesHelper.getPrimaryPoolFromDataTo(dataTO, storagePoolManager); + return 
xml.contains(storagePool.getLocalPathFor(dataTO.getPath())); + } + + public int getFlagsToUseForRunningVmSnapshotCreation(boolean quiesceVm) { + int flags = quiesceVm ? Domain.SnapshotCreateFlags.QUIESCE : 0; + flags += Domain.SnapshotCreateFlags.DISK_ONLY + + Domain.SnapshotCreateFlags.ATOMIC + + Domain.SnapshotCreateFlags.NO_METADATA; + return flags; + } + + public Pair>> createSnapshotXmlAndNewVolumePathMap(List volumeObjectTOS, List disks, String snapshotName) { + StringBuilder stringBuilder = new StringBuilder(); + Map> volumeObjectToNewPathMap = new HashMap<>(); + + for (VolumeObjectTO volumeObjectTO : volumeObjectTOS) { + LibvirtVMDef.DiskDef diskdef = getDiskWithPathOfVolumeObjectTO(disks, volumeObjectTO); + String newPath = UUID.randomUUID().toString(); + stringBuilder.append(String.format(TAG_DISK_SNAPSHOT, diskdef.getDiskLabel(), getSnapshotTemporaryPath(diskdef.getDiskPath(), newPath))); + + long snapSize = getFileSize(diskdef.getDiskPath()); + + volumeObjectToNewPathMap.put(volumeObjectTO.getUuid(), new Pair<>(snapSize, newPath)); + } + + String snapshotXml = String.format(SNAPSHOT_XML, snapshotName, stringBuilder); + return new Pair<>(snapshotXml, volumeObjectToNewPathMap); + } + + public long getFileSize(String path) { + return new File(path).length(); + } + + public boolean pullVolumeBackingFile(VolumeObjectTO volumeObjectTO, String vmName) throws LibvirtException { + Connect conn = libvirtUtilitiesHelper.getConnection(); + + Domain vm = getDomain(conn, vmName); + List disks = getDisks(conn, vmName); + DiskDef diskDef = getDiskWithPathOfVolumeObjectTO(disks, volumeObjectTO); + + String diskLabel = diskDef.getDiskLabel(); + Script.runSimpleBashScript(String.format(BLOCK_PULL_COMMAND, vmName, diskLabel)); + + boolean result = checkBlockPullProgress(vm, diskLabel, vmName, volumeObjectTO.getUuid()); + + if (!result) { + logger.warn("Failed to block pull volume [{}] of VM [{}], aborting.", volumeObjectTO, vmName); + vm.blockJobAbort(diskLabel, 
Domain.BlockJobAbortFlags.ASYNC); + } + return result; + } + + protected Boolean checkBlockPullProgress(Domain vm, String diskLabel, String vmName, String volumeUuid) { + int timeout = qcow2DeltaMergeTimeout; + DomainBlockJobInfo result; + long lastCommittedBytes = 0; + long endBytes = 0; + String partialLog = String.format("for volume [%s] of VM [%s]", volumeUuid, vmName); + while (timeout > 0) { + timeout -= 1; + + try { + Thread.sleep(1000); + } catch (InterruptedException ex) { + logger.trace(String.format("Thread that was tracking the block pull progress %s was interrupted. Ignoring.", partialLog), ex); + continue; + } + + try { + result = vm.getBlockJobInfo(diskLabel, 0); + } catch (LibvirtException ex) { + logger.warn(String.format("Exception while getting block job info %s: [%s].", partialLog, ex.getMessage()), ex); + return false; + } + + if (result == null || result.type == 0 && result.end == 0 && result.cur == 0) { + logger.debug(String.format("Block pull job %s has finished.", partialLog)); + return true; + } + + long currentCommittedBytes = result.cur; + if (currentCommittedBytes > lastCommittedBytes) { + logger.debug(String.format("The block pull %s is at [%s] of [%s].", partialLog, currentCommittedBytes, result.end)); + } + lastCommittedBytes = currentCommittedBytes; + endBytes = result.end; + } + logger.warn(String.format("Block pull %s has timed out after waiting at least %s seconds. 
The progress of the operation was [%s] of [%s].", partialLog, + qcow2DeltaMergeTimeout, lastCommittedBytes, endBytes)); + return false; + } + + } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtMigrateResourceBetweenSecondaryStorages.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtMigrateResourceBetweenSecondaryStorages.java new file mode 100644 index 000000000000..a3dd2040cfc8 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtMigrateResourceBetweenSecondaryStorages.java @@ -0,0 +1,123 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// + +package com.cloud.hypervisor.kvm.resource; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.to.DataTO; +import com.cloud.hypervisor.kvm.storage.KVMStoragePool; +import com.cloud.resource.CommandWrapper; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.BackupException; +import com.cloud.utils.script.Script; +import org.apache.cloudstack.utils.qemu.QemuImageOptions; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.libvirt.LibvirtException; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Set; + +public abstract class LibvirtMigrateResourceBetweenSecondaryStorages extends CommandWrapper { + + protected static final String BACKUP = "backup"; + protected static final String SNAPSHOT = "snapshot"; + + protected Set filesToRemove; + protected List> resourcesToUpdate; + protected String resourceType; + protected int wait; + + public String copyResourceToDestDataStore(DataTO resource, String resourceCurrentPath, KVMStoragePool destImagePool, String resourceParentPath) throws QemuImgException, LibvirtException { + String resourceDestDataStoreFullPath = destImagePool.getLocalPathFor(resource.getPath()); + String resourceDestCheckpointPath = resourceDestDataStoreFullPath.replace("snapshots", "checkpoints"); + + QemuImgFile resourceOrigin = new QemuImgFile(resourceCurrentPath, QemuImg.PhysicalDiskFormat.QCOW2); + QemuImgFile resourceDestination = new QemuImgFile(resourceDestDataStoreFullPath, QemuImg.PhysicalDiskFormat.QCOW2); + QemuImgFile parentResource = null; + + if (resourceParentPath != null) { + parentResource = new QemuImgFile(resourceParentPath, QemuImg.PhysicalDiskFormat.QCOW2); + } + + logger.debug("Migrating {} [{}] to [{}] with {}", resourceType, resourceOrigin, 
resourceDestination, parentResource == null ? "no parent." : String.format("parent [%s].", parentResource)); + + long resourceId = resource.getId(); + + createDirsIfNeeded(resourceDestDataStoreFullPath, resourceId); + + QemuImg qemuImg = new QemuImg(wait); + qemuImg.convert(resourceOrigin, resourceDestination, parentResource, null, null, new QemuImageOptions(resourceOrigin.getFormat(), resourceOrigin.getFileName(), null), + null, true, false, false, false, null, null); + + filesToRemove.add(resourceCurrentPath); + + if (SNAPSHOT.equals(resourceType)) { + String resourceCurrentCheckpointPath = resourceCurrentPath.replace("snapshots", "checkpoints"); + createDirsIfNeeded(resourceDestCheckpointPath, resourceId); + migrateCheckpointFile(resourceCurrentPath, resourceDestDataStoreFullPath); + filesToRemove.add(resourceCurrentCheckpointPath); + resourcesToUpdate.add(new Pair<>(resourceId, resourceDestCheckpointPath)); + } + + return resourceDestDataStoreFullPath; + } + + private void migrateCheckpointFile(String resourceCurrentPath, String resourceDestDataStoreFullPath) { + resourceCurrentPath = resourceCurrentPath.replace("snapshots", "checkpoints"); + resourceDestDataStoreFullPath = resourceDestDataStoreFullPath.replace("snapshots", "checkpoints"); + + String copyCommand = String.format("cp %s %s", resourceCurrentPath, resourceDestDataStoreFullPath); + Script.runSimpleBashScript(copyCommand); + } + + public void removeResourceFromSourceDataStore(String resourcePath) { + logger.debug("Removing file [{}].", resourcePath); + try { + Files.deleteIfExists(Path.of(resourcePath)); + } catch (IOException ex) { + logger.error("Failed to remove {} [{}].", resourceType, resourcePath, ex); + } + } + + public String rebaseResourceToNewParentPath(String resourcePath, String parentResourcePath) throws LibvirtException, QemuImgException { + QemuImgFile resource = new QemuImgFile(resourcePath, QemuImg.PhysicalDiskFormat.QCOW2); + QemuImgFile parentResource = new 
QemuImgFile(parentResourcePath, QemuImg.PhysicalDiskFormat.QCOW2); + + QemuImg qemuImg = new QemuImg(wait); + qemuImg.rebase(resource, parentResource, parentResource.getFormat().toString(), false); + + return resourcePath; + } + + private void createDirsIfNeeded(String resourceFullPath, Long resourceId) { + String dirs = resourceFullPath.substring(0, resourceFullPath.lastIndexOf(File.separator)); + try { + Files.createDirectories(Path.of(dirs)); + } catch (IOException e) { + throw new BackupException(String.format("Error while creating directories for migration of %s [%s].", resourceType, resourceId), e, true); + } + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCompressBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCompressBackupCommandWrapper.java new file mode 100644 index 000000000000..d3b6114c60e4 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCompressBackupCommandWrapper.java @@ -0,0 +1,144 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.DataTO; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMStoragePool; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.backup.Backup; +import org.apache.cloudstack.backup.CompressBackupCommand; +import org.apache.cloudstack.storage.formatinspector.Qcow2Inspector; +import org.apache.cloudstack.storage.to.DeltaMergeTreeTO; +import org.apache.cloudstack.utils.qemu.QemuImageOptions; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.libvirt.LibvirtException; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.stream.Collectors; + +@ResourceWrapper(handles = CompressBackupCommand.class) +public class LibvirtCompressBackupCommandWrapper extends CommandWrapper { + public static final String COMPRESSION_TYPE = "compression_type"; + private static final int MIN_QCOW_2_VERSION_FOR_ZSTD = 3; + + @Override + public Answer execute(CompressBackupCommand command, LibvirtComputingResource serverResource) { + List secondaryStorages = new ArrayList<>(); + List deltas = command.getBackupDeltasToCompress(); + KVMStoragePoolManager storagePoolManager = serverResource.getStoragePoolMgr(); + + logger.info("Starting compression for backup deltas [{}].", deltas); + try { + QemuImg qemuImg = new QemuImg(command.getWait() * 1000); + Integer rateLimit = validateAndGetRateLimit(command, qemuImg); + + KVMStoragePool mainSecStorage = storagePoolManager.getStoragePoolByURI(deltas.stream().findFirst().orElseThrow().getChild().getDataStore().getUrl()); + 
secondaryStorages.add(mainSecStorage); + secondaryStorages.addAll(command.getBackupChainImageStoreUrls().stream().map(storagePoolManager::getStoragePoolByURI).collect(Collectors.toList())); + + if (!checkAvailableStorage(command, mainSecStorage, storagePoolManager)) { + return new Answer(command, false, "Not enough available space on secondary."); + } + + for (DeltaMergeTreeTO delta : deltas) { + DataTO child = delta.getChild(); + + QemuImgFile backingFile = null; + DataTO parent = delta.getParent(); + if (parent != null) { + KVMStoragePool parentSecondaryStorage = storagePoolManager.getStoragePoolByURI(parent.getDataStore().getUrl()); + secondaryStorages.add(parentSecondaryStorage); + backingFile = new QemuImgFile(parentSecondaryStorage.getLocalPathFor(parent.getPath()), QemuImg.PhysicalDiskFormat.QCOW2); + } + + String fullDeltaPath = mainSecStorage.getLocalPathFor(child.getPath()); + String compressedPath = fullDeltaPath + ".comp"; + QemuImgFile originalBackup = new QemuImgFile(fullDeltaPath, QemuImg.PhysicalDiskFormat.QCOW2); + QemuImgFile compressedBackup = new QemuImgFile(compressedPath, QemuImg.PhysicalDiskFormat.QCOW2); + + HashMap options = new HashMap<>(); + Backup.CompressionLibrary compressionLib = getCompressionLibrary(command, fullDeltaPath); + setCompressionTypeOptionIfAvailable(qemuImg, options, compressionLib); + int coroutines = command.getCoroutines(); + logger.info("Starting compression for backup delta [{}] with parent [{}] using [{}] coroutines.", child, parent, coroutines); + qemuImg.convert(originalBackup, compressedBackup, backingFile, options, null, new QemuImageOptions(originalBackup.getFormat(), originalBackup.getFileName(), + null), null, false, false, true, true, coroutines, rateLimit); + } + } catch (LibvirtException | QemuImgException e) { + return new Answer(command, e); + } finally { + for (KVMStoragePool secondaryStorage : secondaryStorages) { + storagePoolManager.deleteStoragePool(secondaryStorage.getType(), 
secondaryStorage.getUuid()); + } + } + + return new Answer(command); + } + private Integer validateAndGetRateLimit(CompressBackupCommand command, QemuImg qemuImg) { + if (qemuImg.getVersion() < QemuImg.QEMU_5_20) { + throw new CloudRuntimeException("Qemu version is lower than 5.2.0, unable to set the rate limit."); + } + return command.getRateLimit() < 1 ? null : command.getRateLimit(); + } + + /** + * Sets the compression type option if qemu-img is at least in version 5.1. Otherwise, will not set it and qemu will use zlib. + * */ + private void setCompressionTypeOptionIfAvailable(QemuImg qemuImg, HashMap options, Backup.CompressionLibrary compressionLib) { + if (qemuImg.getVersion() >= QemuImg.QEMU_5_10) { + options.put(COMPRESSION_TYPE, compressionLib.name()); + return; + } + logger.warn("Qemu is at a lower version than 5.1, we will not be able to use zstd to compress backups. Only zlib is supported for this version. Current version is [{}].", + qemuImg.getVersion()); + } + + private Backup.CompressionLibrary getCompressionLibrary(CompressBackupCommand command, String fullDeltaPath) { + Backup.CompressionLibrary compressionLib = command.getCompressionLib(); + if (compressionLib == Backup.CompressionLibrary.zlib || !Qcow2Inspector.validateQcow2Version(fullDeltaPath, MIN_QCOW_2_VERSION_FOR_ZSTD)) { + logger.debug("Compression for delta [{}] will use zlib as the compression library.", fullDeltaPath); + return Backup.CompressionLibrary.zlib; + } + + logger.debug("Compression for delta [{}] will try to use zstd as the compression library.", fullDeltaPath); + return Backup.CompressionLibrary.zstd; + } + + /** + * Validates available storage. Forces Libvirt to refresh storage info so that we have the most up to date data. 
+ * */ + private boolean checkAvailableStorage(CompressBackupCommand command, KVMStoragePool mainSecStorage, KVMStoragePoolManager storagePoolManager) { + logger.debug("Checking available storage [{}].", mainSecStorage); + mainSecStorage = storagePoolManager.getStoragePool(mainSecStorage.getType(), mainSecStorage.getUuid(), true, false); + if (mainSecStorage.getAvailable() < command.getMinFreeStorage()) { + logger.warn("There is not enough available space for compression of backup! Available size is [{}], needed [{}]. Aborting compression.", + mainSecStorage.getAvailable(), command.getMinFreeStorage()); + return false; + } + return true; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConsolidateVolumesCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConsolidateVolumesCommandWrapper.java new file mode 100644 index 000000000000..30c8849df5fa --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtConsolidateVolumesCommandWrapper.java @@ -0,0 +1,59 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; +import com.cloud.storage.Storage; +import org.apache.cloudstack.backup.ConsolidateVolumesAnswer; +import org.apache.cloudstack.backup.ConsolidateVolumesCommand; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.libvirt.LibvirtException; + +import java.util.ArrayList; +import java.util.List; + +@ResourceWrapper(handles = ConsolidateVolumesCommand.class) +public class LibvirtConsolidateVolumesCommandWrapper extends CommandWrapper { + + @Override + public Answer execute(ConsolidateVolumesCommand command, LibvirtComputingResource serverResource) { + List volumeObjectTOs = command.getVolumesToConsolidate(); + String vmName = command.getVmName(); + + List successfulConsolidations = new ArrayList<>(); + try { + for (VolumeObjectTO volumeObjectTO : volumeObjectTOs) { + if (!serverResource.pullVolumeBackingFile(volumeObjectTO, vmName)) { + return new ConsolidateVolumesAnswer(command, false, "Failed to consolidate all volumes.", successfulConsolidations); + } + successfulConsolidations.add(volumeObjectTO); + } + } catch (LibvirtException ex) { + return new ConsolidateVolumesAnswer(command, false, ex.getMessage(), successfulConsolidations); + } + + KVMStoragePoolManager kvmStoragePoolManager = serverResource.getStoragePoolMgr(); + for (String secStorageUuid : command.getSecondaryStorageUuids()) { + kvmStoragePoolManager.deleteStoragePool(Storage.StoragePoolType.NetworkFilesystem, secStorageUuid); + } + return new ConsolidateVolumesAnswer(command, true, "Success", successfulConsolidations); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateDiskOnlyVMSnapshotCommandWrapper.java 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateDiskOnlyVMSnapshotCommandWrapper.java index 84d17a1a1161..c61d4f7f4ea8 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateDiskOnlyVMSnapshotCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCreateDiskOnlyVMSnapshotCommandWrapper.java @@ -19,180 +19,29 @@ package com.cloud.hypervisor.kvm.resource.wrapper; import com.cloud.agent.api.Answer; -import com.cloud.agent.api.VMSnapshotTO; import com.cloud.agent.api.storage.CreateDiskOnlyVmSnapshotAnswer; import com.cloud.agent.api.storage.CreateDiskOnlyVmSnapshotCommand; import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; -import com.cloud.hypervisor.kvm.resource.LibvirtVMDef; -import com.cloud.hypervisor.kvm.storage.KVMStoragePool; -import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; -import com.cloud.utils.Pair; +import com.cloud.utils.exception.BackupException; import com.cloud.vm.VirtualMachine; -import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; -import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.cloudstack.utils.qemu.QemuImg; -import org.apache.cloudstack.utils.qemu.QemuImgException; -import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.libvirt.Connect; -import org.libvirt.Domain; -import org.libvirt.LibvirtException; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; @ResourceWrapper(handles = CreateDiskOnlyVmSnapshotCommand.class) public class LibvirtCreateDiskOnlyVMSnapshotCommandWrapper extends CommandWrapper { - private static final String SNAPSHOT_XML = "\n" + - "%s\n" + - "\n" + - " \n" + - "%s" 
+ - " \n" + - ""; - - private static final String TAG_DISK_SNAPSHOT = "\n" + - "\n" + - "\n"; - @Override public Answer execute(CreateDiskOnlyVmSnapshotCommand cmd, LibvirtComputingResource resource) { VirtualMachine.State state = cmd.getVmState(); - if (VirtualMachine.State.Running.equals(state)) { - return takeDiskOnlyVmSnapshotOfRunningVm(cmd, resource); - } - - return takeDiskOnlyVmSnapshotOfStoppedVm(cmd, resource); - } - - protected Answer takeDiskOnlyVmSnapshotOfRunningVm(CreateDiskOnlyVmSnapshotCommand cmd, LibvirtComputingResource resource) { - String vmName = cmd.getVmName(); - logger.info("Taking disk-only VM snapshot of running VM [{}].", vmName); - - Domain dm = null; try { - LibvirtUtilitiesHelper libvirtUtilitiesHelper = resource.getLibvirtUtilitiesHelper(); - Connect conn = libvirtUtilitiesHelper.getConnection(); - List volumeObjectTOS = cmd.getVolumeTOs(); - List disks = resource.getDisks(conn, vmName); - - dm = resource.getDomain(conn, vmName); - - if (dm == null) { - return new CreateDiskOnlyVmSnapshotAnswer(cmd, false, String.format("Creation of disk-only VM Snapshot failed as we could not find the VM [%s].", vmName), null); - } - - VMSnapshotTO target = cmd.getTarget(); - Pair>> snapshotXmlAndVolumeToNewPathMap = createSnapshotXmlAndNewVolumePathMap(volumeObjectTOS, disks, target, resource); - - dm.snapshotCreateXML(snapshotXmlAndVolumeToNewPathMap.first(), getFlagsToUseForRunningVmSnapshotCreation(target)); - - return new CreateDiskOnlyVmSnapshotAnswer(cmd, true, null, snapshotXmlAndVolumeToNewPathMap.second()); - } catch (LibvirtException e) { - String errorMsg = String.format("Creation of disk-only VM snapshot for VM [%s] failed due to %s.", vmName, e.getMessage()); - logger.error(errorMsg, e); - if (e.getMessage().contains("QEMU guest agent is not connected")) { - errorMsg = "QEMU guest agent is not connected. If the VM has been recently started, it might connect soon. 
Otherwise the VM does not have the" + - " guest agent installed; thus the QuiesceVM parameter is not supported."; - return new CreateDiskOnlyVmSnapshotAnswer(cmd, false, errorMsg, null); - } - return new CreateDiskOnlyVmSnapshotAnswer(cmd, false, e.getMessage(), null); - } finally { - if (dm != null) { - try { - dm.free(); - } catch (LibvirtException l) { - logger.trace("Ignoring libvirt error.", l); - } + if (VirtualMachine.State.Running.equals(state)) { + return new CreateDiskOnlyVmSnapshotAnswer(cmd, true, null, resource.createDiskOnlyVmSnapshotForRunningVm(cmd.getVolumeTOs(), cmd.getVmName(), cmd.getTarget().getSnapshotName(), + cmd.getTarget().getQuiescevm())); } + return new CreateDiskOnlyVmSnapshotAnswer(cmd, true, null, resource.createDiskOnlyVMSnapshotOfStoppedVm(cmd.getVolumeTOs(), cmd.getVmName())); + } catch (BackupException ex) { + return new Answer(cmd, ex); } } - - protected Answer takeDiskOnlyVmSnapshotOfStoppedVm(CreateDiskOnlyVmSnapshotCommand cmd, LibvirtComputingResource resource) { - String vmName = cmd.getVmName(); - logger.info("Taking disk-only VM snapshot of stopped VM [{}].", vmName); - - Map> mapVolumeToSnapshotSizeAndNewVolumePath = new HashMap<>(); - - List volumeObjectTos = cmd.getVolumeTOs(); - KVMStoragePoolManager storagePoolMgr = resource.getStoragePoolMgr(); - try { - for (VolumeObjectTO volumeObjectTO : volumeObjectTos) { - PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) volumeObjectTO.getDataStore(); - KVMStoragePool kvmStoragePool = storagePoolMgr.getStoragePool(primaryDataStoreTO.getPoolType(), primaryDataStoreTO.getUuid()); - - String snapshotPath = UUID.randomUUID().toString(); - String snapshotFullPath = kvmStoragePool.getLocalPathFor(snapshotPath); - QemuImgFile newDelta = new QemuImgFile(snapshotFullPath, QemuImg.PhysicalDiskFormat.QCOW2); - - String currentDeltaFullPath = kvmStoragePool.getLocalPathFor(volumeObjectTO.getPath()); - QemuImgFile currentDelta = new QemuImgFile(currentDeltaFullPath, 
QemuImg.PhysicalDiskFormat.QCOW2); - - QemuImg qemuImg = new QemuImg(0); - - logger.debug("Creating new delta for volume [{}] as part of the disk-only VM snapshot process for VM [{}].", volumeObjectTO.getUuid(), vmName); - qemuImg.create(newDelta, currentDelta); - - mapVolumeToSnapshotSizeAndNewVolumePath.put(volumeObjectTO.getUuid(), new Pair<>(getFileSize(currentDeltaFullPath), snapshotPath)); - } - } catch (LibvirtException | QemuImgException e) { - logger.error("Exception while creating disk-only VM snapshot for VM [{}]. Deleting leftover deltas.", vmName, e); - for (VolumeObjectTO volumeObjectTO : volumeObjectTos) { - Pair volSizeAndNewPath = mapVolumeToSnapshotSizeAndNewVolumePath.get(volumeObjectTO.getUuid()); - PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) volumeObjectTO.getDataStore(); - KVMStoragePool kvmStoragePool = storagePoolMgr.getStoragePool(primaryDataStoreTO.getPoolType(), primaryDataStoreTO.getUuid()); - - if (volSizeAndNewPath == null) { - continue; - } - try { - Files.deleteIfExists(Path.of(kvmStoragePool.getLocalPathFor(volSizeAndNewPath.second()))); - } catch (IOException ex) { - logger.warn("Tried to delete leftover snapshot at [{}] failed.", volSizeAndNewPath.second(), ex); - } - } - return new Answer(cmd, e); - } - - return new CreateDiskOnlyVmSnapshotAnswer(cmd, true, null, mapVolumeToSnapshotSizeAndNewVolumePath); - } - - protected int getFlagsToUseForRunningVmSnapshotCreation(VMSnapshotTO target) { - int flags = target.getQuiescevm() ? 
Domain.SnapshotCreateFlags.QUIESCE : 0; - flags += Domain.SnapshotCreateFlags.DISK_ONLY + - Domain.SnapshotCreateFlags.ATOMIC + - Domain.SnapshotCreateFlags.NO_METADATA; - return flags; - } - - protected Pair>> createSnapshotXmlAndNewVolumePathMap(List volumeObjectTOS, List disks, VMSnapshotTO target, LibvirtComputingResource resource) { - StringBuilder stringBuilder = new StringBuilder(); - Map> volumeObjectToNewPathMap = new HashMap<>(); - - for (VolumeObjectTO volumeObjectTO : volumeObjectTOS) { - LibvirtVMDef.DiskDef diskdef = resource.getDiskWithPathOfVolumeObjectTO(disks, volumeObjectTO); - String newPath = UUID.randomUUID().toString(); - stringBuilder.append(String.format(TAG_DISK_SNAPSHOT, diskdef.getDiskLabel(), resource.getSnapshotTemporaryPath(diskdef.getDiskPath(), newPath))); - - long snapSize = getFileSize(diskdef.getDiskPath()); - - volumeObjectToNewPathMap.put(volumeObjectTO.getUuid(), new Pair<>(snapSize, newPath)); - } - - String snapshotXml = String.format(SNAPSHOT_XML, target.getSnapshotName(), stringBuilder); - return new Pair<>(snapshotXml, volumeObjectToNewPathMap); - } - - protected long getFileSize(String path) { - return new File(path).length(); - } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtFinalizeBackupCompressionCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtFinalizeBackupCompressionCommandWrapper.java new file mode 100644 index 000000000000..4b02c20c6d26 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtFinalizeBackupCompressionCommandWrapper.java @@ -0,0 +1,73 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.hypervisor.kvm.resource.wrapper; + +import com.cloud.agent.api.Answer; +import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; +import com.cloud.hypervisor.kvm.storage.KVMStoragePool; +import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.resource.CommandWrapper; +import com.cloud.resource.ResourceWrapper; + +import org.apache.cloudstack.backup.FinalizeBackupCompressionCommand; +import org.apache.cloudstack.storage.to.BackupDeltaTO; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; + +@ResourceWrapper(handles = FinalizeBackupCompressionCommand.class) +public class LibvirtFinalizeBackupCompressionCommandWrapper extends CommandWrapper { + @Override + public Answer execute(FinalizeBackupCompressionCommand command, LibvirtComputingResource serverResource) { + KVMStoragePool storagePool = null; + KVMStoragePoolManager storagePoolManager = serverResource.getStoragePoolMgr(); + long totalPhysicalSize = 0; + + if (command.isCleanup()) { + logger.info("Cleaning up compressed backup deltas [{}].", command.getBackupDeltaTOList()); + } else { + logger.info("Finalizing backup compression for deltas [{}].", command.getBackupDeltaTOList()); + } + try { + storagePool = 
storagePoolManager.getStoragePoolByURI(command.getBackupDeltaTOList().get(0).getDataStore().getUrl()); + for (BackupDeltaTO delta : command.getBackupDeltaTOList()) { + Path deltaPath = Path.of(storagePool.getLocalPathFor(delta.getPath())); + Path compressedDeltaPath = Path.of(deltaPath + ".comp"); + + if (command.isCleanup()) { + logger.debug("Cleaning up backup delta at [{}].", compressedDeltaPath); + Files.deleteIfExists(compressedDeltaPath); + continue; + } + + logger.debug("Moving compressed backup delta at [{}] to [{}].", compressedDeltaPath, deltaPath); + Files.move(compressedDeltaPath, deltaPath, StandardCopyOption.REPLACE_EXISTING); + totalPhysicalSize += Files.size(deltaPath); + } + } catch (IOException e) { + return new Answer(command, e); + } finally { + if (storagePool != null) { + storagePoolManager.deleteStoragePool(storagePool.getType(), storagePool.getUuid()); + } + } + return new Answer(command, true, String.valueOf(totalPhysicalSize)); + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetStorageStatsCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetStorageStatsCommandWrapper.java index 419b54492583..424622dfc2ce 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetStorageStatsCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetStorageStatsCommandWrapper.java @@ -36,7 +36,7 @@ public final class LibvirtGetStorageStatsCommandWrapper extends CommandWrapper { @Override public Answer execute(MergeDiskOnlyVmSnapshotCommand command, LibvirtComputingResource serverResource) { - VirtualMachine.State vmState = command.getVmState(); + boolean isVmRunning = command.isVmRunning(); try { - if (VirtualMachine.State.Running.equals(vmState)) { + if (isVmRunning) { return mergeDiskOnlySnapshotsForRunningVM(command, serverResource); } 
    /**
     * Merges the given delta merge trees for a stopped VM, delegating the per-tree
     * qemu-img work to the computing resource.
     *
     * Stops at the first tree that fails with an IOException and returns a failure
     * answer; trees merged before the failure are NOT rolled back.
     *
     * @return a success {@link Answer}, or a failure {@link Answer} wrapping the
     *         {@link IOException} of the first failed merge.
     */
    protected Answer mergeDiskOnlySnapshotsForStoppedVM(MergeDiskOnlyVmSnapshotCommand cmd, LibvirtComputingResource resource) throws QemuImgException, LibvirtException {
        List<DeltaMergeTreeTO> deltaMergeTreeTOList = cmd.getDeltaMergeTreeToList();

        logger.debug("Merging deltas for stopped VM [{}] using the following Delta Merge Trees [{}].", cmd.getVmName(), deltaMergeTreeTOList);

        for (DeltaMergeTreeTO deltaMergeTreeTO : deltaMergeTreeTOList) {
            try {
                resource.mergeDeltaForStoppedVm(deltaMergeTreeTO);
            } catch (IOException ex) {
                return new Answer(cmd, ex);
            }
        }

        return new Answer(cmd, true, null);
    }

    /**
     * Merges the given delta merge trees for a running VM, delegating the per-tree
     * live merge to the computing resource (which needs the domain name and the
     * tree's volume to locate the disk).
     *
     * Unlike the stopped-VM path, failures surface as {@link QemuImgException} /
     * {@link LibvirtException} thrown to the caller rather than a failure answer.
     */
    protected Answer mergeDiskOnlySnapshotsForRunningVM(MergeDiskOnlyVmSnapshotCommand cmd, LibvirtComputingResource resource) throws QemuImgException, LibvirtException {
        String vmName = cmd.getVmName();
        List<DeltaMergeTreeTO> deltaMergeTreeTOs = cmd.getDeltaMergeTreeToList();

        logger.debug("Merging deltas for running VM [{}] using the following Delta Merge Trees [{}].", vmName, deltaMergeTreeTOs);

        for (DeltaMergeTreeTO deltaMergeTreeTO : deltaMergeTreeTOs) {
            resource.mergeDeltaForRunningVm(deltaMergeTreeTO, vmName, deltaMergeTreeTO.getVolumeObjectTO());
        }

        return new Answer(cmd, true, null);
    }
@ResourceWrapper(handles = MigrateBackupsBetweenSecondaryStoragesCommand.class)
public class LibvirtMigrateBackupsBetweenSecondaryStoragesCommandWrapper extends LibvirtMigrateResourceBetweenSecondaryStorages {

    /**
     * Migrates backup chains from a source secondary storage to a destination secondary
     * storage. Chains are processed level by level: backups still on the source store are
     * copied to the destination (rebased on the copied parent), while backups already
     * elsewhere are rebased only when their parent was just migrated. Files queued for
     * removal and all registered pools are cleaned up in the finally block.
     *
     * NOTE(review): assumes each inner list of getBackupChain() is one "level" whose
     * entries become the parents of the next level, keyed by volume id — confirm with the
     * caller building the chains.
     */
    @Override
    public Answer execute(MigrateBackupsBetweenSecondaryStoragesCommand command, LibvirtComputingResource serverResource) {
        // State inherited from LibvirtMigrateResourceBetweenSecondaryStorages.
        resourceType = BACKUP;
        filesToRemove = new HashSet<>();
        resourcesToUpdate = new ArrayList<>();
        wait = command.getWait() * 1000;

        DataStoreTO srcDataStore = command.getSrcDataStore();
        DataStoreTO destDataStore = command.getDestDataStore();
        KVMStoragePoolManager storagePoolManager = serverResource.getStoragePoolMgr();

        // Every pool touched during the migration; all are unregistered in the finally block.
        Set<KVMStoragePool> imagePools = new HashSet<>();
        KVMStoragePool destImagePool = storagePoolManager.getStoragePoolByURI(destDataStore.getUrl());
        imagePools.add(destImagePool);

        String imagePoolUrl;
        KVMStoragePool imagePool = null;

        List<List<DataTO>> backupChains = command.getBackupChain();

        try {
            // Per-volume path and "was migrated" flag of the previous chain level;
            // children of the next level are resolved/rebased against these.
            Map<String, String> parentBackupPathMap = new HashMap<>();
            Map<String, Boolean> parentBackupMigrationMap = new HashMap<>();

            Map<String, String> backupPathMap = new HashMap<>();
            Map<String, Boolean> backupMigrationMap = new HashMap<>();

            for (List<DataTO> chain : backupChains) {
                long lastBackupId = 0;
                boolean backupWasMigrated = false;

                backupPathMap.clear();
                backupMigrationMap.clear();

                for (DataTO backup : chain) {
                    lastBackupId = backup.getId();

                    imagePoolUrl = backup.getDataStore().getUrl();
                    imagePool = storagePoolManager.getStoragePoolByURI(imagePoolUrl);
                    imagePools.add(imagePool);

                    // Assumes the backup path's third segment is the volume id — TODO confirm
                    // against the path layout used by the backup provider.
                    String volumeId = backup.getPath().split("/")[2];
                    String resourceCurrentPath = imagePool.getLocalPathFor(backup.getPath());
                    String resourceParentPath = parentBackupPathMap.get(volumeId);

                    if (imagePoolUrl.equals(srcDataStore.getUrl())) {
                        // Backup lives on the source store: copy it to the destination.
                        backupPathMap.put(volumeId, copyResourceToDestDataStore(backup, resourceCurrentPath, destImagePool, resourceParentPath));
                        backupMigrationMap.put(volumeId, true);
                        backupWasMigrated = true;
                    } else {
                        // Backup already elsewhere: only rebase it if its parent moved.
                        if (BooleanUtils.isTrue(parentBackupMigrationMap.get(volumeId))) {
                            backupPathMap.put(volumeId, rebaseResourceToNewParentPath(resourceCurrentPath, resourceParentPath));
                        } else {
                            backupPathMap.put(volumeId, resourceCurrentPath);
                        }
                        backupMigrationMap.put(volumeId, false);
                    }
                }

                // Current level becomes the parent level for the next iteration.
                parentBackupPathMap.clear();
                parentBackupPathMap.putAll(backupPathMap);

                parentBackupMigrationMap.clear();
                parentBackupMigrationMap.putAll(backupMigrationMap);

                if (backupWasMigrated) {
                    resourcesToUpdate.add(new Pair<>(lastBackupId, null));
                }
            }
        } catch (LibvirtException | QemuImgException e) {
            logger.error("Exception while migrating backups [{}] to secondary storage [{}] due to: [{}].",
                    command.getBackupChain(), imagePool, e.getMessage(), e);
            return new MigrateBetweenSecondaryStoragesCommandAnswer(command, false, "Migration of backups between secondary storages failed", resourcesToUpdate);
        } finally {
            for (String file : filesToRemove) {
                removeResourceFromSourceDataStore(file);
            }

            for (KVMStoragePool storagePool : imagePools) {
                storagePoolManager.deleteStoragePool(storagePool.getType(), storagePool.getUuid());
            }
        }

        return new MigrateBetweenSecondaryStoragesCommandAnswer(command, true, "success", resourcesToUpdate);
    }
}
// The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;

import com.cloud.agent.api.Answer;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.storage.Storage;
import com.cloud.utils.Pair;
import org.apache.cloudstack.backup.RestoreKnibBackupAnswer;
import org.apache.cloudstack.backup.RestoreKnibBackupCommand;
import org.apache.cloudstack.storage.to.BackupDeltaTO;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.libvirt.LibvirtException;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashSet;
import java.util.Set;

/**
 * Restores KNIB backups on the KVM host: mounts the secondary storages that hold the backup
 * chain, restores each volume from its backup file (either by full conversion or, for quick
 * restore, by creating a thin delta backed by the backup), and removes any leftover deltas.
 */
@ResourceWrapper(handles = RestoreKnibBackupCommand.class)
public class LibvirtRestoreKnibBackupCommandWrapper extends CommandWrapper<RestoreKnibBackupCommand, Answer, LibvirtComputingResource> {

    @Override
    public Answer execute(RestoreKnibBackupCommand cmd, LibvirtComputingResource resource) {
        Set<Pair<BackupDeltaTO, VolumeObjectTO>> backupToAndVolumeObjectPairs = cmd.getBackupAndVolumePairs();
        Set<BackupDeltaTO> deltasToRemove = cmd.getDeltasToRemove();
        Set<String> secondaryStorageUrls = cmd.getSecondaryStorageUrls();

        KVMStoragePoolManager storagePoolManager = resource.getStoragePoolMgr();

        Set<String> secondaryStorageUuids = new HashSet<>();

        // Fix: the original unconditionally called backupToAndVolumeObjectPairs.stream().findFirst().get(),
        // which throws a bare NoSuchElementException when the pair set is null/empty. Fail fast with a
        // descriptive answer instead.
        if (backupToAndVolumeObjectPairs == null || backupToAndVolumeObjectPairs.isEmpty()) {
            return new RestoreKnibBackupAnswer(cmd,
                    new IllegalArgumentException("No backup/volume pairs were provided for the restore command."),
                    secondaryStorageUuids);
        }

        try {
            // The first pair's data store URL identifies the secondary storage holding the backup being restored;
            // the other URLs hold parents of the backup chain.
            KVMStoragePool secondaryStorage = mountSecondaryStorages(secondaryStorageUrls,
                    backupToAndVolumeObjectPairs.iterator().next().first().getDataStore().getUrl(),
                    storagePoolManager, secondaryStorageUuids);

            restoreVolumes(backupToAndVolumeObjectPairs, secondaryStorage, storagePoolManager, cmd.isQuickRestore(), cmd.getWait() * 1000);

            deleteDeltas(deltasToRemove, storagePoolManager);
        } catch (LibvirtException | QemuImgException | IOException e) {
            return new RestoreKnibBackupAnswer(cmd, e, secondaryStorageUuids);
        } finally {
            // On a quick restore the restored volume is a delta whose backing file still lives on
            // secondary storage, so the mounts must stay in place; otherwise unmount everything.
            if (!cmd.isQuickRestore()) {
                for (String uuid : secondaryStorageUuids) {
                    storagePoolManager.deleteStoragePool(Storage.StoragePoolType.NetworkFilesystem, uuid);
                }
            }
        }
        return new RestoreKnibBackupAnswer(cmd, secondaryStorageUuids);
    }

    /**
     * Restores each volume from its backup file.
     *
     * @param quickRestore when true, creates a new QCOW2 delta on primary storage backed by the
     *                     backup on secondary storage (fast, but keeps the mount); when false,
     *                     fully converts the backup into the volume.
     * @param timeoutInMillis qemu-img operation timeout, in milliseconds.
     */
    private void restoreVolumes(Set<Pair<BackupDeltaTO, VolumeObjectTO>> backupToAndVolumeObjectPairs, KVMStoragePool secondaryStorage, KVMStoragePoolManager storagePoolManager,
            boolean quickRestore, int timeoutInMillis) throws LibvirtException, QemuImgException {
        for (Pair<BackupDeltaTO, VolumeObjectTO> backupToVolumeToPair : backupToAndVolumeObjectPairs) {
            String fullBackupPath = secondaryStorage.getLocalPathFor(backupToVolumeToPair.first().getPath());

            VolumeObjectTO volumeObjectTO = backupToVolumeToPair.second();
            PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) volumeObjectTO.getDataStore();
            KVMStoragePool primaryStoragePool = storagePoolManager.getStoragePool(primaryDataStoreTO.getPoolType(), primaryDataStoreTO.getUuid());
            String fullVolumePath = primaryStoragePool.getLocalPathFor(volumeObjectTO.getPath());

            QemuImgFile backup = new QemuImgFile(fullBackupPath, QemuImg.PhysicalDiskFormat.QCOW2);
            QemuImgFile volume = new QemuImgFile(fullVolumePath, QemuImg.PhysicalDiskFormat.QCOW2);

            QemuImg qemuImg = new QemuImg(timeoutInMillis);

            if (quickRestore) {
                logger.info("Creating delta over old volume [{}] at [{}] with backing store stored at [{}].", volumeObjectTO.getUuid(), fullVolumePath, fullBackupPath);
                qemuImg.create(volume, backup);
            } else {
                logger.info("Restoring volume [{}] at [{}] with backup stored at [{}].", volumeObjectTO.getUuid(), fullVolumePath, fullBackupPath);
                qemuImg.convert(backup, volume);
            }
        }
    }

    /**
     * Removes leftover backup deltas from primary storage. Missing files are ignored
     * ({@link Files#deleteIfExists}).
     */
    private void deleteDeltas(Set<BackupDeltaTO> deltasToRemove, KVMStoragePoolManager storagePoolManager) throws IOException {
        for (BackupDeltaTO deltaToRemove : deltasToRemove) {
            PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) deltaToRemove.getDataStore();
            KVMStoragePool primaryStoragePool = storagePoolManager.getStoragePool(primaryDataStoreTO.getPoolType(), primaryDataStoreTO.getUuid());
            String fullDeltaPath = primaryStoragePool.getLocalPathFor(deltaToRemove.getPath());
            logger.debug("Deleting leftover delta [{}].", fullDeltaPath);
            Files.deleteIfExists(Path.of(fullDeltaPath));
        }
    }

    /**
     * Mounts every secondary storage involved in the restore, recording each pool UUID in
     * {@code secondaryStorageUuids} so the caller can unmount (or report) them later.
     *
     * @return the pool of the secondary storage holding the backup being restored.
     */
    private KVMStoragePool mountSecondaryStorages(Set<String> parentSecondaryStorageUrls, String secondaryStorageUrl, KVMStoragePoolManager storagePoolManager,
            Set<String> secondaryStorageUuids) {
        for (String url : parentSecondaryStorageUrls) {
            KVMStoragePool pool = storagePoolManager.getStoragePoolByURI(url);
            secondaryStorageUuids.add(pool.getUuid());
        }
        KVMStoragePool pool = storagePoolManager.getStoragePoolByURI(secondaryStorageUrl);
        secondaryStorageUuids.add(pool.getUuid());
        return pool;
    }
}
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapper.java @@ -28,6 +28,7 @@ import com.cloud.agent.properties.AgentProperties; import com.cloud.agent.properties.AgentPropertiesFileHandler; +import com.cloud.storage.Storage; import org.apache.cloudstack.storage.command.RevertSnapshotCommand; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; @@ -58,6 +59,8 @@ import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.libvirt.LibvirtException; +import static com.cloud.hypervisor.kvm.storage.KVMStorageProcessor.poolTypesToDeleteChainInfo; + @ResourceWrapper(handles = RevertSnapshotCommand.class) public class LibvirtRevertSnapshotCommandWrapper extends CommandWrapper { @@ -128,7 +131,7 @@ public Answer execute(final RevertSnapshotCommand command, final LibvirtComputin return new Answer(command, false, result); } } else { - revertVolumeToSnapshot(secondaryStoragePool, snapshotOnPrimaryStorage, snapshot, primaryPool, libvirtComputingResource); + revertVolumeToSnapshot(secondaryStoragePool, snapshotOnPrimaryStorage, snapshot, primaryPool, libvirtComputingResource, command.isDeleteChain()); } } @@ -161,7 +164,7 @@ protected String getFullPathAccordingToStorage(KVMStoragePool kvmStoragePool, St * Reverts the volume to the snapshot. 
*/ protected void revertVolumeToSnapshot(KVMStoragePool kvmStoragePoolSecondary, SnapshotObjectTO snapshotOnPrimaryStorage, SnapshotObjectTO snapshotOnSecondaryStorage, - KVMStoragePool kvmStoragePoolPrimary, LibvirtComputingResource resource) { + KVMStoragePool kvmStoragePoolPrimary, LibvirtComputingResource resource, boolean deleteChain) { VolumeObjectTO volumeObjectTo = snapshotOnSecondaryStorage.getVolume(); String volumePath = getFullPathAccordingToStorage(kvmStoragePoolPrimary, volumeObjectTo.getPath()); @@ -178,6 +181,11 @@ protected void revertVolumeToSnapshot(KVMStoragePool kvmStoragePoolSecondary, Sn try { replaceVolumeWithSnapshot(volumePath, snapshotPath); + if (volumeObjectTo.getChainInfo() != null && poolTypesToDeleteChainInfo.contains(kvmStoragePoolPrimary.getType()) && + volumeObjectTo.getFormat() == Storage.ImageFormat.QCOW2 && deleteChain) { + logger.debug("Deleting leftover backup delta at [{}].", volumeObjectTo.getChainInfo()); + kvmStoragePoolPrimary.deletePhysicalDisk(volumeObjectTo.getChainInfo(), volumeObjectTo.getFormat()); + } logger.debug(String.format("Successfully reverted volume [%s] to snapshot [%s].", volumeObjectTo, snapshotToPrint)); } catch (LibvirtException | QemuImgException ex) { throw new CloudRuntimeException(String.format("Unable to revert volume [%s] to snapshot [%s] due to [%s].", volumeObjectTo, snapshotToPrint, ex.getMessage()), ex); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtTakeKnibBackupCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtTakeKnibBackupCommandWrapper.java new file mode 100644 index 000000000000..8376f22d98b3 --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtTakeKnibBackupCommandWrapper.java @@ -0,0 +1,391 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
// See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//

package com.cloud.hypervisor.kvm.resource.wrapper;

import com.cloud.agent.api.Answer;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.utils.exception.BackupException;
import org.apache.cloudstack.backup.TakeKnibBackupAnswer;
import org.apache.cloudstack.storage.to.BackupDeltaTO;
import org.apache.cloudstack.storage.to.DeltaMergeTreeTO;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.storage.KVMStoragePool;
import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
import com.cloud.utils.Pair;
import org.apache.cloudstack.backup.TakeKnibBackupCommand;
import org.apache.cloudstack.storage.to.KnibTO;
import org.apache.cloudstack.storage.to.PrimaryDataStoreTO;
import org.apache.cloudstack.storage.to.VolumeObjectTO;
import org.apache.cloudstack.utils.qemu.QemuImageOptions;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImgException;
import org.apache.cloudstack.utils.qemu.QemuImgFile;
import org.libvirt.LibvirtException;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;

/**
 * Takes a KNIB backup of a VM on the KVM host. The overall flow is:
 * <ol>
 *   <li>Create a disk-only snapshot of every volume (running or stopped VM), so the previous
 *       top file becomes the backup delta;</li>
 *   <li>Copy each delta (plus any VM-snapshot deltas created since the last backup) to
 *       secondary storage ({@link #backupVolumes});</li>
 *   <li>Merge the on-primary deltas back and, when the chain ends, normalize the volume
 *       ({@link #cleanupVm}).</li>
 * </ol>
 * On failure during step 2, the VM is rolled back to its previous state and partial uploads are
 * removed.
 * <p>
 * NOTE(review): generic type parameters below were reconstructed from usage; the original patch
 * text was mangled and the {@code <...>} parameters were stripped — confirm against the repo.
 */
@ResourceWrapper(handles = TakeKnibBackupCommand.class)
public class LibvirtTakeKnibBackupCommandWrapper extends CommandWrapper<TakeKnibBackupCommand, Answer, LibvirtComputingResource> {

    @Override
    public Answer execute(TakeKnibBackupCommand command, LibvirtComputingResource resource) {
        String vmName = command.getVmName();
        logger.info("Starting backup process for VM [{}].", vmName);
        List<KnibTO> knibTOs = command.getKnibTOs();
        List<VolumeObjectTO> volumeObjectTOs = knibTOs.stream().map(KnibTO::getVolumeObjectTO).collect(Collectors.toList());

        // volume UUID -> (delta size, new top volume path) produced by the disk-only snapshot.
        Map<String, Pair<Long, String>> mapVolumeUuidToDeltaSizeAndNewVolumePath;
        // volume UUID -> (delta path on secondary storage, delta size) filled by backupVolumes.
        Map<String, Pair<String, Long>> mapVolumeUuidToDeltaPathOnSecondaryAndDeltaSize = new HashMap<>();
        // volume UUID -> final volume path after cleanup/merge, filled by cleanupVm.
        Map<String, String> mapVolumeUuidToNewVolumePath = new HashMap<>();

        KVMStoragePoolManager storagePoolManager = resource.getStoragePoolMgr();
        boolean runningVM = command.isRunningVM();

        try {
            // Snapshotting freezes the current top file, which becomes the delta to back up.
            if (runningVM) {
                mapVolumeUuidToDeltaSizeAndNewVolumePath = resource.createDiskOnlyVmSnapshotForRunningVm(volumeObjectTOs, vmName, UUID.randomUUID().toString(), command.isQuiesceVm());
            } else {
                mapVolumeUuidToDeltaSizeAndNewVolumePath = resource.createDiskOnlyVMSnapshotOfStoppedVm(volumeObjectTOs, vmName);
            }

            backupVolumes(command, resource, storagePoolManager, knibTOs, mapVolumeUuidToDeltaSizeAndNewVolumePath, volumeObjectTOs, vmName, runningVM, mapVolumeUuidToDeltaPathOnSecondaryAndDeltaSize);

            cleanupVm(command, resource, knibTOs, mapVolumeUuidToDeltaSizeAndNewVolumePath, vmName, runningVM, mapVolumeUuidToNewVolumePath);
        } catch (BackupException ex) {
            return new TakeKnibBackupAnswer(command, ex);
        }

        return new TakeKnibBackupAnswer(command, true, mapVolumeUuidToNewVolumePath, mapVolumeUuidToDeltaPathOnSecondaryAndDeltaSize);
    }

    /**
     * Backup (copy) volumes to secondary storage. Will also populate the mapVolumeUuidToDeltaPathOnSecondaryAndDeltaSize argument.
     * The timeout for this method is guided by the wait time for the given command; if the wait time is bigger than 24 days, there will be an overflow on the timeout
     * (wait is seconds held in an int, multiplied by 1000).
     * <p>
     * If an exception is caught while copying the volumes, will try to recover the VM to the previous state so that it is consistent.
     */
    private void backupVolumes(TakeKnibBackupCommand command, LibvirtComputingResource resource, KVMStoragePoolManager storagePoolManager, List<KnibTO> knibTOs,
            Map<String, Pair<Long, String>> mapVolumeUuidToDeltaSizeAndNewVolumePath, List<VolumeObjectTO> volumeObjectTOs, String vmName, boolean runningVM,
            Map<String, Pair<String, Long>> mapVolumeUuidToDeltaPathOnSecondaryAndDeltaSize) {
        try {
            int maxWaitInMillis = command.getWait() * 1000;
            for (KnibTO knibTO : knibTOs) {
                long startTimeMillis = System.currentTimeMillis();
                VolumeObjectTO volumeObjectTO = knibTO.getVolumeObjectTO();
                String volumeUuid = volumeObjectTO.getUuid();

                logger.debug("Backing up volume [{}].", volumeUuid);
                Pair<String, Long> deltaPathOnSecondaryAndSize = copyBackupDeltaToSecondary(storagePoolManager, knibTO, command.getBackupChainImageStoreUrls(),
                        command.getImageStoreUrl(), maxWaitInMillis);

                mapVolumeUuidToDeltaPathOnSecondaryAndDeltaSize.put(volumeUuid, deltaPathOnSecondaryAndSize);
                // The remaining budget shrinks with every volume; the whole command shares one wait.
                maxWaitInMillis = calculateRemainingTime(maxWaitInMillis, startTimeMillis);
            }
        } catch (Exception ex) {
            // Roll the VM back to its pre-backup state and delete whatever was partially uploaded.
            recoverPreviousVmStateAndDeletePartialBackup(resource, volumeObjectTOs, mapVolumeUuidToDeltaSizeAndNewVolumePath, vmName, runningVM,
                    mapVolumeUuidToDeltaPathOnSecondaryAndDeltaSize, storagePoolManager, command.getImageStoreUrl());
            throw new BackupException(String.format("There was an exception during the backup process for VM [%s], but the VM has been successfully normalized.", vmName),
                    ex, true);
        }
    }

    // Subtracts the elapsed time from the remaining command budget; throws once the budget is exhausted.
    private int calculateRemainingTime(int maxWaitInMillis, long startTimeMillis) throws TimeoutException {
        maxWaitInMillis -= (int)(System.currentTimeMillis() - startTimeMillis);
        if (maxWaitInMillis < 0) {
            throw new TimeoutException("Timeout while converting backups to secondary storage.");
        }
        return maxWaitInMillis;
    }

    /**
     * For each KnibTO, will merge its DeltaMergeTreeTO (if it exists). Also, if this is the end of the chain, will also end the chain for the volume.
     * Will populate the mapVolumeUuidToNewVolumePath argument.
     */
    private void cleanupVm(TakeKnibBackupCommand command, LibvirtComputingResource resource, List<KnibTO> knibTOs,
            Map<String, Pair<Long, String>> mapVolumeUuidToDeltaSizeAndNewVolumePath, String vmName, boolean runningVM, Map<String, String> mapVolumeUuidToNewVolumePath) {
        for (KnibTO knibTO : knibTOs) {
            VolumeObjectTO volumeObjectTO = knibTO.getVolumeObjectTO();
            String currentVolumePath = volumeObjectTO.getPath();
            String volumeUuid = volumeObjectTO.getUuid();
            DeltaMergeTreeTO deltaMergeTreeTO = knibTO.getDeltaMergeTreeTO();
            // From here on the TO points at the new top file created by the disk-only snapshot.
            volumeObjectTO.setPath(mapVolumeUuidToDeltaSizeAndNewVolumePath.get(volumeUuid).second());

            if (deltaMergeTreeTO != null) {
                List<String> snapshotDataStoreVos = knibTO.getVmSnapshotDeltaPaths();
                // NOTE(review): copyBackupDeltaToSecondary drains this same live list (add + remove(0)
                // until empty), so isEmpty() here is always true after a successful backup — confirm
                // that countNewestDeltaAsGrandchild is meant to reflect the POST-copy state.
                mergeBackupDelta(resource, deltaMergeTreeTO, volumeObjectTO, vmName, runningVM, volumeUuid, snapshotDataStoreVos.isEmpty());
            }

            if (command.isEndChain() || command.isIsolated()) {
                String baseVolumePath = currentVolumePath;
                // If the old top was itself a merge-tree child, the real base is its parent.
                if (deltaMergeTreeTO != null && deltaMergeTreeTO.getChild().getPath().equals(baseVolumePath)) {
                    baseVolumePath = deltaMergeTreeTO.getParent().getPath();
                }
                endChainForVolume(resource, volumeObjectTO, vmName, runningVM, volumeUuid, baseVolumePath);
                mapVolumeUuidToNewVolumePath.put(volumeUuid, baseVolumePath);
            } else {
                mapVolumeUuidToNewVolumePath.put(volumeUuid, mapVolumeUuidToDeltaSizeAndNewVolumePath.get(volumeUuid).second());
            }
        }
    }

    /**
     * Copy the backup delta to the secondary storage. Since we created a snapshot on top of the volume, the volume is now the backup delta.
     * If there were snapshots created after the last backup, they'll be copied alongside and merged in the secondary storage.
     */
    private Pair<String, Long> copyBackupDeltaToSecondary(KVMStoragePoolManager storagePoolManager, KnibTO knibTO, List<String> chainImageStoreUrls, String imageStoreUrl,
            int waitInMillis) {
        VolumeObjectTO delta = knibTO.getVolumeObjectTO();
        String parentDeltaPathOnSecondary = knibTO.getPathBackupParentOnSecondary();
        // NOTE(review): this is the live list from the command TO and is mutated below (add here,
        // remove(0) in the loop) — cleanupVm re-reads it afterwards; confirm this sharing is intended.
        List<String> deltaPathsToCopy = knibTO.getVmSnapshotDeltaPaths();
        deltaPathsToCopy.add(delta.getPath());

        KVMStoragePool parentImagePool = null;
        List<KVMStoragePool> chainImagePools = null;
        KVMStoragePool imagePool = null;
        long backupSize;
        final String backupOnSecondary = getRelativePathOnSecondaryForBackup(delta.getAccountId(), delta.getVolumeId(), UUID.randomUUID().toString());
        ArrayList<String> temporaryDeltasToRemove = new ArrayList<>();
        boolean result = false;
        try {
            imagePool = storagePoolManager.getStoragePoolByURI(imageStoreUrl);
            if (chainImageStoreUrls != null) {
                // First URL holds the direct parent; the rest of the chain is mounted so qemu-img
                // can resolve the full backing chain during conversion.
                parentImagePool = storagePoolManager.getStoragePoolByURI(chainImageStoreUrls.get(0));
                chainImagePools = chainImageStoreUrls.subList(1, chainImageStoreUrls.size()).stream().map(storagePoolManager::getStoragePoolByURI).collect(Collectors.toList());
            }

            PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) delta.getDataStore();
            KVMStoragePool primaryPool = storagePoolManager.getStoragePool(primaryDataStoreTO.getPoolType(), primaryDataStoreTO.getUuid());

            // Copy the deltas oldest-first; each copied delta becomes the parent of the next one.
            String topDelta = backupOnSecondary;
            while (!deltaPathsToCopy.isEmpty()) {
                String backupDeltaFullPathOnSecondary = imagePool.getLocalPathFor(topDelta);
                temporaryDeltasToRemove.add(backupDeltaFullPathOnSecondary);
                String parentBackupFullPath = null;

                if (parentDeltaPathOnSecondary != null) {
                    parentBackupFullPath = parentImagePool.getLocalPathFor(parentDeltaPathOnSecondary);
                }

                String backupDeltaFullPathOnPrimary = primaryPool.getLocalPathFor(deltaPathsToCopy.remove(0));
                convertDeltaToSecondary(backupDeltaFullPathOnPrimary, backupDeltaFullPathOnSecondary, parentBackupFullPath, delta.getUuid(), waitInMillis);

                if (!deltaPathsToCopy.isEmpty()) {
                    parentDeltaPathOnSecondary = topDelta;
                    topDelta = getRelativePathOnSecondaryForBackup(delta.getAccountId(), delta.getVolumeId(), UUID.randomUUID().toString());
                    parentImagePool = imagePool;
                }
            }

            String backupOnSecondaryFullPath = imagePool.getLocalPathFor(backupOnSecondary);

            // If more than one delta was copied, collapse them into the single base backup file.
            commitTopDeltaOnBaseBackupOnSecondaryIfNeeded(topDelta, backupOnSecondary, imagePool, backupOnSecondaryFullPath, waitInMillis);

            backupSize = Files.size(Path.of(backupOnSecondaryFullPath));
            result = true;
        } catch (LibvirtException | QemuImgException | IOException e) {
            logger.error("Exception while converting backup [{}] to secondary storage [{}] due to: [{}].", delta.getPath(), imagePool, e.getMessage(), e);
            throw new BackupException("Exception while converting backup to secondary storage.", e, true);
        } finally {
            removeTemporaryDeltas(temporaryDeltasToRemove, result);

            // NOTE(review): after the loop, parentImagePool may alias imagePool, so the same pool can be
            // passed to deleteStoragePool twice — presumably the unmount is idempotent; verify.
            if (parentImagePool != null) {
                storagePoolManager.deleteStoragePool(parentImagePool.getType(), parentImagePool.getUuid());
            }
            if (chainImagePools != null) {
                chainImagePools.forEach(pool -> storagePoolManager.deleteStoragePool(pool.getType(), pool.getUuid()));
            }
            if (imagePool != null) {
                storagePoolManager.deleteStoragePool(imagePool.getType(), imagePool.getUuid());
            }
        }
        return new Pair<>(backupOnSecondary, backupSize);
    }

    /**
     * If there were VM snapshots created after the last backup, we will have copied them alongside the backup delta. If this is the case, we will commit all of them into a single
     * base file so that we are left with one file per volume per backup.
     */
    private void commitTopDeltaOnBaseBackupOnSecondaryIfNeeded(String topDelta, String backupOnSecondary, KVMStoragePool imagePool, String backupOnSecondaryFullPath,
            int waitInMillis) throws LibvirtException, QemuImgException {
        // topDelta == backupOnSecondary means only one delta was copied; nothing to commit.
        if (topDelta.equals(backupOnSecondary)) {
            return;
        }

        QemuImg qemuImg = new QemuImg(waitInMillis);
        QemuImgFile topDeltaImg = new QemuImgFile(imagePool.getLocalPathFor(topDelta), QemuImg.PhysicalDiskFormat.QCOW2);
        QemuImgFile baseDeltaImg = new QemuImgFile(backupOnSecondaryFullPath, QemuImg.PhysicalDiskFormat.QCOW2);

        logger.debug("Committing top delta [{}] on base delta [{}].", topDeltaImg, baseDeltaImg);
        qemuImg.commit(topDeltaImg, baseDeltaImg, true);
    }

    /**
     * Will remove any temporary deltas created on secondary storage. If result is true, this means that the backup was a success and the first "temporary delta" is our backup, so
     * it will not be removed.
     * <p>
     * There are two uses for this method:
     * - If we fail to backup we have to clean up the secondary storage.
     * - If we had VM snapshots created after the last backup, we copied multiple files to secondary storage, and thus we have to clean them up after merging them.
     */
    private void removeTemporaryDeltas(List<String> temporaryDeltasToRemove, boolean result) {
        if (result) {
            // First entry is the final backup file; keep it.
            temporaryDeltasToRemove.remove(0);
        }
        logger.debug("Removing temporary deltas [{}].", temporaryDeltasToRemove);
        for (String delta : temporaryDeltasToRemove) {
            try {
                Files.deleteIfExists(Path.of(delta));
            } catch (IOException ex) {
                // Best-effort cleanup: a stale temporary delta must not fail an otherwise good backup.
                logger.error("Failed to remove temporary delta [{}]. Will not stop the backup process, but this should be investigated.", delta, ex);
            }
        }
    }

    /**
     * Converts a delta from primary storage to secondary storage; if a parent was given, will set it as the backing file for the delta being copied.
     *
     * @param pathDeltaOnPrimary absolute path of the delta to be copied.
     * @param pathDeltaOnSecondary absolute path of the destination of the delta to be copied.
     * @param pathParentOnSecondary absolute path of the parent delta, if it exists.
     * @param volumeUuid volume uuid, used for logging.
     * @param waitInMillis timeout in milliseconds.
     */
    private void convertDeltaToSecondary(String pathDeltaOnPrimary, String pathDeltaOnSecondary, String pathParentOnSecondary, String volumeUuid, int waitInMillis)
            throws QemuImgException, LibvirtException {
        QemuImgFile backupDestination = new QemuImgFile(pathDeltaOnSecondary, QemuImg.PhysicalDiskFormat.QCOW2);
        QemuImgFile backupOrigin = new QemuImgFile(pathDeltaOnPrimary, QemuImg.PhysicalDiskFormat.QCOW2);
        QemuImgFile parentBackup = null;

        if (pathParentOnSecondary != null) {
            parentBackup = new QemuImgFile(pathParentOnSecondary, QemuImg.PhysicalDiskFormat.QCOW2);
        }

        logger.debug("Converting delta [{}] to [{}] with {}", backupOrigin, backupDestination, parentBackup == null ? "no parent."
                : String.format("parent [%s].", parentBackup));

        createDirsIfNeeded(pathDeltaOnSecondary, volumeUuid);

        QemuImg qemuImg = new QemuImg(waitInMillis);
        qemuImg.convert(backupOrigin, backupDestination, parentBackup, null, null, new QemuImageOptions(backupOrigin.getFormat(), backupOrigin.getFileName(), null), null,
                true, false, false, false, null, null);
    }

    /**
     * Ends the backup chain for a volume by merging the current top delta back onto the given
     * base file; the next backup for this volume will be a full backup.
     */
    private void endChainForVolume(LibvirtComputingResource resource, VolumeObjectTO volumeObjectTO, String vmName, boolean isVmRunning, String volumeUuid, String baseVolumePath)
            throws BackupException {

        BackupDeltaTO baseVolume = new BackupDeltaTO(volumeObjectTO.getDataStore(), Hypervisor.HypervisorType.KVM, baseVolumePath);
        DeltaMergeTreeTO deltaMergeTreeTO = new DeltaMergeTreeTO(volumeObjectTO, baseVolume, volumeObjectTO, new ArrayList<>());

        logger.debug("Ending backup chain for volume [{}], the next backup will be a full backup.", volumeObjectTO.getUuid());

        mergeBackupDelta(resource, deltaMergeTreeTO, volumeObjectTO, vmName, isVmRunning, volumeUuid, false);
    }

    /**
     * Tries to recover the previous state of the VM. Should only be called if an exception in the backup creation process happened.
     * For each volume, will:
     * - Merge back any backup deltas created;
     * - Remove the data backed up to the secondary storage;
     */
    private void recoverPreviousVmStateAndDeletePartialBackup(LibvirtComputingResource resource, List<VolumeObjectTO> volumeObjectTos,
            Map<String, Pair<Long, String>> mapVolumeUuidToDeltaSizeAndNewVolumePath, String vmName, boolean runningVm,
            Map<String, Pair<String, Long>> mapVolumeUuidToDeltaPathOnSecondaryAndSize, KVMStoragePoolManager storagePoolManager, String imageStoreUrl) {
        logger.error("There has been an exception during the backup creation process. We will try to revert the VM [{}] to its previous state.", vmName);

        for (VolumeObjectTO volumeObjectTO : volumeObjectTos) {
            String volumeUuid = volumeObjectTO.getUuid();

            // The old top file (pre-snapshot) becomes the merge target.
            BackupDeltaTO oldDelta = new BackupDeltaTO(volumeObjectTO.getDataStore(), Hypervisor.HypervisorType.KVM, volumeObjectTO.getPath());
            volumeObjectTO.setPath(mapVolumeUuidToDeltaSizeAndNewVolumePath.get(volumeUuid).second());
            DeltaMergeTreeTO deltaMergeTreeTO = new DeltaMergeTreeTO(volumeObjectTO, oldDelta, volumeObjectTO, new ArrayList<>());

            mergeBackupDelta(resource, deltaMergeTreeTO, volumeObjectTO, vmName, runningVm, volumeUuid, false);

            // Volumes whose copy never started have nothing on secondary storage to clean up.
            Pair<String, Long> deltaPathOnSecondaryAndSize = mapVolumeUuidToDeltaPathOnSecondaryAndSize.get(volumeUuid);
            if (deltaPathOnSecondaryAndSize == null) {
                continue;
            }

            cleanupDeltaOnSecondary(storagePoolManager, imageStoreUrl, deltaPathOnSecondaryAndSize.first());
        }
    }

    // Best-effort removal of a partially uploaded delta on secondary storage; always unmounts the pool.
    private void cleanupDeltaOnSecondary(KVMStoragePoolManager storagePoolManager, String imageStoreUrl, String deltaPath) {
        KVMStoragePool imagePool = null;

        try {
            imagePool = storagePoolManager.getStoragePoolByURI(imageStoreUrl);
            String fullDeltaPath = imagePool.getLocalPathFor(deltaPath);

            logger.debug("Cleaning up delta at [{}] as part of the post backup error normalization effort.", fullDeltaPath);

            Files.deleteIfExists(Path.of(fullDeltaPath));
        } catch (IOException e) {
            logger.error("Exception while trying to cleanup delta at [{}].", deltaPath, e);
        } finally {
            if (imagePool != null) {
                storagePoolManager.deleteStoragePool(imagePool.getType(), imagePool.getUuid());
            }
        }
    }

    /**
     * Merges the last backup delta back into the volume, dispatching to the running-VM or
     * stopped-VM merge path on the computing resource.
     */
    private void mergeBackupDelta(LibvirtComputingResource resource, DeltaMergeTreeTO deltaMergeTreeTO, VolumeObjectTO volumeObjectTO, String vmName, boolean isVmRunning,
            String volumeUuid, boolean countNewestDeltaAsGrandchild) throws BackupException {
        try {
            if (isVmRunning) {
                resource.mergeDeltaForRunningVm(deltaMergeTreeTO, vmName, volumeObjectTO);
            } else {
                if (countNewestDeltaAsGrandchild) {
                    deltaMergeTreeTO.addGrandChild(volumeObjectTO);
                }
                resource.mergeDeltaForStoppedVm(deltaMergeTreeTO);
            }
        } catch (LibvirtException | QemuImgException | IOException e) {
            logger.error("Exception while merging the last backup delta using delta merge tree [{}] for VM [{}] and volume [{}].", deltaMergeTreeTO, vmName, volumeUuid, e);
            // "false" here: the merge failure leaves the VM un-normalized (unlike the copy-phase failures).
            throw new BackupException(String.format("Exception during backup wrap-up phase for VM [%s].", vmName), e, false);
        }
    }

    // Builds the relative secondary-storage path "backups/<accountId>/<volumeId>/<backupPath>".
    private String getRelativePathOnSecondaryForBackup(long accountId, long volumeId, String backupPath) {
        return String.format("%s%s%s%s%s%s%s", "backups", File.separator, accountId, File.separator, volumeId, File.separator, backupPath);
    }

    // Ensures the parent directories of the destination delta exist before qemu-img writes to it.
    private void createDirsIfNeeded(String deltaFullPath, String volumeUuid) {
        String dirs = deltaFullPath.substring(0, deltaFullPath.lastIndexOf(File.separator));
        try {
            Files.createDirectories(Path.of(dirs));
        } catch (IOException e) {
            throw new BackupException(String.format("Error while creating directories for backup of volume [%s].", volumeUuid), e, true);
        }
    }

}
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java @@ -269,10 +269,10 @@ public boolean disconnectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec) { } public KVMStoragePool getStoragePool(StoragePoolType type, String uuid) { - return this.getStoragePool(type, uuid, false); + return this.getStoragePool(type, uuid, false, true); } - public KVMStoragePool getStoragePool(StoragePoolType type, String uuid, boolean refreshInfo) { + public synchronized KVMStoragePool getStoragePool(StoragePoolType type, String uuid, boolean refreshInfo, boolean addDetails) { StorageAdaptor adaptor = getStorageAdaptor(type); KVMStoragePool pool = null; @@ -287,7 +287,7 @@ public KVMStoragePool getStoragePool(StoragePoolType type, String uuid, boolean } } - if (pool instanceof LibvirtStoragePool) { + if (pool instanceof LibvirtStoragePool && addDetails) { addPoolDetails(uuid, (LibvirtStoragePool) pool); } @@ -408,7 +408,7 @@ public boolean disconnectPhysicalDisk(StoragePoolType type, String poolUuid, Str return adaptor.disconnectPhysicalDisk(volPath, pool); } - public boolean deleteStoragePool(StoragePoolType type, String uuid) { + public synchronized boolean deleteStoragePool(StoragePoolType type, String uuid) { StorageAdaptor adaptor = getStorageAdaptor(type); if (type == StoragePoolType.NetworkFilesystem) { _haMonitor.removeStoragePool(uuid); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 030d9747d6cd..06c9a15d3699 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -81,6 +81,7 @@ import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; import org.apache.cloudstack.storage.command.SyncVolumePathCommand; 
import org.apache.cloudstack.storage.formatinspector.Qcow2Inspector; +import org.apache.cloudstack.storage.to.BackupDeltaTO; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; @@ -223,6 +224,7 @@ public class KVMStorageProcessor implements StorageProcessor { " \n" + ""; + public static final List poolTypesToDeleteChainInfo = Arrays.asList(StoragePoolType.Filesystem, StoragePoolType.NetworkFilesystem, StoragePoolType.SharedMountPoint); public KVMStorageProcessor(final KVMStoragePoolManager storagePoolMgr, final LibvirtComputingResource resource) { this.storagePoolMgr = storagePoolMgr; @@ -2245,7 +2247,7 @@ private SnapshotObjectTO takeFullVolumeSnapshotOfRunningVm(CreateObjectCommand c String convertResult = convertBaseFileToSnapshotFileInStorageDir(ObjectUtils.defaultIfNull(secondaryPool, primaryPool), disk, snapshotPath, directoryPath, volume, cmd.getWait()); - resource.mergeSnapshotIntoBaseFile(vm, diskLabel, diskPath, null, true, snapshotName, volume, conn); + resource.mergeDeltaIntoBaseFile(vm, diskLabel, diskPath, null, true, snapshotName, volume, conn); validateConvertResult(convertResult, snapshotPath); } catch (LibvirtException e) { @@ -2654,6 +2656,10 @@ public Answer deleteVolume(final DeleteCommand cmd) { return new Answer(null); } pool.deletePhysicalDisk(vol.getPath(), vol.getFormat()); + if (vol.getChainInfo() != null && poolTypesToDeleteChainInfo.contains(pool.getType()) && vol.getFormat() == ImageFormat.QCOW2 && cmd.isDeleteChain()) { + logger.debug("Deleting leftover backup delta at [{}].", vol.getChainInfo()); + pool.deletePhysicalDisk(vol.getChainInfo(), vol.getFormat()); + } return new Answer(null); } catch (final CloudRuntimeException e) { logger.debug("Failed to delete volume: ", e); @@ -3204,6 +3210,20 @@ public Answer syncVolumePath(SyncVolumePathCommand cmd) { return new Answer(cmd, false, "Not currently applicable for 
KVMStorageProcessor"); } + @Override + public Answer deleteBackup(DeleteCommand cmd) { + BackupDeltaTO delta = (BackupDeltaTO)cmd.getData(); + logger.debug("Deleting backup delta [{}].", delta); + PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)delta.getDataStore(); + KVMStoragePool pool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid()); + try { + pool.deletePhysicalDisk(delta.getPath(), delta.getFormat()); + } catch (CloudRuntimeException e) { + return new Answer(cmd, e); + } + return new Answer(cmd); + } + /** * Determine if migration is using host-local source pool. If so, return this host's storage as the template source, * rather than remote host's diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java index a03daeb197bf..80a34e92caa5 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java @@ -1619,8 +1619,9 @@ to support snapshots(backuped) as qcow2 files. 
*/ destFile = new QemuImgFile(destPath, destFormat); try { boolean isQCOW2 = PhysicalDiskFormat.QCOW2.equals(sourceFormat); - qemu.convert(srcFile, destFile, null, null, new QemuImageOptions(srcFile.getFormat(), srcFile.getFileName(), null), - null, false, isQCOW2); + qemu.convert(srcFile, destFile, null, null, null, new QemuImageOptions(srcFile.getFormat(), srcFile.getFileName(), null), + null, false, isQCOW2, false, + false, null, null); Map destInfo = qemu.info(destFile); Long virtualSize = Long.parseLong(destInfo.get(QemuImg.VIRTUAL_SIZE)); newDisk.setVirtualSize(virtualSize); diff --git a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java index 1fec561dc890..08608cb770a9 100644 --- a/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java +++ b/plugins/hypervisors/kvm/src/main/java/org/apache/cloudstack/utils/qemu/QemuImg.java @@ -55,6 +55,7 @@ public class QemuImg { public static final String PREALLOCATION = "preallocation"; public static final long QEMU_2_10 = 2010000; public static final long QEMU_5_10 = 5010000; + public static final long QEMU_5_20 = 5020000; public static final int MIN_BITMAP_VERSION = 3; @@ -392,7 +393,7 @@ public void convert(final QemuImgFile srcFile, final QemuImgFile destFile, */ public void convert(final QemuImgFile srcFile, final QemuImgFile destFile, final Map options, final List qemuObjects, final QemuImageOptions srcImageOpts, final String snapshotName, final boolean forceSourceFormat) throws QemuImgException { - convert(srcFile, destFile, options, qemuObjects, srcImageOpts, snapshotName, forceSourceFormat, false); + convert(srcFile, destFile, null, options, qemuObjects, srcImageOpts, snapshotName, forceSourceFormat, false, false, false, null, null); } protected Map getResizeOptionsFromConvertOptions(final Map options) { @@ -408,31 +409,41 @@ protected Map 
getResizeOptionsFromConvertOptions(final Map * This method is a facade for 'qemu-img convert' and converts a disk image or snapshot into a disk image with the specified filename and format. * * @param srcFile - * The source file. + * The source file. * @param destFile - * The destination file. + * The destination file. + * @param backingFile + * The destination's backing file. * @param options - * Options for the conversion. Takes a Map with key value - * pairs which are passed on to qemu-img without validation. + * Options for the conversion. Takes a Map with key value + * pairs which are passed on to qemu-img without validation. * @param qemuObjects - * Pass qemu Objects to create - see objects in the qemu main page. + * Pass qemu Objects to create - see objects in the qemu main page. * @param srcImageOpts - * pass qemu --image-opts to convert. + * pass qemu --image-opts to convert. * @param snapshotName - * If it is provided, conversion uses it as parameter. + * If it is provided, conversion uses it as parameter. * @param forceSourceFormat - * If true, specifies the source format in the conversion command. + * If true, specifies the source format in the conversion command. * @param keepBitmaps - * If true, copies the bitmaps to the destination image. + * If true, copies the bitmaps to the destination image. + * @param outOfOrderWrites + * If true, inform -W to convert + * @param compress + * If true, inform -c to convert + * @param coroutines + * If not null, inform -m and number of coroutines. By default, qemu uses 8 coroutines. + * @param rateLimit + * If not null, inform -r and rate limit in MB/s. By default, qemu does not limit the convert rate. 
* @return void */ - public void convert(final QemuImgFile srcFile, final QemuImgFile destFile, - final Map options, final List qemuObjects, final QemuImageOptions srcImageOpts, final String snapshotName, final boolean forceSourceFormat, - boolean keepBitmaps) throws QemuImgException { + public void convert(final QemuImgFile srcFile, final QemuImgFile destFile, QemuImgFile backingFile, final Map options, final List qemuObjects, + final QemuImageOptions srcImageOpts, final String snapshotName, final boolean forceSourceFormat, boolean keepBitmaps, boolean outOfOrderWrites, boolean compress, + Integer coroutines, Integer rateLimit) throws QemuImgException { Script script = new Script(_qemuImgPath, timeout); if (StringUtils.isNotBlank(snapshotName)) { String qemuPath = Script.runSimpleBashScript(getQemuImgPathScript); @@ -455,9 +466,28 @@ public void convert(final QemuImgFile srcFile, final QemuImgFile destFile, script.add("-O"); script.add(destFile.getFormat().toString()); + addBackingFileToConvertCommand(script, backingFile); addScriptOptionsFromMap(options, script); addSnapshotToConvertCommand(srcFile.getFormat().toString(), snapshotName, forceSourceFormat, script, version); + if (outOfOrderWrites) { + script.add("-W"); + } + + if (rateLimit != null) { + script.add("-r"); + script.add(rateLimit + "M"); + } + + if (coroutines != null) { + script.add("-m"); + script.add(String.valueOf(coroutines)); + } + + if (compress) { + script.add("-c"); + } + if (noCache) { script.add("-t"); script.add("none"); @@ -500,6 +530,23 @@ public void convert(final QemuImgFile srcFile, final QemuImgFile destFile, } } + + protected void addBackingFileToConvertCommand(Script script, QemuImgFile backingFile) { + if (backingFile == null) { + return; + } + + script.add("-o"); + + String opts; + if (backingFile.getFormat() == null) { + opts = String.format("backing_file=%s", backingFile.getFileName()); + } else { + opts = String.format("backing_file=%s,backing_fmt=%s", 
backingFile.getFileName(), backingFile.getFormat().toString()); + } + script.add(opts); + } + /** * Qemu version 2.0.0 added (via commit ef80654d0dc1edf2dd2a51feff8cc3e1102a6583) the * flag "-l" to inform the snapshot name or ID @@ -873,9 +920,6 @@ public void commit(QemuImgFile file, QemuImgFile base, boolean skipEmptyingFiles final Script s = new Script(_qemuImgPath, timeout); s.add("commit"); - if (skipEmptyingFiles) { - s.add("-d"); - } if (file.getFormat() != null) { s.add("-f"); @@ -885,6 +929,8 @@ public void commit(QemuImgFile file, QemuImgFile base, boolean skipEmptyingFiles if (base != null) { s.add("-b"); s.add(base.getFileName()); + } else if (skipEmptyingFiles) { + s.add("-d"); } s.add(file.getFileName()); @@ -1006,4 +1052,9 @@ private void removeBitmap(QemuImgFile srcFile, String bitmapName) throws QemuImg throw new QemuImgException(String.format("Exception while removing bitmap [%s] from file [%s]. Result is [%s].", srcFile.getFileName(), bitmapName, result)); } } + + public long getVersion() { + return this.version; + } + } diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java index cde87fd93842..9df2c47a3408 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java @@ -52,7 +52,6 @@ import java.util.Random; import java.util.UUID; import java.util.Vector; -import java.util.concurrent.Semaphore; import javax.naming.ConfigurationException; import javax.xml.parsers.DocumentBuilderFactory; @@ -2406,7 +2405,7 @@ public void testGetStorageStatsCommand() { final KVMStoragePool secondaryPool = Mockito.mock(KVMStoragePool.class); when(libvirtComputingResourceMock.getStoragePoolMgr()).thenReturn(storagePoolMgr); - 
when(storagePoolMgr.getStoragePool(command.getPooltype(), command.getStorageId(), true)).thenReturn(secondaryPool); + when(storagePoolMgr.getStoragePool(command.getPooltype(), command.getStorageId(), true, true)).thenReturn(secondaryPool); final LibvirtRequestWrapper wrapper = LibvirtRequestWrapper.getInstance(); assertNotNull(wrapper); @@ -2415,7 +2414,7 @@ public void testGetStorageStatsCommand() { assertTrue(answer.getResult()); verify(libvirtComputingResourceMock, times(1)).getStoragePoolMgr(); - verify(storagePoolMgr, times(1)).getStoragePool(command.getPooltype(), command.getStorageId(), true); + verify(storagePoolMgr, times(1)).getStoragePool(command.getPooltype(), command.getStorageId(), true, true); } @SuppressWarnings("unchecked") @@ -6669,10 +6668,12 @@ public void mergeSnapshotIntoBaseFileTestActiveAndDeleteFlags() throws Exception libvirtComputingResourceSpy.qcow2DeltaMergeTimeout = 10; try (MockedStatic libvirtUtilitiesHelperMockedStatic = Mockito.mockStatic(LibvirtUtilitiesHelper.class); - MockedStatic threadContextMockedStatic = Mockito.mockStatic(ThreadContext.class)) { + MockedStatic threadContextMockedStatic = Mockito.mockStatic(ThreadContext.class); + MockedStatic agentPropertiesFileHandlerMockedStatic = Mockito.mockStatic(AgentPropertiesFileHandler.class)) { + + agentPropertiesFileHandlerMockedStatic.when(() -> AgentPropertiesFileHandler.getPropertyValue(Mockito.any())).thenAnswer(invocation -> true); libvirtUtilitiesHelperMockedStatic.when(() -> LibvirtUtilitiesHelper.isLibvirtSupportingFlagDeleteOnCommandVirshBlockcommit(Mockito.any())).thenAnswer(invocation -> true); - Mockito.doReturn(new Semaphore(1)).when(libvirtComputingResourceSpy).getSemaphoreToWaitForMerge(); threadContextMockedStatic.when(() -> ThreadContext.get(Mockito.anyString())).thenReturn("logid"); @@ -6684,7 +6685,7 @@ public void mergeSnapshotIntoBaseFileTestActiveAndDeleteFlags() throws Exception String baseFilePath = "/file"; String snapshotName = "snap"; - 
libvirtComputingResourceSpy.mergeSnapshotIntoBaseFileWithEventsAndConfigurableTimeout(domainMock, diskLabel, baseFilePath, null, true, snapshotName, volumeObjectToMock, connMock); + libvirtComputingResourceSpy.mergeDeltaIntoBaseFile(domainMock, diskLabel, baseFilePath, null, true, snapshotName, volumeObjectToMock, connMock); Mockito.verify(domainMock, Mockito.times(1)).blockCommit(diskLabel, baseFilePath, null, 0, Domain.BlockCommitFlags.ACTIVE | Domain.BlockCommitFlags.DELETE); Mockito.verify(libvirtComputingResourceSpy, Mockito.times(1)).manuallyDeleteUnusedSnapshotFile(true, "/" + snapshotName); @@ -6694,10 +6695,12 @@ public void mergeSnapshotIntoBaseFileTestActiveAndDeleteFlags() throws Exception @Test public void mergeSnapshotIntoBaseFileTestActiveFlag() throws Exception { try (MockedStatic libvirtUtilitiesHelperMockedStatic = Mockito.mockStatic(LibvirtUtilitiesHelper.class); - MockedStatic threadContextMockedStatic = Mockito.mockStatic(ThreadContext.class)) { + MockedStatic threadContextMockedStatic = Mockito.mockStatic(ThreadContext.class); + MockedStatic agentPropertiesFileHandlerMockedStatic = Mockito.mockStatic(AgentPropertiesFileHandler.class)) { + + agentPropertiesFileHandlerMockedStatic.when(() -> AgentPropertiesFileHandler.getPropertyValue(Mockito.any())).thenAnswer(invocation -> true); libvirtUtilitiesHelperMockedStatic.when(() -> LibvirtUtilitiesHelper.isLibvirtSupportingFlagDeleteOnCommandVirshBlockcommit(Mockito.any())).thenAnswer(invocation -> false); - Mockito.doReturn(new Semaphore(1)).when(libvirtComputingResourceSpy).getSemaphoreToWaitForMerge(); threadContextMockedStatic.when(() -> ThreadContext.get(Mockito.anyString())).thenReturn("logid"); @@ -6709,7 +6712,7 @@ public void mergeSnapshotIntoBaseFileTestActiveFlag() throws Exception { String baseFilePath = "/file"; String snapshotName = "snap"; - libvirtComputingResourceSpy.mergeSnapshotIntoBaseFileWithEventsAndConfigurableTimeout(domainMock, diskLabel, baseFilePath, null, true, 
snapshotName, volumeObjectToMock, connMock); + libvirtComputingResourceSpy.mergeDeltaIntoBaseFile(domainMock, diskLabel, baseFilePath, null, true, snapshotName, volumeObjectToMock, connMock); Mockito.verify(domainMock, Mockito.times(1)).blockCommit(diskLabel, baseFilePath, null, 0, Domain.BlockCommitFlags.ACTIVE); Mockito.verify(libvirtComputingResourceSpy, Mockito.times(1)).manuallyDeleteUnusedSnapshotFile(false, "/" + snapshotName); @@ -6719,10 +6722,12 @@ public void mergeSnapshotIntoBaseFileTestActiveFlag() throws Exception { @Test public void mergeSnapshotIntoBaseFileTestDeleteFlag() throws Exception { try (MockedStatic libvirtUtilitiesHelperMockedStatic = Mockito.mockStatic(LibvirtUtilitiesHelper.class); - MockedStatic threadContextMockedStatic = Mockito.mockStatic(ThreadContext.class)) { + MockedStatic threadContextMockedStatic = Mockito.mockStatic(ThreadContext.class); + MockedStatic agentPropertiesFileHandlerMockedStatic = Mockito.mockStatic(AgentPropertiesFileHandler.class)) { + + agentPropertiesFileHandlerMockedStatic.when(() -> AgentPropertiesFileHandler.getPropertyValue(Mockito.any())).thenAnswer(invocation -> true); libvirtComputingResourceSpy.qcow2DeltaMergeTimeout = 10; libvirtUtilitiesHelperMockedStatic.when(() -> LibvirtUtilitiesHelper.isLibvirtSupportingFlagDeleteOnCommandVirshBlockcommit(Mockito.any())).thenReturn(true); - Mockito.doReturn(new Semaphore(1)).when(libvirtComputingResourceSpy).getSemaphoreToWaitForMerge(); threadContextMockedStatic.when(() -> ThreadContext.get(Mockito.anyString())).thenReturn("logid"); Mockito.doNothing().when(domainMock).addBlockJobListener(Mockito.any()); Mockito.doReturn(null).when(domainMock).getBlockJobInfo(Mockito.anyString(), Mockito.anyInt()); @@ -6733,7 +6738,7 @@ public void mergeSnapshotIntoBaseFileTestDeleteFlag() throws Exception { String baseFilePath = "/file"; String snapshotName = "snap"; - libvirtComputingResourceSpy.mergeSnapshotIntoBaseFileWithEventsAndConfigurableTimeout(domainMock, diskLabel, 
baseFilePath, null, false, snapshotName, volumeObjectToMock, connMock); + libvirtComputingResourceSpy.mergeDeltaIntoBaseFile(domainMock, diskLabel, baseFilePath, null, false, snapshotName, volumeObjectToMock, connMock); Mockito.verify(domainMock, Mockito.times(1)).blockCommit(diskLabel, baseFilePath, null, 0, Domain.BlockCommitFlags.DELETE); Mockito.verify(libvirtComputingResourceSpy, Mockito.times(1)).manuallyDeleteUnusedSnapshotFile(true, "/" + snapshotName); @@ -6743,10 +6748,12 @@ public void mergeSnapshotIntoBaseFileTestDeleteFlag() throws Exception { @Test public void mergeSnapshotIntoBaseFileTestNoFlags() throws Exception { try (MockedStatic libvirtUtilitiesHelperMockedStatic = Mockito.mockStatic(LibvirtUtilitiesHelper.class); - MockedStatic threadContextMockedStatic = Mockito.mockStatic(ThreadContext.class)) { + MockedStatic threadContextMockedStatic = Mockito.mockStatic(ThreadContext.class); + MockedStatic agentPropertiesFileHandlerMockedStatic = Mockito.mockStatic(AgentPropertiesFileHandler.class)) { + + agentPropertiesFileHandlerMockedStatic.when(() -> AgentPropertiesFileHandler.getPropertyValue(Mockito.any())).thenAnswer(invocation -> true); libvirtComputingResourceSpy.qcow2DeltaMergeTimeout = 10; libvirtUtilitiesHelperMockedStatic.when(() -> LibvirtUtilitiesHelper.isLibvirtSupportingFlagDeleteOnCommandVirshBlockcommit(Mockito.any())).thenReturn(false); - Mockito.doReturn(new Semaphore(1)).when(libvirtComputingResourceSpy).getSemaphoreToWaitForMerge(); threadContextMockedStatic.when(() -> ThreadContext.get(Mockito.anyString())).thenReturn("logid"); Mockito.doNothing().when(domainMock).addBlockJobListener(Mockito.any()); Mockito.doReturn(null).when(domainMock).getBlockJobInfo(Mockito.anyString(), Mockito.anyInt()); @@ -6757,7 +6764,7 @@ public void mergeSnapshotIntoBaseFileTestNoFlags() throws Exception { String baseFilePath = "/file"; String snapshotName = "snap"; - 
libvirtComputingResourceSpy.mergeSnapshotIntoBaseFileWithEventsAndConfigurableTimeout(domainMock, diskLabel, baseFilePath, null, false, snapshotName, volumeObjectToMock, connMock); + libvirtComputingResourceSpy.mergeDeltaIntoBaseFile(domainMock, diskLabel, baseFilePath, null, false, snapshotName, volumeObjectToMock, connMock); Mockito.verify(domainMock, Mockito.times(1)).blockCommit(diskLabel, baseFilePath, null, 0, 0); Mockito.verify(libvirtComputingResourceSpy, Mockito.times(1)).manuallyDeleteUnusedSnapshotFile(false, "/" + snapshotName); @@ -6770,20 +6777,13 @@ public void mergeSnapshotIntoBaseFileTestMergeFailsThrowException() throws Excep MockedStatic threadContextMockedStatic = Mockito.mockStatic(ThreadContext.class)) { libvirtComputingResourceSpy.qcow2DeltaMergeTimeout = 10; libvirtUtilitiesHelperMockedStatic.when(() -> LibvirtUtilitiesHelper.isLibvirtSupportingFlagDeleteOnCommandVirshBlockcommit(Mockito.any())).thenReturn(false); - Mockito.doReturn(new Semaphore(1)).when(libvirtComputingResourceSpy).getSemaphoreToWaitForMerge(); threadContextMockedStatic.when(() -> ThreadContext.get(Mockito.anyString())).thenReturn("logid"); - Mockito.doNothing().when(domainMock).addBlockJobListener(Mockito.any()); - Mockito.doReturn(null).when(domainMock).getBlockJobInfo(Mockito.anyString(), Mockito.anyInt()); - Mockito.doNothing().when(domainMock).removeBlockJobListener(Mockito.any()); - - Mockito.doReturn(blockCommitListenerMock).when(libvirtComputingResourceSpy).getBlockCommitListener(Mockito.any(), Mockito.any()); - Mockito.doReturn("Failed").when(blockCommitListenerMock).getResult(); String diskLabel = "vda"; String baseFilePath = "/file"; String snapshotName = "snap"; - libvirtComputingResourceSpy.mergeSnapshotIntoBaseFileWithEventsAndConfigurableTimeout(domainMock, diskLabel, baseFilePath, null, false, snapshotName, volumeObjectToMock, connMock); + libvirtComputingResourceSpy.mergeDeltaIntoBaseFile(domainMock, diskLabel, baseFilePath, null, false, snapshotName, 
volumeObjectToMock, connMock); } } diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumesOnStorageCommandWrapperTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumesOnStorageCommandWrapperTest.java index 4e039f318928..f4c85aa611f4 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumesOnStorageCommandWrapperTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumesOnStorageCommandWrapperTest.java @@ -88,7 +88,7 @@ public void setUp() { Mockito.when(pool.getUuid()).thenReturn(poolUuid); Mockito.when(pool.getType()).thenReturn(poolType); Mockito.when(libvirtComputingResource.getStoragePoolMgr()).thenReturn(storagePoolMgr); - Mockito.when(storagePoolMgr.getStoragePool(poolType, poolUuid, true)).thenReturn(storagePool); + Mockito.when(storagePoolMgr.getStoragePool(poolType, poolUuid, true, true)).thenReturn(storagePool); qemuImg = Mockito.mockConstruction(QemuImg.class, (mock, context) -> { Mockito.when(mock.info(Mockito.any(QemuImgFile.class), Mockito.eq(true))).thenReturn(qemuImgInfo); diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapperTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapperTest.java index cfcb2a2f972d..368f963a9a8b 100644 --- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapperTest.java +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapperTest.java @@ -148,8 +148,8 @@ public void validateRevertVolumeToSnapshotReplaceSuccessfully() throws LibvirtEx Mockito.doReturn(volumeObjectToMock).when(snapshotObjectToSecondaryMock).getVolume(); 
Mockito.doReturn(pairStringSnapshotObjectToMock).when(libvirtRevertSnapshotCommandWrapperSpy).getSnapshot(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); Mockito.doNothing().when(libvirtRevertSnapshotCommandWrapperSpy).replaceVolumeWithSnapshot(Mockito.any(), Mockito.any()); - libvirtRevertSnapshotCommandWrapperSpy.revertVolumeToSnapshot(kvmStoragePoolSecondaryMock, snapshotObjectToPrimaryMock, snapshotObjectToSecondaryMock, kvmStoragePoolPrimaryMock, resourceMock - ); + libvirtRevertSnapshotCommandWrapperSpy.revertVolumeToSnapshot(kvmStoragePoolSecondaryMock, snapshotObjectToPrimaryMock, snapshotObjectToSecondaryMock, kvmStoragePoolPrimaryMock, resourceMock, + false); } @Test (expected = CloudRuntimeException.class) @@ -157,8 +157,8 @@ public void validateRevertVolumeToSnapshotReplaceVolumeThrowsQemuImgException() Mockito.doReturn(volumeObjectToMock).when(snapshotObjectToSecondaryMock).getVolume(); Mockito.doReturn(pairStringSnapshotObjectToMock).when(libvirtRevertSnapshotCommandWrapperSpy).getSnapshot(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); Mockito.doThrow(QemuImgException.class).when(libvirtRevertSnapshotCommandWrapperSpy).replaceVolumeWithSnapshot(Mockito.any(), Mockito.any()); - libvirtRevertSnapshotCommandWrapperSpy.revertVolumeToSnapshot(kvmStoragePoolSecondaryMock, snapshotObjectToPrimaryMock, snapshotObjectToSecondaryMock, kvmStoragePoolPrimaryMock, resourceMock - ); + libvirtRevertSnapshotCommandWrapperSpy.revertVolumeToSnapshot(kvmStoragePoolSecondaryMock, snapshotObjectToPrimaryMock, snapshotObjectToSecondaryMock, kvmStoragePoolPrimaryMock, resourceMock, + false); } @Test (expected = CloudRuntimeException.class) @@ -167,6 +167,6 @@ public void validateRevertVolumeToSnapshotReplaceVolumeThrowsLibvirtException() Mockito.doReturn(pairStringSnapshotObjectToMock).when(libvirtRevertSnapshotCommandWrapperSpy).getSnapshot(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); 
Mockito.doThrow(LibvirtException.class).when(libvirtRevertSnapshotCommandWrapperSpy).replaceVolumeWithSnapshot(Mockito.any(), Mockito.any()); libvirtRevertSnapshotCommandWrapperSpy.revertVolumeToSnapshot(kvmStoragePoolSecondaryMock, snapshotObjectToPrimaryMock, snapshotObjectToSecondaryMock, kvmStoragePoolPrimaryMock, resourceMock - ); + , false); } } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java index 1a8d9f7b59e2..81128938c60c 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java @@ -41,6 +41,7 @@ import com.vmware.vim25.VirtualMachinePowerState; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.backup.Backup; +import org.apache.cloudstack.backup.BackupProvider; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; @@ -1162,7 +1163,8 @@ private ManagedObjectReference getDestStoreMor(VirtualMachineMO vmMo) throws Exc } @Override - public VirtualMachine importVirtualMachineFromBackup(long zoneId, long domainId, long accountId, long userId, String vmInternalName, Backup backup) throws Exception { + public VirtualMachine importVirtualMachineFromBackup(long zoneId, long domainId, long accountId, long userId, String vmInternalName, Backup backup, + BackupProvider backupProvider) throws Exception { logger.debug(String.format("Trying to import VM [vmInternalName: %s] from Backup [%s].", vmInternalName, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(backup, "id", "uuid", "vmId", "externalId", "backupType"))); DatacenterMO dcMo = getDatacenterMO(zoneId); @@ -1191,7 +1193,8 @@ public 
VirtualMachine importVirtualMachineFromBackup(long zoneId, long domainId, } @Override - public boolean attachRestoredVolumeToVirtualMachine(long zoneId, String location, Backup.VolumeInfo volumeInfo, VirtualMachine vm, long poolId, Backup backup) + public boolean attachRestoredVolumeToVirtualMachine(long zoneId, String location, Backup.VolumeInfo volumeInfo, VirtualMachine vm, long poolId, Backup backup, + BackupProvider backupProvider) throws Exception { DatacenterMO dcMo = getDatacenterMO(zoneId); VirtualMachineMO vmRestored = findVM(dcMo, location); diff --git a/plugins/pom.xml b/plugins/pom.xml index e7d13871285e..35387ea850a1 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -63,6 +63,7 @@ backup/dummy backup/networker backup/nas + backup/knib ca/root-ca diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java index 5faa377ce3d3..7aed09325ecb 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java @@ -28,6 +28,7 @@ import com.cloud.agent.api.to.DiskTO; import com.cloud.storage.VolumeVO; +import org.apache.cloudstack.backup.NativeBackupService; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; @@ -132,6 +133,9 @@ public Map getCapabilities() { @Inject private VolumeOrchestrationService volumeOrchestrationService; + @Inject + private NativeBackupService nativeBackupService; + @Override public DataTO getTO(DataObject data) { return 
null; @@ -239,12 +243,14 @@ private boolean commandCanBypassHostMaintenance(DataObject data) { @Override public void deleteAsync(DataStore dataStore, DataObject data, AsyncCompletionCallback callback) { - DeleteCommand cmd = new DeleteCommand(data.getTO()); + DataTO dataTO = data.getTO(); + DeleteCommand cmd = new DeleteCommand(dataTO); cmd.setBypassHostMaintenance(commandCanBypassHostMaintenance(data)); CommandResult result = new CommandResult(); try { EndPoint ep; if (data.getType() == DataObjectType.VOLUME) { + nativeBackupService.configureChainInfo(dataTO, cmd); ep = epSelector.select(data, StorageAction.DELETEVOLUME); } else if (data.getType() == DataObjectType.SNAPSHOT) { ep = epSelector.select(data, StorageAction.DELETESNAPSHOT); @@ -417,7 +423,11 @@ public void revertSnapshot(SnapshotInfo snapshot, SnapshotInfo snapshotOnPrimary if (snapshotOnPrimaryStore != null) { dataOnPrimaryStorage = (SnapshotObjectTO)snapshotOnPrimaryStore.getTO(); } - RevertSnapshotCommand cmd = new RevertSnapshotCommand((SnapshotObjectTO)snapshot.getTO(), dataOnPrimaryStorage); + + SnapshotObjectTO snapshotObjectTO = (SnapshotObjectTO)snapshot.getTO(); + + RevertSnapshotCommand cmd = new RevertSnapshotCommand(snapshotObjectTO, dataOnPrimaryStorage); + nativeBackupService.configureChainInfo(snapshotObjectTO.getVolume(), cmd); CommandResult result = new CommandResult(); try { diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java b/server/src/main/java/com/cloud/api/ApiResponseHelper.java index 655f5acb46e3..00e8f9cbe552 100644 --- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java +++ b/server/src/main/java/com/cloud/api/ApiResponseHelper.java @@ -120,6 +120,7 @@ import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.LoadBalancerResponse; import org.apache.cloudstack.api.response.ManagementServerResponse; +import org.apache.cloudstack.api.response.NativeBackupOfferingResponse; import 
org.apache.cloudstack.api.response.NetworkACLItemResponse; import org.apache.cloudstack.api.response.NetworkACLResponse; import org.apache.cloudstack.api.response.NetworkOfferingResponse; @@ -189,6 +190,7 @@ import org.apache.cloudstack.backup.BackupOffering; import org.apache.cloudstack.backup.BackupRepository; import org.apache.cloudstack.backup.BackupSchedule; +import org.apache.cloudstack.backup.NativeBackupOffering; import org.apache.cloudstack.backup.dao.BackupOfferingDao; import org.apache.cloudstack.backup.dao.BackupRepositoryDao; import org.apache.cloudstack.config.Configuration; @@ -5707,4 +5709,10 @@ public ConsoleSessionResponse createConsoleSessionResponse(ConsoleSession consol consoleSessionResponse.setObjectName("consolesession"); return consoleSessionResponse; } + + @Override + public NativeBackupOfferingResponse createNativeBackupOfferingResponse(NativeBackupOffering offering) { + return new NativeBackupOfferingResponse(offering.getUuid(), offering.getName(), offering.isCompress(), offering.isValidate(), offering.isAllowQuickRestore(), + offering.isAllowExtractFile(), offering.getBackupChainSize(), offering.getCreated(), offering.getRemoved()); + } } diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 0cec3a38075d..2918af70aafd 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -37,14 +37,14 @@ import javax.inject.Inject; -import com.cloud.dc.Pod; +import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.Pod; import com.cloud.dc.dao.HostPodDao; import com.cloud.org.Cluster; import com.cloud.server.ManagementService; import com.cloud.storage.dao.StoragePoolAndAccessGroupMapDao; import com.cloud.cluster.ManagementServerHostPeerJoinVO; - import org.apache.cloudstack.acl.ControlledEntity; import 
org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker; @@ -89,6 +89,8 @@ import org.apache.cloudstack.api.command.user.account.ListProjectAccountsCmd; import org.apache.cloudstack.api.command.user.address.ListQuarantinedIpsCmd; import org.apache.cloudstack.api.command.user.affinitygroup.ListAffinityGroupsCmd; +import org.apache.cloudstack.api.command.user.backup.ListBackupCompressionJobsCmd; +import org.apache.cloudstack.api.command.user.backup.nativeoffering.ListNativeBackupOfferingsCmd; import org.apache.cloudstack.api.command.user.bucket.ListBucketsCmd; import org.apache.cloudstack.api.command.user.event.ListEventsCmd; import org.apache.cloudstack.api.command.user.iso.ListIsosCmd; @@ -111,6 +113,7 @@ import org.apache.cloudstack.api.command.user.zone.ListZonesCmd; import org.apache.cloudstack.api.response.AccountResponse; import org.apache.cloudstack.api.response.AsyncJobResponse; +import org.apache.cloudstack.api.response.BackupCompressionJobResponse; import org.apache.cloudstack.api.response.BucketResponse; import org.apache.cloudstack.api.response.ClusterResponse; import org.apache.cloudstack.api.response.DetailOptionsResponse; @@ -125,6 +128,7 @@ import org.apache.cloudstack.api.response.IpQuarantineResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ManagementServerResponse; +import org.apache.cloudstack.api.response.NativeBackupOfferingResponse; import org.apache.cloudstack.api.response.ObjectStoreResponse; import org.apache.cloudstack.api.response.PeerManagementServerNodeResponse; import org.apache.cloudstack.api.response.PodResponse; @@ -148,8 +152,15 @@ import org.apache.cloudstack.api.response.VirtualMachineResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.backup.BackupCompressionJobVO; import org.apache.cloudstack.backup.BackupOfferingVO; +import 
org.apache.cloudstack.backup.BackupVO; +import org.apache.cloudstack.backup.NativeBackupOffering; +import org.apache.cloudstack.backup.NativeBackupOfferingVO; +import org.apache.cloudstack.backup.dao.BackupCompressionJobDao; +import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.backup.dao.BackupOfferingDao; +import org.apache.cloudstack.backup.dao.NativeBackupOfferingDao; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; @@ -646,6 +657,15 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q @Inject ExtensionHelper extensionHelper; + @Inject + private NativeBackupOfferingDao nativeBackupOfferingDao; + + @Inject + private BackupCompressionJobDao backupCompressionJobDao; + + @Inject + private BackupDao backupDao; + /* * (non-Javadoc) * @@ -6283,6 +6303,99 @@ private List searchForBucketsInternal(ListBucketsCmd cmd) { return bucketDao.searchByIds(bktIds); } + @Override + public ListResponse listNativeBackupOfferings(ListNativeBackupOfferingsCmd cmd) { + ListResponse response = new ListResponse<>(); + Pair, Integer> result = listNativeBackupOfferingsInternal(cmd); + List nativeBackupOfferingResponses = new ArrayList<>(); + + for (NativeBackupOffering offering : result.first()) { + NativeBackupOfferingResponse nativeBackupOfferingResponse = responseGenerator.createNativeBackupOfferingResponse(offering); + nativeBackupOfferingResponses.add(nativeBackupOfferingResponse); + } + + response.setResponses(nativeBackupOfferingResponses, result.second()); + return response; + } + + private Pair, Integer> listNativeBackupOfferingsInternal(ListNativeBackupOfferingsCmd cmd) { + SearchBuilder sb = nativeBackupOfferingDao.createSearchBuilder(); + + sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); + sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ); + 
sb.and("compress", sb.entity().isCompress(), SearchCriteria.Op.EQ); + sb.and("validate", sb.entity().isValidate(), SearchCriteria.Op.EQ); + sb.and("allowquickrestore", sb.entity().isAllowQuickRestore(), SearchCriteria.Op.EQ); + sb.and("allowextractfile", sb.entity().isAllowExtractFile(), SearchCriteria.Op.EQ); + + SearchCriteria sc = sb.create(); + + sc.setParametersIfNotNull("id", cmd.getId()); + sc.setParametersIfNotNull("name", cmd.getName()); + sc.setParametersIfNotNull("compress", cmd.isCompress()); + sc.setParametersIfNotNull("validate", cmd.isValidate()); + sc.setParametersIfNotNull("allowquickrestore", cmd.isAllowQuickRestore()); + sc.setParametersIfNotNull("allowextractfile", cmd.isAllowExtractFile()); + + Filter filter = new Filter(NativeBackupOfferingVO.class, "created", false, cmd.getStartIndex(), cmd.getPageSizeVal()); + + return nativeBackupOfferingDao.searchAndCount(sc, filter, cmd.isShowRemoved()); + } + + @Override + public ListResponse listBackupCompressionJobs(ListBackupCompressionJobsCmd cmd) { + ListResponse responses = new ListResponse<>(); + Pair, Integer> result = listBackupCompressionJobsInternal(cmd); + List compressionJobResponses = new ArrayList<>(); + + for (BackupCompressionJobVO jobVO : result.first()) { + BackupVO backup = backupDao.findByIdIncludingRemoved(jobVO.getBackupId()); + DataCenterVO zone = dataCenterDao.findByIdIncludingRemoved(jobVO.getZoneId()); + + BackupCompressionJobResponse response = new BackupCompressionJobResponse(jobVO.getId(), backup.getUuid(), zone.getUuid(), jobVO.getAttempts(), + jobVO.getType().toString(), jobVO.getStartTime(), jobVO.getScheduledStartTime(), jobVO.getRemoved()); + + if (jobVO.getHostId() != null) { + response.setHostId(hostDao.findByIdIncludingRemoved(jobVO.getHostId()).getUuid()); + } + compressionJobResponses.add(response); + } + + responses.setResponses(compressionJobResponses, result.second()); + return responses; + } + + private Pair, Integer> 
listBackupCompressionJobsInternal(ListBackupCompressionJobsCmd cmd) { + SearchBuilder sb = backupCompressionJobDao.createSearchBuilder(); + + sb.and("id", sb.entity().getId(), Op.EQ); + sb.and("backup_id", sb.entity().getBackupId(), Op.EQ); + sb.and("host_id", sb.entity().getHostId(), Op.EQ); + sb.and("zone_id", sb.entity().getZoneId(), Op.EQ); + sb.and("type", sb.entity().getType(), Op.EQ); + + boolean removed = !cmd.getExecuting() && !cmd.getScheduled(); + if (cmd.getExecuting() && !cmd.getScheduled()) { + sb.and("executing", sb.entity().getStartTime(), Op.NNULL); + } else if (cmd.getScheduled() && !cmd.getExecuting()) { + sb.and("scheduled", sb.entity().getStartTime(), Op.NULL); + } + + SearchCriteria sc = sb.create(); + + sc.setParametersIfNotNull("id", cmd.getId()); + sc.setParametersIfNotNull("backup_id", cmd.getBackupId()); + sc.setParametersIfNotNull("host_id", cmd.getHostId()); + sc.setParametersIfNotNull("zone_id", cmd.getZoneId()); + if (cmd.getType() != null) { + sc.setParameters("type", StringUtils.capitalize(cmd.getType().toLowerCase())+"Compression"); + } + + Filter filter = new Filter(BackupCompressionJobVO.class, "created", false, cmd.getStartIndex(), cmd.getPageSizeVal()); + + return backupCompressionJobDao.searchAndCount(sc, filter, removed); + } + @Override public String getConfigComponentName() { return QueryService.class.getSimpleName(); diff --git a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index db15a440ba36..db452d9f0d6c 100644 --- a/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/main/java/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -252,7 +252,7 @@ public class ConsoleProxyManagerImpl extends ManagerBase implements ConsoleProxy protected Gson jsonParser = new GsonBuilder().setVersion(1.3).create(); - protected Set availableVmStateOnAssignProxy = new 
HashSet<>(Arrays.asList(State.Starting, State.Running, State.Stopping, State.Migrating)); + protected Set availableVmStateOnAssignProxy = new HashSet<>(Arrays.asList(State.Starting, State.Running, State.Stopping, State.Migrating, State.BackingUp, State.BackupError)); @Inject private KeystoreDao _ksDao; diff --git a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java index 97c453003a86..86b924d6d1bb 100644 --- a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java +++ b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java @@ -54,6 +54,7 @@ import com.cloud.vm.dao.UserVmDao; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.backup.Backup; +import org.apache.cloudstack.backup.BackupProvider; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; @@ -448,13 +449,13 @@ public Map getClusterSettings(long vmId) { @Override public VirtualMachine importVirtualMachineFromBackup(long zoneId, long domainId, long accountId, long userId, - String vmInternalName, Backup backup) throws Exception { + String vmInternalName, Backup backup, BackupProvider backupProvider) throws Exception { return null; } @Override public boolean attachRestoredVolumeToVirtualMachine(long zoneId, String location, Backup.VolumeInfo volumeInfo, - VirtualMachine vm, long poolId, Backup backup) throws Exception { + VirtualMachine vm, long poolId, Backup backup, BackupProvider backupProvider) throws Exception { return false; } diff --git a/server/src/main/java/com/cloud/hypervisor/KVMGuru.java b/server/src/main/java/com/cloud/hypervisor/KVMGuru.java index 3303bc029333..4165bd8ee0ad 100644 --- a/server/src/main/java/com/cloud/hypervisor/KVMGuru.java +++ b/server/src/main/java/com/cloud/hypervisor/KVMGuru.java @@ -32,6 +32,7 @@ 
import com.cloud.storage.GuestOSHypervisorVO; import com.cloud.storage.GuestOSVO; import com.cloud.storage.Volume; +import com.cloud.storage.VolumeApiService; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.GuestOSHypervisorDao; @@ -44,6 +45,8 @@ import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.VMInstanceDao; import org.apache.cloudstack.backup.Backup; +import org.apache.cloudstack.backup.BackupManagerImpl; +import org.apache.cloudstack.backup.BackupProvider; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.StorageSubSystemCommand; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; @@ -72,6 +75,9 @@ public class KVMGuru extends HypervisorGuruBase implements HypervisorGuru { @Inject HypervisorCapabilitiesDao _hypervisorCapabilitiesDao; + @Inject + private VolumeApiService volumeApiService; + @Override public HypervisorType getHypervisorType() { @@ -349,7 +355,8 @@ public Map getClusterSettings(long vmId) { } @Override - public VirtualMachine importVirtualMachineFromBackup(long zoneId, long domainId, long accountId, long userId, String vmInternalName, Backup backup) { + public VirtualMachine importVirtualMachineFromBackup(long zoneId, long domainId, long accountId, long userId, String vmInternalName, Backup backup, + BackupProvider backupProvider) { logger.debug(String.format("Trying to import VM [vmInternalName: %s] from Backup [%s].", vmInternalName, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(backup, "id", "uuid", "vmId", "externalId", "backupType"))); @@ -357,6 +364,9 @@ public VirtualMachine importVirtualMachineFromBackup(long zoneId, long domainId, if (vm == null) { throw new CloudRuntimeException("Cannot find VM: " + vmInternalName); } + if (backupProvider.getName().equals(BackupManagerImpl.KNIB_BACKUP_PROVIDER)) { + return vm; + } try { if (vm.getRemoved() == null) { 
vm.setState(VirtualMachine.State.Stopped); @@ -384,11 +394,16 @@ public VirtualMachine importVirtualMachineFromBackup(long zoneId, long domainId, return vm; } - @Override public boolean attachRestoredVolumeToVirtualMachine(long zoneId, String location, Backup.VolumeInfo volumeInfo, VirtualMachine vm, long poolId, Backup backup) { + @Override + public boolean attachRestoredVolumeToVirtualMachine(long zoneId, String location, Backup.VolumeInfo volumeInfo, VirtualMachine vm, long poolId, Backup backup, + BackupProvider backupProvider) { VMInstanceVO targetVM = _instanceDao.findVMByInstanceNameIncludingRemoved(vm.getName()); List vmVolumes = _volumeDao.findByInstance(targetVM.getId()); VolumeVO restoredVolume = _volumeDao.findByUuid(location); + if (backupProvider.getName().equals(BackupManagerImpl.KNIB_BACKUP_PROVIDER)) { + return true; + } if (restoredVolume != null) { try { _volumeDao.attachVolume(restoredVolume.getId(), vm.getId(), getNextAvailableDeviceId(vmVolumes)); @@ -405,6 +420,6 @@ public VirtualMachine importVirtualMachineFromBackup(long zoneId, long domainId, throw new RuntimeException("Unable to attach volume " + restoredVolume.getName() + " to VM" + vm.getName() + " due to : " + e.getMessage()); } } - return false; + return false; } } diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java index 4e07611ff716..a7f3f7b7af34 100644 --- a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java @@ -2016,7 +2016,7 @@ public void checkAutoScaleVmGroupName(String groupName) { private UserVmVO startNewVM(long vmId) { try { CallContext.current().setEventDetails("Instance ID: " + vmId); - return userVmMgr.startVirtualMachine(vmId, null, new HashMap<>(), null).first(); + return userVmMgr.startVirtualMachine(vmId, null, new HashMap<>(), null, false).first(); } catch (final 
ResourceUnavailableException ex) { logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, ex.getMessage()); diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 1f453a862947..09e739a8b512 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -4616,7 +4616,8 @@ public ConfigKey[] getConfigKeys() { AllowVolumeReSizeBeyondAllocation, StoragePoolHostConnectWorkers, ObjectStorageCapacityThreshold, - COPY_TEMPLATES_FROM_OTHER_SECONDARY_STORAGES + COPY_TEMPLATES_FROM_OTHER_SECONDARY_STORAGES, + AgentMaxDataMigrationWaitTime }; } diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 17961dbd955f..087b4860a386 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -58,6 +58,8 @@ import org.apache.cloudstack.api.response.GetUploadParamsResponse; import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.backup.BackupManager; +import org.apache.cloudstack.backup.BackupManagerImpl; +import org.apache.cloudstack.backup.NativeBackupService; import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.direct.download.DirectDownloadHelper; @@ -370,6 +372,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic @Inject private VMSnapshotDetailsDao vmSnapshotDetailsDao; + @Inject + private NativeBackupService nativeBackupService; + + @Inject + private BackupManager backupManager; public static final String KVM_FILE_BASED_STORAGE_SNAPSHOT = "kvmFileBasedStorageSnapshot"; @@ -1063,7 +1070,7 @@ public VolumeVO createVolume(CreateVolumeCmd cmd) { // if VM 
Id is provided, attach the volume to the VM if (cmd.getVirtualMachineId() != null) { try { - attachVolumeToVM(cmd.getVirtualMachineId(), volume.getId(), volume.getDeviceId(), false); + attachVolumeToVM(cmd.getVirtualMachineId(), volume.getId(), volume.getDeviceId(), false, false); } catch (Exception ex) { StringBuilder message = new StringBuilder("Volume: "); message.append(volume.getUuid()); @@ -2486,7 +2493,7 @@ private void validateVolumeResizeWithSize(VolumeVO volume, long currentSize, Lon @Override @ActionEvent(eventType = EventTypes.EVENT_VOLUME_ATTACH, eventDescription = "attaching volume", async = true) public Volume attachVolumeToVM(AttachVolumeCmd command) { - return attachVolumeToVM(command.getVirtualMachineId(), command.getId(), command.getDeviceId(), false); + return attachVolumeToVM(command.getVirtualMachineId(), command.getId(), command.getDeviceId(), false, false); } protected VolumeVO getVmExistingVolumeForVolumeAttach(UserVmVO vm, VolumeInfo volumeToAttach) { @@ -2575,7 +2582,7 @@ protected VolumeInfo createVolumeOnPrimaryForAttachIfNeeded(final VolumeInfo vol throw new InvalidParameterValueException("Cannot attach uploaded volume, this operation is unsupported on storage pool type " + destPrimaryStorage.getPoolType()); } newVolumeOnPrimaryStorage = _volumeMgr.createVolumeOnPrimaryStorage(vm, volumeToAttach, - vm.getHypervisorType(), destPrimaryStorage); + vm.getHypervisorType(), destPrimaryStorage, null, null); } catch (NoTransitionException e) { logger.debug("Failed to create volume on primary storage", e); throw new CloudRuntimeException("Failed to create volume on primary storage", e); @@ -2631,12 +2638,13 @@ private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long device return newVol; } - public Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId, Boolean allowAttachForSharedFS) { + @Override + public Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId, Boolean allowAttachForSharedFS, boolean 
allowAttachOnRestoring) { Account caller = CallContext.current().getCallingAccount(); VolumeInfo volumeToAttach = getAndCheckVolumeInfo(volumeId); - UserVmVO vm = getAndCheckUserVmVO(vmId, volumeToAttach); + UserVmVO vm = getAndCheckUserVmVO(vmId, volumeToAttach, allowAttachOnRestoring); if (!allowAttachForSharedFS && UserVmManager.SHAREDFSVM.equals(vm.getUserVmType())) { throw new InvalidParameterValueException("Can't attach a volume to a Shared FileSystem Instance"); @@ -2657,7 +2665,7 @@ public Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId, Boolean checkForVMSnapshots(vmId, vm); - checkForBackups(vm, true); + validateIfVmHasBackups(vm, true); checkRightsToAttach(caller, volumeToAttach, vm); @@ -2814,15 +2822,16 @@ private void checkDeviceId(Long deviceId, VolumeInfo volumeToAttach, UserVmVO vm * * @return the user vm vo object correcponding to the vmId to attach to */ - @NotNull private UserVmVO getAndCheckUserVmVO(Long vmId, VolumeInfo volumeToAttach) { + @NotNull private UserVmVO getAndCheckUserVmVO(Long vmId, VolumeInfo volumeToAttach, boolean allowAttachOnRestoring) { UserVmVO vm = _userVmDao.findById(vmId); if (vm == null || vm.getType() != VirtualMachine.Type.User) { throw new InvalidParameterValueException("Please specify a valid User VM."); } - // Check that the VM is in the correct state - if (vm.getState() != State.Running && vm.getState() != State.Stopped) { - throw new InvalidParameterValueException("Please specify a VM that is either running or stopped."); + if (allowAttachOnRestoring) { + validateVmState(vm, State.Running, State.Stopped, State.Restoring); + } else { + validateVmState(vm, State.Running, State.Stopped); } // Check that the VM and the volume are in the same zone @@ -2832,6 +2841,13 @@ private void checkDeviceId(Long deviceId, VolumeInfo volumeToAttach, UserVmVO vm return vm; } + private void validateVmState(UserVmVO vm, State... 
states) { + List allowedStates = Arrays.asList(states); + if (!allowedStates.contains(vm.getState())) { + throw new InvalidParameterValueException(String.format("Please specify a VM that is in one of the following states: %s.", allowedStates)); + } + } + + /** * Check that the volume ID is valid * Check that the volume is a data volume @@ -2861,9 +2877,11 @@ private void checkDeviceId(Long deviceId, VolumeInfo volumeToAttach, UserVmVO vm return volumeToAttach; } - protected void checkForBackups(UserVmVO vm, boolean attach) { - if ((vm.getBackupOfferingId() == null || CollectionUtils.isEmpty(vm.getBackupVolumeList())) || BooleanUtils.isTrue(BackupManager.BackupEnableAttachDetachVolumes.value())) { - return; + protected boolean validateIfVmHasBackups(UserVmVO vm, boolean attach) { + if (vm.getBackupOfferingId() == null || CollectionUtils.isEmpty(backupDao.listByVmId(vm.getDataCenterId(), vm.getId()))) { + return false; + } else if (BooleanUtils.isTrue(BackupManager.BackupEnableAttachDetachVolumes.value())) { + return true; + } + String errorMsg = String.format("Unable to detach volume, cannot detach volume from a VM that has backups. 
First remove the VM from the backup offering or " + "set the global configuration '%s' to true.", BackupManager.BackupEnableAttachDetachVolumes.key()); @@ -3086,7 +3104,10 @@ public Volume detachVolumeFromVM(DetachVolumeCmd cmmd) { throw new InvalidParameterValueException("Unable to detach volume, please specify an Instance that does not have Instance Snapshots"); } - checkForBackups(vm, false); + boolean hasBackup = validateIfVmHasBackups(vm, false); + if (hasBackup) { + nativeBackupService.prepareVolumeForDetach(volume, vm); + } AsyncJobExecutionContext asyncExecutionContext = AsyncJobExecutionContext.getCurrentExecutionContext(); if (asyncExecutionContext != null) { @@ -4085,6 +4106,12 @@ public Snapshot allocSnapshot(Long volumeId, Long policyId, String snapshotName, } } + VirtualMachine attachedVm = volume.getAttachedVM(); + if (attachedVm != null && HypervisorType.KVM.equals(attachedVm.getHypervisorType()) && SnapshotManager.kvmIncrementalSnapshot.valueIn(_hostDao.findClusterIdByVolumeInfo(volume)) && + backupManager.getBackupProvider(attachedVm.getDataCenterId()).getName().equals(BackupManagerImpl.KNIB_BACKUP_PROVIDER) && + CollectionUtils.isNotEmpty(backupDao.listByVmId(attachedVm.getDataCenterId(), attachedVm.getId()))) { + throw new CloudRuntimeException(String.format("VM [%s] has KNIB backups, cannot take incremental snapshots of it.", attachedVm.getUuid())); + } return snapshotMgr.allocSnapshot(volumeId, policyId, snapshotName, locationType, false, zoneIds); } diff --git a/server/src/main/java/com/cloud/vm/UserVmManager.java b/server/src/main/java/com/cloud/vm/UserVmManager.java index c035165a3fa8..cd27ff990ad8 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManager.java +++ b/server/src/main/java/com/cloud/vm/UserVmManager.java @@ -151,13 +151,13 @@ public interface UserVmManager extends UserVmService { boolean expunge(UserVmVO vm); - Pair> startVirtualMachine(long vmId, Long hostId, Map additionalParams, String deploymentPlannerToUse) + Pair> 
startVirtualMachine(long vmId, Long hostId, Map additionalParams, String deploymentPlannerToUse, boolean quickRestore) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException; - Pair> startVirtualMachine(long vmId, Long podId, Long clusterId, Long hostId, Map additionalParams, String deploymentPlannerToUse) + Pair> startVirtualMachine(long vmId, Long podId, Long clusterId, Long hostId, Map additionalParams, String deploymentPlannerToUse, boolean quickRestore) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException; - Pair> startVirtualMachine(long vmId, Long podId, Long clusterId, Long hostId, Map additionalParams, String deploymentPlannerToUse, boolean isExplicitHost) + Pair> startVirtualMachine(long vmId, Long podId, Long clusterId, Long hostId, Map additionalParams, String deploymentPlannerToUse, boolean isExplicitHost, boolean quickRestore) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException; boolean upgradeVirtualMachine(Long id, Long serviceOfferingId, Map customParameters) throws ResourceUnavailableException, diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 36f1f7a2f126..e60f351f9a6b 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -62,6 +62,7 @@ import com.cloud.storage.SnapshotPolicyVO; import com.cloud.storage.dao.SnapshotPolicyDao; +import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; @@ -107,6 +108,7 @@ import org.apache.cloudstack.backup.BackupManager; import 
org.apache.cloudstack.backup.BackupScheduleVO; import org.apache.cloudstack.backup.BackupVO; +import org.apache.cloudstack.backup.NativeBackupService; import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.backup.dao.BackupScheduleDao; import org.apache.cloudstack.context.CallContext; @@ -531,6 +533,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @Inject private VMSnapshotDao _vmSnapshotDao; @Inject + private VMSnapshotDetailsDao vmSnapshotDetailsDao; + @Inject private VMSnapshotManager _vmSnapshotMgr; @Inject private AffinityGroupVMMapDao _affinityGroupVMMapDao; @@ -593,6 +597,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @Inject private BackupManager backupManager; @Inject + private NativeBackupService nativeBackupService; + @Inject private AnnotationDao annotationDao; @Inject private VmStatsDao vmStatsDao; @@ -3381,7 +3387,7 @@ public UserVm startVirtualMachine(StartVMCmd cmd) throws ExecutionException, Con additonalParams.put(VirtualMachineProfile.Param.ConsiderLastHost, cmd.getConsiderLastHost().toString()); } - return startVirtualMachine(cmd.getId(), cmd.getPodId(), cmd.getClusterId(), cmd.getHostId(), additonalParams, cmd.getDeploymentPlanner()).first(); + return startVirtualMachine(cmd.getId(), cmd.getPodId(), cmd.getClusterId(), cmd.getHostId(), additonalParams, cmd.getDeploymentPlanner(), false).first(); } @Override @@ -5276,7 +5282,7 @@ private UserVm startVirtualMachine(long vmId, Long podId, Long clusterId, Long h Pair> vmParamPair = null; try { - vmParamPair = startVirtualMachine(vmId, podId, clusterId, hostId, additonalParams, deploymentPlannerToUse); + vmParamPair = startVirtualMachine(vmId, podId, clusterId, hostId, additonalParams, deploymentPlannerToUse, false); vm = vmParamPair.first(); // At this point VM should be in "Running" state @@ -5667,20 +5673,20 @@ public void finalizeStop(VirtualMachineProfile profile, Answer answer) { @Override 
public Pair> startVirtualMachine(long vmId, Long hostId, @NotNull Map additionalParams, - String deploymentPlannerToUse) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException { - return startVirtualMachine(vmId, null, null, hostId, additionalParams, deploymentPlannerToUse); + String deploymentPlannerToUse, boolean quickRestore) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException { + return startVirtualMachine(vmId, null, null, hostId, additionalParams, deploymentPlannerToUse, quickRestore); } @Override public Pair> startVirtualMachine(long vmId, Long podId, Long clusterId, Long hostId, - @NotNull Map additionalParams, String deploymentPlannerToUse) + @NotNull Map additionalParams, String deploymentPlannerToUse, boolean quickRestore) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException { - return startVirtualMachine(vmId, podId, clusterId, hostId, additionalParams, deploymentPlannerToUse, true); + return startVirtualMachine(vmId, podId, clusterId, hostId, additionalParams, deploymentPlannerToUse, true, quickRestore); } @Override public Pair> startVirtualMachine(long vmId, Long podId, Long clusterId, Long hostId, - @NotNull Map additionalParams, String deploymentPlannerToUse, boolean isExplicitHost) + @NotNull Map additionalParams, String deploymentPlannerToUse, boolean isExplicitHost, boolean quickRestore) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException, ResourceAllocationException { // Input validation final Account callerAccount = CallContext.current().getCallingAccount(); @@ -5739,7 +5745,7 @@ public Pair> startVirtualMach boolean isRootAdmin = _accountService.isRootAdmin(callerAccount.getId()); Pod destinationPod = getDestinationPod(podId, isRootAdmin); Cluster destinationCluster = 
getDestinationCluster(clusterId, isRootAdmin); - HostVO destinationHost = getDestinationHost(hostId, isRootAdmin, isExplicitHost); + HostVO destinationHost = getDestinationHost(hostId, isRootAdmin, isExplicitHost, quickRestore); DataCenterDeployment plan = null; boolean deployOnGivenHost = false; if (destinationHost != null) { @@ -5922,10 +5928,10 @@ private Cluster getDestinationCluster(Long clusterId, boolean isRootAdmin) { return destinationCluster; } - private HostVO getDestinationHost(Long hostId, boolean isRootAdmin, boolean isExplicitHost) { + private HostVO getDestinationHost(Long hostId, boolean isRootAdmin, boolean isExplicitHost, boolean quickRestore) { HostVO destinationHost = null; if (hostId != null) { - if (isExplicitHost && !isRootAdmin) { + if (isExplicitHost && !isRootAdmin && !quickRestore) { throw new PermissionDeniedException( "Parameter " + ApiConstants.HOST_ID + " can only be specified by a Root Admin, permission denied"); } @@ -6449,7 +6455,7 @@ private UserVm createVirtualMachine(BaseDeployVMCmd cmd, DataCenter zone, Accoun boolean isRootAdmin = _accountService.isRootAdmin(callerId); Long hostId = cmd.getHostId(); - getDestinationHost(hostId, isRootAdmin, true); + getDestinationHost(hostId, isRootAdmin, true, false); String ipAddress = cmd.getIpAddress(); String ip6Address = cmd.getIp6Address(); @@ -8894,6 +8900,7 @@ public UserVm restoreVirtualMachine(final Account caller, final long vmId, final if (needRestart) { try { _itMgr.stop(vm.getUuid()); + vm.setState(State.Stopped); } catch (ResourceUnavailableException e) { logger.debug("Stop vm {} failed", vm, e); CloudRuntimeException ex = new CloudRuntimeException("Stop vm failed for specified vmId"); @@ -8969,6 +8976,7 @@ public Pair doInTransaction(final TransactionStatus status) th newVol.getDiskOfferingId(), newVol.getTemplateId(), newVol.getSize(), Volume.class.getName(), newVol.getUuid(), vmId, newVol.isDisplay()); // Detach, destroy and create the usage event for the old root volume. 
+ nativeBackupService.prepareVolumeForDetach(root, vm); _volsDao.detachVolume(root.getId()); destroyVolumeInContext(vm, Volume.State.Allocated.equals(root.getState()) || expunge, root); @@ -9740,7 +9748,7 @@ public UserVm restoreVMFromBackup(CreateVMFromBackupCmd cmd) throws ResourceUnav try { Pair> vmParamPair = null; - vmParamPair = startVirtualMachine(vmId, null, null, null, additonalParams, null); + vmParamPair = startVirtualMachine(vmId, null, null, null, additonalParams, null, false); vm = vmParamPair.first(); Long isoId = vm.getIsoId(); @@ -9750,7 +9758,7 @@ public UserVm restoreVMFromBackup(CreateVMFromBackupCmd cmd) throws ResourceUnav _vmDao.update(vm.getId(), vmVO); } - backupManager.restoreBackupToVM(cmd.getBackupId(), vmId); + backupManager.restoreBackupToVM(cmd.getBackupId(), vmId, cmd.getQuickRestore()); } catch (CloudRuntimeException | ResourceUnavailableException | ResourceAllocationException | InsufficientCapacityException e) { UserVmVO vmVO = _vmDao.findById(vmId); @@ -9771,7 +9779,7 @@ public UserVm restoreVMFromBackup(CreateVMFromBackupCmd cmd) throws ResourceUnav vm = resetVMSSHKeyInternal(userVm, owner, sshKeyPairNames); } - if (cmd.getStartVm()) { + if (cmd.getStartVm() && !cmd.getQuickRestore()) { Long podId = null; Long clusterId = null; if (cmd instanceof CreateVMFromBackupCmdByAdmin) { diff --git a/server/src/main/java/org/apache/cloudstack/backup/BackupCompressionService.java b/server/src/main/java/org/apache/cloudstack/backup/BackupCompressionService.java new file mode 100644 index 000000000000..132dc9517194 --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/backup/BackupCompressionService.java @@ -0,0 +1,388 @@ +//Licensed to the Apache Software Foundation (ASF) under one +//or more contributor license agreements. See the NOTICE file +//distributed with this work for additional information +//regarding copyright ownership. 
The ASF licenses this file +//to you under the Apache License, Version 2.0 (the +//"License"); you may not use this file except in compliance +//with the License. You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, +//software distributed under the License is distributed on an +//"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +//KIND, either express or implied. See the License for the +//specific language governing permissions and limitations +//under the License. +package org.apache.cloudstack.backup; + +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.resource.ResourceState; +import com.cloud.utils.DateUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.UuidUtils; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.concurrency.NamedThreadFactory; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.db.TransactionStatus; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.VMInstanceDao; +import org.apache.cloudstack.backup.dao.BackupCompressionJobDao; +import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.framework.jobs.AsyncJobManager; +import org.apache.logging.log4j.ThreadContext; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Comparator; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; 
+import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +public class BackupCompressionService extends ManagerBase implements Configurable { + + private static final String COMPRESSION_JOB = "compression-job-"; + private static final String LOGCONTEXTID = "logcontextid"; + + private static final String LOCK = "compression_lock"; + private static final double RESCHEDULE_TO_TIMEOUT_RATIO = 2.5; + protected ConfigKey backupCompressionMaxConcurrentCompressionsPerHost = new ConfigKey<>("Advanced", Integer.class, + "backup.compression.max.concurrent.compressions.per.host", "5", "Determines the maximum number of concurrent backup compressions per host. Values lower than 0 remove" + + " the limit, meaning that as many compressions as possible will be done at the same time.", true, ConfigKey.Scope.Cluster); + + protected ConfigKey backupCompressionMaxJobRetries = new ConfigKey<>("Advanced", Integer.class, + "backup.compression.max.job.retries", "2", "Determines the maximum number of retries for backup compression jobs. This includes both start compression jobs and " + + "finalize compression jobs.", true, ConfigKey.Scope.Cluster); + + protected ConfigKey backupCompressionRetryInterval = new ConfigKey<>("Advanced", Integer.class, + "backup.compression.retry.interval", "60", "Determines the minimum amount of time (in minutes) to retry a backup compression job. This includes both start " + + "compression jobs and finalize compression jobs.", true, ConfigKey.Scope.Cluster); + + protected ConfigKey backupCompressionTaskEnabled = new ConfigKey<>("Advanced", Boolean.class, "backup.compression.task.enabled", "true", "Whether the backup " + + "compression task should be running or not. 
Please set this to false and wait for any compression jobs to finish before restarting the Management Server.", true, + ConfigKey.Scope.Global); + + @Inject + private BackupCompressionJobDao backupCompressionJobDao; + + @Inject + private HostDao hostDao; + + @Inject + private BackupDao backupDao; + + @Inject + private DataCenterDao dataCenterDao; + + @Inject + private NativeBackupService nativeBackupService; + + @Inject + private ClusterDao clusterDao; + + @Inject + private AsyncJobManager asyncJobManager; + + @Inject + private VMInstanceDao instanceDao; + + private ExecutorService executor; + + private ScheduledExecutorService scheduledExecutor; + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + super.configure(name, params); + + executor = Executors.newCachedThreadPool(new NamedThreadFactory("BackupCompressionTask")); + scheduledExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("BackupCompressionScheduler")); + scheduledExecutor.scheduleAtFixedRate(this::searchAndDispatchJobs, 60, 60, TimeUnit.SECONDS); + return true; + } + + /** + * For each zone, get the jobs that should be started and distribute them through the hosts. + * Will lock the backup compression table so that only one management server executes this task at a time. + * Catches all exceptions and only sends them to the log. If we throw an exception, the task will stop running until the server is restarted. + * */ + private void searchAndDispatchJobs() { + ThreadContext.put(LOGCONTEXTID, UuidUtils.first(UUID.randomUUID().toString())); + logger.debug("Searching for backup compression jobs to dispatch."); + + if (!asyncJobManager.isAsyncJobsEnabled()) { + logger.debug("A management shutdown has been triggered. 
Not running backup compression task."); + return; + } + + Transaction.execute(TransactionLegacy.CLOUD_DB, new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + try { + List zones = dataCenterDao.listEnabledZones(); + if (!backupCompressionJobDao.lockInLockTable(LOCK, 300)) { + logger.warn("Unable to get lock for compression jobs."); + return; + } + + rescheduleLostJobs(); + + if (!backupCompressionTaskEnabled.value()) { + logger.debug("Backup compression task is disabled. Not running."); + return; + } + + for (DataCenterVO zone : zones) { + if (!BackupManager.BackupFrameworkEnabled.valueIn(zone.getId())) { + logger.debug("Backup framework is not enabled for zone [{}], will not run the backup compression task for this zone.", zone.getUuid()); + continue; + } + List jobsToStart = backupCompressionJobDao.listWaitingJobsAndScheduledToBeforeNow(zone.getId()); + if (jobsToStart.isEmpty()) { + continue; + } + logger.debug("Found [{}] compression jobs to submit.", jobsToStart.size()); + HashMap hostToNumberOfExecutingJobs = getHostToNumberOfExecutingJobs(zone); + List> hostAndNumberOfJobsPairList = filterHostsWithTooManyCompressionJobs(hostToNumberOfExecutingJobs); + HashSet busyInstances = submitFinalizeJobsForExecution(jobsToStart, hostAndNumberOfJobsPairList, zone.getId()); + submitStartJobsForExecution(jobsToStart, hostAndNumberOfJobsPairList, busyInstances, zone.getId()); + } + + ThreadContext.pop(); + } catch (Exception e) { + logger.error("Caught exception [{}] while trying to search and dispatch backup compression jobs.", e.getMessage(), e); + } finally { + backupCompressionJobDao.unlockFromLockTable(LOCK); + } + } + }); + } + + /** + * Goes through all the executing jobs for the zone and returns a map from up and enabled KVM hosts to the number of executing jobs. 
+ * */ + private HashMap getHostToNumberOfExecutingJobs(DataCenterVO zone) { + List allKvmHostsForZone = hostDao.listAllRoutingHostsByZoneAndHypervisorType(zone.getId(), Hypervisor.HypervisorType.KVM); + HashMap hostToNumberOfExecutingJobs = new HashMap<>(); + + for (HostVO host : allKvmHostsForZone) { + if (host.getStatus() == Status.Up && host.getResourceState() == ResourceState.Enabled) { + hostToNumberOfExecutingJobs.put(host, 0L); + } + } + + List executingStartJobs = backupCompressionJobDao.listExecutingJobsByZoneIdAndJobType(zone.getId(), BackupCompressionJobType.StartCompression); + for (BackupCompressionJobVO executingJob : executingStartJobs) { + HostVO host = allKvmHostsForZone.stream().filter(hostVO -> hostVO.getId() == executingJob.getHostId()).findFirst().orElse(null); + if (host == null) { + logger.error("Compression job [{}] is running in an unknown host. This job will be rescheduled in the future.", executingJob); + continue; + } else if (host.getStatus() != Status.Up || host.getResourceState() != ResourceState.Enabled) { + logger.warn("Compression job [{}] is running in host [{}], which is not up or not enabled. 
If possible, wait for the job to finish before restarting the Agent.", + executingJob, host); + continue; + } + + hostToNumberOfExecutingJobs.computeIfPresent(host, (hostVO, numberOfJobs) -> numberOfJobs + 1); + } + + return hostToNumberOfExecutingJobs; + } + + private List> filterHostsWithTooManyCompressionJobs(HashMap hostToNumberOfExecutingJobs) { + List> hostAndNumberOfJobsPairList = new ArrayList<>(); + for (HostVO host : hostToNumberOfExecutingJobs.keySet()) { + Long numberOfJobs = hostToNumberOfExecutingJobs.get(host); + Integer maxConcurrentCompressionsPerHost = backupCompressionMaxConcurrentCompressionsPerHost.valueIn(host.getClusterId()); + if (maxConcurrentCompressionsPerHost > 0 && numberOfJobs >= maxConcurrentCompressionsPerHost) { + logger.debug("Host [{}] is already executing the maximum number of concurrent compression jobs set in [{}]. Current number of jobs being executed is " + + "[{}], the value for the configuration is [{}].", host, backupCompressionMaxConcurrentCompressionsPerHost.toString(), numberOfJobs, maxConcurrentCompressionsPerHost); + continue; + } + hostAndNumberOfJobsPairList.add(new Pair<>(host, numberOfJobs)); + } + return hostAndNumberOfJobsPairList; + } + + /** + * Reschedule jobs that seem to be stuck. This should run even if the backup compression task is disabled, so that stuck jobs get cleaned if necessary. + * */ + private void rescheduleLostJobs() { + for (DataCenterVO dataCenterVO : dataCenterDao.listAllZones()) { + logger.debug("Searching lost compression jobs to reschedule in zone [{}].", dataCenterVO.getUuid()); + for (ClusterVO clusterVO : clusterDao.listByDcHyType(dataCenterVO.getId(), Hypervisor.HypervisorType.KVM.toString())) { + List hostVOS = hostDao.findRoutingByClusterId(clusterVO.getId()); + if (hostVOS.isEmpty()) { + logger.debug("No hosts found in cluster [{}]. 
Cannot reschedule jobs for it.", clusterVO.getUuid()); + continue; + } + Calendar date = Calendar.getInstance(); + date.add(Calendar.SECOND, (int)Math.round(NativeBackupProvider.backupCompressionTimeout.valueIn(clusterVO.getId()) * -RESCHEDULE_TO_TIMEOUT_RATIO)); + List lostJobs = backupCompressionJobDao.listExecutingJobsByHostsAndStartTimeBefore(hostVOS.stream().map(HostVO::getId).toArray(), + date.getTime()); + if (lostJobs.isEmpty()) { + logger.debug("Found no compression jobs to reschedule for cluster [{}].", clusterVO.getUuid()); + continue; + } + logger.debug("Found [{}] compression jobs to reschedule for cluster [{}]. Processing them as failures and rescheduling them.", lostJobs.size(), clusterVO.getUuid()); + lostJobs.forEach(job -> processJobResult(job, false)); + } + } + } + + /** + * Submit StartCompression jobs, this should be called after submitFinalizeJobsForExecution. + * */ + private void submitStartJobsForExecution(List jobsToExecute, List> hostAndNumberOfJobsPairList, + HashSet instancesWithFinalizingJobs, long zoneId) { + for (BackupCompressionJobVO job : jobsToExecute) { + if (hostAndNumberOfJobsPairList.isEmpty()) { + logger.debug("There are no more available hosts to send [{}] jobs. 
Will try to submit them later.", BackupCompressionJobType.StartCompression); + return; + } + + if (instancesWithFinalizingJobs.contains(job.getInstanceId())) { + VirtualMachine vm = instanceDao.findById(job.getInstanceId()); + logger.debug("Instance [{}] has a finalize compression job running, will not schedule a compression job for it now.", vm.getUuid()); + continue; + } + + String logId = UuidUtils.first(UUID.randomUUID().toString()); + logger.debug("Dispatching backup compression job [{}{}] with logid:{} for backup [{}].", COMPRESSION_JOB, job.getId(), logId, job.getBackupId()); + + Pair hostAndNumberOfJobs; + hostAndNumberOfJobs = hostAndNumberOfJobsPairList.remove(0); + hostAndNumberOfJobs.second(hostAndNumberOfJobs.second()+1); + job.setHostId(hostAndNumberOfJobs.first().getId()); + job.setStartTime(DateUtil.now()); + backupCompressionJobDao.update(job); + + executor.submit(() -> startBackupCompression(job, zoneId, logId)); + + Integer maxJobsPerHost = backupCompressionMaxConcurrentCompressionsPerHost.valueIn(hostAndNumberOfJobs.first().getClusterId()); + if (hostAndNumberOfJobs.second() < maxJobsPerHost || maxJobsPerHost < 0) { + hostAndNumberOfJobsPairList.add(hostAndNumberOfJobs); + hostAndNumberOfJobsPairList.sort(Comparator.comparing(Pair::second)); + } + } + } + + /** + * Submit FinalizeCompression jobs, this should be called before submitStartJobsForExecution. 
+ * */ + private HashSet submitFinalizeJobsForExecution(List jobsToExecute, List> hostAndNumberOfJobsPairList, long zoneId) { + List submittedJobs = new ArrayList<>(); + HashSet setOfInstancesWithExecutingCompressionJobs = new HashSet<>(); + for (BackupCompressionJobVO job : jobsToExecute) { + if (job.getType() != BackupCompressionJobType.FinalizeCompression) { + continue; + } + submittedJobs.add(job); + String logId = UuidUtils.first(UUID.randomUUID().toString()); + logger.debug("Dispatching backup compression job [{}{}] with logid:{} for backup [{}].", COMPRESSION_JOB, job.getId(), logId, job.getBackupId()); + + Pair hostAndNumberOfJobs = hostAndNumberOfJobsPairList.get((int) (Math.random()*hostAndNumberOfJobsPairList.size())); + job.setHostId(hostAndNumberOfJobs.first().getId()); + job.setStartTime(DateUtil.now()); + backupCompressionJobDao.update(job); + + setOfInstancesWithExecutingCompressionJobs.add(job.getInstanceId()); + executor.submit(() -> finalizeBackupCompression(job, zoneId, logId)); + } + jobsToExecute.removeAll(submittedJobs); + return setOfInstancesWithExecutingCompressionJobs; + } + + private void startBackupCompression(BackupCompressionJobVO job, long zoneId, String logId) { + boolean result = false; + try { + ThreadContext.push(COMPRESSION_JOB + job.getId()); + ThreadContext.put(LOGCONTEXTID, logId); + result = nativeBackupService.startBackupCompression(job.getBackupId(), job.getHostId(), zoneId); + } catch (Exception e) { + logger.error("Caught exception [{}] while trying to compress backup [{}].", e.getMessage(), job.getBackupId(), e); + } finally { + processJobResult(job, result); + ThreadContext.clearAll(); + } + } + + private void finalizeBackupCompression(BackupCompressionJobVO job, long zoneId, String logId) { + boolean result = false; + try { + ThreadContext.push(COMPRESSION_JOB + job.getId()); + ThreadContext.put(LOGCONTEXTID, logId); + result = nativeBackupService.finalizeBackupCompression(job.getBackupId(), job.getHostId(), zoneId); 
+ } catch (Exception e) { + logger.error("Caught exception [{}] while trying to finalize backup compression [{}].", e.getMessage(), job.getBackupId(), e); + } finally { + processJobResult(job, result); + ThreadContext.clearAll(); + } + } + + private void processJobResult(BackupCompressionJobVO job, boolean result) { + job.setAttempts(job.getAttempts() + 1); + if (result) { + logger.debug("Compression job [{}] finished with success. Removing it from queue.", job); + job.setRemoved(DateUtil.now()); + backupCompressionJobDao.update(job); + return; + } + + BackupVO backupVO = backupDao.findByIdIncludingRemoved(job.getBackupId()); + if (backupVO.getRemoved() != null) { + logger.debug("Backup [{}] is marked as removed. Will not reschedule the compression job for it.", backupVO); + job.setRemoved(DateUtil.now()); + backupCompressionJobDao.update(job); + return; + } + + HostVO hostVO = hostDao.findById(job.getHostId()); + int maxAttempts = backupCompressionMaxJobRetries.valueIn(hostVO.getClusterId()); + if (job.getAttempts() >= maxAttempts) { + logger.debug("Compression job [{}] reached the maximum amount of attempts [{}]. Removing it from queue.", job, maxAttempts); + job.setRemoved(DateUtil.now()); + backupCompressionJobDao.update(job); + return; + } + + Calendar calendar = Calendar.getInstance(); + calendar.setTime(new Date()); + calendar.add(Calendar.MINUTE, backupCompressionRetryInterval.valueIn(hostVO.getClusterId())); + job.setScheduledStartTime(calendar.getTime()); + job.setStartTime(null); + job.setHostId(null); + logger.debug("Compression job [{}] failed. 
Scheduling it to retry at [{}].", job, job.getScheduledStartTime()); + backupCompressionJobDao.update(job); + } + + @Override + public String getConfigComponentName() { + return BackupCompressionService.class.getSimpleName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[] {backupCompressionMaxConcurrentCompressionsPerHost, backupCompressionMaxJobRetries, backupCompressionRetryInterval, backupCompressionTaskEnabled}; + } +} \ No newline at end of file diff --git a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java index ed8391fe0c65..0c07435968f3 100644 --- a/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/backup/BackupManagerImpl.java @@ -35,6 +35,11 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import com.cloud.host.Host; +import com.cloud.storage.VolumeApiService; +import com.cloud.utils.exception.BackupProviderException; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.vm.VirtualMachineManager; import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -53,6 +58,7 @@ import org.apache.cloudstack.api.command.user.backup.CreateBackupScheduleCmd; import org.apache.cloudstack.api.command.user.backup.DeleteBackupCmd; import org.apache.cloudstack.api.command.user.backup.DeleteBackupScheduleCmd; +import org.apache.cloudstack.api.command.user.backup.ListBackupCompressionJobsCmd; import org.apache.cloudstack.api.command.user.backup.ListBackupOfferingsCmd; import org.apache.cloudstack.api.command.user.backup.ListBackupScheduleCmd; import org.apache.cloudstack.api.command.user.backup.ListBackupsCmd; @@ -60,6 +66,9 @@ import org.apache.cloudstack.api.command.user.backup.RestoreBackupCmd; import org.apache.cloudstack.api.command.user.backup.RestoreVolumeFromBackupAndAttachToVMCmd; import 
org.apache.cloudstack.api.command.user.backup.UpdateBackupScheduleCmd; +import org.apache.cloudstack.api.command.user.backup.nativeoffering.CreateNativeBackupOfferingCmd; +import org.apache.cloudstack.api.command.user.backup.nativeoffering.DeleteNativeBackupOfferingCmd; +import org.apache.cloudstack.api.command.user.backup.nativeoffering.ListNativeBackupOfferingsCmd; import org.apache.cloudstack.api.command.user.backup.repository.AddBackupRepositoryCmd; import org.apache.cloudstack.api.command.user.backup.repository.DeleteBackupRepositoryCmd; import org.apache.cloudstack.api.command.user.backup.repository.ListBackupRepositoriesCmd; @@ -128,7 +137,6 @@ import com.cloud.storage.ScopeType; import com.cloud.storage.Storage; import com.cloud.storage.Volume; -import com.cloud.storage.VolumeApiService; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSDao; @@ -160,11 +168,9 @@ import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.fsm.NoTransitionException; import com.cloud.vm.VMInstanceDetailVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.VmDiskInfo; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.dao.VMInstanceDao; @@ -251,6 +257,10 @@ public class BackupManagerImpl extends ManagerBase implements BackupManager { private static Map backupProvidersMap = new HashMap<>(); private List backupProviders; + public static final String KNIB_BACKUP_PROVIDER = "knib"; + + private static List quiesceSupported = List.of("nas", KNIB_BACKUP_PROVIDER); + public AsyncJobDispatcher getAsyncJobDispatcher() { return asyncJobDispatcher; } @@ -639,6 +649,7 @@ public BackupSchedule configureBackupSchedule(CreateBackupScheduleCmd cmd) { final DateUtil.IntervalType intervalType = cmd.getIntervalType(); final String 
scheduleString = cmd.getSchedule(); final TimeZone timeZone = TimeZone.getTimeZone(cmd.getTimezone()); + boolean isolated = cmd.isIsolated(); if (intervalType == null) { throw new CloudRuntimeException("Invalid interval type provided"); @@ -659,8 +670,12 @@ public BackupSchedule configureBackupSchedule(CreateBackupScheduleCmd cmd) { final int maxBackups = validateAndGetDefaultBackupRetentionIfRequired(cmd.getMaxBackups(), offering, vm); - if (!"nas".equals(offering.getProvider()) && cmd.getQuiesceVM() != null) { - throw new InvalidParameterValueException("Quiesce VM option is supported only for NAS backup provider"); + if (isolated && !KNIB_BACKUP_PROVIDER.equals(offering.getProvider())) { + throw new InvalidParameterValueException("Isolated backups are only supported by KNIB backup provider."); + } + + if (!"nas".equals(offering.getProvider()) && !KNIB_BACKUP_PROVIDER.equals(offering.getProvider()) && cmd.getQuiesceVM() != null) { + throw new InvalidParameterValueException("Quiesce VM option is supported only for NAS and KNIB backup providers."); } final String timezoneId = timeZone.getID(); @@ -677,7 +692,8 @@ public BackupSchedule configureBackupSchedule(CreateBackupScheduleCmd cmd) { final BackupScheduleVO schedule = backupScheduleDao.findByVMAndIntervalType(vmId, intervalType); if (schedule == null) { - return backupScheduleDao.persist(new BackupScheduleVO(vmId, intervalType, scheduleString, timezoneId, nextDateTime, maxBackups, cmd.getQuiesceVM(), vm.getAccountId(), vm.getDomainId())); + return backupScheduleDao.persist(new BackupScheduleVO(vmId, intervalType, scheduleString, timezoneId, nextDateTime, maxBackups, cmd.getQuiesceVM(), vm.getAccountId(), + vm.getDomainId(), isolated)); } schedule.setScheduleType((short) intervalType.ordinal()); @@ -686,6 +702,7 @@ public BackupSchedule configureBackupSchedule(CreateBackupScheduleCmd cmd) { schedule.setScheduledTimestamp(nextDateTime); schedule.setMaxBackups(maxBackups); schedule.setQuiesceVM(cmd.getQuiesceVM()); 
+ schedule.setIsolated(isolated); backupScheduleDao.update(schedule.getId(), schedule); return backupScheduleDao.findById(schedule.getId()); } @@ -838,7 +855,6 @@ protected boolean deleteAllVmBackupSchedules(long vmId) { public boolean createBackup(CreateBackupCmd cmd, Object job) throws ResourceAllocationException { Long vmId = cmd.getVmId(); Account caller = CallContext.current().getCallingAccount(); - final VMInstanceVO vm = findVmById(vmId); validateBackupForZone(vm.getDataCenterId()); accountManager.checkAccess(caller, null, true, vm); @@ -861,8 +877,8 @@ public boolean createBackup(CreateBackupCmd cmd, Object job) throws ResourceAllo throw new CloudRuntimeException("The assigned backup offering does not allow ad-hoc user backup"); } - if (!"nas".equals(offering.getProvider()) && cmd.getQuiesceVM() != null) { - throw new InvalidParameterValueException("Quiesce VM option is supported only for NAS backup provider"); + if (!quiesceSupported.contains(offering.getProvider()) && cmd.getQuiesceVM() != null) { + throw new InvalidParameterValueException("Quiesce VM option is supported only for NAS or KNIB backup providers"); } Long backupScheduleId = getBackupScheduleId(job); @@ -901,7 +917,7 @@ public boolean createBackup(CreateBackupCmd cmd, Object job) throws ResourceAllo vmId, ApiCommandResourceType.VirtualMachine.toString(), true, 0); - Pair result = backupProvider.takeBackup(vm, cmd.getQuiesceVM()); + Pair result = backupProvider.takeBackup(vm, cmd.getQuiesceVM(), cmd.isIsolated()); if (!result.first()) { throw new CloudRuntimeException("Failed to create VM backup"); } @@ -1090,11 +1106,11 @@ public Pair, Integer> listBackups(final ListBackupsCmd cmd) { } public boolean importRestoredVM(long zoneId, long domainId, long accountId, long userId, - String vmInternalName, Hypervisor.HypervisorType hypervisorType, Backup backup) { + String vmInternalName, Hypervisor.HypervisorType hypervisorType, Backup backup, BackupOffering offering) { VirtualMachine vm = null; 
HypervisorGuru guru = hypervisorGuruManager.getGuru(hypervisorType); try { - vm = guru.importVirtualMachineFromBackup(zoneId, domainId, accountId, userId, vmInternalName, backup); + vm = guru.importVirtualMachineFromBackup(zoneId, domainId, accountId, userId, vmInternalName, backup, getBackupProvider(offering.getProvider())); } catch (final Exception e) { logger.error(String.format("Failed to import VM [vmInternalName: %s] from backup restoration [%s] with hypervisor [type: %s] due to: [%s].", vmInternalName, ReflectionToStringBuilderUtils.reflectOnlySelectedFields(backup, "id", "uuid", "vmId", "externalId", "backupType"), hypervisorType, e.getMessage()), e); @@ -1119,7 +1135,7 @@ public boolean importRestoredVM(long zoneId, long domainId, long accountId, long @Override @ActionEvent(eventType = EventTypes.EVENT_VM_BACKUP_RESTORE, eventDescription = "restoring VM from backup", async = true) - public boolean restoreBackup(final Long backupId) { + public boolean restoreBackup(final Long backupId, boolean quickRestore, Long hostId) { final BackupVO backup = backupDao.findById(backupId); if (backup == null) { throw new CloudRuntimeException("Backup " + backupId + " does not exist"); @@ -1133,38 +1149,62 @@ public boolean restoreBackup(final Long backupId) { if (vm == null || VirtualMachine.State.Expunging.equals(vm.getState())) { throw new CloudRuntimeException("The Instance from which the backup was taken could not be found."); } - accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm); + + Account callerAccount = CallContext.current().getCallingAccount(); + accountManager.checkAccess(callerAccount, null, true, vm); + validateHostIdParameter(hostId, callerAccount); if (vm.getRemoved() == null && !vm.getState().equals(VirtualMachine.State.Stopped) && !vm.getState().equals(VirtualMachine.State.Destroyed)) { throw new CloudRuntimeException("Existing VM should be stopped before being restored from backup"); } - // This is done to handle 
historic backups if any with Veeam / Networker plugins - List backupVolumes = CollectionUtils.isEmpty(backup.getBackedUpVolumes()) ? - vm.getBackupVolumeList() : backup.getBackedUpVolumes(); - List vmVolumes = volumeDao.findByInstance(vm.getId()); - if (vmVolumes.size() != backupVolumes.size()) { - throw new CloudRuntimeException("Unable to restore VM with the current backup as the backup has different number of disks as the VM"); - } - - BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(vm.getBackupOfferingId()); - String errorMessage = "Failed to find backup offering of the VM backup."; - if (offering == null) { - logger.warn(errorMessage); - } logger.debug("Attempting to get backup offering from VM backup"); - offering = backupOfferingDao.findByIdIncludingRemoved(backup.getBackupOfferingId()); + BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(backup.getBackupOfferingId()); if (offering == null) { - throw new CloudRuntimeException(errorMessage); + throw new CloudRuntimeException("Failed to find backup offering of the VM backup."); } + validateBackupVolumes(backup, vm, offering); String backupDetailsInMessage = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(backup, "uuid", "externalId", "vmId", "name"); - tryRestoreVM(backup, vm, offering, backupDetailsInMessage); + tryRestoreVM(backup, vm, offering, backupDetailsInMessage, quickRestore, hostId); + + updateStates(vm, getBackupProvider(offering.getProvider()), quickRestore); + + return importRestoredVM(vm.getDataCenterId(), vm.getDomainId(), vm.getAccountId(), vm.getUserId(), + vm.getInstanceName(), vm.getHypervisorType(), backup, offering); + } + + private void validateHostIdParameter(Long hostId, Account callerAccount) { + if (!accountService.isRootAdmin(callerAccount.getId()) && hostId != null) { + throw new PermissionDeniedException(String.format("Parameter %s can only be specified by a Root Admin", ApiConstants.HOST_ID)); + } + } + + /** + * Updates the VM and 
volume states. + * If using quick restore, the states should already be set (the VM should be running). + * Only KNIB supports this parameter for now; will do nothing if the backup provider is KNIB and quickRestore is true. + * */ + private void updateStates(VMInstanceVO vm, BackupProvider backupProvider, boolean quickRestore) { + if (KNIB_BACKUP_PROVIDER.equals(backupProvider.getName()) && quickRestore) { + return; + } updateVolumeState(vm, Volume.Event.RestoreSucceeded, Volume.State.Ready); updateVmState(vm, VirtualMachine.Event.RestoringSuccess, VirtualMachine.State.Stopped); + } - return importRestoredVM(vm.getDataCenterId(), vm.getDomainId(), vm.getAccountId(), vm.getUserId(), - vm.getInstanceName(), vm.getHypervisorType(), backup); + protected void validateBackupVolumes(BackupVO backup, VMInstanceVO vm, BackupOffering offering) { + BackupProvider backupProvider = getBackupProvider(offering.getProvider()); + if (KNIB_BACKUP_PROVIDER.equals(backupProvider.getName())) { + return; + } + // This is done to handle historic backups if any with Veeam / Networker plugins + List backupVolumes = CollectionUtils.isEmpty(backup.getBackedUpVolumes()) ? + vm.getBackupVolumeList() : backup.getBackedUpVolumes(); + List vmVolumes = volumeDao.findByInstance(vm.getId()); + if (vmVolumes.size() != backupVolumes.size()) { + throw new CloudRuntimeException("Unable to restore VM with the current backup as the backup has different number of disks as the VM"); + } } /** @@ -1174,7 +1214,7 @@ public boolean restoreBackup(final Long backupId) { * * If restore fails, then update the VM state to {@link VirtualMachine.Event#RestoringFailed}, and its volumes to {@link Volume.Event#RestoreFailed} and throw an {@link CloudRuntimeException}. 
*/ - protected void tryRestoreVM(BackupVO backup, VMInstanceVO vm, BackupOffering offering, String backupDetailsInMessage) { + protected void tryRestoreVM(BackupVO backup, VMInstanceVO vm, BackupOffering offering, String backupDetailsInMessage, boolean quickRestore, Long hostId) { try { updateVmState(vm, VirtualMachine.Event.RestoringRequested, VirtualMachine.State.Restoring); updateVolumeState(vm, Volume.Event.RestoreRequested, Volume.State.Restoring); @@ -1184,7 +1224,7 @@ protected void tryRestoreVM(BackupVO backup, VMInstanceVO vm, BackupOffering off true, 0); final BackupProvider backupProvider = getBackupProvider(offering.getProvider()); - if (!backupProvider.restoreVMFromBackup(vm, backup)) { + if (!backupProvider.restoreVMFromBackup(vm, backup, quickRestore, hostId)) { ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, vm.getAccountId(), EventVO.LEVEL_ERROR, EventTypes.EVENT_VM_BACKUP_RESTORE, String.format("Failed to restore VM %s from backup %s", vm.getInstanceName(), backup.getUuid()), vm.getId(), ApiCommandResourceType.VirtualMachine.toString(),0); @@ -1196,6 +1236,9 @@ protected void tryRestoreVM(BackupVO backup, VMInstanceVO vm, BackupOffering off logger.error(String.format("Failed to restore backup [%s] due to: [%s].", backupDetailsInMessage, e.getMessage()), e); updateVolumeState(vm, Volume.Event.RestoreFailed, Volume.State.Ready); updateVmState(vm, VirtualMachine.Event.RestoringFailed, VirtualMachine.State.Stopped); + if (e instanceof BackupProviderException) { + throw e; + } throw new CloudRuntimeException(String.format("Error restoring VM from backup [%s].", backupDetailsInMessage)); } } @@ -1406,7 +1449,7 @@ public Boolean canCreateInstanceFromBackupAcrossZones(final Long backupId) { } @Override - public boolean restoreBackupToVM(final Long backupId, final Long vmId) throws CloudRuntimeException { + public boolean restoreBackupToVM(final Long backupId, final Long vmId, boolean quickRestore) throws CloudRuntimeException { final BackupVO 
backup = backupDao.findById(backupId); if (backup == null) { throw new CloudRuntimeException("Backup " + backupId + " does not exist"); @@ -1448,6 +1491,10 @@ public boolean restoreBackupToVM(final Long backupId, final Long vmId) throws Cl throw new CloudRuntimeException("Create instance from backup is not supported by the " + offering.getProvider() + " provider."); } + if (quickRestore && !backupProvider.getName().equals(KNIB_BACKUP_PROVIDER)) { + throw new CloudRuntimeException("Quick restore is only supported by KNIB."); + } + String backupDetailsInMessage = ReflectionToStringBuilderUtils.reflectOnlySelectedFields(backup, "uuid", "externalId", "name"); Pair result = null; Long eventId = null; @@ -1466,7 +1513,7 @@ public boolean restoreBackupToVM(final Long backupId, final Long vmId) throws Cl host = restoreInfo.first().getPrivateIpAddress(); dataStore = restoreInfo.second().getUuid(); } - result = backupProvider.restoreBackupToVM(vm, backup, host, dataStore); + result = backupProvider.restoreBackupToVM(vm, backup, host, dataStore, quickRestore); } catch (Exception e) { logger.error(String.format("Failed to create Instance [%s] from backup [%s] due to: [%s]", vm.getInstanceName(), backupDetailsInMessage, e.getMessage()), e); @@ -1481,8 +1528,7 @@ public boolean restoreBackupToVM(final Long backupId, final Long vmId) throws Cl throw new CloudRuntimeException(error_msg); } - updateVolumeState(vm, Volume.Event.RestoreSucceeded, Volume.State.Ready); - updateVmState(vm, VirtualMachine.Event.RestoringSuccess, VirtualMachine.State.Stopped); + updateStates(vm, backupProvider, quickRestore); ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, vm.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_VM_CREATE_FROM_BACKUP, String.format("Successfully created Instance %s from backup %s", vm.getInstanceName(), backup.getUuid()), vm.getId(), ApiCommandResourceType.VirtualMachine.toString(),eventId); @@ -1491,7 +1537,8 @@ public boolean restoreBackupToVM(final Long 
backupId, final Long vmId) throws Cl @Override @ActionEvent(eventType = EventTypes.EVENT_VM_BACKUP_RESTORE, eventDescription = "restoring VM from backup", async = true) - public boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, final Long backupId, final Long vmId) throws Exception { + public boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, final Long backupId, final Long vmId, boolean isQuickRestore, + Long hostId) throws Exception { if (StringUtils.isEmpty(backedUpVolumeUuid)) { throw new CloudRuntimeException("Invalid volume ID passed"); } @@ -1505,7 +1552,9 @@ public boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, validateBackupForZone(backup.getZoneId()); final VMInstanceVO vm = findVmById(vmId); - accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm); + Account callerAccount = CallContext.current().getCallingAccount(); + accountManager.checkAccess(callerAccount, null, true, vm); + validateHostIdParameter(hostId, callerAccount); if (vm.getBackupOfferingId() != null && !BackupEnableAttachDetachVolumes.value()) { throw new CloudRuntimeException("The selected VM has backups, cannot restore and attach volume to the VM."); @@ -1516,8 +1565,8 @@ public boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, } List volumeInfoList = backup.getBackedUpVolumes(); + final VMInstanceVO vmFromBackup = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); if (volumeInfoList == null) { - final VMInstanceVO vmFromBackup = vmInstanceDao.findByIdIncludingRemoved(backup.getVmId()); if (vmFromBackup == null) { throw new CloudRuntimeException("VM reference for the provided VM backup not found"); } else if (vmFromBackup == null || vmFromBackup.getBackupVolumeList() == null) { @@ -1530,19 +1579,26 @@ public boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, throw new CloudRuntimeException("Failed to find volume with Id " + 
backedUpVolumeUuid + " in the backed-up volumes metadata"); } - accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vm); + accountManager.checkAccess(CallContext.current().getCallingAccount(), null, true, vmFromBackup); final BackupOffering offering = backupOfferingDao.findByIdIncludingRemoved(backup.getBackupOfferingId()); if (offering == null) { throw new CloudRuntimeException("Failed to find VM backup offering"); } + if (!StringUtils.equals(KNIB_BACKUP_PROVIDER, offering.getProvider()) && !VirtualMachine.PowerState.PowerOff.equals(vm.getPowerState())) { + throw new CloudRuntimeException(String.format("VM [%s] needs to be powered off to restore the volume [%s].", vm.getUuid(), backedUpVolumeUuid)); + } + BackupProvider backupProvider = getBackupProvider(offering.getProvider()); - VolumeVO backedUpVolume = volumeDao.findByUuid(backedUpVolumeUuid); + VolumeVO backedUpVolume = volumeDao.findByUuidIncludingRemoved(backedUpVolumeUuid); Pair restoreInfo; - if (!"nas".equals(offering.getProvider()) || (backedUpVolume == null)) { - restoreInfo = getRestoreVolumeHostAndDatastore(vm); - } else { + + if ("nas".equals(offering.getProvider()) && backedUpVolume != null) { restoreInfo = getRestoreVolumeHostAndDatastoreForNas(vm, backedUpVolume); + } else if (KNIB_BACKUP_PROVIDER.equals(offering.getProvider())){ + restoreInfo = getRestoreVolumeHostAndDatastoreForKnib(vm, backedUpVolume, isQuickRestore, hostId); + } else { + restoreInfo = getRestoreVolumeHostAndDatastore(vm); } HostVO host = restoreInfo.first(); @@ -1556,21 +1612,21 @@ public boolean restoreBackupVolumeAndAttachToVM(final String backedUpVolumeUuid, String[] hostPossibleValues = {host.getPrivateIpAddress(), host.getName()}; String[] datastoresPossibleValues = {datastore.getUuid(), datastore.getName()}; - Pair result = restoreBackedUpVolume(backupVolumeInfo, backup, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); + Pair result = 
restoreBackedUpVolume(backupVolumeInfo, backup, backupProvider, hostPossibleValues, datastoresPossibleValues, vm, isQuickRestore); if (BooleanUtils.isFalse(result.first())) { throw new CloudRuntimeException(String.format("Error restoring volume [%s] of VM [%s] to host [%s] using backup provider [%s] due to: [%s].", backedUpVolumeUuid, vm.getUuid(), host.getUuid(), backupProvider.getName(), result.second())); } if (!attachVolumeToVM(vm.getDataCenterId(), result.second(), backupVolumeInfo, - backedUpVolumeUuid, vm, datastore.getUuid(), backup)) { + backedUpVolumeUuid, vm, datastore.getUuid(), backup, backupProvider)) { throw new CloudRuntimeException(String.format("Error attaching volume [%s] to VM [%s].", backedUpVolumeUuid, vm.getUuid())); } return true; } protected Pair restoreBackedUpVolume(final Backup.VolumeInfo backupVolumeInfo, final BackupVO backup, - BackupProvider backupProvider, String[] hostPossibleValues, String[] datastoresPossibleValues, VMInstanceVO vm) { + BackupProvider backupProvider, String[] hostPossibleValues, String[] datastoresPossibleValues, VMInstanceVO vm, boolean quickRestore) { Pair result = new Pair<>(false, ""); for (String hostData : hostPossibleValues) { for (String datastoreData : datastoresPossibleValues) { @@ -1578,7 +1634,7 @@ protected Pair restoreBackedUpVolume(final Backup.VolumeInfo ba backupVolumeInfo.getUuid(), hostData, datastoreData)); try { - result = backupProvider.restoreBackedUpVolume(backup, backupVolumeInfo, hostData, datastoreData, new Pair<>(vm.getName(), vm.getState())); + result = backupProvider.restoreBackedUpVolume(backup, backupVolumeInfo, hostData, datastoreData, new Pair<>(vm.getName(), vm.getState()), vm, quickRestore); if (BooleanUtils.isTrue(result.first())) { return result; @@ -1586,6 +1642,12 @@ protected Pair restoreBackedUpVolume(final Backup.VolumeInfo ba } catch (Exception e) { logger.debug(String.format("Failed to restore volume [UUID: %s], using host [%s] and datastore [%s] due to: [%s].", 
backupVolumeInfo.getUuid(), hostData, datastoreData, e.getMessage()), e); + if (e instanceof BackupProviderException) { + throw e; + } + if (KNIB_BACKUP_PROVIDER.equals(backupProvider.getName())) { + return result; + } } } } @@ -1651,6 +1713,29 @@ private Pair getRestoreVolumeHostAndDatastoreForNas(VMIns return new Pair<>(hostVO, storagePoolVO); } + private Pair getRestoreVolumeHostAndDatastoreForKnib(VMInstanceVO vm, VolumeVO backedVolume, boolean quickRestore, Long hostId) { + StoragePoolVO storagePool = primaryDataStoreDao.findById(backedVolume.getPoolId()); + if (vm.getHostId() != null) { + hostId = vm.getHostId(); + } else if (hostId == null || !quickRestore) { + if (vm.getLastHostId() != null) { + hostId = vm.getLastHostId(); + } else { + if (storagePool == null) { + throw new InvalidParameterValueException(String.format("Storage pool of volume [%s] was not found.", backedVolume.getUuid())); + } + List listHost = + hostDao.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, storagePool.getClusterId(), storagePool.getPodId(), storagePool.getDataCenterId(), null); + return new Pair<>(listHost.stream().findFirst().orElseThrow(), null); + } + } + if (hostId == null) { + throw new InvalidParameterValueException(String.format("No host found to quick restore VM [%s]. 
Please check the logs.", vm.getUuid())); + } + + return new Pair<>(hostDao.findById(hostId), storagePool); + } + /** * Find a host from storage pool access */ @@ -1670,14 +1755,14 @@ private HostVO getFirstHostFromStoragePool(StoragePoolVO storagePoolVO) { * Attach volume to VM */ private boolean attachVolumeToVM(Long zoneId, String restoredVolumeLocation, Backup.VolumeInfo backupVolumeInfo, - String volumeUuid, VMInstanceVO vm, String datastoreUuid, Backup backup) throws Exception { + String volumeUuid, VMInstanceVO vm, String datastoreUuid, Backup backup, BackupProvider backupProvider) throws Exception { HypervisorGuru guru = hypervisorGuruManager.getGuru(vm.getHypervisorType()); backupVolumeInfo.setType(Volume.Type.DATADISK); logger.info("Attaching the restored volume {} to VM {}.", () -> ReflectionToStringBuilder.toString(backupVolumeInfo, ToStringStyle.JSON_STYLE), () -> vm); StoragePoolVO pool = primaryDataStoreDao.findByUuid(datastoreUuid); try { - return guru.attachRestoredVolumeToVirtualMachine(zoneId, restoredVolumeLocation, backupVolumeInfo, vm, pool.getId(), backup); + return guru.attachRestoredVolumeToVirtualMachine(zoneId, restoredVolumeLocation, backupVolumeInfo, vm, pool.getId(), backup, backupProvider); } catch (Exception e) { throw new CloudRuntimeException("Error attach restored volume to VM " + vm.getUuid() + " due to: " + e.getMessage()); } @@ -1724,6 +1809,7 @@ public BackupProvider getBackupProvider(final Long zoneId) { return getBackupProvider(name); } + @Override public BackupProvider getBackupProvider(final String name) { if (StringUtils.isEmpty(name)) { throw new CloudRuntimeException("Invalid backup provider name provided"); @@ -1768,6 +1854,10 @@ public List> getCommands() { cmdList.add(ListBackupRepositoriesCmd.class); cmdList.add(CreateVMFromBackupCmd.class); cmdList.add(CreateVMFromBackupCmdByAdmin.class); + cmdList.add(CreateNativeBackupOfferingCmd.class); + cmdList.add(ListNativeBackupOfferingsCmd.class); + 
cmdList.add(DeleteNativeBackupOfferingCmd.class); + cmdList.add(ListBackupCompressionJobsCmd.class); return cmdList; } @@ -1927,6 +2017,7 @@ public void scheduleBackups() { if (quiesceVm != null) { params.put(ApiConstants.QUIESCE_VM, "" + quiesceVm.toString()); } + params.put(ApiConstants.ISOLATED, String.valueOf(backupSchedule.isIsolated())); params.put("ctxUserId", "1"); params.put("ctxAccountId", "" + vm.getAccountId()); params.put("ctxStartEventId", String.valueOf(eventId)); @@ -2322,6 +2413,10 @@ public BackupResponse createBackupResponse(Backup backup, Boolean listVmDetails) response.setProtectedSize(backup.getProtectedSize()); response.setStatus(backup.getStatus()); response.setIntervalType("MANUAL"); + response.setCompressionStatus(backup.getCompressionStatus()); + if (backup.getUncompressedSize() != null && backup.getUncompressedSize() > 0) { + response.setUncompressedSize(backup.getUncompressedSize()); + } if (backup.getBackupScheduleId() != null) { BackupScheduleVO scheduleVO = backupScheduleDao.findById(backup.getBackupScheduleId()); if (scheduleVO != null) { diff --git a/server/src/main/java/org/apache/cloudstack/backup/NativeBackupOfferingServiceImpl.java b/server/src/main/java/org/apache/cloudstack/backup/NativeBackupOfferingServiceImpl.java new file mode 100644 index 000000000000..fef54c655bea --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/backup/NativeBackupOfferingServiceImpl.java @@ -0,0 +1,62 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.backup; + +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.utils.component.ManagerBase; +import org.apache.cloudstack.api.command.user.backup.nativeoffering.CreateNativeBackupOfferingCmd; +import org.apache.cloudstack.api.command.user.backup.nativeoffering.DeleteNativeBackupOfferingCmd; +import org.apache.cloudstack.backup.dao.BackupOfferingDao; +import org.apache.cloudstack.backup.dao.NativeBackupOfferingDao; + +import javax.inject.Inject; + +public class NativeBackupOfferingServiceImpl extends ManagerBase implements NativeBackupOfferingService { + @Inject + private NativeBackupOfferingDao nativeBackupOfferingDao; + + @Inject + private BackupOfferingDao backupOfferingDao; + + @Override + public NativeBackupOffering createNativeBackupOffering(CreateNativeBackupOfferingCmd cmd) { + NativeBackupOfferingVO offeringVO = new NativeBackupOfferingVO(cmd.getName(), cmd.isCompress(), cmd.isValidate(), cmd.isAllowQuickRestore(), + cmd.isAllowExtractFile(), cmd.getBackupChainSize(), cmd.getCompressionLibrary()); + return nativeBackupOfferingDao.persist(offeringVO); + } + + @Override + public NativeBackupOffering deleteNativeBackupOffering(DeleteNativeBackupOfferingCmd cmd) { + NativeBackupOfferingVO nativeOfferingVO = nativeBackupOfferingDao.findByIdIncludingRemoved(cmd.getId()); + + if (nativeOfferingVO.getRemoved() != null) { + logger.info("Offering [%s] is already deleted."); + return nativeOfferingVO; + } + + BackupOffering offeringVO = 
backupOfferingDao.findByExternalId(nativeOfferingVO.getUuid(), null); + + if (offeringVO != null) { + throw new InvalidParameterValueException(String.format("Cannot remove a native backup offering that is in use. Currently imported offering is [%s].", + offeringVO.getName())); + } + + nativeBackupOfferingDao.remove(cmd.getId()); + + return nativeBackupOfferingDao.findByIdIncludingRemoved(cmd.getId()); + } +} diff --git a/server/src/main/java/org/apache/cloudstack/backup/NativeBackupServiceImpl.java b/server/src/main/java/org/apache/cloudstack/backup/NativeBackupServiceImpl.java new file mode 100644 index 000000000000..9d0aa86320dc --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/backup/NativeBackupServiceImpl.java @@ -0,0 +1,276 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +package org.apache.cloudstack.backup; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.to.DataTO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.Pair; +import com.cloud.utils.ReflectionUse; +import com.cloud.utils.component.ComponentLifecycleBase; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionLegacy; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineManager; +import com.cloud.vm.VmWork; +import com.cloud.vm.VmWorkDeleteBackup; +import com.cloud.vm.VmWorkJobHandler; +import com.cloud.vm.VmWorkJobHandlerProxy; +import com.cloud.vm.VmWorkRestoreBackup; +import com.cloud.vm.VmWorkRestoreVolumeBackupAndAttach; +import com.cloud.vm.VmWorkTakeBackup; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.snapshot.VMSnapshot; +import org.apache.cloudstack.backup.dao.BackupDao; +import org.apache.cloudstack.backup.dao.BackupDetailsDao; +import org.apache.cloudstack.backup.dao.NativeBackupJoinDao; +import org.apache.cloudstack.backup.dao.NativeBackupStoragePoolDao; +import org.apache.cloudstack.framework.jobs.AsyncJobManager; +import org.apache.cloudstack.jobs.JobInfo; +import org.apache.cloudstack.storage.command.DeleteCommand; +import org.apache.cloudstack.storage.command.RevertSnapshotCommand; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.commons.collections4.CollectionUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import javax.inject.Inject; +import java.util.HashMap; +import java.util.List; + +public class NativeBackupServiceImpl extends ComponentLifecycleBase implements NativeBackupService, VmWorkJobHandler { + protected Logger logger = LogManager.getLogger(getClass()); + + @Inject + private NativeBackupStoragePoolDao nativeBackupStoragePoolDao; + + @Inject + private BackupManager 
backupManager; + @Inject + private BackupDao backupDao; + @Inject + private AsyncJobManager jobManager; + @Inject + private UserVmDao userVmDao; + @Inject + private VirtualMachineManager virtualMachineManager; + @Inject + private VolumeDao volumeDao; + @Inject + private NativeBackupJoinDao nativeBackupJoinDao; + @Inject + private BackupDetailsDao backupDetailDao; + + private VmWorkJobHandlerProxy jobHandlerProxy = new VmWorkJobHandlerProxy(this); + private HashMap nativeBackupProviderMap = new HashMap<>(); + private List nativeBackupProviders; + + public void setNativeBackupProviders(final List nativeBackupProviders) { + this.nativeBackupProviders = nativeBackupProviders; + } + + @Override + public boolean start() { + super.start(); + + if (nativeBackupProviders != null) { + for (NativeBackupProvider nativeBackupProvider : nativeBackupProviders) { + nativeBackupProviderMap.put(nativeBackupProvider.getName().toLowerCase(), nativeBackupProvider); + } + } + return true; + } + + @Override + public void configureChainInfo(DataTO volumeTo, Command cmd) { + if (!(volumeTo instanceof VolumeObjectTO)) { + return; + } + VolumeObjectTO volumeObjectTO = (VolumeObjectTO) volumeTo; + NativeBackupStoragePoolVO backupDelta = nativeBackupStoragePoolDao.findOneByVolumeId(volumeObjectTO.getVolumeId()); + if (backupDelta == null) { + return; + } + volumeObjectTO.setChainInfo(backupDelta.getBackupDeltaParentPath()); + if (cmd instanceof DeleteCommand) { + ((DeleteCommand) cmd).setDeleteChain(true); + } + if (cmd instanceof RevertSnapshotCommand) { + ((RevertSnapshotCommand) cmd).setDeleteChain(true); + } + logger.debug("Configured chain info for volume [{}]. 
Set it as [{}].", volumeObjectTO.getUuid(), volumeObjectTO.getChainInfo()); + } + + @Override + public void cleanupBackupMetadata(long volumeId) { + logger.debug("Cleaning up backup metadata for volume [{}].", volumeId); + NativeBackupStoragePoolVO delta = nativeBackupStoragePoolDao.findOneByVolumeId(volumeId); + if (delta == null) { + return; + } + nativeBackupStoragePoolDao.expungeByVolumeId(volumeId); + if (CollectionUtils.isNotEmpty(nativeBackupStoragePoolDao.listByBackupId(delta.getBackupId()))) { + return; + } + + NativeBackupJoinVO joinVO = nativeBackupJoinDao.findById(delta.getBackupId()); + logger.debug("Volume [{}] was the last volume with deltas in backup [{}]. Setting the backup as not current and not END_OF_CHAIN.", volumeId, joinVO.getUuid()); + backupDetailDao.removeDetail(joinVO.getId(), BackupDetailsDao.CURRENT); + if (!joinVO.getEndOfChain()) { + backupDetailDao.persist(new BackupDetailVO(joinVO.getId(), BackupDetailsDao.END_OF_CHAIN, Boolean.TRUE.toString(), true)); + } + } + + + @Override + public void prepareVolumeForDetach(Volume volume, VirtualMachine virtualMachine) { + if (!backupManager.BackupFrameworkEnabled.valueIn(virtualMachine.getDataCenterId())) { + return; + } + + NativeBackupProvider nativeBackupProvider = getNativeBackupProviderForZone(virtualMachine.getDataCenterId()); + if (nativeBackupProvider == null) { + return; + } + nativeBackupProvider.prepareVolumeForDetach(volume, virtualMachine); + } + + @Override + public void prepareVolumeForMigration(Volume volume) { + if (volume.getInstanceId() == null) { + return; + } + VirtualMachine virtualMachine = virtualMachineManager.findById(volume.getInstanceId()); + if (!backupManager.BackupFrameworkEnabled.valueIn(virtualMachine.getDataCenterId())) { + return; + } + NativeBackupProvider nativeBackupProvider = getNativeBackupProviderForZone(volume.getDataCenterId()); + if (nativeBackupProvider == null) { + return; + } + nativeBackupProvider.prepareVolumeForMigration(volume, 
virtualMachine); + } + + @Override + public void updateVolumeId(long oldVolumeId, long newVolumeId) { + VolumeVO volumeVO = volumeDao.findById(newVolumeId); + if (volumeVO.getInstanceId() == null) { + return; + } + VirtualMachine virtualMachine = virtualMachineManager.findById(volumeVO.getInstanceId()); + if (!backupManager.BackupFrameworkEnabled.valueIn(virtualMachine.getDataCenterId())) { + return; + } + + NativeBackupProvider nativeBackupProvider = getNativeBackupProviderForZone(virtualMachine.getDataCenterId()); + if (nativeBackupProvider == null) { + return; + } + nativeBackupProvider.updateVolumeId(virtualMachine, oldVolumeId, newVolumeId); + } + + @Override + public void prepareVmForSnapshotRevert(VMSnapshot vmSnapshot) { + VirtualMachine virtualMachine = virtualMachineManager.findById(vmSnapshot.getVmId()); + if (!backupManager.BackupFrameworkEnabled.valueIn(virtualMachine.getDataCenterId())) { + return; + } + + NativeBackupProvider nativeBackupProvider = getNativeBackupProviderForZone(virtualMachine.getDataCenterId()); + if (nativeBackupProvider == null) { + return; + } + nativeBackupProvider.prepareVmForSnapshotRevert(vmSnapshot, virtualMachine); + } + + @Override + public boolean startBackupCompression(long backupId, long hostId, long zoneId) { + NativeBackupProvider nativeBackupProvider = getNativeBackupProviderForZone(zoneId); + if (nativeBackupProvider == null) { + return false; + } + return nativeBackupProvider.startBackupCompression(backupId, hostId); + } + + @Override + public boolean finalizeBackupCompression(long backupId, long hostId, long zoneId) { + NativeBackupProvider nativeBackupProvider = getNativeBackupProviderForZone(zoneId); + if (nativeBackupProvider == null) { + return false; + } + return nativeBackupProvider.finalizeBackupCompression(backupId, hostId); + } + + @Override + public Pair handleVmWorkJob(VmWork work) throws Exception { + return jobHandlerProxy.handleVmWorkJob(work); + } + + @ReflectionUse + public Pair 
orchestrateTakeBackup(VmWorkTakeBackup work) { + BackupVO backupVO = backupDao.findById(work.getBackupId()); + NativeBackupProvider nativeBackupProvider = getNativeBackupProviderForZone(backupVO.getZoneId()); + if (nativeBackupProvider == null) { + return new Pair<>(JobInfo.Status.FAILED, jobManager.marshallResultObject(Boolean.FALSE)); + } + return new Pair<>(JobInfo.Status.SUCCEEDED, jobManager.marshallResultObject(nativeBackupProvider.orchestrateTakeBackup(backupVO, work.isQuiesceVm(), work.isIsolated()))); + } + + @ReflectionUse + public Pair orchestrateDeleteBackup(VmWorkDeleteBackup work) { + BackupVO backupVO = backupDao.findById(work.getBackupId()); + NativeBackupProvider nativeBackupProvider = getNativeBackupProviderForZone(backupVO.getZoneId()); + if (nativeBackupProvider == null) { + return new Pair<>(JobInfo.Status.FAILED, jobManager.marshallResultObject(Boolean.FALSE)); + } + return new Pair<>(JobInfo.Status.SUCCEEDED, jobManager.marshallResultObject(nativeBackupProvider.orchestrateDeleteBackup(backupVO, work.isForced()))); + } + + @ReflectionUse + public Pair orchestrateRestoreVMFromBackup(VmWorkRestoreBackup work) { + BackupVO backupVO = backupDao.findById(work.getBackupId()); + NativeBackupProvider nativeBackupProvider = getNativeBackupProviderForZone(backupVO.getZoneId()); + if (nativeBackupProvider == null) { + return new Pair<>(JobInfo.Status.FAILED, jobManager.marshallResultObject(Boolean.FALSE)); + } + return new Pair<>(JobInfo.Status.SUCCEEDED, jobManager.marshallResultObject(nativeBackupProvider.orchestrateRestoreVMFromBackup(backupVO, + userVmDao.findById(work.getVmId()), work.isQuickRestore(), work.getHostId(), true))); + } + + @ReflectionUse + public Pair orchestrateRestoreBackupVolumeAndAttachToVM(VmWorkRestoreVolumeBackupAndAttach work) { + BackupVO backupVO = backupDao.findById(work.getBackupId()); + NativeBackupProvider nativeBackupProvider = getNativeBackupProviderForZone(backupVO.getZoneId()); + if (nativeBackupProvider == null) { + 
return new Pair<>(JobInfo.Status.FAILED, jobManager.marshallResultObject(Boolean.FALSE)); + } + return new Pair<>(JobInfo.Status.SUCCEEDED, jobManager.marshallResultObject(nativeBackupProvider.orchestrateRestoreBackedUpVolume(backupVO, userVmDao.findById(work.getVmId()), + work.getBackupVolumeInfo(), work.getHostIp(), work.isQuickRestore()))); + } + + protected NativeBackupProvider getNativeBackupProviderForZone(long zoneId) { + return Transaction.execute(TransactionLegacy.CLOUD_DB, (TransactionCallback)status -> { + BackupProvider backupProvider = backupManager.getBackupProvider(zoneId); + return nativeBackupProviderMap.get(backupProvider.getName()); + }); + } +} \ No newline at end of file diff --git a/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java b/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java index beecf90d2b83..bac4b6316bcd 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java +++ b/server/src/main/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelper.java @@ -27,6 +27,8 @@ import com.cloud.user.AccountVO; import com.cloud.user.dao.AccountDao; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.backup.BackupVO; +import org.apache.cloudstack.backup.dao.BackupOfferingDao; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; @@ -36,6 +38,7 @@ import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.heuristics.presetvariables.Account; +import org.apache.cloudstack.storage.heuristics.presetvariables.Backup; import org.apache.cloudstack.storage.heuristics.presetvariables.Domain; import 
org.apache.cloudstack.storage.heuristics.presetvariables.PresetVariables; import org.apache.cloudstack.storage.heuristics.presetvariables.SecondaryStorage; @@ -78,6 +81,9 @@ public class HeuristicRuleHelper { @Inject private DataCenterDao zoneDao; + @Inject + private BackupOfferingDao backupOfferingDao; + /** * Returns the {@link DataStore} object if the zone, specified by the ID, has an active heuristic rule for the given {@link HeuristicType}. * It returns null otherwise. @@ -120,6 +126,10 @@ protected void buildPresetVariables(JsInterpreter jsInterpreter, HeuristicType h presetVariables.setVolume(setVolumePresetVariable((com.cloud.storage.Volume) obj)); accountId = ((com.cloud.storage.Volume) obj).getAccountId(); break; + case BACKUP: + presetVariables.setBackup(setBackupPresetVariable((BackupVO) obj)); + accountId = ((BackupVO) obj).getAccountId(); + break; } presetVariables.setAccount(setAccountPresetVariable(accountId)); presetVariables.setSecondaryStorages(setSecondaryStoragesVariable(zoneId)); @@ -154,6 +164,10 @@ protected void injectPresetVariables(JsInterpreter jsInterpreter, PresetVariable jsInterpreter.injectVariable("volume", presetVariables.getVolume()); } + if (presetVariables.getBackup() != null) { + jsInterpreter.injectVariable("backup", presetVariables.getBackup().toString()); + } + if (presetVariables.getAccount() != null) { jsInterpreter.injectVariable("account", presetVariables.getAccount()); } @@ -211,6 +225,16 @@ protected Snapshot setSnapshotPresetVariable(SnapshotInfo snapshotInfo) { return snapshot; } + protected Backup setBackupPresetVariable(BackupVO backupVO) { + Backup backup = new Backup(); + + backup.setName(backupVO.getName()); + backup.setVirtualSize(backupVO.getProtectedSize()); + backup.setOfferingUuid(backupOfferingDao.findById(backupVO.getBackupOfferingId()).getUuid()); + + return backup; + } + protected Account setAccountPresetVariable(Long accountId) { if (accountId == null) { return null; diff --git 
a/server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/Backup.java b/server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/Backup.java new file mode 100644 index 000000000000..fd8909e3e786 --- /dev/null +++ b/server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/Backup.java @@ -0,0 +1,40 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.heuristics.presetvariables; + +public class Backup extends GenericHeuristicPresetVariable { + + private Long virtualSize; + + private String offeringUuid; + + public Long getVirtualSize() { + return virtualSize; + } + + public void setVirtualSize(Long virtualSize) { + this.virtualSize = virtualSize; + } + + public String getOfferingUuid() { + return offeringUuid; + } + + public void setOfferingUuid(String offeringUuid) { + this.offeringUuid = offeringUuid; + } +} diff --git a/server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/PresetVariables.java b/server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/PresetVariables.java index d04874953272..099c76b7ccb5 100644 --- a/server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/PresetVariables.java +++ b/server/src/main/java/org/apache/cloudstack/storage/heuristics/presetvariables/PresetVariables.java @@ -30,6 +30,8 @@ public class PresetVariables { private Volume volume; + private Backup backup; + public List getSecondaryStorages() { return secondaryStorages; } @@ -62,6 +64,14 @@ public void setVolume(Volume volume) { this.volume = volume; } + public Backup getBackup() { + return backup; + } + + public void setBackup(Backup backup) { + this.backup = backup; + } + public Account getAccount() { return account; } diff --git a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml index b90c40dc95e7..093e272f3a96 100644 --- a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml +++ b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml @@ -343,6 +343,8 @@ + + @@ -398,4 +400,8 @@ + + + + diff --git a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java 
b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java index c186083b8ce1..7278135b89a4 100644 --- a/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java +++ b/server/src/test/java/com/cloud/network/as/AutoScaleManagerImplTest.java @@ -1521,13 +1521,14 @@ public void testDoScaleUp() throws ResourceUnavailableException, InsufficientCap when(loadBalancerVMMapMock.getInstanceId()).thenReturn(virtualMachineId + 1); when(loadBalancingRulesService.assignToLoadBalancer(anyLong(), any(), any(), eq(true))).thenReturn(true); - Mockito.doReturn(new Pair>(userVmMock, null)).when(userVmMgr).startVirtualMachine(virtualMachineId, null, new HashMap<>(), null); + Mockito.doReturn(new Pair>(userVmMock, null)).when(userVmMgr).startVirtualMachine(virtualMachineId, null, + new HashMap<>(), null, false); autoScaleManagerImplSpy.doScaleUp(vmGroupId, 1); Mockito.verify(autoScaleManagerImplSpy).createNewVM(asVmGroupMock); Mockito.verify(loadBalancingRulesService).assignToLoadBalancer(anyLong(), any(), any(), eq(true)); - Mockito.verify(userVmMgr).startVirtualMachine(virtualMachineId, null, new HashMap<>(), null); + Mockito.verify(userVmMgr).startVirtualMachine(virtualMachineId, null, new HashMap<>(), null, false); } } diff --git a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java index 0575b430ef10..8ee7896b812f 100644 --- a/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java +++ b/server/src/test/java/com/cloud/storage/VolumeApiServiceImplTest.java @@ -514,44 +514,44 @@ public void testDetachVolumeFromStoppedXenVm() throws NoSuchFieldException, Ille // Negative test - try to attach non-root non-datadisk volume @Test(expected = InvalidParameterValueException.class) public void attachIncorrectDiskType() throws NoSuchFieldException, IllegalAccessException { - volumeApiServiceImpl.attachVolumeToVM(1L, 5L, 0L, false); + 
volumeApiServiceImpl.attachVolumeToVM(1L, 5L, 0L, false, false); } // Negative test - attach root volume to running vm @Test(expected = InvalidParameterValueException.class) public void attachRootDiskToRunningVm() throws NoSuchFieldException, IllegalAccessException { - volumeApiServiceImpl.attachVolumeToVM(1L, 6L, 0L, false); + volumeApiServiceImpl.attachVolumeToVM(1L, 6L, 0L, false, false); } // Negative test - attach root volume to non-xen vm @Test(expected = InvalidParameterValueException.class) public void attachRootDiskToHyperVm() throws NoSuchFieldException, IllegalAccessException { - volumeApiServiceImpl.attachVolumeToVM(3L, 6L, 0L, false); + volumeApiServiceImpl.attachVolumeToVM(3L, 6L, 0L, false, false); } // Negative test - attach root volume from the managed data store @Test(expected = InvalidParameterValueException.class) public void attachRootDiskOfManagedDataStore() throws NoSuchFieldException, IllegalAccessException { - volumeApiServiceImpl.attachVolumeToVM(2L, 7L, 0L, false); + volumeApiServiceImpl.attachVolumeToVM(2L, 7L, 0L, false, false); } // Negative test - root volume can't be attached to the vm already having a root volume attached @Test(expected = InvalidParameterValueException.class) public void attachRootDiskToVmHavingRootDisk() throws NoSuchFieldException, IllegalAccessException { - volumeApiServiceImpl.attachVolumeToVM(4L, 6L, 0L, false); + volumeApiServiceImpl.attachVolumeToVM(4L, 6L, 0L, false, false); } // Negative test - root volume in uploaded state can't be attached @Test(expected = InvalidParameterValueException.class) public void attachRootInUploadedState() throws NoSuchFieldException, IllegalAccessException { - volumeApiServiceImpl.attachVolumeToVM(2L, 8L, 0L, false); + volumeApiServiceImpl.attachVolumeToVM(2L, 8L, 0L, false, false); } // Positive test - attach ROOT volume in correct state, to the vm not having root volume attached @Test public void attachRootVolumePositive() throws NoSuchFieldException, IllegalAccessException { 
thrown.expect(NullPointerException.class); - volumeApiServiceImpl.attachVolumeToVM(2L, 6L, 0L, false); + volumeApiServiceImpl.attachVolumeToVM(2L, 6L, 0L, false, false); } // Negative test - attach data volume, to the vm on non-kvm hypervisor @@ -560,7 +560,7 @@ public void attachDiskWithEncryptEnabledOfferingonNonKVM() throws NoSuchFieldExc DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); when(diskOffering.getEncrypt()).thenReturn(true); when(_diskOfferingDao.findById(anyLong())).thenReturn(diskOffering); - volumeApiServiceImpl.attachVolumeToVM(2L, 10L, 1L, false); + volumeApiServiceImpl.attachVolumeToVM(2L, 10L, 1L, false, false); } // Positive test - attach data volume, to the vm on kvm hypervisor @@ -570,7 +570,7 @@ public void attachDiskWithEncryptEnabledOfferingOnKVM() throws NoSuchFieldExcept DiskOfferingVO diskOffering = Mockito.mock(DiskOfferingVO.class); when(diskOffering.getEncrypt()).thenReturn(true); when(_diskOfferingDao.findById(anyLong())).thenReturn(diskOffering); - volumeApiServiceImpl.attachVolumeToVM(4L, 10L, 1L, false); + volumeApiServiceImpl.attachVolumeToVM(4L, 10L, 1L, false, false); } // volume not Ready @@ -679,7 +679,7 @@ public void testResourceLimitCheckForUploadedVolume() throws NoSuchFieldExceptio when(_dcDao.findById(anyLong())).thenReturn(zoneWithDisabledLocalStorage); when(zoneWithDisabledLocalStorage.isLocalStorageEnabled()).thenReturn(true); try { - volumeApiServiceImpl.attachVolumeToVM(2L, 9L, null, false); + volumeApiServiceImpl.attachVolumeToVM(2L, 9L, null, false, false); } catch (InvalidParameterValueException e) { Assert.assertEquals(e.getMessage(), ("primary storage resource limit check failed")); } @@ -1310,7 +1310,7 @@ public void validateIfVmHaveBackupsTestExceptionWhenTryToDetachVolumeFromVMWhich try { UserVmVO vm = Mockito.mock(UserVmVO.class); when(vm.getBackupOfferingId()).thenReturn(1l); - volumeApiServiceImpl.checkForBackups(vm, false); + volumeApiServiceImpl.validateIfVmHasBackups(vm, false); } 
catch (Exception e) { Assert.assertEquals("Unable to detach volume, cannot detach volume from a VM that has backups. First remove the VM from the backup offering or set the global configuration 'backup.enable.attach.detach.of.volumes' to true.", e.getMessage()); } @@ -1321,7 +1321,7 @@ public void validateIfVmHaveBackupsTestExceptionWhenTryToAttachVolumeFromVMWhich try { UserVmVO vm = Mockito.mock(UserVmVO.class); when(vm.getBackupOfferingId()).thenReturn(1l); - volumeApiServiceImpl.checkForBackups(vm, true); + volumeApiServiceImpl.validateIfVmHasBackups(vm, true); } catch (Exception e) { Assert.assertEquals("Unable to attach volume, please specify a VM that does not have any backups or set the global configuration 'backup.enable.attach.detach.of.volumes' to true.", e.getMessage()); } @@ -1331,7 +1331,7 @@ public void validateIfVmHaveBackupsTestExceptionWhenTryToAttachVolumeFromVMWhich public void validateIfVmHaveBackupsTestSuccessWhenVMDontHaveBackupOffering() { UserVmVO vm = Mockito.mock(UserVmVO.class); when(vm.getBackupOfferingId()).thenReturn(null); - volumeApiServiceImpl.checkForBackups(vm, true); + volumeApiServiceImpl.validateIfVmHasBackups(vm, true); } @Test @@ -2113,7 +2113,7 @@ public void testCreateVolumeOnSecondaryForAttachIfNeeded_ExistingVolumeDetermine Mockito.when(primaryDataStoreDaoMock.findById(1L)).thenReturn(destPrimaryStorage); VolumeInfo newVolumeOnPrimaryStorage = Mockito.mock(VolumeInfo.class); try { - Mockito.when(volumeOrchestrationService.createVolumeOnPrimaryStorage(vm, volumeToAttach, vm.getHypervisorType(), destPrimaryStorage)) + Mockito.when(volumeOrchestrationService.createVolumeOnPrimaryStorage(vm, volumeToAttach, vm.getHypervisorType(), destPrimaryStorage, null, null)) .thenReturn(newVolumeOnPrimaryStorage); } catch (NoTransitionException nte) { Assert.fail(nte.getMessage()); @@ -2134,7 +2134,7 @@ public void testCreateVolumeOnPrimaryForAttachIfNeeded_UsesGetPoolForAttach() { VolumeInfo newVolumeOnPrimaryStorage = 
Mockito.mock(VolumeInfo.class); try { Mockito.when(volumeOrchestrationService.createVolumeOnPrimaryStorage( - vm, volumeToAttach, vm.getHypervisorType(), destPrimaryStorage)) + vm, volumeToAttach, vm.getHypervisorType(), destPrimaryStorage, null, null)) .thenReturn(newVolumeOnPrimaryStorage); } catch (NoTransitionException nte) { Assert.fail(nte.getMessage()); @@ -2166,7 +2166,7 @@ public void testCreateVolumeOnSecondaryForAttachIfNeeded_CreateVolumeFails_Throw Mockito.doReturn(destPrimaryStorage).when(volumeApiServiceImpl) .getSuitablePoolForAllocatedOrUploadedVolumeForAttach(volumeToAttach, vm); try { - Mockito.when(volumeOrchestrationService.createVolumeOnPrimaryStorage(vm, volumeToAttach, vm.getHypervisorType(), destPrimaryStorage)) + Mockito.when(volumeOrchestrationService.createVolumeOnPrimaryStorage(vm, volumeToAttach, vm.getHypervisorType(), destPrimaryStorage, null, null)) .thenThrow(new NoTransitionException("Mocked exception")); } catch (NoTransitionException nte) { Assert.fail(nte.getMessage()); @@ -2202,7 +2202,7 @@ public void testCreateVolumeOnSecondaryForAttachIfNeeded_NoSuitablePool_ReturnSa Assert.assertSame(volumeToAttach, result); try { Mockito.verify(volumeOrchestrationService, Mockito.never()).createVolumeOnPrimaryStorage(Mockito.any(), - Mockito.any(), Mockito.any(), Mockito.any()); + Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()); } catch (NoTransitionException e) { Assert.fail(); } diff --git a/server/src/test/java/com/cloud/template/TemplateManagerImplTest.java b/server/src/test/java/com/cloud/template/TemplateManagerImplTest.java index 7bdc1e7c6040..d3e6abbbf7b8 100755 --- a/server/src/test/java/com/cloud/template/TemplateManagerImplTest.java +++ b/server/src/test/java/com/cloud/template/TemplateManagerImplTest.java @@ -77,6 +77,7 @@ import org.apache.cloudstack.api.command.user.template.UpdateTemplateCmd; import org.apache.cloudstack.api.command.user.template.UpdateVnfTemplateCmd; import 
org.apache.cloudstack.api.command.user.userdata.LinkUserDataToTemplateCmd; +import org.apache.cloudstack.backup.dao.BackupOfferingDao; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -1012,6 +1013,11 @@ public SnapshotJoinDao snapshotJoinDao() { } + @Bean + public BackupOfferingDao backupOfferingDao() { + return Mockito.mock(BackupOfferingDao.class); + } + public static class Library implements TypeFilter { @Override public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { diff --git a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java index 4edafb3a05a8..e36a4775787e 100644 --- a/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java +++ b/server/src/test/java/com/cloud/vm/UserVmManagerImplTest.java @@ -79,6 +79,7 @@ import org.apache.cloudstack.api.command.user.vm.UpdateVMCmd; import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; import org.apache.cloudstack.backup.BackupManager; +import org.apache.cloudstack.backup.BackupProvider; import org.apache.cloudstack.backup.BackupVO; import org.apache.cloudstack.backup.dao.BackupDao; import org.apache.cloudstack.backup.dao.BackupScheduleDao; @@ -414,6 +415,9 @@ public class UserVmManagerImplTest { @Mock SSHKeyPairDao sshKeyPairDao; + @Mock + private BackupProvider backupProviderMock; + @Mock private VMInstanceVO vmInstanceMock; @@ -3632,12 +3636,12 @@ public void testRestoreVMFromBackup() throws ResourceUnavailableException, Insuf when(vm.getState()).thenReturn(VirtualMachine.State.Running); when(vm.getTemplateId()).thenReturn(templateId); - when(backupManager.restoreBackupToVM(backupId, vmId)).thenReturn(true); + when(backupManager.restoreBackupToVM(backupId, vmId, false)).thenReturn(true); Map params = new HashMap<>(); Pair> vmPair = 
new Pair<>(vm, params); - doReturn(vmPair).when(userVmManagerImpl).startVirtualMachine(anyLong(), isNull(), isNull(), isNull(), anyMap(), isNull()); - doReturn(vmPair).when(userVmManagerImpl).startVirtualMachine(anyLong(), isNull(), isNull(), anyLong(), anyMap(), isNull()); + doReturn(vmPair).when(userVmManagerImpl).startVirtualMachine(anyLong(), isNull(), isNull(), isNull(), anyMap(), isNull(), anyBoolean()); + doReturn(vmPair).when(userVmManagerImpl).startVirtualMachine(anyLong(), isNull(), isNull(), anyLong(), anyMap(), isNull(), anyBoolean()); when(userVmDao.findById(vmId)).thenReturn(vm); when(templateDao.findByIdIncludingRemoved(templateId)).thenReturn(mock(VMTemplateVO.class)); @@ -3645,7 +3649,7 @@ public void testRestoreVMFromBackup() throws ResourceUnavailableException, Insuf assertNotNull(result); assertEquals(vm, result); - Mockito.verify(backupManager).restoreBackupToVM(backupId, vmId); + Mockito.verify(backupManager).restoreBackupToVM(backupId, vmId, false); } @Test diff --git a/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java b/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java index a9c083228e2b..1d5d90f3be66 100644 --- a/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java +++ b/server/src/test/java/org/apache/cloudstack/backup/BackupManagerTest.java @@ -54,6 +54,7 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; +import com.cloud.user.AccountService; import com.cloud.user.AccountVO; import com.cloud.user.DomainManager; import com.cloud.user.ResourceLimitService; @@ -250,6 +251,9 @@ public class BackupManagerTest { @Mock DomainHelper domainHelper; + @Mock + private AccountService accountServiceMock; + private Gson gson; private String[] hostPossibleValues = {"127.0.0.1", "hostname"}; @@ -385,15 +389,16 @@ public void restoreBackedUpVolumeTestHostIpAndDatastoreUuid() { doReturn(new Pair(Boolean.TRUE, "Success")) 
.when(backupProvider).restoreBackedUpVolume(any(Backup.class), any(Backup.VolumeInfo.class), - any(String.class), any(String.class), any(Pair.class)); + any(String.class), any(String.class), any(Pair.class), any(), any(Boolean.class)); - Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeInfo, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); + Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeInfo, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm, + false); assertEquals(Boolean.TRUE, restoreBackedUpVolume.first()); assertEquals("Success", restoreBackedUpVolume.second()); verify(backupProvider, atLeastOnce()).restoreBackedUpVolume(any(Backup.class), any(Backup.VolumeInfo.class), - any(String.class), any(String.class), any(Pair.class)); + any(String.class), any(String.class), any(Pair.class), any(), Mockito.anyBoolean()); } @Test @@ -412,15 +417,16 @@ public void restoreBackedUpVolumeTestHostIpAndDatastoreName() { doReturn(new Pair(Boolean.TRUE, "Success2")) .when(backupProvider).restoreBackedUpVolume(any(Backup.class), any(Backup.VolumeInfo.class), - any(String.class), any(String.class), any(Pair.class)); + any(String.class), any(String.class), any(Pair.class), any(), any(Boolean.class)); - Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeInfo, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); + Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeInfo, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm, + false); assertEquals(Boolean.TRUE, restoreBackedUpVolume.first()); assertEquals("Success2", restoreBackedUpVolume.second()); verify(backupProvider, atLeastOnce()).restoreBackedUpVolume(any(Backup.class), any(Backup.VolumeInfo.class), - any(String.class), any(String.class), any(Pair.class)); + any(String.class), any(String.class), any(Pair.class), any(), Mockito.anyBoolean()); } 
@Test @@ -439,15 +445,16 @@ public void restoreBackedUpVolumeTestHostNameAndDatastoreUuid() { doReturn(new Pair(Boolean.TRUE, "Success3")) .when(backupProvider).restoreBackedUpVolume(any(Backup.class), any(Backup.VolumeInfo.class), - any(String.class), any(String.class), any(Pair.class)); + any(String.class), any(String.class), any(Pair.class), any(), any(Boolean.class)); - Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeInfo, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); + Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeInfo, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm, + false); assertEquals(Boolean.TRUE, restoreBackedUpVolume.first()); assertEquals("Success3", restoreBackedUpVolume.second()); verify(backupProvider, atLeastOnce()).restoreBackedUpVolume(any(Backup.class), any(Backup.VolumeInfo.class), - any(String.class), any(String.class), any(Pair.class)); + any(String.class), any(String.class), any(Pair.class), any(), Mockito.anyBoolean()); } @Test @@ -466,15 +473,16 @@ public void restoreBackedUpVolumeTestHostAndDatastoreName() { doReturn(new Pair(Boolean.TRUE, "Success4")) .when(backupProvider).restoreBackedUpVolume(any(Backup.class), any(Backup.VolumeInfo.class), - any(String.class), any(String.class), any(Pair.class)); + any(String.class), any(String.class), any(Pair.class), any(), any(Boolean.class)); - Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeInfo, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm); + Pair restoreBackedUpVolume = backupManager.restoreBackedUpVolume(volumeInfo, backupVO, backupProvider, hostPossibleValues, datastoresPossibleValues, vm, + false); assertEquals(Boolean.TRUE, restoreBackedUpVolume.first()); assertEquals("Success4", restoreBackedUpVolume.second()); verify(backupProvider, atLeastOnce()).restoreBackedUpVolume(any(Backup.class), any(Backup.VolumeInfo.class), - 
any(String.class), any(String.class), any(Pair.class)); + any(String.class), any(String.class), any(Pair.class), any(), any(Boolean.class)); } @Test @@ -499,9 +507,9 @@ public void tryRestoreVMTestRestoreSucceeded() throws NoTransitionException { Mockito.when(vm.getId()).thenReturn(1L); Mockito.when(offering.getProvider()).thenReturn("veeam"); Mockito.doReturn(backupProvider).when(backupManager).getBackupProvider("veeam"); - Mockito.when(backupProvider.restoreVMFromBackup(vm, backup)).thenReturn(true); + Mockito.when(backupProvider.restoreVMFromBackup(vm, backup, false, null)).thenReturn(true); - backupManager.tryRestoreVM(backup, vm, offering, "Nothing to write here."); + backupManager.tryRestoreVM(backup, vm, offering, "Nothing to write here.", false, null); } } @@ -529,9 +537,9 @@ public void tryRestoreVMTestRestoreFails() throws NoTransitionException { Mockito.when(vm.getId()).thenReturn(1L); Mockito.when(offering.getProvider()).thenReturn("veeam"); Mockito.doReturn(backupProvider).when(backupManager).getBackupProvider("veeam"); - Mockito.when(backupProvider.restoreVMFromBackup(vm, backup)).thenReturn(false); + Mockito.when(backupProvider.restoreVMFromBackup(vm, backup, false, null)).thenReturn(false); try { - backupManager.tryRestoreVM(backup, vm, offering, "Checking message error."); + backupManager.tryRestoreVM(backup, vm, offering, "Checking message error.", false, null); fail("An exception is needed."); } catch (CloudRuntimeException e) { assertEquals("Error restoring VM from backup [Checking message error.].", e.getMessage()); @@ -661,7 +669,7 @@ public void createBackupTestCreateScheduledBackup() throws ResourceAllocationExc when(backup.getId()).thenReturn(backupId); when(backup.getSize()).thenReturn(newBackupSize); when(backupProvider.getName()).thenReturn("testbackupprovider"); - when(backupProvider.takeBackup(vmInstanceVOMock, null)).thenReturn(new Pair<>(true, backup)); + when(backupProvider.takeBackup(vmInstanceVOMock, null, false)).thenReturn(new 
Pair<>(true, backup)); Map backupProvidersMap = new HashMap<>(); backupProvidersMap.put(backupProvider.getName().toLowerCase(), backupProvider); ReflectionTestUtils.setField(backupManager, "backupProvidersMap", backupProvidersMap); @@ -1297,13 +1305,13 @@ public void testRestoreBackupToVM() throws NoTransitionException { when(rootVolume.getPoolId()).thenReturn(poolId); when(volumeDao.findIncludingRemovedByInstanceAndType(vmId, Volume.Type.ROOT)).thenReturn(List.of(rootVolume)); when(primaryDataStoreDao.findById(poolId)).thenReturn(pool); - when(backupProvider.restoreBackupToVM(vm, backup, null, null)).thenReturn(new Pair<>(true, null)); + when(backupProvider.restoreBackupToVM(vm, backup, null, null, false)).thenReturn(new Pair<>(true, null)); try (MockedStatic utils = Mockito.mockStatic(ActionEventUtils.class)) { - boolean result = backupManager.restoreBackupToVM(backupId, vmId); + boolean result = backupManager.restoreBackupToVM(backupId, vmId, false); assertTrue(result); - verify(backupProvider, times(1)).restoreBackupToVM(vm, backup, null, null); + verify(backupProvider, times(1)).restoreBackupToVM(vm, backup, null, null, false); verify(virtualMachineManager, times(1)).stateTransitTo(vm, VirtualMachine.Event.RestoringRequested, hostId); verify(virtualMachineManager, times(1)).stateTransitTo(vm, VirtualMachine.Event.RestoringSuccess, hostId); } catch (CloudRuntimeException e) { @@ -1353,13 +1361,13 @@ public void testRestoreBackupToVMException() throws NoTransitionException { when(rootVolume.getPoolId()).thenReturn(poolId); when(volumeDao.findIncludingRemovedByInstanceAndType(vmId, Volume.Type.ROOT)).thenReturn(List.of(rootVolume)); when(primaryDataStoreDao.findById(poolId)).thenReturn(pool); - when(backupProvider.restoreBackupToVM(vm, backup, null, null)).thenReturn(new Pair<>(false, null)); + when(backupProvider.restoreBackupToVM(vm, backup, null, null, false)).thenReturn(new Pair<>(false, null)); try (MockedStatic utils = 
Mockito.mockStatic(ActionEventUtils.class)) { CloudRuntimeException exception = Assert.assertThrows(CloudRuntimeException.class, - () -> backupManager.restoreBackupToVM(backupId, vmId)); + () -> backupManager.restoreBackupToVM(backupId, vmId, false)); - verify(backupProvider, times(1)).restoreBackupToVM(vm, backup, null, null); + verify(backupProvider, times(1)).restoreBackupToVM(vm, backup, null, null, false); verify(virtualMachineManager, times(1)).stateTransitTo(vm, VirtualMachine.Event.RestoringRequested, hostId); verify(virtualMachineManager, times(1)).stateTransitTo(vm, VirtualMachine.Event.RestoringFailed, hostId); } @@ -2013,7 +2021,6 @@ public void testRestoreBackupSuccess() throws NoTransitionException { when(vm.getHypervisorType()).thenReturn(hypervisorType); when(vm.getState()).thenReturn(VirtualMachine.State.Stopped); when(vm.getRemoved()).thenReturn(null); - when(vm.getBackupOfferingId()).thenReturn(offeringId); BackupOfferingVO offering = mock(BackupOfferingVO.class); when(offering.getProvider()).thenReturn("testbackupprovider"); @@ -2022,13 +2029,13 @@ public void testRestoreBackupSuccess() throws NoTransitionException { when(volumeDao.findByInstance(vmId)).thenReturn(Collections.singletonList(volume)); BackupProvider backupProvider = mock(BackupProvider.class); - when(backupProvider.restoreVMFromBackup(vm, backup)).thenReturn(true); + when(backupProvider.restoreVMFromBackup(vm, backup, false, null)).thenReturn(true); when(backupDao.findById(backupId)).thenReturn(backup); when(vmInstanceDao.findByIdIncludingRemoved(vmId)).thenReturn(vm); when(backupOfferingDao.findByIdIncludingRemoved(offeringId)).thenReturn(offering); when(backupManager.getBackupProvider("testbackupprovider")).thenReturn(backupProvider); - doReturn(true).when(backupManager).importRestoredVM(zoneId, domainId, accountId, userId, vmInstanceName, hypervisorType, backup); + doReturn(true).when(backupManager).importRestoredVM(zoneId, domainId, accountId, userId, vmInstanceName, 
hypervisorType, backup, offering); doNothing().when(backupManager).validateBackupForZone(any()); when(virtualMachineManager.stateTransitTo(any(), any(), any())).thenReturn(true); @@ -2037,14 +2044,14 @@ public void testRestoreBackupSuccess() throws NoTransitionException { Mockito.anyString(), Mockito.anyString(), Mockito.anyLong(), Mockito.anyString(), Mockito.eq(true), Mockito.eq(0))).thenReturn(1L); - boolean result = backupManager.restoreBackup(backupId); + boolean result = backupManager.restoreBackup(backupId, false, null); assertTrue(result); verify(backupDao, times(1)).findById(backupId); verify(vmInstanceDao, times(1)).findByIdIncludingRemoved(vmId); - verify(backupOfferingDao, times(2)).findByIdIncludingRemoved(offeringId); - verify(backupProvider, times(1)).restoreVMFromBackup(vm, backup); - verify(backupManager, times(1)).importRestoredVM(zoneId, domainId, accountId, userId, vmInstanceName, hypervisorType, backup); + verify(backupOfferingDao, times(1)).findByIdIncludingRemoved(offeringId); + verify(backupProvider, times(1)).restoreVMFromBackup(vm, backup, false, null); + verify(backupManager, times(1)).importRestoredVM(zoneId, domainId, accountId, userId, vmInstanceName, hypervisorType, backup, offering); } } @@ -2055,7 +2062,7 @@ public void testRestoreBackupBackupNotFound() { when(backupDao.findById(backupId)).thenReturn(null); CloudRuntimeException exception = Assert.assertThrows(CloudRuntimeException.class, - () -> backupManager.restoreBackup(backupId)); + () -> backupManager.restoreBackup(backupId, false, null)); assertEquals("Backup " + backupId + " does not exist", exception.getMessage()); verify(backupDao, times(1)).findById(backupId); @@ -2072,7 +2079,7 @@ public void testRestoreBackupBackupNotBackedUp() { when(backupDao.findById(backupId)).thenReturn(backup); CloudRuntimeException exception = Assert.assertThrows(CloudRuntimeException.class, - () -> backupManager.restoreBackup(backupId)); + () -> backupManager.restoreBackup(backupId, false, 
null)); assertEquals("Backup should be in BackedUp state", exception.getMessage()); verify(backupDao, times(1)).findById(backupId); @@ -2098,7 +2105,7 @@ public void testRestoreBackupVmExpunging() { doNothing().when(backupManager).validateBackupForZone(any()); CloudRuntimeException exception = Assert.assertThrows(CloudRuntimeException.class, - () -> backupManager.restoreBackup(backupId)); + () -> backupManager.restoreBackup(backupId, false, null)); assertEquals("The Instance from which the backup was taken could not be found.", exception.getMessage()); verify(backupDao, times(1)).findById(backupId); @@ -2125,7 +2132,7 @@ public void testRestoreBackupVmNotStopped() { doNothing().when(backupManager).validateBackupForZone(any()); CloudRuntimeException exception = Assert.assertThrows(CloudRuntimeException.class, - () -> backupManager.restoreBackup(backupId)); + () -> backupManager.restoreBackup(backupId, false, null)); assertEquals("Existing VM should be stopped before being restored from backup", exception.getMessage()); verify(backupDao, times(1)).findById(backupId); @@ -2156,13 +2163,15 @@ public void testRestoreBackupVolumeMismatch() { when(backupDao.findById(backupId)).thenReturn(backup); when(vmInstanceDao.findByIdIncludingRemoved(vmId)).thenReturn(vm); doNothing().when(backupManager).validateBackupForZone(any()); + doReturn(new BackupOfferingVO()).when(backupOfferingDao).findByIdIncludingRemoved(Mockito.anyLong()); + doReturn(backupProvider).when(backupManager).getBackupProvider(Mockito.nullable(String.class)); try (MockedStatic utils = Mockito.mockStatic(ActionEventUtils.class)) { Mockito.when(ActionEventUtils.onStartedActionEvent(Mockito.anyLong(), Mockito.anyLong(), Mockito.anyString(), Mockito.anyString(), Mockito.anyLong(), Mockito.anyString(), Mockito.eq(true), Mockito.eq(0))).thenReturn(1L); CloudRuntimeException exception = Assert.assertThrows(CloudRuntimeException.class, - () -> backupManager.restoreBackup(backupId)); + () -> 
backupManager.restoreBackup(backupId, false, null)); assertEquals("Unable to restore VM with the current backup as the backup has different number of disks as the VM", exception.getMessage()); } diff --git a/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java b/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java index 032e947fdce7..ece7e69f3f76 100644 --- a/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java +++ b/server/src/test/java/org/apache/cloudstack/storage/heuristics/HeuristicRuleHelperTest.java @@ -21,6 +21,7 @@ import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VolumeVO; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.backup.BackupVO; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; @@ -58,6 +59,9 @@ public class HeuristicRuleHelperTest { @Mock VolumeVO volumeVOMock; + @Mock + BackupVO backupVOMock; + @Mock DataStoreManager dataStoreManagerMock; @@ -165,6 +169,21 @@ public void buildPresetVariablesTestWithSnapshotHeuristicTypeShouldSetVolumeAndS Mockito.verify(heuristicRuleHelperSpy, Mockito.times(1)).injectPresetVariables(Mockito.isNull(), Mockito.any(PresetVariables.class)); } + @Test + public void buildPresetVariablesTestWithBackupHeuristicTypeShouldSetBackupAndSecondaryStorageAndAccountPresetVariables() { + Mockito.doNothing().when(heuristicRuleHelperSpy).injectPresetVariables(Mockito.isNull(), Mockito.any(PresetVariables.class)); + Mockito.doReturn(null).when(heuristicRuleHelperSpy).setBackupPresetVariable(Mockito.any(BackupVO.class)); + Mockito.doReturn(null).when(heuristicRuleHelperSpy).setSecondaryStoragesVariable(Mockito.anyLong()); + 
Mockito.doReturn(null).when(heuristicRuleHelperSpy).setAccountPresetVariable(Mockito.anyLong()); + + heuristicRuleHelperSpy.buildPresetVariables(null, HeuristicType.BACKUP, 1L, backupVOMock); + + Mockito.verify(heuristicRuleHelperSpy, Mockito.times(1)).setBackupPresetVariable(Mockito.any(BackupVO.class)); + Mockito.verify(heuristicRuleHelperSpy, Mockito.times(1)).setSecondaryStoragesVariable(Mockito.anyLong()); + Mockito.verify(heuristicRuleHelperSpy, Mockito.times(1)).setAccountPresetVariable(Mockito.anyLong()); + Mockito.verify(heuristicRuleHelperSpy, Mockito.times(1)).injectPresetVariables(Mockito.isNull(), Mockito.any(PresetVariables.class)); + } + @Test public void interpretHeuristicRuleTestHeuristicRuleDoesNotReturnAStringShouldThrowCloudRuntimeException() { String heuristicRule = "1"; diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index 8dd2fa23169b..102d9f3feb6f 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -57,6 +57,7 @@ import com.cloud.agent.api.ConvertSnapshotCommand; import org.apache.cloudstack.framework.security.keystore.KeystoreManager; import org.apache.cloudstack.storage.NfsMountManagerImpl.PathParser; +import org.apache.cloudstack.storage.command.BackupDeleteAnswer; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.DeleteCommand; @@ -78,6 +79,7 @@ import org.apache.cloudstack.storage.template.UploadEntity; import org.apache.cloudstack.storage.template.UploadManager; import 
org.apache.cloudstack.storage.template.UploadManagerImpl; +import org.apache.cloudstack.storage.to.BackupDeltaTO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; @@ -2140,6 +2142,40 @@ protected Answer deleteSnapshot(final DeleteCommand cmd) { } + private Answer deleteBackup(DeleteCommand cmd) { + BackupDeltaTO deltaTo = (BackupDeltaTO) cmd.getData(); + NfsTO nfs = (NfsTO)deltaTo.getDataStore(); + String parent = getRootDir(nfs.getUrl(), _nfsVersion); + if (!parent.endsWith(File.separator)) { + parent += File.separator; + } + String backupRelativePath = deltaTo.getPath(); + if (backupRelativePath.startsWith(File.separator)) { + backupRelativePath = backupRelativePath.substring(1); + } + + String fullDeltaPath = parent + backupRelativePath; + File deltaFile = new File(fullDeltaPath); + logger.debug("Deleting backup at [{}].", fullDeltaPath); + String deltaDeleteResult = deleteLocalFile(fullDeltaPath); + + String details; + if (deltaDeleteResult != null) { + details = String.format("Failed to delete backup delta [%s] with result [%s]. 
", fullDeltaPath, deltaDeleteResult); + logger.warn(details); + return new BackupDeleteAnswer(cmd, false, details); + } + + File deltaDir = deltaFile.getParentFile(); + if (deltaDir.isDirectory() && deltaDir.list().length == 0 && !deltaDir.delete()) { + details = String.format("Unable to delete directory [%s] at path [%s].", deltaDir.getName(), deltaDir.getPath()); + logger.debug(details); + return new BackupDeleteAnswer(cmd, false, details); + } + + return new Answer(cmd, true, null); + } + private String deleteCheckpointIfExists(DataTO obj, String parent) { SnapshotObjectTO snapshotObjectTO = (SnapshotObjectTO) obj; String checkpointPath = snapshotObjectTO.getCheckpointPath(); @@ -2451,6 +2487,8 @@ protected Answer execute(final DeleteCommand cmd) { return deleteVolume(cmd); case SNAPSHOT: return deleteSnapshot(cmd); + case BACKUP: + return deleteBackup(cmd); } return Answer.createUnsupportedCommandAnswer(cmd); } diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 8bcc5d0a94bf..3c63624b004d 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -599,6 +599,7 @@ "label.community": "Community", "label.complete": "Complete", "label.completed": "Completed", +"label.compressionstatus": "Compression status", "label.compute": "Compute", "label.compute.offerings": "Compute Offerings", "label.compute.offering.for.sharedfs.instance": "Compute Offering for Instance", @@ -1995,6 +1996,7 @@ "label.purge.usage.records.error": "Failed while purging usage records", "label.purpose": "Purpose", "label.qostype": "QoS type", +"label.quickrestore": "Quick Restore", "label.quickview": "Quick view", "label.quiescevm": "Quiesce Instance", "label.quiettime": "Quiet time (in sec)", diff --git a/ui/public/locales/pt_BR.json b/ui/public/locales/pt_BR.json index 59123a5e45c3..e38875ed3d4c 100644 --- a/ui/public/locales/pt_BR.json +++ b/ui/public/locales/pt_BR.json @@ -384,6 +384,7 @@ "label.comments": "Coment\u00e1rios", "label.community": 
"Comunidade", "label.complete": "Complete", +"label.compressionstatus": "Estado de compress\u00e3o", "label.compute": "Computa\u00e7\u00e3o", "label.compute.offerings": "Oferta de computa\u00e7\u00e3o", "label.configuration": "Configura\u00e7\u00e3o", @@ -1285,6 +1286,7 @@ "label.publicport": "Porta p\u00fablica", "label.purpose": "Prop\u00f3sito", "label.qostype": "Tipo de QoS", +"label.quickrestore": "Restaura\u00e7\u00e3o R\u00e1pida", "label.quickview": "Visualiza\u00e7\u00e3o r\u00e1pida", "label.quiescevm": "Quiesce VM", "label.quota": "Cota", diff --git a/ui/src/components/view/ListView.vue b/ui/src/components/view/ListView.vue index 66dd6b3db9e6..270ea626673f 100644 --- a/ui/src/components/view/ListView.vue +++ b/ui/src/components/view/ListView.vue @@ -477,6 +477,9 @@ displayText /> + + + + +