From 11c81b09a49d45136c342accc28c50ca279ce5a5 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Thu, 27 Aug 2020 23:31:24 +0530 Subject: [PATCH 01/12] Storage plugin for Dell EMC PowerFlex/ScaleIO (formerly VxFlexOS) This enables support for PowerFlex/ScaleIO (v3.5 onwards) storage pool as a primary storage in CloudStack Other improvements addressed in addition to PowerFlex/ScaleIO support: - Added support for config drives in host cache for KVM => Changed configuration "vm.configdrive.primarypool.enabled" scope from Global to Zone level => Introduced new zone level configuration "vm.configdrive.force.host.cache.use" (default: false) to force host cache for config drives => Introduced new zone level configuration "vm.configdrive.use.host.cache.on.unsupported.pool" (default: true) to use host cache for config drives when storage pool doesn't support config drive => Added new parameter "host.cache.location" (default: /var/cache/cloud) in KVM agent.properties for specifying the host cache path and create config drives on the "/config" directory on the host cache path => Maintain the config drive location and use it when required on any config drive operation (migrate, delete) - Detect virtual size from the template URL while registering direct download qcow2 (of KVM hypervisor) templates - Updated full deployment destination for preparing the network(s) on VM start - Propagate the direct download certificates uploaded to the newly added KVM hosts - Discover the template size for direct download templates using any available host from the zones specified on template registration => When zones are not specified while registering template, template size discovery is performed using any available host, which is picked up randomly from one of the available zones - Release the VM resources when VM is sync-ed to Stopped state on PowerReportMissing (after graceful period) - Retry VM deployment/start when the host cannot grant access to volume/template - Mark 
never-used or downloaded templates as Destroyed on deletion, without sending any DeleteCommand => Do not trigger any DeleteCommand for never-used or downloaded templates as these doesn't exist and cannot be deleted from the datastore - Check the router filesystem is writable or not, before performing health checks => Introduce a new test "filesystem.writable.test" to check the filesystem is writable or not => The router health checks keeps the config info at "/var/cache/cloud" and updates the monitor results at "/root" for health checks, both are different partitions. So, test at both the locations. => Added new script: "filesystem_writable_check.py" at /opt/cloud/bin/ to check the filesystem is writable or not - Fixed NPE issue, template is null for DATA disks. Copy template to target storage for ROOT disk (with template id), skip DATA disk(s) --- agent/conf/agent.properties | 3 + .../cloud/agent/api/to/VirtualMachineTO.java | 14 + .../exception/StorageAccessException.java | 32 + .../cloud/network/element/NetworkElement.java | 4 + .../main/java/com/cloud/storage/Storage.java | 1 + .../main/java/com/cloud/storage/Volume.java | 6 + .../com/cloud/vm/VirtualMachineProfile.java | 10 + .../java/com/cloud/vm/VmDetailConstants.java | 2 + .../apache/cloudstack/alert/AlertService.java | 7 +- .../apache/cloudstack/api/ApiConstants.java | 2 + .../admin/offering/CreateDiskOfferingCmd.java | 23 +- .../offering/CreateServiceOfferingCmd.java | 10 +- .../api/response/UserVmResponse.java | 8 + .../java/com/cloud/storage/StorageTest.java | 5 +- client/pom.xml | 5 + .../agent/api/HandleConfigDriveIsoAnswer.java | 55 + .../api/HandleConfigDriveIsoCommand.java | 15 +- .../resource/virtualnetwork/VRScripts.java | 2 + .../VirtualRoutingResource.java | 17 + .../agent/directdownload/CheckUrlCommand.java | 8 +- .../directdownload/DirectDownloadCommand.java | 27 +- .../storage/to/PrimaryDataStoreTO.java | 7 +- .../cloudstack/storage/to/VolumeObjectTO.java | 10 + 
.../com/cloud/vm/VirtualMachineManager.java | 8 +- .../service/VolumeOrchestrationService.java | 5 +- .../api/storage/DataStoreDriver.java | 5 +- .../api/storage/PrimaryDataStoreDriver.java | 31 + .../api/storage/TemplateDataFactory.java | 4 + .../subsystem/api/storage/TemplateInfo.java | 2 + .../subsystem/api/storage/VolumeInfo.java | 3 + .../subsystem/api/storage/VolumeService.java | 7 +- .../com/cloud/resource/ResourceManager.java | 7 +- .../com/cloud/storage/StorageManager.java | 32 +- .../java/com/cloud/storage/StorageUtil.java | 15 +- .../cloud/vm/VirtualMachineProfileImpl.java | 28 + .../cloud/vm/VirtualMachineManagerImpl.java | 118 +- .../orchestration/VolumeOrchestrator.java | 121 +- .../storage/motion/DataMotionServiceImpl.java | 4 +- ...vmNonManagedStorageDataMotionStrategy.java | 5 + .../StorageSystemDataMotionStrategy.java | 63 +- ...NonManagedStorageSystemDataMotionTest.java | 4 + .../image/TemplateDataFactoryImpl.java | 38 + .../storage/image/TemplateServiceImpl.java | 9 +- .../storage/image/store/TemplateObject.java | 29 + engine/storage/snapshot/pom.xml | 6 + .../snapshot/ScaleIOSnapshotStrategy.java | 93 ++ .../StorageSystemSnapshotStrategy.java | 77 +- .../vmsnapshot/ScaleIOVMSnapshotStrategy.java | 492 ++++++++ ...ngine-storage-snapshot-storage-context.xml | 6 + .../AbstractStoragePoolAllocator.java | 10 +- .../ZoneWideStoragePoolAllocator.java | 7 - .../storage/helper/VMSnapshotHelperImpl.java | 30 + .../image/BaseImageStoreDriverImpl.java | 6 + .../storage/vmsnapshot/VMSnapshotHelper.java | 5 + .../storage/volume/VolumeObject.java | 14 +- .../storage/volume/VolumeServiceImpl.java | 356 +++++- .../download/DirectDownloadService.java | 5 + plugins/hypervisors/kvm/pom.xml | 6 + .../resource/LibvirtComputingResource.java | 51 +- .../kvm/resource/LibvirtStoragePoolDef.java | 4 +- .../resource/LibvirtStoragePoolXMLParser.java | 2 +- .../wrapper/LibvirtCheckUrlCommand.java | 15 +- .../LibvirtGetVolumeStatsCommandWrapper.java | 16 +- 
...ibvirtHandleConfigDriveCommandWrapper.java | 126 +- ...virtPrepareForMigrationCommandWrapper.java | 28 +- .../kvm/storage/IscsiAdmStorageAdaptor.java | 8 +- .../kvm/storage/IscsiAdmStoragePool.java | 7 +- .../kvm/storage/KVMStoragePool.java | 4 +- .../kvm/storage/KVMStoragePoolManager.java | 31 +- .../kvm/storage/KVMStorageProcessor.java | 56 +- .../kvm/storage/LibvirtStorageAdaptor.java | 22 +- .../kvm/storage/LibvirtStoragePool.java | 12 +- .../kvm/storage/ManagedNfsStorageAdaptor.java | 3 +- .../kvm/storage/ScaleIOStorageAdaptor.java | 389 +++++++ .../kvm/storage/ScaleIOStoragePool.java | 181 +++ .../kvm/storage/StorageAdaptor.java | 5 +- .../kvm/storage/ScaleIOStoragePoolTest.java | 152 +++ plugins/pom.xml | 1 + .../ElastistorPrimaryDataStoreDriver.java | 30 + .../driver/DateraPrimaryDataStoreDriver.java | 91 +- .../CloudStackPrimaryDataStoreDriverImpl.java | 31 + .../driver/NexentaPrimaryDataStoreDriver.java | 30 + .../SamplePrimaryDataStoreDriverImpl.java | 29 + plugins/storage/volume/scaleio/pom.xml | 55 + .../datastore/api/ProtectionDomain.java | 57 + .../cloudstack/storage/datastore/api/Sdc.java | 138 +++ .../storage/datastore/api/SdcMappingInfo.java | 39 + .../storage/datastore/api/SnapshotDef.java | 48 + .../storage/datastore/api/SnapshotDefs.java | 30 + .../storage/datastore/api/SnapshotGroup.java | 46 + .../storage/datastore/api/StoragePool.java | 75 ++ .../datastore/api/StoragePoolStatistics.java | 85 ++ .../storage/datastore/api/Volume.java | 152 +++ .../datastore/api/VolumeStatistics.java | 53 + .../client/ScaleIOGatewayClient.java | 88 ++ .../client/ScaleIOGatewayClientImpl.java | 1021 +++++++++++++++++ .../driver/ScaleIOPrimaryDataStoreDriver.java | 898 +++++++++++++++ .../ScaleIOPrimaryDataStoreLifeCycle.java | 460 ++++++++ .../provider/ScaleIOHostListener.java | 148 +++ .../ScaleIOPrimaryDatastoreProvider.java | 77 ++ .../storage/datastore/util/ScaleIOUtil.java | 98 ++ .../storage-volume-scaleio/module.properties | 21 + 
.../spring-storage-volume-scaleio-context.xml | 35 + .../client/ScaleIOGatewayClientImplTest.java | 48 + .../ScaleIOPrimaryDataStoreLifeCycleTest.java | 259 +++++ .../SolidFirePrimaryDataStoreDriver.java | 31 + .../com/cloud/alert/AlertManagerImpl.java | 3 +- .../main/java/com/cloud/api/ApiDBUtils.java | 4 +- .../cloud/api/query/ViewResponseHelper.java | 11 +- .../api/query/dao/UserVmJoinDaoImpl.java | 5 + .../cloud/capacity/CapacityManagerImpl.java | 16 +- .../ConfigurationManagerImpl.java | 34 +- .../deploy/DeploymentPlanningManagerImpl.java | 25 +- .../cloud/hypervisor/HypervisorGuruBase.java | 1 + .../discoverer/LibvirtServerDiscoverer.java | 11 + .../element/ConfigDriveNetworkElement.java | 147 ++- .../VirtualNetworkApplianceManager.java | 2 +- .../VirtualNetworkApplianceManagerImpl.java | 54 +- .../cloud/resource/ResourceManagerImpl.java | 5 +- .../cloud/server/ManagementServerImpl.java | 28 +- .../java/com/cloud/server/StatsCollector.java | 3 +- .../com/cloud/storage/StorageManagerImpl.java | 172 ++- .../cloud/storage/VolumeApiServiceImpl.java | 75 +- .../storage/listener/StoragePoolMonitor.java | 48 +- .../storage/snapshot/SnapshotManagerImpl.java | 2 +- .../template/HypervisorTemplateAdapter.java | 36 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 21 +- .../vm/snapshot/VMSnapshotManagerImpl.java | 64 +- .../download/DirectDownloadManagerImpl.java | 145 ++- .../ConfigDriveNetworkElementTest.java | 6 +- .../resource/MockResourceManagerImpl.java | 2 +- .../vm/snapshot/VMSnapshotManagerTest.java | 20 +- .../resource/NfsSecondaryStorageResource.java | 14 +- .../cloud/bin/filesystem_writable_check.py | 46 + test/integration/plugins/scaleio/README.md | 37 + .../plugins/scaleio/test_scaleio_volumes.py | 834 ++++++++++++++ utils/pom.xml | 1 + .../com/cloud/utils/SerialVersionUID.java | 1 + .../com/cloud/utils/storage/QCOW2Utils.java | 64 ++ 139 files changed, 8688 insertions(+), 459 deletions(-) create mode 100644 
api/src/main/java/com/cloud/exception/StorageAccessException.java create mode 100644 core/src/main/java/com/cloud/agent/api/HandleConfigDriveIsoAnswer.java create mode 100644 engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/ScaleIOSnapshotStrategy.java create mode 100644 engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java create mode 100644 plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePool.java create mode 100644 plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePoolTest.java create mode 100644 plugins/storage/volume/scaleio/pom.xml create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/ProtectionDomain.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/Sdc.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/SdcMappingInfo.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/SnapshotDef.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/SnapshotDefs.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/SnapshotGroup.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/StoragePool.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/StoragePoolStatistics.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/Volume.java create mode 100644 
plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/VolumeStatistics.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClient.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOPrimaryDatastoreProvider.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java create mode 100755 plugins/storage/volume/scaleio/src/main/resources/META-INF/cloudstack/storage-volume-scaleio/module.properties create mode 100755 plugins/storage/volume/scaleio/src/main/resources/META-INF/cloudstack/storage-volume-scaleio/spring-storage-volume-scaleio-context.xml create mode 100644 plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImplTest.java create mode 100644 plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java create mode 100644 systemvm/debian/opt/cloud/bin/filesystem_writable_check.py create mode 100644 test/integration/plugins/scaleio/README.md create mode 100644 test/integration/plugins/scaleio/test_scaleio_volumes.py diff --git a/agent/conf/agent.properties b/agent/conf/agent.properties index 
325e12da5e2b..06d8f3f2a1ef 100644 --- a/agent/conf/agent.properties +++ b/agent/conf/agent.properties @@ -143,6 +143,9 @@ hypervisor.type=kvm # This parameter specifies a directory on the host local storage for temporary storing direct download templates #direct.download.temporary.download.location=/var/lib/libvirt/images +# This parameter specifies a directory on the host local storage for creating and hosting the config drives +#host.cache.location=/var/cache/cloud + # set the rolling maintenance hook scripts directory #rolling.maintenance.hooks.dir=/etc/cloudstack/agent/hooks.d diff --git a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java index efc735ccecf0..c4729383dd4f 100644 --- a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java @@ -20,6 +20,7 @@ import java.util.Map; import java.util.HashMap; +import com.cloud.network.element.NetworkElement; import com.cloud.template.VirtualMachineTemplate.BootloaderType; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Type; @@ -73,6 +74,7 @@ public class VirtualMachineTO { String configDriveLabel = null; String configDriveIsoRootFolder = null; String configDriveIsoFile = null; + NetworkElement.Location configDriveLocation = NetworkElement.Location.SECONDARY; Double cpuQuotaPercentage = null; @@ -349,6 +351,18 @@ public void setConfigDriveIsoFile(String configDriveIsoFile) { this.configDriveIsoFile = configDriveIsoFile; } + public boolean isConfigDriveOnHostCache() { + return (this.configDriveLocation == NetworkElement.Location.HOST); + } + + public NetworkElement.Location getConfigDriveLocation() { + return configDriveLocation; + } + + public void setConfigDriveLocation(NetworkElement.Location configDriveLocation) { + this.configDriveLocation = configDriveLocation; + } + public Map getGuestOsDetails() { return guestOsDetails; } diff --git 
a/api/src/main/java/com/cloud/exception/StorageAccessException.java b/api/src/main/java/com/cloud/exception/StorageAccessException.java new file mode 100644 index 000000000000..eefbcf5518a3 --- /dev/null +++ b/api/src/main/java/com/cloud/exception/StorageAccessException.java @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.exception; + +import com.cloud.utils.SerialVersionUID; + +/** + * If the cause is due to storage pool not accessible on host, calling + * problem with. 
+ * + */ +public class StorageAccessException extends RuntimeException { + private static final long serialVersionUID = SerialVersionUID.StorageAccessException; + + public StorageAccessException(String message) { + super(message); + } +} diff --git a/api/src/main/java/com/cloud/network/element/NetworkElement.java b/api/src/main/java/com/cloud/network/element/NetworkElement.java index 951732f727cd..fa67575edd35 100644 --- a/api/src/main/java/com/cloud/network/element/NetworkElement.java +++ b/api/src/main/java/com/cloud/network/element/NetworkElement.java @@ -39,6 +39,10 @@ */ public interface NetworkElement extends Adapter { + enum Location { + SECONDARY, PRIMARY, HOST + } + Map> getCapabilities(); /** diff --git a/api/src/main/java/com/cloud/storage/Storage.java b/api/src/main/java/com/cloud/storage/Storage.java index 7a229b676746..362cc2cac296 100644 --- a/api/src/main/java/com/cloud/storage/Storage.java +++ b/api/src/main/java/com/cloud/storage/Storage.java @@ -135,6 +135,7 @@ public static enum StoragePoolType { OCFS2(true, false), SMB(true, false), Gluster(true, false), + PowerFlex(true, true), // Dell EMC PowerFlex/ScaleIO (formerly VxFlexOS) ManagedNFS(true, false), DatastoreCluster(true, true); // for VMware, to abstract pool of clusters diff --git a/api/src/main/java/com/cloud/storage/Volume.java b/api/src/main/java/com/cloud/storage/Volume.java index 5979697b5554..9036fa5d6c46 100644 --- a/api/src/main/java/com/cloud/storage/Volume.java +++ b/api/src/main/java/com/cloud/storage/Volume.java @@ -29,6 +29,11 @@ import com.cloud.utils.fsm.StateObject; public interface Volume extends ControlledEntity, Identity, InternalIdentity, BasedOn, StateObject, Displayable { + + // Managed storage volume parameters (specified in the compute/disk offering for PowerFlex) + String BANDWIDTH_LIMIT_IN_MBPS = "bandwidthLimitInMbps"; + String IOPS_LIMIT = "iopsLimit"; + enum Type { UNKNOWN, ROOT, SWAP, DATADISK, ISO }; @@ -79,6 +84,7 @@ public String getDescription() { 
s_fsm.addTransition(new StateMachine2.Transition(Creating, Event.OperationSucceeded, Ready, null)); s_fsm.addTransition(new StateMachine2.Transition(Creating, Event.DestroyRequested, Destroy, null)); s_fsm.addTransition(new StateMachine2.Transition(Creating, Event.CreateRequested, Creating, null)); + s_fsm.addTransition(new StateMachine2.Transition(Ready, Event.CreateRequested, Creating, null)); s_fsm.addTransition(new StateMachine2.Transition(Ready, Event.ResizeRequested, Resizing, null)); s_fsm.addTransition(new StateMachine2.Transition(Resizing, Event.OperationSucceeded, Ready, Arrays.asList(new StateMachine2.Transition.Impact[]{StateMachine2.Transition.Impact.USAGE}))); s_fsm.addTransition(new StateMachine2.Transition(Resizing, Event.OperationFailed, Ready, null)); diff --git a/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java b/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java index c17a716666d4..f87939a13f4b 100644 --- a/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java +++ b/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java @@ -20,7 +20,9 @@ import java.util.Map; import com.cloud.agent.api.to.DiskTO; +import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.network.element.NetworkElement; import com.cloud.offering.ServiceOffering; import com.cloud.template.VirtualMachineTemplate; import com.cloud.template.VirtualMachineTemplate.BootloaderType; @@ -54,6 +56,10 @@ public interface VirtualMachineProfile { void setConfigDriveIsoFile(String isoFile); + NetworkElement.Location getConfigDriveLocation(); + + void setConfigDriveLocation(NetworkElement.Location location); + public static class Param { public static final Param VmPassword = new Param("VmPassword"); @@ -100,6 +106,10 @@ public boolean equals(Object obj) { } } + Long getHostId(); + + void setHost(Host host); + String getHostName(); String getInstanceName(); diff --git a/api/src/main/java/com/cloud/vm/VmDetailConstants.java 
b/api/src/main/java/com/cloud/vm/VmDetailConstants.java index 9991e1f35b4f..64de93900c98 100644 --- a/api/src/main/java/com/cloud/vm/VmDetailConstants.java +++ b/api/src/main/java/com/cloud/vm/VmDetailConstants.java @@ -56,6 +56,8 @@ public interface VmDetailConstants { String PASSWORD = "password"; String ENCRYPTED_PASSWORD = "Encrypted.Password"; + String CONFIG_DRIVE_LOCATION = "configDriveLocation"; + // VM import with nic, disk and custom params for custom compute offering String NIC = "nic"; String NETWORK = "network"; diff --git a/api/src/main/java/org/apache/cloudstack/alert/AlertService.java b/api/src/main/java/org/apache/cloudstack/alert/AlertService.java index 26c3f3cf3ab4..c2cd1b22332e 100644 --- a/api/src/main/java/org/apache/cloudstack/alert/AlertService.java +++ b/api/src/main/java/org/apache/cloudstack/alert/AlertService.java @@ -16,12 +16,12 @@ // under the License. package org.apache.cloudstack.alert; -import com.cloud.capacity.Capacity; -import com.cloud.exception.InvalidParameterValueException; - import java.util.HashSet; import java.util.Set; +import com.cloud.capacity.Capacity; +import com.cloud.exception.InvalidParameterValueException; + public interface AlertService { public static class AlertType { private static Set defaultAlertTypes = new HashSet(); @@ -69,6 +69,7 @@ private AlertType(short type, String name, boolean isDefault) { public static final AlertType ALERT_TYPE_OOBM_AUTH_ERROR = new AlertType((short)29, "ALERT.OOBM.AUTHERROR", true); public static final AlertType ALERT_TYPE_HA_ACTION = new AlertType((short)30, "ALERT.HA.ACTION", true); public static final AlertType ALERT_TYPE_CA_CERT = new AlertType((short)31, "ALERT.CA.CERT", true); + public static final AlertType ALERT_TYPE_VM_SNAPSHOT = new AlertType((short)32, "ALERT.VM.SNAPSHOT", true); public short getType() { return type; diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 
5c3050c85ae5..6d1cda92fe72 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -834,6 +834,8 @@ public class ApiConstants { public static final String TEMPLATETYPE = "templatetype"; public static final String SOURCETEMPLATEID = "sourcetemplateid"; + public static final String POOL_TYPE ="pooltype"; + public enum BootType { UEFI, BIOS; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java index a830777031f7..e7b46be0040f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java @@ -16,8 +16,11 @@ // under the License. package org.apache.cloudstack.api.command.admin.offering; +import java.util.Collection; +import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; +import java.util.Map; import java.util.Set; import org.apache.cloudstack.api.APICommand; @@ -31,6 +34,7 @@ import org.apache.cloudstack.api.response.VsphereStoragePoliciesResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; import org.apache.log4j.Logger; import com.cloud.offering.DiskOffering; @@ -155,7 +159,10 @@ public class CreateDiskOfferingCmd extends BaseCmd { @Parameter(name = ApiConstants.STORAGE_POLICY, type = CommandType.UUID, entityType = VsphereStoragePoliciesResponse.class,required = false, description = "Name of the storage policy defined at vCenter, this is applicable only for VMware", since = "4.15") private Long storagePolicy; -///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.DETAILS, type = CommandType.MAP, description = "details 
to specify disk offering parameters", since = "4.16") + private Map details; + + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -277,6 +284,20 @@ public String getCacheMode() { return cacheMode; } + public Map getDetails() { + Map detailsMap = new HashMap<>(); + if (MapUtils.isNotEmpty(details)) { + Collection props = details.values(); + for (Object prop : props) { + HashMap detail = (HashMap) prop; + for (Map.Entry entry: detail.entrySet()) { + detailsMap.put(entry.getKey(),entry.getValue()); + } + } + } + return detailsMap; + } + public Long getStoragePolicy() { return storagePolicy; } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java index 32194227448e..d2d6f387744c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java @@ -321,7 +321,15 @@ public Map getDetails() { Collection props = details.values(); for (Object prop : props) { HashMap detail = (HashMap) prop; - detailsMap.put(detail.get("key"), detail.get("value")); + // Compatibility with key and value pairs input from API cmd for details map parameter + if (!Strings.isNullOrEmpty(detail.get("key")) && !Strings.isNullOrEmpty(detail.get("value"))) { + detailsMap.put(detail.get("key"), detail.get("value")); + continue; + } + + for (Map.Entry entry: detail.entrySet()) { + detailsMap.put(entry.getKey(),entry.getValue()); + } } } return detailsMap; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java index 44eaba7f313f..7204d5a60f72 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java @@ -310,6 +310,10 @@ public class UserVmResponse extends BaseResponseWithTagInformation implements Co @Param(description = "Guest vm Boot Type") private String bootType; + @SerializedName(ApiConstants.POOL_TYPE) + @Param(description = "the pool type of the virtual machine", since = "4.16") + private String poolType; + public UserVmResponse() { securityGroupList = new LinkedHashSet(); nics = new LinkedHashSet(); @@ -901,4 +905,8 @@ public String getOsDisplayName() { public String getBootMode() { return bootMode; } public void setBootMode(String bootMode) { this.bootMode = bootMode; } + + public String getPoolType() { return poolType; } + + public void setPoolType(String poolType) { this.poolType = poolType; } } diff --git a/api/src/test/java/com/cloud/storage/StorageTest.java b/api/src/test/java/com/cloud/storage/StorageTest.java index 61909e72e96e..bf451696260b 100644 --- a/api/src/test/java/com/cloud/storage/StorageTest.java +++ b/api/src/test/java/com/cloud/storage/StorageTest.java @@ -16,11 +16,12 @@ // under the License. 
package com.cloud.storage; -import com.cloud.storage.Storage.StoragePoolType; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import com.cloud.storage.Storage.StoragePoolType; + public class StorageTest { @Before public void setUp() { @@ -37,6 +38,7 @@ public void isSharedStoragePool() { Assert.assertFalse(StoragePoolType.LVM.isShared()); Assert.assertTrue(StoragePoolType.CLVM.isShared()); Assert.assertTrue(StoragePoolType.RBD.isShared()); + Assert.assertTrue(StoragePoolType.PowerFlex.isShared()); Assert.assertTrue(StoragePoolType.SharedMountPoint.isShared()); Assert.assertTrue(StoragePoolType.VMFS.isShared()); Assert.assertTrue(StoragePoolType.PreSetup.isShared()); @@ -59,6 +61,7 @@ public void supportsOverprovisioningStoragePool() { Assert.assertFalse(StoragePoolType.LVM.supportsOverProvisioning()); Assert.assertFalse(StoragePoolType.CLVM.supportsOverProvisioning()); Assert.assertTrue(StoragePoolType.RBD.supportsOverProvisioning()); + Assert.assertTrue(StoragePoolType.PowerFlex.supportsOverProvisioning()); Assert.assertFalse(StoragePoolType.SharedMountPoint.supportsOverProvisioning()); Assert.assertTrue(StoragePoolType.VMFS.supportsOverProvisioning()); Assert.assertTrue(StoragePoolType.PreSetup.supportsOverProvisioning()); diff --git a/client/pom.xml b/client/pom.xml index 45474369def5..904b98b762d4 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -87,6 +87,11 @@ cloud-plugin-storage-volume-datera ${project.version} + + org.apache.cloudstack + cloud-plugin-storage-volume-scaleio + ${project.version} + org.apache.cloudstack cloud-server diff --git a/core/src/main/java/com/cloud/agent/api/HandleConfigDriveIsoAnswer.java b/core/src/main/java/com/cloud/agent/api/HandleConfigDriveIsoAnswer.java new file mode 100644 index 000000000000..769f886cc046 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/HandleConfigDriveIsoAnswer.java @@ -0,0 +1,55 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more 
contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.agent.api; + +import com.cloud.network.element.NetworkElement; +import com.cloud.utils.exception.ExceptionUtil; + +public class HandleConfigDriveIsoAnswer extends Answer { + + @LogLevel(LogLevel.Log4jLevel.Off) + private NetworkElement.Location location = NetworkElement.Location.SECONDARY; + + public HandleConfigDriveIsoAnswer(final HandleConfigDriveIsoCommand cmd) { + super(cmd); + } + + public HandleConfigDriveIsoAnswer(final HandleConfigDriveIsoCommand cmd, final NetworkElement.Location location) { + super(cmd); + this.location = location; + } + + public HandleConfigDriveIsoAnswer(final HandleConfigDriveIsoCommand cmd, final NetworkElement.Location location, final String details) { + super(cmd, true, details); + this.location = location; + } + + public HandleConfigDriveIsoAnswer(final HandleConfigDriveIsoCommand cmd, final String details) { + super(cmd, false, details); + } + + public HandleConfigDriveIsoAnswer(final HandleConfigDriveIsoCommand cmd, final Exception e) { + this(cmd, ExceptionUtil.toString(e)); + } + + public NetworkElement.Location getConfigDriveLocation() { + return location; + } +} diff --git a/core/src/main/java/com/cloud/agent/api/HandleConfigDriveIsoCommand.java 
b/core/src/main/java/com/cloud/agent/api/HandleConfigDriveIsoCommand.java index 3d8d8f7e10e3..062274f264ef 100644 --- a/core/src/main/java/com/cloud/agent/api/HandleConfigDriveIsoCommand.java +++ b/core/src/main/java/com/cloud/agent/api/HandleConfigDriveIsoCommand.java @@ -25,16 +25,19 @@ public class HandleConfigDriveIsoCommand extends Command { @LogLevel(LogLevel.Log4jLevel.Off) private String isoData; - private String isoFile; private boolean create = false; private DataStoreTO destStore; + private boolean useHostCacheOnUnsupportedPool = false; + private boolean preferHostCache = false; - public HandleConfigDriveIsoCommand(String isoFile, String isoData, DataStoreTO destStore, boolean create) { + public HandleConfigDriveIsoCommand(String isoFile, String isoData, DataStoreTO destStore, boolean useHostCacheOnUnsupportedPool, boolean preferHostCache, boolean create) { this.isoFile = isoFile; this.isoData = isoData; this.destStore = destStore; this.create = create; + this.useHostCacheOnUnsupportedPool = useHostCacheOnUnsupportedPool; + this.preferHostCache = preferHostCache; } @Override @@ -57,4 +60,12 @@ public DataStoreTO getDestStore() { public String getIsoFile() { return isoFile; } + + public boolean isHostCachePreferred() { + return preferHostCache; + } + + public boolean getUseHostCacheOnUnsupportedPool() { + return useHostCacheOnUnsupportedPool; + } } diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java index f8cf6d451b84..834a11c8d6eb 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java @@ -75,4 +75,6 @@ public class VRScripts { public static final String DIAGNOSTICS = "diagnostics.py"; public static final String RETRIEVE_DIAGNOSTICS = "get_diagnostics_files.py"; public static final String VR_FILE_CLEANUP = "cleanup.sh"; + + public 
static final String ROUTER_FILESYSTEM_WRITABLE_CHECK = "filesystem_writable_check.py"; } \ No newline at end of file diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java index 8f4670d6e5da..30293a1f84ab 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java @@ -44,6 +44,7 @@ import org.apache.cloudstack.diagnostics.PrepareFilesAnswer; import org.apache.cloudstack.diagnostics.PrepareFilesCommand; import org.apache.cloudstack.utils.security.KeyStoreUtils; +import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import org.joda.time.Duration; @@ -310,6 +311,22 @@ private GetRouterMonitorResultsAnswer parseLinesForHealthChecks(GetRouterMonitor private GetRouterMonitorResultsAnswer execute(GetRouterMonitorResultsCommand cmd) { String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); + ExecutionResult fsReadOnlyResult = _vrDeployer.executeInVR(routerIp, VRScripts.ROUTER_FILESYSTEM_WRITABLE_CHECK, null); + if (!fsReadOnlyResult.isSuccess()) { + s_logger.warn("Result of " + cmd + " failed with details: " + fsReadOnlyResult.getDetails()); + if (StringUtils.isNotBlank(fsReadOnlyResult.getDetails())) { + final String readOnlyFileSystemError = "Read-only file system"; + if (fsReadOnlyResult.getDetails().contains(readOnlyFileSystemError)) { + return new GetRouterMonitorResultsAnswer(cmd, false, null, readOnlyFileSystemError); + } else { + return new GetRouterMonitorResultsAnswer(cmd, false, null, fsReadOnlyResult.getDetails()); + } + } else { + s_logger.warn("Result of " + cmd + " received empty details."); + return new GetRouterMonitorResultsAnswer(cmd, false, null, "No results available."); + } + } + String args = cmd.shouldPerformFreshChecks() ? 
"true" : "false"; s_logger.info("Fetching health check result for " + routerIp + " and executing fresh checks: " + args); ExecutionResult result = _vrDeployer.executeInVR(routerIp, VRScripts.ROUTER_MONITOR_RESULTS, args); diff --git a/core/src/main/java/org/apache/cloudstack/agent/directdownload/CheckUrlCommand.java b/core/src/main/java/org/apache/cloudstack/agent/directdownload/CheckUrlCommand.java index ed499974f5a8..e8618d54209f 100644 --- a/core/src/main/java/org/apache/cloudstack/agent/directdownload/CheckUrlCommand.java +++ b/core/src/main/java/org/apache/cloudstack/agent/directdownload/CheckUrlCommand.java @@ -23,14 +23,20 @@ public class CheckUrlCommand extends Command { + private String format; private String url; + public String getFormat() { + return format; + } + public String getUrl() { return url; } - public CheckUrlCommand(final String url) { + public CheckUrlCommand(final String format,final String url) { super(); + this.format = format; this.url = url; } diff --git a/core/src/main/java/org/apache/cloudstack/agent/directdownload/DirectDownloadCommand.java b/core/src/main/java/org/apache/cloudstack/agent/directdownload/DirectDownloadCommand.java index aafcb5370a57..7e1ff0b34c40 100644 --- a/core/src/main/java/org/apache/cloudstack/agent/directdownload/DirectDownloadCommand.java +++ b/core/src/main/java/org/apache/cloudstack/agent/directdownload/DirectDownloadCommand.java @@ -23,6 +23,9 @@ import org.apache.cloudstack.storage.command.StorageSubSystemCommand; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.TemplateObjectTO; + +import com.cloud.storage.Storage; public abstract class DirectDownloadCommand extends StorageSubSystemCommand { @@ -32,6 +35,7 @@ public enum DownloadProtocol { private String url; private Long templateId; + private TemplateObjectTO destData; private PrimaryDataStoreTO destPool; private String checksum; private Map headers; @@ -39,11 +43,12 @@ public enum DownloadProtocol { 
private Integer soTimeout; private Integer connectionRequestTimeout; private Long templateSize; - private boolean iso; + private Storage.ImageFormat format; protected DirectDownloadCommand (final String url, final Long templateId, final PrimaryDataStoreTO destPool, final String checksum, final Map headers, final Integer connectTimeout, final Integer soTimeout, final Integer connectionRequestTimeout) { this.url = url; this.templateId = templateId; + // destData is not a constructor parameter; it is populated via setDestData() this.destPool = destPool; this.checksum = checksum; this.headers = headers; @@ -60,6 +65,14 @@ public Long getTemplateId() { return templateId; } + public TemplateObjectTO getDestData() { + return destData; + } + + public void setDestData(TemplateObjectTO destData) { + this.destData = destData; + } + public PrimaryDataStoreTO getDestPool() { return destPool; } @@ -104,12 +117,12 @@ public void setTemplateSize(Long templateSize) { this.templateSize = templateSize; } - public boolean isIso() { - return iso; + public Storage.ImageFormat getFormat() { + return format; } - public void setIso(boolean iso) { - this.iso = iso; + public void setFormat(Storage.ImageFormat format) { + this.format = format; } @Override @@ -120,4 +133,8 @@ public void setExecuteInSequence(boolean inSeq) { public boolean executeInSequence() { return false; } + + public int getWaitInMillSeconds() { + return getWait() * 1000; + } } diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java index 7dab8d9da036..0bb5b7977703 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java @@ -19,12 +19,13 @@ package org.apache.cloudstack.storage.to; +import java.util.Map; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; + import com.cloud.agent.api.to.DataStoreTO; import 
com.cloud.storage.DataStoreRole; import com.cloud.storage.Storage.StoragePoolType; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; - -import java.util.Map; public class PrimaryDataStoreTO implements DataStoreTO { public static final String MANAGED = PrimaryDataStore.MANAGED; diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java index a076b8049634..36c35e572735 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java @@ -43,6 +43,7 @@ public class VolumeObjectTO implements DataTO { private String chainInfo; private Storage.ImageFormat format; private Storage.ProvisioningType provisioningType; + private Long poolId; private long id; private Long deviceId; @@ -89,6 +90,7 @@ public VolumeObjectTO(VolumeInfo volume) { setId(volume.getId()); format = volume.getFormat(); provisioningType = volume.getProvisioningType(); + poolId = volume.getPoolId(); bytesReadRate = volume.getBytesReadRate(); bytesReadRateMax = volume.getBytesReadRateMax(); bytesReadRateMaxLength = volume.getBytesReadRateMaxLength(); @@ -227,6 +229,14 @@ public void setProvisioningType(Storage.ProvisioningType provisioningType){ this.provisioningType = provisioningType; } + public Long getPoolId(){ + return poolId; + } + + public void setPoolId(Long poolId){ + this.poolId = poolId; + } + @Override public String toString() { return new StringBuilder("volumeTO[uuid=").append(uuid).append("|path=").append(path).append("|datastore=").append(dataStore).append("]").toString(); diff --git a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java index 463d3a78b216..3ca300804f79 100644 --- a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java +++ 
b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java @@ -58,7 +58,13 @@ public interface VirtualMachineManager extends Manager { "The default label name for the config drive", false); ConfigKey VmConfigDriveOnPrimaryPool = new ConfigKey<>("Advanced", Boolean.class, "vm.configdrive.primarypool.enabled", "false", - "If config drive need to be created and hosted on primary storage pool. Currently only supported for KVM.", true); + "If config drive need to be created and hosted on primary storage pool. Currently only supported for KVM.", true, ConfigKey.Scope.Zone); + + ConfigKey VmConfigDriveUseHostCacheOnUnsupportedPool = new ConfigKey<>("Advanced", Boolean.class, "vm.configdrive.use.host.cache.on.unsupported.pool", "true", + "If true, config drive is created on the host cache storage when vm.configdrive.primarypool.enabled is true and the primary pool type doesn't support config drive.", true, ConfigKey.Scope.Zone); + + ConfigKey VmConfigDriveForceHostCacheUse = new ConfigKey<>("Advanced", Boolean.class, "vm.configdrive.force.host.cache.use", "false", + "If true, config drive is forced to create on the host cache storage. 
Currently only supported for KVM.", true, ConfigKey.Scope.Zone); ConfigKey ResoureCountRunningVMsonly = new ConfigKey("Advanced", Boolean.class, "resource.count.running.vms.only", "false", "Count the resources of only running VMs in resource limitation.", true); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java index ee264ac292bb..c6b96bca3e66 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java @@ -33,6 +33,7 @@ import com.cloud.deploy.DeployDestination; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientStorageCapacityException; +import com.cloud.exception.StorageAccessException; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -104,6 +105,8 @@ DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Lon void release(VirtualMachineProfile profile); + void release(long vmId, long hostId); + void cleanupVolumes(long vmId) throws ConcurrentOperationException; void revokeAccess(DataObject dataObject, Host host, DataStore dataStore); @@ -116,7 +119,7 @@ DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Lon void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest); - void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException; + void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException, 
StorageAccessException; boolean canVmRestartOnAnotherServer(long vmId); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java index 3d73721c74d7..b197afad863a 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java @@ -25,6 +25,7 @@ import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; +import com.cloud.host.Host; public interface DataStoreDriver { Map getCapabilities(); @@ -37,7 +38,9 @@ public interface DataStoreDriver { void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback callback); - void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback); + void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback callback); + + void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback); boolean canCopy(DataObject srcData, DataObject destData); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java index 6021a4391783..622dda31f987 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java @@ -23,6 +23,7 @@ import com.cloud.host.Host; import com.cloud.storage.StoragePool; +import com.cloud.utils.Pair; public interface PrimaryDataStoreDriver extends DataStoreDriver { enum QualityOfServiceState { MIGRATION, NO_MIGRATION } @@ -72,4 +73,34 @@ enum QualityOfServiceState { MIGRATION, NO_MIGRATION } 
void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback callback); void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState); + + /** + * intended for managed storage + * returns true if the storage can provide the stats (capacity and used bytes) + */ + boolean canProvideStorageStats(); + + /** + * intended for managed storage + * returns the total capacity and used size in bytes + */ + Pair getStorageStats(StoragePool storagePool); + + /** + * intended for managed storage + * returns true if the storage can provide the volume stats (physical and virtual size) + */ + boolean canProvideVolumeStats(); + + /** + * intended for managed storage + * returns the volume's physical and virtual size in bytes + */ + Pair getVolumeStats(StoragePool storagePool, String volumeId); + + /** + * intended for managed storage + * returns true if the host can access the storage pool + */ + boolean canHostAccessStoragePool(Host host, StoragePool pool); } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateDataFactory.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateDataFactory.java index 4d258f3b6d0b..9584d7c6b406 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateDataFactory.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateDataFactory.java @@ -23,6 +23,8 @@ import com.cloud.storage.DataStoreRole; public interface TemplateDataFactory { + TemplateInfo getTemplate(long templateId); + TemplateInfo getTemplate(long templateId, DataStore store); TemplateInfo getReadyTemplateOnImageStore(long templateId, Long zoneId); @@ -39,6 +41,8 @@ public interface TemplateDataFactory { TemplateInfo getReadyBypassedTemplateOnPrimaryStore(long templateId, Long poolId, Long hostId); + TemplateInfo 
getReadyBypassedTemplateOnManagedStorage(long templateId, TemplateInfo templateOnPrimary, Long poolId, Long hostId); + boolean isTemplateMarkedForDirectDownload(long templateId); TemplateInfo getTemplateOnPrimaryStorage(long templateId, DataStore store, String configuration); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java index 1e4a1b7373a6..cc8e111d5499 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java @@ -28,6 +28,8 @@ public interface TemplateInfo extends DataObject, VirtualMachineTemplate { boolean isDirectDownload(); + boolean canBeDeletedFromDataStore(); + boolean isDeployAsIs(); String getDeployAsIsConfiguration(); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java index b13812282652..eafc3b7e85c0 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java @@ -22,6 +22,7 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.offering.DiskOffering.DiskCacheMode; import com.cloud.storage.MigrationOptions; +import com.cloud.storage.Storage; import com.cloud.storage.Volume; import com.cloud.vm.VirtualMachine; @@ -35,6 +36,8 @@ public interface VolumeInfo extends DataObject, Volume { HypervisorType getHypervisorType(); + Storage.StoragePoolType getStoragePoolType(); + Long getLastPoolId(); String getAttachedVmName(); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java 
b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java index e8b533db0fd6..d194bbbc1f9c 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java @@ -25,6 +25,7 @@ import org.apache.cloudstack.storage.command.CommandResult; import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.exception.StorageAccessException; import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.offering.DiskOffering; @@ -62,13 +63,17 @@ public VolumeInfo getVolume() { */ AsyncCallFuture expungeVolumeAsync(VolumeInfo volume); + void ensureVolumeIsExpungeReady(long volumeId); + boolean cloneVolume(long volumeId, long baseVolId); AsyncCallFuture createVolumeFromSnapshot(VolumeInfo volume, DataStore store, SnapshotInfo snapshot); VolumeEntity getVolumeEntity(long volumeId); - AsyncCallFuture createManagedStorageVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId, TemplateInfo srcTemplateInfo, long destHostId); + TemplateInfo createManagedStorageTemplate(long srcTemplateId, long destDataStoreId, long destHostId) throws StorageAccessException; + + AsyncCallFuture createManagedStorageVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId, TemplateInfo srcTemplateInfo, long destHostId) throws StorageAccessException; AsyncCallFuture createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId, TemplateInfo template); diff --git a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java index db7a27ff41c0..ade2eeb3f84a 100755 --- a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java +++ b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java @@ -21,6 +21,9 @@ import java.util.List; 
import java.util.Map; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; + import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; import com.cloud.agent.api.VgpuTypesInfo; @@ -38,8 +41,6 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.resource.ResourceState.Event; import com.cloud.utils.fsm.NoTransitionException; -import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.framework.config.Configurable; /** * ResourceManager manages how physical resources are organized within the @@ -204,7 +205,7 @@ public interface ResourceManager extends ResourceService, Configurable { */ HashMap> getGPUStatistics(HostVO host); - HostVO findOneRandomRunningHostByHypervisor(HypervisorType type); + HostVO findOneRandomRunningHostByHypervisor(HypervisorType type, Long dcId); boolean cancelMaintenance(final long hostId); } diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 7455f22a5ad4..39e8aec633a4 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -109,6 +109,24 @@ public interface StorageManager extends StorageService { ConfigKey.Scope.Cluster, null); + ConfigKey STORAGE_POOL_DISK_WAIT = new ConfigKey<>(Integer.class, + "storage.pool.disk.wait", + "Storage", + "60", + "Timeout (in secs) for the storage pool disk (of managed pool) to become available in the host. Currently only supported for PowerFlex.", + true, + ConfigKey.Scope.StoragePool, + null); + + ConfigKey STORAGE_POOL_CLIENT_TIMEOUT = new ConfigKey<>(Integer.class, + "storage.pool.client.timeout", + "Storage", + "60", + "Timeout (in secs) for the storage pool client timeout (for managed pools). 
Currently only supported for PowerFlex.", + true, + ConfigKey.Scope.StoragePool, + null); + ConfigKey PRIMARY_STORAGE_DOWNLOAD_WAIT = new ConfigKey("Storage", Integer.class, "primary.storage.download.wait", "10800", "In second, timeout for download template to primary storage", false); @@ -144,6 +162,8 @@ public interface StorageManager extends StorageService { Pair sendToPool(StoragePool pool, long[] hostIdsToTryFirst, List hostIdsToAvoid, Command cmd) throws StorageUnavailableException; + public Answer getVolumeStats(StoragePool pool, Command cmd); + /** * Checks if a host has running VMs that are using its local storage pool. * @return true if local storage is active on the host @@ -172,6 +192,12 @@ public interface StorageManager extends StorageService { StoragePoolVO findLocalStorageOnHost(long hostId); + List findStoragePoolsConnectedToHost(long hostId); + + boolean canHostAccessStoragePool(Host host, StoragePool pool); + + Host getHost(long hostId); + Host updateSecondaryStorage(long secStorageId, String newUrl); void removeStoragePoolFromCluster(long hostId, String iScsiName, StoragePool storagePool); @@ -210,7 +236,9 @@ public interface StorageManager extends StorageService { */ boolean storagePoolHasEnoughSpace(List volume, StoragePool pool, Long clusterId); - boolean storagePoolHasEnoughSpaceForResize(StoragePool pool, long currentSize, long newSiz); + boolean storagePoolHasEnoughSpaceForResize(StoragePool pool, long currentSize, long newSize); + + boolean storagePoolCompatibleWithVolumePool(StoragePool pool, Volume volume); boolean isStoragePoolComplaintWithStoragePolicy(List volumes, StoragePool pool) throws StorageUnavailableException; @@ -218,6 +246,8 @@ public interface StorageManager extends StorageService { void connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; + void disconnectHostFromSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; + 
void createCapacityEntry(long poolId); DataStore createLocalStorage(Host host, StoragePoolInfo poolInfo) throws ConnectionException; diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageUtil.java b/engine/components-api/src/main/java/com/cloud/storage/StorageUtil.java index 97354e2ab8d3..044ae3c3fd96 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageUtil.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageUtil.java @@ -16,6 +16,14 @@ // under the License. package com.cloud.storage; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.commons.collections.CollectionUtils; + import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.host.HostVO; @@ -25,13 +33,6 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.dao.VMInstanceDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.commons.collections.CollectionUtils; - -import java.util.List; -import javax.inject.Inject; - public class StorageUtil { @Inject private ClusterDao clusterDao; @Inject private HostDao hostDao; diff --git a/engine/components-api/src/main/java/com/cloud/vm/VirtualMachineProfileImpl.java b/engine/components-api/src/main/java/com/cloud/vm/VirtualMachineProfileImpl.java index 4d03396c1cbb..efe4e2e570c9 100644 --- a/engine/components-api/src/main/java/com/cloud/vm/VirtualMachineProfileImpl.java +++ b/engine/components-api/src/main/java/com/cloud/vm/VirtualMachineProfileImpl.java @@ -22,7 +22,9 @@ import java.util.Map; import com.cloud.agent.api.to.DiskTO; +import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.network.element.NetworkElement; import com.cloud.offering.ServiceOffering; import 
com.cloud.service.ServiceOfferingVO; import com.cloud.template.VirtualMachineTemplate; @@ -49,6 +51,8 @@ public class VirtualMachineProfileImpl implements VirtualMachineProfile { Float cpuOvercommitRatio = 1.0f; Float memoryOvercommitRatio = 1.0f; + Host _host = null; + VirtualMachine.Type _type; List vmData = null; @@ -57,6 +61,7 @@ public class VirtualMachineProfileImpl implements VirtualMachineProfile { String configDriveIsoBaseLocation = "/tmp/"; String configDriveIsoRootFolder = null; String configDriveIsoFile = null; + NetworkElement.Location configDriveLocation = NetworkElement.Location.SECONDARY; public VirtualMachineProfileImpl(VirtualMachine vm, VirtualMachineTemplate template, ServiceOffering offering, Account owner, Map params) { _vm = vm; @@ -219,6 +224,19 @@ public Object getParameter(Param name) { return _params.get(name); } + @Override + public Long getHostId() { + if (_host != null) { + return _host.getId(); + } + return _vm.getHostId(); + } + + @Override + public void setHost(Host host) { + this._host = host; + } + @Override public String getHostName() { return _vm.getHostName(); @@ -311,4 +329,14 @@ public String getConfigDriveIsoFile() { public void setConfigDriveIsoFile(String isoFile) { this.configDriveIsoFile = isoFile; } + + @Override + public NetworkElement.Location getConfigDriveLocation() { + return configDriveLocation; + } + + @Override + public void setConfigDriveLocation(NetworkElement.Location location) { + this.configDriveLocation = location; + } } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index de1ef20f883e..dfec0b1de778 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -156,6 +156,7 @@ import com.cloud.exception.InvalidParameterValueException; import 
com.cloud.exception.OperationTimedoutException; import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.StorageAccessException; import com.cloud.exception.StorageUnavailableException; import com.cloud.ha.HighAvailabilityManager; import com.cloud.ha.HighAvailabilityManager.WorkType; @@ -743,12 +744,11 @@ public void start(final String vmUuid, final Map> getVolumesToDisconnect(VirtualMachine vm) { info.put(DiskTO.STORAGE_HOST, storagePool.getHostAddress()); info.put(DiskTO.STORAGE_PORT, String.valueOf(storagePool.getPort())); info.put(DiskTO.IQN, volume.get_iScsiName()); + info.put(DiskTO.PROTOCOL_TYPE, (volume.getPoolType() != null) ? volume.getPoolType().toString() : null); volumesToDisconnect.add(info); } @@ -1762,20 +1766,34 @@ protected boolean cleanup(final VirtualMachineGuru guru, final VirtualMachinePro } } } finally { - try { - _networkMgr.release(profile, cleanUpEvenIfUnableToStop); - s_logger.debug("Successfully released network resources for the vm " + vm); - } catch (final Exception e) { - s_logger.warn("Unable to release some network resources.", e); - } - - volumeMgr.release(profile); - s_logger.debug(String.format("Successfully cleaned up resources for the VM %s in %s state", vm, state)); + releaseVmResources(profile, cleanUpEvenIfUnableToStop); } return true; } + protected void releaseVmResources(final VirtualMachineProfile profile, final boolean forced) { + final VirtualMachine vm = profile.getVirtualMachine(); + final State state = vm.getState(); + try { + _networkMgr.release(profile, forced); + s_logger.debug(String.format("Successfully released network resources for the VM %s in %s state", vm, state)); + } catch (final Exception e) { + s_logger.warn(String.format("Unable to release some network resources for the VM %s in %s state", vm, state), e); + } + + try { + if (vm.getHypervisorType() != HypervisorType.BareMetal) { + volumeMgr.release(profile); + s_logger.debug(String.format("Successfully released storage 
resources for the VM %s in %s state", vm, state)); + } + } catch (final Exception e) { + s_logger.warn(String.format("Unable to release storage resources for the VM %s in %s state", vm, state), e); + } + + s_logger.debug(String.format("Successfully cleaned up resources for the VM %s in %s state", vm, state)); + } + @Override public void advanceStop(final String vmUuid, final boolean cleanUpEvenIfUnableToStop) throws AgentUnavailableException, OperationTimedoutException, ConcurrentOperationException { @@ -1985,21 +2003,7 @@ private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnabl s_logger.debug(vm + " is stopped on the host. Proceeding to release resource held."); } - try { - _networkMgr.release(profile, cleanUpEvenIfUnableToStop); - s_logger.debug("Successfully released network resources for the vm " + vm); - } catch (final Exception e) { - s_logger.warn("Unable to release some network resources.", e); - } - - try { - if (vm.getHypervisorType() != HypervisorType.BareMetal) { - volumeMgr.release(profile); - s_logger.debug("Successfully released storage resources for the vm " + vm); - } - } catch (final Exception e) { - s_logger.warn("Unable to release storage resources.", e); - } + releaseVmResources(profile, cleanUpEvenIfUnableToStop); try { if (work != null) { @@ -2603,11 +2607,14 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy } final VirtualMachineProfile vmSrc = new VirtualMachineProfileImpl(vm); + vmSrc.setHost(fromHost); for (final NicProfile nic : _networkMgr.getNicProfiles(vm)) { vmSrc.addNic(nic); } final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm, null, _offeringDao.findById(vm.getId(), vm.getServiceOfferingId()), null, null); + profile.setHost(dest.getHost()); + _networkMgr.prepareNicForMigration(profile, dest); volumeMgr.prepareForMigration(profile, dest); profile.setConfigDriveLabel(VmConfigDriveLabel.value()); @@ -2635,6 +2642,7 @@ protected void migrate(final 
VMInstanceVO vm, final long srcHostId, final Deploy } finally { if (pfma == null) { _networkMgr.rollbackNicForMigration(vmSrc, profile); + volumeMgr.release(vm.getId(), dstHostId); work.setStep(Step.Done); _workDao.update(work.getId(), work); } @@ -2644,15 +2652,21 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy try { if (vm == null || vm.getHostId() == null || vm.getHostId() != srcHostId || !changeState(vm, Event.MigrationRequested, dstHostId, work, Step.Migrating)) { _networkMgr.rollbackNicForMigration(vmSrc, profile); + if (vm != null) { + volumeMgr.release(vm.getId(), dstHostId); + } + s_logger.info("Migration cancelled because state has changed: " + vm); throw new ConcurrentOperationException("Migration cancelled because state has changed: " + vm); } } catch (final NoTransitionException e1) { _networkMgr.rollbackNicForMigration(vmSrc, profile); + volumeMgr.release(vm.getId(), dstHostId); s_logger.info("Migration cancelled because " + e1.getMessage()); throw new ConcurrentOperationException("Migration cancelled because " + e1.getMessage()); } catch (final CloudRuntimeException e2) { _networkMgr.rollbackNicForMigration(vmSrc, profile); + volumeMgr.release(vm.getId(), dstHostId); s_logger.info("Migration cancelled because " + e2.getMessage()); work.setStep(Step.Done); _workDao.update(work.getId(), work); @@ -2720,6 +2734,7 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy if (!migrated) { s_logger.info("Migration was unsuccessful. 
Cleaning up: " + vm); _networkMgr.rollbackNicForMigration(vmSrc, profile); + volumeMgr.release(vm.getId(), dstHostId); _alertMgr.sendAlert(alertType, fromHost.getDataCenterId(), fromHost.getPodId(), "Unable to migrate vm " + vm.getInstanceName() + " from host " + fromHost.getName() + " in zone " + dest.getDataCenter().getName() + " and pod " + @@ -2737,6 +2752,7 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy } } else { _networkMgr.commitNicForMigration(vmSrc, profile); + volumeMgr.release(vm.getId(), srcHostId); _networkMgr.setHypervisorHostname(profile, dest, true); } @@ -3026,8 +3042,16 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo final Cluster cluster = _clusterDao.findById(destHost.getClusterId()); final DeployDestination destination = new DeployDestination(dc, pod, cluster, destHost); + final VirtualMachineProfile vmSrc = new VirtualMachineProfileImpl(vm); + vmSrc.setHost(srcHost); + for (final NicProfile nic : _networkMgr.getNicProfiles(vm)) { + vmSrc.addNic(nic); + } + + final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm, null, _offeringDao.findById(vm.getId(), vm.getServiceOfferingId()), null, null); + profile.setHost(destHost); + // Create a map of which volume should go in which storage pool. - final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); final Map volumeToPoolMap = createMappingVolumeAndStoragePool(profile, destHost, volumeToPool); // If none of the volumes have to be migrated, fail the call. Administrator needs to make a call for migrating @@ -3055,7 +3079,6 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo work.setResourceId(destHostId); work = _workDao.persist(work); - // Put the vm in migrating state. 
vm.setLastHostId(srcHostId); vm.setPodIdToDeployIn(destHost.getPodId()); @@ -3127,6 +3150,9 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo } finally { if (!migrated) { s_logger.info("Migration was unsuccessful. Cleaning up: " + vm); + _networkMgr.rollbackNicForMigration(vmSrc, profile); + volumeMgr.release(vm.getId(), destHostId); + _alertMgr.sendAlert(alertType, srcHost.getDataCenterId(), srcHost.getPodId(), "Unable to migrate vm " + vm.getInstanceName() + " from host " + srcHost.getName() + " in zone " + dc.getName() + " and pod " + dc.getName(), "Migrate Command failed. Please check logs."); @@ -3141,6 +3167,8 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo } _networkMgr.setHypervisorHostname(profile, destination, false); } else { + _networkMgr.commitNicForMigration(vmSrc, profile); + volumeMgr.release(vm.getId(), srcHostId); _networkMgr.setHypervisorHostname(profile, destination, true); } @@ -3415,7 +3443,7 @@ private void orchestrateReboot(final String vmUuid, final Map[] getConfigKeys() { - return new ConfigKey[] {ClusterDeltaSyncInterval, StartRetry, VmDestroyForcestop, VmOpCancelInterval, VmOpCleanupInterval, VmOpCleanupWait, - VmOpLockStateRetry, - VmOpWaitInterval, ExecuteInSequence, VmJobCheckInterval, VmJobTimeout, VmJobStateReportInterval, VmConfigDriveLabel, VmConfigDriveOnPrimaryPool, HaVmRestartHostUp, - ResoureCountRunningVMsonly, AllowExposeHypervisorHostname, AllowExposeHypervisorHostnameAccountLevel, - VmServiceOfferingMaxCPUCores, VmServiceOfferingMaxRAMSize }; + return new ConfigKey[] { ClusterDeltaSyncInterval, StartRetry, VmDestroyForcestop, VmOpCancelInterval, VmOpCleanupInterval, VmOpCleanupWait, + VmOpLockStateRetry, VmOpWaitInterval, ExecuteInSequence, VmJobCheckInterval, VmJobTimeout, VmJobStateReportInterval, + VmConfigDriveLabel, VmConfigDriveOnPrimaryPool, VmConfigDriveForceHostCacheUse, VmConfigDriveUseHostCacheOnUnsupportedPool, + HaVmRestartHostUp, 
ResoureCountRunningVMsonly, AllowExposeHypervisorHostname, AllowExposeHypervisorHostnameAccountLevel, + VmServiceOfferingMaxCPUCores, VmServiceOfferingMaxRAMSize }; } public List getStoragePoolAllocators() { @@ -4777,12 +4805,12 @@ private void handlePowerOffReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { String.format("VM %s is at %s and we received a %s report while there is no pending jobs on it" , vm.getInstanceName(), vm.getState(), vm.getPowerState())); } - if(vm.isHaEnabled() && vm.getState() == State.Running + if (vm.isHaEnabled() && vm.getState() == State.Running && HaVmRestartHostUp.value() && vm.getHypervisorType() != HypervisorType.VMware && vm.getHypervisorType() != HypervisorType.Hyperv) { s_logger.info("Detected out-of-band stop of a HA enabled VM " + vm.getInstanceName() + ", will schedule restart"); - if(!_haMgr.hasPendingHaWork(vm.getId())) { + if (!_haMgr.hasPendingHaWork(vm.getId())) { _haMgr.scheduleRestart(vm, true); } else { s_logger.info("VM " + vm.getInstanceName() + " already has an pending HA task working on it"); @@ -4791,13 +4819,20 @@ private void handlePowerOffReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { } // not when report is missing - if(PowerState.PowerOff.equals(vm.getPowerState())) { + if (PowerState.PowerOff.equals(vm.getPowerState())) { final VirtualMachineGuru vmGuru = getVmGuru(vm); final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); if (!sendStop(vmGuru, profile, true, true)) { // In case StopCommand fails, don't proceed further return; + } else { + // Release resources on StopCommand success + releaseVmResources(profile, true); } + } else if (PowerState.PowerReportMissing.equals(vm.getPowerState())) { + final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); + // VM will be sync-ed to Stopped state, release the resources + releaseVmResources(profile, true); } try { @@ -5574,10 +5609,9 @@ private Pair orchestrateStart(final VmWorkStart work) th 
s_logger.trace(String.format("orchestrating VM start for '%s' %s set to %s", vm.getInstanceName(), VirtualMachineProfile.Param.BootIntoSetup, enterSetup)); } - try{ + try { orchestrateStart(vm.getUuid(), work.getParams(), work.getPlan(), _dpMgr.getDeploymentPlannerByName(work.getDeploymentPlanner())); - } - catch (CloudRuntimeException e){ + } catch (CloudRuntimeException e) { e.printStackTrace(); s_logger.info("Caught CloudRuntimeException, returning job failed " + e); CloudRuntimeException ex = new CloudRuntimeException("Unable to start VM instance"); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 8c97b4753a0f..849787b5742a 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -35,8 +35,6 @@ import javax.naming.ConfigurationException; import com.cloud.agent.api.to.DatadiskTO; -import com.cloud.storage.VolumeDetailVO; -import com.cloud.storage.dao.VMTemplateDetailsDao; import com.cloud.utils.StringUtils; import com.cloud.vm.SecondaryStorageVmVO; import com.cloud.vm.UserVmDetailVO; @@ -75,6 +73,8 @@ import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; +import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO; +import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; @@ -103,6 +103,7 @@ import com.cloud.exception.ConcurrentOperationException; import 
com.cloud.exception.InsufficientStorageCapacityException; import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.StorageAccessException; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.Host; import com.cloud.host.HostVO; @@ -122,8 +123,10 @@ import com.cloud.storage.Volume; import com.cloud.storage.Volume.Type; import com.cloud.storage.VolumeApiService; +import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.VMTemplateDetailsDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDetailsDao; import com.cloud.template.TemplateManager; @@ -185,6 +188,8 @@ public enum UserVmCloneType { @Inject protected ResourceLimitService _resourceLimitMgr; @Inject + DiskOfferingDetailsDao _diskOfferingDetailDao; + @Inject VolumeDetailsDao _volDetailDao; @Inject DataStoreManager dataStoreMgr; @@ -748,6 +753,19 @@ public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offeri vol.setFormat(getSupportedImageFormatForCluster(vm.getHypervisorType())); vol = _volsDao.persist(vol); + List volumeDetailsVO = new ArrayList(); + DiskOfferingDetailVO bandwidthLimitDetail = _diskOfferingDetailDao.findDetail(offering.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS); + if (bandwidthLimitDetail != null) { + volumeDetailsVO.add(new VolumeDetailVO(vol.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS, bandwidthLimitDetail.getValue(), false)); + } + DiskOfferingDetailVO iopsLimitDetail = _diskOfferingDetailDao.findDetail(offering.getId(), Volume.IOPS_LIMIT); + if (iopsLimitDetail != null) { + volumeDetailsVO.add(new VolumeDetailVO(vol.getId(), Volume.IOPS_LIMIT, iopsLimitDetail.getValue(), false)); + } + if (!volumeDetailsVO.isEmpty()) { + _volDetailDao.saveDetails(volumeDetailsVO); + } + // Save usage event and update resource count for user vm volumes if (vm.getType() == VirtualMachine.Type.User) { 
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), vol.getDataCenterId(), vol.getId(), vol.getName(), offering.getId(), null, size, @@ -801,6 +819,19 @@ private DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering vol = _volsDao.persist(vol); + List volumeDetailsVO = new ArrayList(); + DiskOfferingDetailVO bandwidthLimitDetail = _diskOfferingDetailDao.findDetail(offering.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS); + if (bandwidthLimitDetail != null) { + volumeDetailsVO.add(new VolumeDetailVO(vol.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS, bandwidthLimitDetail.getValue(), false)); + } + DiskOfferingDetailVO iopsLimitDetail = _diskOfferingDetailDao.findDetail(offering.getId(), Volume.IOPS_LIMIT); + if (iopsLimitDetail != null) { + volumeDetailsVO.add(new VolumeDetailVO(vol.getId(), Volume.IOPS_LIMIT, iopsLimitDetail.getValue(), false)); + } + if (!volumeDetailsVO.isEmpty()) { + _volDetailDao.saveDetails(volumeDetailsVO); + } + if (StringUtils.isNotBlank(configurationId)) { VolumeDetailVO deployConfigurationDetail = new VolumeDetailVO(vol.getId(), VmDetailConstants.DEPLOY_AS_IS_CONFIGURATION, configurationId, false); _volDetailDao.persist(deployConfigurationDetail); @@ -1010,8 +1041,39 @@ public VolumeVO doInTransaction(TransactionStatus status) { } @Override - public void release(VirtualMachineProfile profile) { - // add code here + public void release(VirtualMachineProfile vmProfile) { + Long hostId = vmProfile.getVirtualMachine().getHostId(); + if (hostId != null) { + revokeAccess(vmProfile.getId(), hostId); + } + } + + @Override + public void release(long vmId, long hostId) { + List volumesForVm = _volsDao.findUsableVolumesForInstance(vmId); + if (volumesForVm == null || volumesForVm.isEmpty()) { + return; + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Releasing " + volumesForVm.size() + " volumes for VM: " + vmId + " from host: " + hostId); + } + + for (VolumeVO volumeForVm : volumesForVm) { + 
VolumeInfo volumeInfo = volFactory.getVolume(volumeForVm.getId()); + + // pool id can be null for the VM's volumes in Allocated state + if (volumeForVm.getPoolId() != null) { + DataStore dataStore = dataStoreMgr.getDataStore(volumeForVm.getPoolId(), DataStoreRole.Primary); + PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore; + HostVO host = _hostDao.findById(hostId); + + // This might impact other managed storages, grant access for PowerFlex storage pool only + if (primaryDataStore.isManaged() && primaryDataStore.getPoolType() == Storage.StoragePoolType.PowerFlex) { + volService.revokeAccess(volumeInfo, host, dataStore); + } + } + } } @Override @@ -1243,6 +1305,12 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest disk.setDetails(getDetails(volumeInfo, dataStore)); + PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore; + // This might impact other managed storages, grant access for PowerFlex storage pool only + if (primaryDataStore.isManaged() && primaryDataStore.getPoolType() == Storage.StoragePoolType.PowerFlex) { + volService.grantAccess(volFactory.getVolume(vol.getId()), dest.getHost(), dataStore); + } + vm.addDisk(disk); } @@ -1269,6 +1337,7 @@ private Map getDetails(VolumeInfo volumeInfo, DataStore dataStor VolumeVO volume = _volumeDao.findById(volumeInfo.getId()); details.put(DiskTO.PROTOCOL_TYPE, (volume.getPoolType() != null) ? 
volume.getPoolType().toString() : null); + details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(storagePool.getId()))); if (volume.getPoolId() != null) { StoragePoolVO poolVO = _storagePoolDao.findById(volume.getPoolId()); @@ -1386,7 +1455,7 @@ private List getTasks(List vols, Map return tasks; } - private Pair recreateVolume(VolumeVO vol, VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException { + private Pair recreateVolume(VolumeVO vol, VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, StorageAccessException { VolumeVO newVol; boolean recreate = RecreatableSystemVmEnabled.value(); DataStore destPool = null; @@ -1430,19 +1499,28 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro future = volService.createVolumeAsync(volume, destPool); } else { TemplateInfo templ = tmplFactory.getReadyTemplateOnImageStore(templateId, dest.getDataCenter().getId()); + PrimaryDataStore primaryDataStore = (PrimaryDataStore)destPool; if (templ == null) { if (tmplFactory.isTemplateMarkedForDirectDownload(templateId)) { // Template is marked for direct download bypassing Secondary Storage - templ = tmplFactory.getReadyBypassedTemplateOnPrimaryStore(templateId, destPool.getId(), dest.getHost().getId()); + if (!primaryDataStore.isManaged()) { + templ = tmplFactory.getReadyBypassedTemplateOnPrimaryStore(templateId, destPool.getId(), dest.getHost().getId()); + } else { + s_logger.debug("Direct download template: " + templateId + " on host: " + dest.getHost().getId() + " and copy to the managed storage pool: " + destPool.getId()); + templ = volService.createManagedStorageTemplate(templateId, destPool.getId(), dest.getHost().getId()); + } + + if (templ == null) { + s_logger.debug("Failed to spool direct download template: " + templateId + " for data center " + dest.getDataCenter().getId()); + throw new CloudRuntimeException("Failed to spool direct 
download template: " + templateId + " for data center " + dest.getDataCenter().getId()); + } } else { s_logger.debug("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId()); throw new CloudRuntimeException("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId()); } } - PrimaryDataStore primaryDataStore = (PrimaryDataStore)destPool; - if (primaryDataStore.isManaged()) { DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId()); HypervisorType hyperType = vm.getVirtualMachine().getHypervisorType(); @@ -1476,11 +1554,17 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro long hostId = vm.getVirtualMachine().getHostId(); Host host = _hostDao.findById(hostId); - volService.grantAccess(volFactory.getVolume(newVol.getId()), host, destPool); + try { + volService.grantAccess(volFactory.getVolume(newVol.getId()), host, destPool); + } catch (Exception e) { + throw new StorageAccessException("Unable to grant access to volume: " + newVol.getId() + " on host: " + host.getId()); + } } newVol = _volsDao.findById(newVol.getId()); break; //break out of template-redeploy retry loop + } catch (StorageAccessException e) { + throw e; } catch (InterruptedException | ExecutionException e) { s_logger.error("Unable to create " + newVol, e); throw new StorageUnavailableException("Unable to create " + newVol + ":" + e.toString(), destPool.getId()); @@ -1491,7 +1575,7 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro } @Override - public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException { + public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException, StorageAccessException { if (dest == null) { if (s_logger.isDebugEnabled()) { 
@@ -1534,7 +1618,20 @@ public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws Sto volService.revokeAccess(volFactory.getVolume(vol.getId()), lastHost, storagePool); } - volService.grantAccess(volFactory.getVolume(vol.getId()), host, (DataStore)pool); + try { + volService.grantAccess(volFactory.getVolume(vol.getId()), host, (DataStore)pool); + } catch (Exception e) { + throw new StorageAccessException("Unable to grant access to volume: " + vol.getId() + " on host: " + host.getId()); + } + } else { + // This might impact other managed storages, grant access for PowerFlex storage pool only + if (pool.getPoolType() == Storage.StoragePoolType.PowerFlex) { + try { + volService.grantAccess(volFactory.getVolume(vol.getId()), host, (DataStore)pool); + } catch (Exception e) { + throw new StorageAccessException("Unable to grant access to volume: " + vol.getId() + " on host: " + host.getId()); + } + } } } } else if (task.type == VolumeTaskType.MIGRATE) { @@ -1847,4 +1944,4 @@ public void doInTransactionWithoutResult(TransactionStatus status) { } }); } -} \ No newline at end of file +} diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java index ac6c8555da96..71c1dce8904e 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java @@ -61,10 +61,10 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As } if (srcData.getDataStore().getDriver().canCopy(srcData, destData)) { - srcData.getDataStore().getDriver().copyAsync(srcData, destData, callback); + srcData.getDataStore().getDriver().copyAsync(srcData, destData, destHost, callback); return; } else if 
(destData.getDataStore().getDriver().canCopy(srcData, destData)) { - destData.getDataStore().getDriver().copyAsync(srcData, destData, callback); + destData.getDataStore().getDriver().copyAsync(srcData, destData, destHost, callback); return; } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java index 971859685ff1..bf8761e96e63 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java @@ -53,6 +53,7 @@ import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.utils.exception.CloudRuntimeException; @@ -195,6 +196,10 @@ protected boolean shouldMigrateVolume(StoragePoolVO sourceStoragePool, Host dest @Override protected void copyTemplateToTargetFilesystemStorageIfNeeded(VolumeInfo srcVolumeInfo, StoragePool srcStoragePool, DataStore destDataStore, StoragePool destStoragePool, Host destHost) { + if (srcVolumeInfo.getVolumeType() != Volume.Type.ROOT || srcVolumeInfo.getTemplateId() == null) { + return; + } + VMTemplateStoragePoolVO sourceVolumeTemplateStoragePoolVO = vmTemplatePoolDao.findByPoolTemplate(destStoragePool.getId(), srcVolumeInfo.getTemplateId(), null); if (sourceVolumeTemplateStoragePoolVO == null && destStoragePool.getPoolType() == StoragePoolType.Filesystem) { DataStore sourceTemplateDataStore = dataStoreManagerImpl.getRandomImageStore(srcVolumeInfo.getDataCenterId()); diff --git 
a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 936f0626af32..952dbb200dd0 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -574,6 +574,14 @@ private void handleVolumeMigrationFromManagedStorageToNonManagedStorage(VolumeIn } } + private void verifyFormatWithPoolType(ImageFormat imageFormat, StoragePoolType poolType) { + if (imageFormat != ImageFormat.VHD && imageFormat != ImageFormat.OVA && imageFormat != ImageFormat.QCOW2 && + !(imageFormat == ImageFormat.RAW && StoragePoolType.PowerFlex == poolType)) { + throw new CloudRuntimeException("Only the following image types are currently supported: " + + ImageFormat.VHD.toString() + ", " + ImageFormat.OVA.toString() + ", " + ImageFormat.QCOW2.toString() + ", and " + ImageFormat.RAW.toString() + "(for PowerFlex)"); + } + } + private void verifyFormat(ImageFormat imageFormat) { if (imageFormat != ImageFormat.VHD && imageFormat != ImageFormat.OVA && imageFormat != ImageFormat.QCOW2) { throw new CloudRuntimeException("Only the following image types are currently supported: " + @@ -585,8 +593,9 @@ private void verifyFormat(SnapshotInfo snapshotInfo) { long volumeId = snapshotInfo.getVolumeId(); VolumeVO volumeVO = _volumeDao.findByIdIncludingRemoved(volumeId); + StoragePoolVO storagePoolVO = _storagePoolDao.findById(volumeVO.getPoolId()); - verifyFormat(volumeVO.getFormat()); + verifyFormatWithPoolType(volumeVO.getFormat(), storagePoolVO.getPoolType()); } private boolean usingBackendSnapshotFor(SnapshotInfo snapshotInfo) { @@ -735,6 +744,7 @@ private void handleVolumeMigrationForXenServer(VolumeInfo srcVolumeInfo, VolumeI 
details.put(DiskTO.MANAGED, Boolean.TRUE.toString()); details.put(DiskTO.IQN, destVolumeInfo.get_iScsiName()); details.put(DiskTO.STORAGE_HOST, destPool.getHostAddress()); + details.put(DiskTO.PROTOCOL_TYPE, (destPool.getPoolType() != null) ? destPool.getPoolType().toString() : null); command.setDestDetails(details); @@ -916,6 +926,11 @@ else if (HypervisorType.VMware.equals(snapshotInfo.getHypervisorType()) || Hyper boolean keepGrantedAccess = false; DataStore srcDataStore = snapshotInfo.getDataStore(); + StoragePoolVO storagePoolVO = _storagePoolDao.findById(srcDataStore.getId()); + + if (HypervisorType.KVM.equals(snapshotInfo.getHypervisorType()) && storagePoolVO.getPoolType() == StoragePoolType.PowerFlex) { + usingBackendSnapshot = false; + } if (usingBackendSnapshot) { createVolumeFromSnapshot(snapshotInfo); @@ -1309,7 +1324,13 @@ private void handleCreateVolumeFromTemplateBothOnStorageSystem(TemplateInfo temp Preconditions.checkArgument(volumeInfo != null, "Passing 'null' to volumeInfo of " + "handleCreateVolumeFromTemplateBothOnStorageSystem is not supported."); - verifyFormat(templateInfo.getFormat()); + DataStore dataStore = volumeInfo.getDataStore(); + if (dataStore.getRole() == DataStoreRole.Primary) { + StoragePoolVO storagePoolVO = _storagePoolDao.findById(dataStore.getId()); + verifyFormatWithPoolType(templateInfo.getFormat(), storagePoolVO.getPoolType()); + } else { + verifyFormat(templateInfo.getFormat()); + } HostVO hostVO = null; @@ -1786,6 +1807,11 @@ public void copyAsync(Map volumeDataStoreMap, VirtualMach StoragePoolVO destStoragePool = _storagePoolDao.findById(destDataStore.getId()); StoragePoolVO sourceStoragePool = _storagePoolDao.findById(srcVolumeInfo.getPoolId()); + // do not initiate migration for the same PowerFlex/ScaleIO pool + if (sourceStoragePool.getId() == destStoragePool.getId() && sourceStoragePool.getPoolType() == Storage.StoragePoolType.PowerFlex) { + continue; + } + if (!shouldMigrateVolume(sourceStoragePool, destHost, 
destStoragePool)) { continue; } @@ -1894,13 +1920,11 @@ public void copyAsync(Map volumeDataStoreMap, VirtualMach throw new CloudRuntimeException(errMsg); } - } - catch (Exception ex) { + } catch (Exception ex) { errMsg = "Copy operation failed in 'StorageSystemDataMotionStrategy.copyAsync': " + ex.getMessage(); - + LOGGER.error(errMsg, ex); throw new CloudRuntimeException(errMsg); - } - finally { + } finally { CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(errMsg); CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); @@ -2197,10 +2221,6 @@ protected void verifyLiveMigrationForKVM(Map volumeDataSt throw new CloudRuntimeException("Volume with ID " + volumeInfo.getId() + " is not associated with a storage pool."); } - if (srcStoragePoolVO.isManaged()) { - throw new CloudRuntimeException("Migrating a volume online with KVM from managed storage is not currently supported."); - } - DataStore dataStore = entry.getValue(); StoragePoolVO destStoragePoolVO = _storagePoolDao.findById(dataStore.getId()); @@ -2208,6 +2228,10 @@ protected void verifyLiveMigrationForKVM(Map volumeDataSt throw new CloudRuntimeException("Destination storage pool with ID " + dataStore.getId() + " was not located."); } + if (srcStoragePoolVO.isManaged() && srcStoragePoolVO.getId() != destStoragePoolVO.getId()) { + throw new CloudRuntimeException("Migrating a volume online with KVM from managed storage is not currently supported."); + } + if (storageTypeConsistency == null) { storageTypeConsistency = destStoragePoolVO.isManaged(); } else if (storageTypeConsistency != destStoragePoolVO.isManaged()) { @@ -2301,7 +2325,9 @@ private void handleCreateTemplateFromManagedVolume(VolumeInfo volumeInfo, Templa CopyCmdAnswer copyCmdAnswer = null; try { - if (!ImageFormat.QCOW2.equals(volumeInfo.getFormat())) { + StoragePoolVO storagePoolVO = _storagePoolDao.findById(volumeInfo.getPoolId()); + + if (!ImageFormat.QCOW2.equals(volumeInfo.getFormat()) && 
!(ImageFormat.RAW.equals(volumeInfo.getFormat()) && StoragePoolType.PowerFlex == storagePoolVO.getPoolType())) { throw new CloudRuntimeException("When using managed storage, you can only create a template from a volume on KVM currently."); } @@ -2317,7 +2343,7 @@ private void handleCreateTemplateFromManagedVolume(VolumeInfo volumeInfo, Templa try { handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); - if (srcVolumeDetached) { + if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType()) { _volumeService.grantAccess(volumeInfo, hostVO, srcDataStore); } @@ -2349,7 +2375,7 @@ private void handleCreateTemplateFromManagedVolume(VolumeInfo volumeInfo, Templa throw new CloudRuntimeException(msg + ex.getMessage(), ex); } finally { - if (srcVolumeDetached) { + if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType()) { try { _volumeService.revokeAccess(volumeInfo, hostVO, srcDataStore); } @@ -2415,6 +2441,8 @@ private Map getVolumeDetails(VolumeInfo volumeInfo) { volumeDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress()); volumeDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort())); volumeDetails.put(DiskTO.IQN, volumeVO.get_iScsiName()); + volumeDetails.put(DiskTO.PROTOCOL_TYPE, (volumeVO.getPoolType() != null) ? 
volumeVO.getPoolType().toString() : null); + volumeDetails.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(storagePoolVO.getId()))); volumeDetails.put(DiskTO.VOLUME_SIZE, String.valueOf(volumeVO.getSize())); volumeDetails.put(DiskTO.SCSI_NAA_DEVICE_ID, getVolumeProperty(volumeInfo.getId(), DiskTO.SCSI_NAA_DEVICE_ID)); @@ -2442,7 +2470,12 @@ private Map getSnapshotDetails(SnapshotInfo snapshotInfo) { long snapshotId = snapshotInfo.getId(); - snapshotDetails.put(DiskTO.IQN, getSnapshotProperty(snapshotId, DiskTO.IQN)); + if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex) { + snapshotDetails.put(DiskTO.IQN, snapshotInfo.getPath()); + } else { + snapshotDetails.put(DiskTO.IQN, getSnapshotProperty(snapshotId, DiskTO.IQN)); + } + snapshotDetails.put(DiskTO.VOLUME_SIZE, String.valueOf(snapshotInfo.getSize())); snapshotDetails.put(DiskTO.SCSI_NAA_DEVICE_ID, getSnapshotProperty(snapshotId, DiskTO.SCSI_NAA_DEVICE_ID)); diff --git a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java index ba7fb74da1d8..609742b79fd6 100644 --- a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java +++ b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java @@ -70,6 +70,7 @@ import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.Volume; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.utils.exception.CloudRuntimeException; @@ -327,6 +328,7 @@ private void 
configureAndTestcopyTemplateToTargetStorageIfNeeded(VMTemplateStora VolumeInfo srcVolumeInfo = Mockito.mock(VolumeInfo.class); Mockito.when(srcVolumeInfo.getTemplateId()).thenReturn(0l); + Mockito.when(srcVolumeInfo.getVolumeType()).thenReturn(Volume.Type.ROOT); StoragePool srcStoragePool = Mockito.mock(StoragePool.class); @@ -465,6 +467,8 @@ public void testVerifyLiveMigrationMapForKVMNotExistingDest() { @Test(expected = CloudRuntimeException.class) public void testVerifyLiveMigrationMapForKVMMixedManagedUnmagedStorage() { when(pool1.isManaged()).thenReturn(true); + when(pool1.getId()).thenReturn(POOL_1_ID); + when(pool2.getId()).thenReturn(POOL_2_ID); lenient().when(pool2.isManaged()).thenReturn(false); kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2); } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java index 1590fe0bf7de..c720b28b5670 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java @@ -44,6 +44,7 @@ import com.cloud.host.dao.HostDao; import com.cloud.storage.DataStoreRole; import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplatePoolDao; @@ -79,6 +80,16 @@ public TemplateInfo getTemplateOnPrimaryStorage(long templateId, DataStore store return null; } + @Override + public TemplateInfo getTemplate(long templateId) { + VMTemplateVO templ = imageDataDao.findById(templateId); + if (templ != null) { + TemplateObject tmpl = TemplateObject.getTemplate(templ, null, null); + return tmpl; + } + return null; + } + @Override public 
TemplateInfo getTemplate(long templateId, DataStore store) { VMTemplateVO templ = imageDataDao.findById(templateId); @@ -244,6 +255,33 @@ public TemplateInfo getReadyBypassedTemplateOnPrimaryStore(long templateId, Long return this.getTemplate(templateId, store); } + @Override + public TemplateInfo getReadyBypassedTemplateOnManagedStorage(long templateId, TemplateInfo templateOnPrimary, Long poolId, Long hostId) { + VMTemplateVO templateVO = imageDataDao.findById(templateId); + if (templateVO == null || !templateVO.isDirectDownload()) { + return null; + } + + if (poolId == null) { + throw new CloudRuntimeException("No storage pool specified to download template: " + templateId); + } + + StoragePoolVO poolVO = primaryDataStoreDao.findById(poolId); + if (poolVO == null || !poolVO.isManaged()) { + return null; + } + + VMTemplateStoragePoolVO spoolRef = templatePoolDao.findByPoolTemplate(poolId, templateId, null); + if (spoolRef == null) { + throw new CloudRuntimeException("Template not created on managed storage pool: " + poolId + " to copy the download template: " + templateId); + } else if (spoolRef.getDownloadState() == VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED) { + directDownloadManager.downloadTemplate(templateId, poolId, hostId); + } + + DataStore store = storeMgr.getDataStore(poolId, DataStoreRole.Primary); + return this.getTemplate(templateId, store); + } + @Override public boolean isTemplateMarkedForDirectDownload(long templateId) { VMTemplateVO templateVO = imageDataDao.findById(templateId); diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index ed9359d952af..ef0ef7edcdf0 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -917,7 
+917,14 @@ public AsyncCallFuture deleteTemplateAsync(TemplateInfo templ TemplateOpContext context = new TemplateOpContext(null, to, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().deleteTemplateCallback(null, null)).setContext(context); - to.getDataStore().getDriver().deleteAsync(to.getDataStore(), to, caller); + + if (to.canBeDeletedFromDataStore()) { + to.getDataStore().getDriver().deleteAsync(to.getDataStore(), to, caller); + } else { + CommandResult result = new CommandResult(); + caller.complete(result); + } + return future; } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java index b7a44cd4f08a..d96b6183de1d 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -374,6 +374,35 @@ public boolean isDirectDownload() { return this.imageVO.isDirectDownload(); } + @Override + public boolean canBeDeletedFromDataStore() { + Status downloadStatus = Status.UNKNOWN; + int downloadPercent = -1; + if (getDataStore().getRole() == DataStoreRole.Primary) { + VMTemplateStoragePoolVO templatePoolRef = templatePoolDao.findByPoolTemplate(getDataStore().getId(), getId(), null); + if (templatePoolRef != null) { + downloadStatus = templatePoolRef.getDownloadState(); + downloadPercent = templatePoolRef.getDownloadPercent(); + } + } else if (dataStore.getRole() == DataStoreRole.Image || dataStore.getRole() == DataStoreRole.ImageCache) { + TemplateDataStoreVO templateStoreRef = templateStoreDao.findByStoreTemplate(dataStore.getId(), getId()); + if (templateStoreRef != null) { + downloadStatus = templateStoreRef.getDownloadState(); + downloadPercent = templateStoreRef.getDownloadPercent(); + 
templateStoreRef.getState(); + } + } + + // Marking downloaded templates for deletion, but might skip any deletion handled for failed templates. + // Only templates not downloaded and in error state (with no install path) cannot be deleted from the datastore, so doesn't impact last behavior for templates with other states + if (downloadStatus == null || downloadStatus == Status.NOT_DOWNLOADED || (downloadStatus == Status.DOWNLOAD_ERROR && downloadPercent == 0)) { + s_logger.debug("Template: " + getId() + " cannot be deleted from the store: " + getDataStore().getId()); + return false; + } + + return true; + } + @Override public boolean isDeployAsIs() { if (this.imageVO == null) { diff --git a/engine/storage/snapshot/pom.xml b/engine/storage/snapshot/pom.xml index 40e513bbc8c9..a5d27ae64d15 100644 --- a/engine/storage/snapshot/pom.xml +++ b/engine/storage/snapshot/pom.xml @@ -50,6 +50,12 @@ cloud-engine-storage-volume ${project.version} + + org.apache.cloudstack + cloud-plugin-storage-volume-scaleio + ${project.version} + compile + diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/ScaleIOSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/ScaleIOSnapshotStrategy.java new file mode 100644 index 000000000000..dfe475004f78 --- /dev/null +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/ScaleIOSnapshotStrategy.java @@ -0,0 +1,93 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.snapshot; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; + +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.Snapshot; +import com.cloud.storage.Storage; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; + +public class ScaleIOSnapshotStrategy extends StorageSystemSnapshotStrategy { + @Inject + private SnapshotDataStoreDao snapshotStoreDao; + @Inject + private PrimaryDataStoreDao primaryDataStoreDao; + @Inject + private VolumeDao volumeDao; + + private static final Logger LOG = Logger.getLogger(ScaleIOSnapshotStrategy.class); + + @Override + public StrategyPriority canHandle(Snapshot snapshot, SnapshotOperation op) { + long volumeId = snapshot.getVolumeId(); + VolumeVO volumeVO = volumeDao.findByIdIncludingRemoved(volumeId); + boolean baseVolumeExists = volumeVO.getRemoved() == null; + if (!baseVolumeExists) { + return StrategyPriority.CANT_HANDLE; + } + + if (!isSnapshotStoredOnScaleIOStoragePool(snapshot)) { + return StrategyPriority.CANT_HANDLE; + } 
+ + if (SnapshotOperation.REVERT.equals(op)) { + return StrategyPriority.HIGHEST; + } + + if (SnapshotOperation.DELETE.equals(op)) { + return StrategyPriority.HIGHEST; + } + + return StrategyPriority.CANT_HANDLE; + } + + @Override + public boolean revertSnapshot(SnapshotInfo snapshotInfo) { + VolumeInfo volumeInfo = snapshotInfo.getBaseVolume(); + Storage.ImageFormat imageFormat = volumeInfo.getFormat(); + if (!Storage.ImageFormat.RAW.equals(imageFormat)) { + LOG.error(String.format("Does not support revert snapshot of the image format [%s] on PowerFlex. Can only rollback snapshots of format RAW", imageFormat)); + return false; + } + + executeRevertSnapshot(snapshotInfo, volumeInfo); + + return true; + } + + protected boolean isSnapshotStoredOnScaleIOStoragePool(Snapshot snapshot) { + SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Primary); + if (snapshotStore == null) { + return false; + } + StoragePoolVO storagePoolVO = primaryDataStoreDao.findById(snapshotStore.getDataStoreId()); + return storagePoolVO != null && storagePoolVO.getPoolType() == Storage.StoragePoolType.PowerFlex; + } +} diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java index 33d43d708b08..6401f8a8e1c9 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java @@ -16,6 +16,37 @@ // under the License. 
package org.apache.cloudstack.storage.snapshot; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Random; +import java.util.UUID; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer; +import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.ModifyTargetsCommand; @@ -38,18 +69,18 @@ import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Volume; +import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.VolumeVO; import 
com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.SnapshotDetailsDao; import com.cloud.storage.dao.SnapshotDetailsVO; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDetailsDao; -import com.cloud.storage.VolumeDetailVO; import com.cloud.utils.db.DB; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; -import com.cloud.vm.VirtualMachine; import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.VMInstanceDao; import com.cloud.vm.snapshot.VMSnapshot; import com.cloud.vm.snapshot.VMSnapshotService; @@ -57,37 +88,6 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDao; import com.google.common.base.Preconditions; -import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult; -import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; -import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer; -import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; -import 
org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - -import javax.inject.Inject; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Random; -import java.util.UUID; - @Component public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { private static final Logger s_logger = Logger.getLogger(StorageSystemSnapshotStrategy.class); @@ -241,15 +241,16 @@ private boolean cleanupSnapshotOnPrimaryStore(long snapshotId) { } private boolean isAcceptableRevertFormat(VolumeVO volumeVO) { - return ImageFormat.VHD.equals(volumeVO.getFormat()) || ImageFormat.OVA.equals(volumeVO.getFormat()) || ImageFormat.QCOW2.equals(volumeVO.getFormat()); + return ImageFormat.VHD.equals(volumeVO.getFormat()) || ImageFormat.OVA.equals(volumeVO.getFormat()) + || ImageFormat.QCOW2.equals(volumeVO.getFormat()) || ImageFormat.RAW.equals(volumeVO.getFormat()); } private void verifyFormat(VolumeInfo volumeInfo) { ImageFormat imageFormat = volumeInfo.getFormat(); - if (imageFormat != ImageFormat.VHD && imageFormat != ImageFormat.OVA && imageFormat != ImageFormat.QCOW2) { + if (imageFormat != ImageFormat.VHD && imageFormat != ImageFormat.OVA && imageFormat != ImageFormat.QCOW2 && imageFormat != ImageFormat.RAW) { throw new CloudRuntimeException("Only the following image types are currently supported: " + - ImageFormat.VHD.toString() + ", " + ImageFormat.OVA.toString() + ", and " + ImageFormat.QCOW2); + ImageFormat.VHD.toString() + ", " + ImageFormat.OVA.toString() + ", " + ImageFormat.QCOW2 + ", and " + ImageFormat.RAW); } } @@ -456,7 +457,7 @@ public SnapshotInfo takeSnapshot(SnapshotInfo snapshotInfo) { computeClusterSupportsVolumeClone = clusterDao.getSupportsResigning(hostVO.getClusterId()); } - else if (volumeInfo.getFormat() == ImageFormat.OVA || 
volumeInfo.getFormat() == ImageFormat.QCOW2) { + else if (volumeInfo.getFormat() == ImageFormat.OVA || volumeInfo.getFormat() == ImageFormat.QCOW2 || volumeInfo.getFormat() == ImageFormat.RAW) { computeClusterSupportsVolumeClone = true; } else { @@ -760,6 +761,7 @@ private Map getSourceDetails(VolumeInfo volumeInfo) { sourceDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress()); sourceDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort())); sourceDetails.put(DiskTO.IQN, volumeVO.get_iScsiName()); + sourceDetails.put(DiskTO.PROTOCOL_TYPE, (storagePoolVO.getPoolType() != null) ? storagePoolVO.getPoolType().toString() : null); ChapInfo chapInfo = volService.getChapInfo(volumeInfo, volumeInfo.getDataStore()); @@ -778,6 +780,7 @@ private Map getDestDetails(StoragePoolVO storagePoolVO, Snapshot destDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress()); destDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort())); + destDetails.put(DiskTO.PROTOCOL_TYPE, (storagePoolVO.getPoolType() != null) ? storagePoolVO.getPoolType().toString() : null); long snapshotId = snapshotInfo.getId(); diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java new file mode 100644 index 000000000000..985eeedb205b --- /dev/null +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java @@ -0,0 +1,492 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.vmsnapshot; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotStrategy; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.datastore.api.SnapshotGroup; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.log4j.Logger; + +import com.cloud.agent.api.VMSnapshotTO; +import com.cloud.alert.AlertManager; +import com.cloud.event.EventTypes; +import com.cloud.event.UsageEventUtils; +import com.cloud.event.UsageEventVO; +import com.cloud.server.ManagementServerImpl; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.uservm.UserVm; +import com.cloud.utils.NumbersUtil; +import 
com.cloud.utils.component.ManagerBase; +import com.cloud.utils.crypt.DBEncryptionUtil; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import com.cloud.utils.db.TransactionStatus; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.snapshot.VMSnapshot; +import com.cloud.vm.snapshot.VMSnapshotDetailsVO; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao; + +public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshotStrategy { + private static final Logger LOGGER = Logger.getLogger(ScaleIOVMSnapshotStrategy.class); + @Inject + VMSnapshotHelper vmSnapshotHelper; + @Inject + UserVmDao userVmDao; + @Inject + VMSnapshotDao vmSnapshotDao; + @Inject + protected VMSnapshotDetailsDao vmSnapshotDetailsDao; + int _wait; + @Inject + ConfigurationDao configurationDao; + @Inject + VolumeDao volumeDao; + @Inject + DiskOfferingDao diskOfferingDao; + @Inject + PrimaryDataStoreDao storagePoolDao; + @Inject + StoragePoolDetailsDao storagePoolDetailsDao; + @Inject + AlertManager alertManager; + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + String value = configurationDao.getValue("vmsnapshot.create.wait"); + _wait = NumbersUtil.parseInt(value, 1800); + return true; + } + + @Override + public StrategyPriority canHandle(VMSnapshot vmSnapshot) { + List volumeTOs = vmSnapshotHelper.getVolumeTOList(vmSnapshot.getVmId()); + if (volumeTOs == null) { + throw new CloudRuntimeException("Failed to get the volumes for the vm snapshot: " + vmSnapshot.getUuid()); + } + + if (volumeTOs != null && !volumeTOs.isEmpty()) { + for (VolumeObjectTO volumeTO: volumeTOs) { + Long poolId = volumeTO.getPoolId(); 
+ Storage.StoragePoolType poolType = vmSnapshotHelper.getStoragePoolType(poolId); + if (poolType != Storage.StoragePoolType.PowerFlex) { + return StrategyPriority.CANT_HANDLE; + } + } + } + + return StrategyPriority.HIGHEST; + } + + @Override + public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { + UserVm userVm = userVmDao.findById(vmSnapshot.getVmId()); + VMSnapshotVO vmSnapshotVO = (VMSnapshotVO)vmSnapshot; + + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.CreateRequested); + } catch (NoTransitionException e) { + throw new CloudRuntimeException(e.getMessage()); + } + + boolean result = false; + try { + Map srcVolumeDestSnapshotMap = new HashMap<>(); + List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); + + final Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + long prev_chain_size = 0; + long virtual_size=0; + for (VolumeObjectTO volume : volumeTOs) { + String volumeSnapshotName = String.format("%s-%s-%s-%s", ScaleIOUtil.VMSNAPSHOT_PREFIX, vmSnapshotVO.getId(), + storagePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); + srcVolumeDestSnapshotMap.put(volume.getPath(), volumeSnapshotName); + + virtual_size += volume.getSize(); + VolumeVO volumeVO = volumeDao.findById(volume.getId()); + prev_chain_size += volumeVO.getVmSnapshotChainSize() == null ? 
0 : volumeVO.getVmSnapshotChainSize(); + } + + VMSnapshotTO current = null; + VMSnapshotVO currentSnapshot = vmSnapshotDao.findCurrentSnapshotByVmId(userVm.getId()); + if (currentSnapshot != null) { + current = vmSnapshotHelper.getSnapshotWithParents(currentSnapshot); + } + + if (current == null) + vmSnapshotVO.setParent(null); + else + vmSnapshotVO.setParent(current.getId()); + + try { + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + SnapshotGroup snapshotGroup = client.takeSnapshot(srcVolumeDestSnapshotMap); + if (snapshotGroup == null) { + throw new CloudRuntimeException("Failed to take VM snapshot on PowerFlex storage pool"); + } + + String snapshotGroupId = snapshotGroup.getSnapshotGroupId(); + List volumeIds = snapshotGroup.getVolumeIds(); + if (volumeIds != null && !volumeIds.isEmpty()) { + List vmSnapshotDetails = new ArrayList(); + vmSnapshotDetails.add(new VMSnapshotDetailsVO(vmSnapshot.getId(), "SnapshotGroupId", snapshotGroupId, false)); + + for (int index = 0; index < volumeIds.size(); index++) { + vmSnapshotDetails.add(new VMSnapshotDetailsVO(vmSnapshot.getId(), "Vol_" + volumeTOs.get(index).getId() + "_Snapshot", volumeIds.get(index), false)); + } + + vmSnapshotDetailsDao.saveDetails(vmSnapshotDetails); + } + + finalizeCreate(vmSnapshotVO, volumeTOs); + result = true; + LOGGER.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName()); + + long new_chain_size=0; + for (VolumeObjectTO volumeTo : volumeTOs) { + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_CREATE, vmSnapshot, userVm, volumeTo); + new_chain_size += volumeTo.getSize(); + } + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_ON_PRIMARY, vmSnapshot, userVm, new_chain_size - prev_chain_size, virtual_size); + return vmSnapshot; + } catch (Exception e) { + String errMsg = "Unable to take vm snapshot due to: " + e.getMessage(); + LOGGER.warn(errMsg, e); + throw new CloudRuntimeException(errMsg); + } + } finally { + if (!result) 
{ + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); + + String subject = "Take snapshot failed for VM: " + userVm.getDisplayName(); + String message = "Snapshot operation failed for VM: " + userVm.getDisplayName() + ", Please check and delete if any stale volumes created with VM snapshot id: " + vmSnapshot.getVmId(); + alertManager.sendAlert(AlertManager.AlertType.ALERT_TYPE_VM_SNAPSHOT, userVm.getDataCenterId(), userVm.getPodIdToDeployIn(), subject, message); + } catch (NoTransitionException e1) { + LOGGER.error("Cannot set vm snapshot state due to: " + e1.getMessage()); + } + } + } + } + + @DB + protected void finalizeCreate(VMSnapshotVO vmSnapshot, List volumeTOs) { + try { + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws NoTransitionException { + // update chain size for the volumes in the VM snapshot + for (VolumeObjectTO volume : volumeTOs) { + VolumeVO volumeVO = volumeDao.findById(volume.getId()); + if (volumeVO != null) { + long vmSnapshotChainSize = volumeVO.getVmSnapshotChainSize() == null ? 
0 : volumeVO.getVmSnapshotChainSize(); + vmSnapshotChainSize += volumeVO.getSize(); + volumeVO.setVmSnapshotChainSize(vmSnapshotChainSize); + volumeDao.persist(volumeVO); + } + } + + vmSnapshot.setCurrent(true); + + // change current snapshot + if (vmSnapshot.getParent() != null) { + VMSnapshotVO previousCurrent = vmSnapshotDao.findById(vmSnapshot.getParent()); + previousCurrent.setCurrent(false); + vmSnapshotDao.persist(previousCurrent); + } + vmSnapshotDao.persist(vmSnapshot); + + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded); + } + }); + } catch (Exception e) { + String errMsg = "Error while finalize create vm snapshot: " + vmSnapshot.getName() + " due to " + e.getMessage(); + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg); + } + } + + @Override + public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { + VMSnapshotVO vmSnapshotVO = (VMSnapshotVO)vmSnapshot; + UserVmVO userVm = userVmDao.findById(vmSnapshot.getVmId()); + + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.RevertRequested); + } catch (NoTransitionException e) { + throw new CloudRuntimeException(e.getMessage()); + } + + boolean result = false; + try { + List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); + Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); + Map srcSnapshotDestVolumeMap = new HashMap<>(); + for (VolumeObjectTO volume : volumeTOs) { + VMSnapshotDetailsVO vmSnapshotDetail = vmSnapshotDetailsDao.findDetail(vmSnapshotVO.getId(), "Vol_" + volume.getId() + "_Snapshot"); + String srcSnapshotVolumeId = vmSnapshotDetail.getValue(); + String destVolumeId = volume.getPath(); + srcSnapshotDestVolumeMap.put(srcSnapshotVolumeId, destVolumeId); + } + + String systemId = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue(); + if (systemId == null) { + throw new CloudRuntimeException("Failed to get the system id 
for PowerFlex storage pool for reverting VM snapshot: " + vmSnapshot.getName()); + } + + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + result = client.revertSnapshot(systemId, srcSnapshotDestVolumeMap); + if (!result) { + throw new CloudRuntimeException("Failed to revert VM snapshot on PowerFlex storage pool"); + } + + finalizeRevert(vmSnapshotVO, volumeTOs); + result = true; + } catch (Exception e) { + String errMsg = "Revert VM: " + userVm.getInstanceName() + " to snapshot: " + vmSnapshotVO.getName() + " failed due to " + e.getMessage(); + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg); + } finally { + if (!result) { + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); + } catch (NoTransitionException e1) { + LOGGER.error("Cannot set vm snapshot state due to: " + e1.getMessage()); + } + } + } + return result; + } + + @DB + protected void finalizeRevert(VMSnapshotVO vmSnapshot, List volumeToList) { + try { + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws NoTransitionException { + // update chain size for the volumes in the VM snapshot + for (VolumeObjectTO volume : volumeToList) { + VolumeVO volumeVO = volumeDao.findById(volume.getId()); + if (volumeVO != null && volumeVO.getVmSnapshotChainSize() != null && volumeVO.getVmSnapshotChainSize() >= volumeVO.getSize()) { + long vmSnapshotChainSize = volumeVO.getVmSnapshotChainSize() - volumeVO.getSize(); + volumeVO.setVmSnapshotChainSize(vmSnapshotChainSize); + volumeDao.persist(volumeVO); + } + } + + // update current snapshot, current snapshot is the one reverted to + VMSnapshotVO previousCurrent = vmSnapshotDao.findCurrentSnapshotByVmId(vmSnapshot.getVmId()); + if (previousCurrent != null) { + previousCurrent.setCurrent(false); + vmSnapshotDao.persist(previousCurrent); + } + vmSnapshot.setCurrent(true); + 
vmSnapshotDao.persist(vmSnapshot); + + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded); + } + }); + } catch (Exception e) { + String errMsg = "Error while finalize revert vm snapshot: " + vmSnapshot.getName() + " due to " + e.getMessage(); + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg); + } + } + + @Override + public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { + UserVmVO userVm = userVmDao.findById(vmSnapshot.getVmId()); + VMSnapshotVO vmSnapshotVO = (VMSnapshotVO)vmSnapshot; + + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.ExpungeRequested); + } catch (NoTransitionException e) { + LOGGER.debug("Failed to change vm snapshot state with event ExpungeRequested"); + throw new CloudRuntimeException("Failed to change vm snapshot state with event ExpungeRequested: " + e.getMessage()); + } + + try { + List volumeTOs = vmSnapshotHelper.getVolumeTOList(vmSnapshot.getVmId()); + Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); + String systemId = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue(); + if (systemId == null) { + throw new CloudRuntimeException("Failed to get the system id for PowerFlex storage pool for deleting VM snapshot: " + vmSnapshot.getName()); + } + + VMSnapshotDetailsVO vmSnapshotDetailsVO = vmSnapshotDetailsDao.findDetail(vmSnapshot.getId(), "SnapshotGroupId"); + if (vmSnapshotDetailsVO == null) { + throw new CloudRuntimeException("Failed to get snapshot group id for the VM snapshot: " + vmSnapshot.getName()); + } + + String snapshotGroupId = vmSnapshotDetailsVO.getValue(); + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + int volumesDeleted = client.deleteSnapshotGroup(systemId, snapshotGroupId); + if (volumesDeleted <= 0) { + throw new CloudRuntimeException("Failed to delete VM snapshot: " + vmSnapshot.getName()); + } else if (volumesDeleted != 
volumeTOs.size()) { + LOGGER.warn("Unable to delete all volumes of the VM snapshot: " + vmSnapshot.getName()); + } + + finalizeDelete(vmSnapshotVO, volumeTOs); + long full_chain_size=0; + for (VolumeObjectTO volumeTo : volumeTOs) { + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_DELETE, vmSnapshot, userVm, volumeTo); + full_chain_size += volumeTo.getSize(); + } + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_OFF_PRIMARY, vmSnapshot, userVm, full_chain_size, 0L); + return true; + } catch (Exception e) { + String errMsg = "Unable to delete vm snapshot: " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " due to " + e.getMessage(); + LOGGER.warn(errMsg, e); + throw new CloudRuntimeException(errMsg); + } + } + + @DB + protected void finalizeDelete(VMSnapshotVO vmSnapshot, List volumeTOs) { + try { + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws NoTransitionException { + // update chain size for the volumes in the VM snapshot + for (VolumeObjectTO volume : volumeTOs) { + VolumeVO volumeVO = volumeDao.findById(volume.getId()); + if (volumeVO != null && volumeVO.getVmSnapshotChainSize() != null && volumeVO.getVmSnapshotChainSize() >= volumeVO.getSize()) { + long vmSnapshotChainSize = volumeVO.getVmSnapshotChainSize() - volumeVO.getSize(); + volumeVO.setVmSnapshotChainSize(vmSnapshotChainSize); + volumeDao.persist(volumeVO); + } + } + + // update children's parent snapshots + List children = vmSnapshotDao.listByParent(vmSnapshot.getId()); + for (VMSnapshotVO child : children) { + child.setParent(vmSnapshot.getParent()); + vmSnapshotDao.persist(child); + } + + // update current snapshot + VMSnapshotVO current = vmSnapshotDao.findCurrentSnapshotByVmId(vmSnapshot.getVmId()); + if (current != null && current.getId() == vmSnapshot.getId() && vmSnapshot.getParent() != null) { + VMSnapshotVO parent = vmSnapshotDao.findById(vmSnapshot.getParent()); + 
parent.setCurrent(true); + vmSnapshotDao.persist(parent); + } + vmSnapshot.setCurrent(false); + vmSnapshotDao.persist(vmSnapshot); + + vmSnapshotDao.remove(vmSnapshot.getId()); + } + }); + } catch (Exception e) { + String errMsg = "Error while finalize delete vm snapshot: " + vmSnapshot.getName() + " due to " + e.getMessage(); + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg); + } + } + + @Override + public boolean deleteVMSnapshotFromDB(VMSnapshot vmSnapshot, boolean unmanage) { + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.ExpungeRequested); + } catch (NoTransitionException e) { + LOGGER.debug("Failed to change vm snapshot state with event ExpungeRequested"); + throw new CloudRuntimeException("Failed to change vm snapshot state with event ExpungeRequested: " + e.getMessage()); + } + UserVm userVm = userVmDao.findById(vmSnapshot.getVmId()); + List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); + long full_chain_size = 0; + for (VolumeObjectTO volumeTo: volumeTOs) { + volumeTo.setSize(0); + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_DELETE, vmSnapshot, userVm, volumeTo); + full_chain_size += volumeTo.getSize(); + } + if (unmanage) { + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_OFF_PRIMARY, vmSnapshot, userVm, full_chain_size, 0L); + } + return vmSnapshotDao.remove(vmSnapshot.getId()); + } + + private void publishUsageEvent(String type, VMSnapshot vmSnapshot, UserVm userVm, VolumeObjectTO volumeTo) { + VolumeVO volume = volumeDao.findById(volumeTo.getId()); + Long diskOfferingId = volume.getDiskOfferingId(); + Long offeringId = null; + if (diskOfferingId != null) { + DiskOfferingVO offering = diskOfferingDao.findById(diskOfferingId); + if (offering != null && (offering.getType() == DiskOfferingVO.Type.Disk)) { + offeringId = offering.getId(); + } + } + Map details = new HashMap<>(); + if (vmSnapshot != null) { + details.put(UsageEventVO.DynamicParameters.vmSnapshotId.name(), 
String.valueOf(vmSnapshot.getId())); + } + UsageEventUtils.publishUsageEvent(type, vmSnapshot.getAccountId(), userVm.getDataCenterId(), userVm.getId(), vmSnapshot.getName(), offeringId, volume.getId(), // save volume's id into templateId field + volumeTo.getSize(), VMSnapshot.class.getName(), vmSnapshot.getUuid(), details); + } + + private void publishUsageEvent(String type, VMSnapshot vmSnapshot, UserVm userVm, Long vmSnapSize, Long virtualSize) { + try { + Map details = new HashMap<>(); + if (vmSnapshot != null) { + details.put(UsageEventVO.DynamicParameters.vmSnapshotId.name(), String.valueOf(vmSnapshot.getId())); + } + UsageEventUtils.publishUsageEvent(type, vmSnapshot.getAccountId(), userVm.getDataCenterId(), userVm.getId(), vmSnapshot.getName(), 0L, 0L, vmSnapSize, virtualSize, + VMSnapshot.class.getName(), vmSnapshot.getUuid(), details); + } catch (Exception e) { + LOGGER.error("Failed to publish usage event " + type, e); + } + } + + private ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception { + final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.valueIn(storagePoolId); + final String url = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_ENDPOINT).getValue(); + final String encryptedUsername = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_USERNAME).getValue(); + final String username = DBEncryptionUtil.decrypt(encryptedUsername); + final String encryptedPassword = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_PASSWORD).getValue(); + final String password = DBEncryptionUtil.decrypt(encryptedPassword); + return ScaleIOGatewayClient.getClient(url, username, password, false, clientTimeout); + } +} diff --git a/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-snapshot-storage-context.xml 
b/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-snapshot-storage-context.xml index 2bfb3c368a56..2084ce26f69a 100644 --- a/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-snapshot-storage-context.xml +++ b/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-snapshot-storage-context.xml @@ -36,7 +36,13 @@ + + + + diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java index cfe32c2bd072..2a1c25744319 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java @@ -28,13 +28,13 @@ import com.cloud.exception.StorageUnavailableException; import com.cloud.storage.StoragePoolStatus; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; import com.cloud.capacity.Capacity; import com.cloud.capacity.dao.CapacityDao; @@ -211,12 +211,16 @@ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh, return false; } + Volume volume = volumeDao.findById(dskCh.getVolumeId()); + if(!storageMgr.storagePoolCompatibleWithVolumePool(pool, volume)) { + return false; + } + if (pool.isManaged() && !storageUtil.managedStoragePoolCanScale(pool, plan.getClusterId(), plan.getHostId())) { 
return false; } // check capacity - Volume volume = volumeDao.findById(dskCh.getVolumeId()); List requestVolumes = new ArrayList<>(); requestVolumes.add(volume); if (dskCh.getHypervisorType() == HypervisorType.VMware) { diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java index 301704a75a6a..225f781489c0 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java @@ -48,15 +48,10 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { @Inject private CapacityDao capacityDao; - @Override protected List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { LOGGER.debug("ZoneWideStoragePoolAllocator to find storage pool"); - if (dskCh.useLocalStorage()) { - return null; - } - if (LOGGER.isTraceEnabled()) { // Log the pools details that are ignored because they are in disabled state List disabledPools = storagePoolDao.findDisabledPoolsByScope(plan.getDataCenterId(), null, null, ScopeType.ZONE); @@ -92,7 +87,6 @@ protected List select(DiskProfile dskCh, VirtualMachineProfile vmPr avoid.addPool(pool.getId()); } - for (StoragePoolVO storage : storagePools) { if (suitablePools.size() == returnUpTo) { break; @@ -114,7 +108,6 @@ private boolean canAddStoragePoolToAvoidSet(StoragePoolVO storagePoolVO) { return !ScopeType.ZONE.equals(storagePoolVO.getScope()) || !storagePoolVO.isManaged(); } - @Override protected List reorderPoolsByCapacity(DeploymentPlan plan, List pools) { diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java 
b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java index cadbad3341a9..01842441e269 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java @@ -37,6 +37,7 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; +import com.cloud.storage.Storage; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.fsm.NoTransitionException; @@ -148,4 +149,33 @@ public VMSnapshotTO getSnapshotWithParents(VMSnapshotVO snapshot) { return result; } + @Override + public Long getStoragePoolForVM(Long vmId) { + List rootVolumes = volumeDao.findReadyRootVolumesByInstance(vmId); + if (rootVolumes == null || rootVolumes.isEmpty()) { + throw new InvalidParameterValueException("Failed to find root volume for the user vm:" + vmId); + } + + VolumeVO rootVolume = rootVolumes.get(0); + StoragePoolVO rootVolumePool = primaryDataStoreDao.findById(rootVolume.getPoolId()); + if (rootVolumePool == null) { + throw new InvalidParameterValueException("Failed to find root volume storage pool for the user vm:" + vmId); + } + + if (rootVolumePool.isInMaintenance()) { + throw new InvalidParameterValueException("Storage pool for the user vm:" + vmId + " is in maintenance"); + } + + return rootVolumePool.getId(); + } + + @Override + public Storage.StoragePoolType getStoragePoolType(Long poolId) { + StoragePoolVO storagePool = primaryDataStoreDao.findById(poolId); + if (storagePool == null) { + throw new InvalidParameterValueException("storage pool is not found"); + } + + return storagePool.getPoolType(); + } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index 
9cf73e6f2a5d..0c55545d66a8 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -71,6 +71,7 @@ import com.cloud.configuration.Config; import com.cloud.exception.AgentUnavailableException; import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.Host; import com.cloud.host.dao.HostDao; import com.cloud.secstorage.CommandExecLogDao; import com.cloud.secstorage.CommandExecLogVO; @@ -363,6 +364,11 @@ public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCa } } + @Override + public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { + copyAsync(srcData, destData, callback); + } + private Answer sendToLeastBusyEndpoint(List eps, CopyCommand cmd) { Answer answer = null; EndPoint endPoint = null; diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java index 2e7e13b0846e..35153a109961 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java @@ -23,6 +23,7 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO; import com.cloud.agent.api.VMSnapshotTO; +import com.cloud.storage.Storage; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.vm.snapshot.VMSnapshot; import com.cloud.vm.snapshot.VMSnapshotVO; @@ -35,4 +36,8 @@ public interface VMSnapshotHelper { List getVolumeTOList(Long vmId); VMSnapshotTO getSnapshotWithParents(VMSnapshotVO snapshot); + + Long getStoragePoolForVM(Long vmId); + + Storage.StoragePoolType getStoragePoolType(Long poolId); } diff --git 
a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java index 9750fb1b84f1..5ec9cfb27e5f 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -29,11 +29,9 @@ import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDetailsDao; import com.cloud.vm.VmDetailConstants; -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; -import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; +import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; @@ -44,6 +42,8 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.commons.lang.StringUtils; +import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.DownloadAnswer; @@ -53,6 +53,7 @@ import com.cloud.offering.DiskOffering.DiskCacheMode; import com.cloud.storage.DataStoreRole; import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.ProvisioningType; import com.cloud.storage.Volume; @@ -625,6 +626,11 @@ public HypervisorType getHypervisorType() { return volumeDao.getHypervisorType(volumeVO.getId()); } + @Override + public Storage.StoragePoolType 
getStoragePoolType() { + return volumeVO.getPoolType(); + } + @Override public Long getLastPoolId() { return volumeVO.getLastPoolId(); diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 5e3493a29f1d..9f5fcac7d927 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -31,6 +31,7 @@ import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; +import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; @@ -47,6 +48,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; @@ -88,6 +90,7 @@ import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.StorageAccessException; import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; @@ -101,13 +104,16 @@ import com.cloud.storage.DataStoreRole; import 
com.cloud.storage.RegisterVolumePayload; import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.Volume; import com.cloud.storage.Volume.State; +import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.dao.VolumeDao; @@ -172,6 +178,10 @@ public class VolumeServiceImpl implements VolumeService { private VolumeDetailsDao _volumeDetailsDao; @Inject private VMTemplateDao templateDao; + @Inject + private TemplateDataFactory tmplFactory; + @Inject + private VolumeOrchestrationService _volumeMgr; private final static String SNAPSHOT_ID = "SNAPSHOT_ID"; @@ -380,6 +390,14 @@ public AsyncCallFuture expungeVolumeAsync(VolumeInfo volume) { return future; } + public void ensureVolumeIsExpungeReady(long volumeId) { + VolumeVO volume = volDao.findById(volumeId); + if (volume != null && volume.getPodId() != null) { + volume.setPodId(null); + volDao.update(volumeId, volume); + } + } + private boolean volumeExistsOnPrimary(VolumeVO vol) { Long poolId = vol.getPoolId(); @@ -794,6 +812,39 @@ protected Void createVolumeFromBaseImageCallBack(AsyncCallbackDispatcher callback, CreateVolumeFromBaseImageContext context) { + CopyCommandResult result = callback.getResult(); + DataObject vo = context.vo; + DataObject tmplOnPrimary = context.templateOnStore; + VolumeApiResult volResult = new VolumeApiResult((VolumeObject)vo); + + if (result.isSuccess()) { + VolumeVO volume = volDao.findById(vo.getId()); + CopyCmdAnswer answer = (CopyCmdAnswer)result.getAnswer(); + VolumeObjectTO volumeObjectTo = (VolumeObjectTO)answer.getNewData(); + 
volume.setPath(volumeObjectTo.getPath()); + if (volumeObjectTo.getFormat() != null) { + volume.setFormat(volumeObjectTo.getFormat()); + } + + volDao.update(volume.getId(), volume); + vo.processEvent(Event.OperationSuccessed); + } else { + volResult.setResult(result.getResult()); + + try { + destroyAndReallocateManagedVolume((VolumeInfo) vo); + } catch (CloudRuntimeException ex) { + s_logger.warn("Couldn't destroy managed volume: " + vo.getId()); + } + } + + AsyncCallFuture future = context.getFuture(); + future.complete(volResult); + return null; + } + /** * Creates a template volume on managed storage, which will be used for creating ROOT volumes by cloning. * @@ -809,6 +860,9 @@ private TemplateInfo createManagedTemplateVolume(TemplateInfo srcTemplateInfo, P if (templatePoolRef == null) { throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + } else if (templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready) { + // Template already exists + return templateOnPrimary; } // At this point, we have an entry in the DB that points to our cached template. 
@@ -824,13 +878,6 @@ private TemplateInfo createManagedTemplateVolume(TemplateInfo srcTemplateInfo, P throw new CloudRuntimeException("Unable to acquire lock on VMTemplateStoragePool: " + templatePoolRefId); } - // Template already exists - if (templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready) { - _tmpltPoolDao.releaseFromLockTable(templatePoolRefId); - - return templateOnPrimary; - } - try { // create a cache volume on the back-end @@ -875,27 +922,25 @@ private TemplateInfo createManagedTemplateVolume(TemplateInfo srcTemplateInfo, P * @param destHost The host that we will use for the copy */ private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, TemplateInfo templateOnPrimary, VMTemplateStoragePoolVO templatePoolRef, PrimaryDataStore destPrimaryDataStore, - Host destHost) { + Host destHost) throws StorageAccessException { AsyncCallFuture copyTemplateFuture = new AsyncCallFuture<>(); int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); long templatePoolRefId = templatePoolRef.getId(); - templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolRefId, storagePoolMaxWaitSeconds); - - if (templatePoolRef == null) { - throw new CloudRuntimeException("Unable to acquire lock on VMTemplateStoragePool: " + templatePoolRefId); - } - - if (templatePoolRef.getDownloadState() == Status.DOWNLOADED) { - // There can be cases where we acquired the lock, but the template - // was already copied by a previous thread. Just return in that case. 
+ try { + templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolRefId, storagePoolMaxWaitSeconds); - s_logger.debug("Template already downloaded, nothing to do"); + if (templatePoolRef == null) { + throw new CloudRuntimeException("Unable to acquire lock on VMTemplateStoragePool: " + templatePoolRefId); + } - return; - } + if (templatePoolRef.getDownloadState() == Status.DOWNLOADED) { + // There can be cases where we acquired the lock, but the template + // was already copied by a previous thread. Just return in that case. + s_logger.debug("Template already downloaded, nothing to do"); + return; + } - try { // copy the template from sec storage to the created volume CreateBaseImageContext copyContext = new CreateBaseImageContext<>(null, null, destPrimaryDataStore, srcTemplateInfo, copyTemplateFuture, templateOnPrimary, templatePoolRefId); @@ -913,6 +958,7 @@ private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, T details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, srcTemplateInfo.getUniqueName()); details.put(PrimaryDataStore.REMOVE_AFTER_COPY, Boolean.TRUE.toString()); details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(templateOnPrimary.getSize())); + details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(destPrimaryDataStore.getId()))); ChapInfo chapInfo = getChapInfo(templateOnPrimary, destPrimaryDataStore); @@ -923,11 +969,15 @@ private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, T details.put(PrimaryDataStore.CHAP_TARGET_SECRET, chapInfo.getTargetSecret()); } - templateOnPrimary.processEvent(Event.CopyingRequested); - destPrimaryDataStore.setDetails(details); - grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); + try { + grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); + } catch (Exception e) { + throw new StorageAccessException("Unable to grant access to template: " + 
templateOnPrimary.getId() + " on host: " + destHost.getId()); + } + + templateOnPrimary.processEvent(Event.CopyingRequested); VolumeApiResult result; @@ -955,6 +1005,8 @@ private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, T // something weird happens to the volume (XenServer creates an SR, but the VDI copy can fail). // For now, I just retry the copy. } + } catch (StorageAccessException e) { + throw e; } catch (Throwable e) { s_logger.debug("Failed to create a template on primary storage", e); @@ -1031,6 +1083,126 @@ private void createManagedVolumeCloneTemplateAsync(VolumeInfo volumeInfo, Templa } } + private void createManagedVolumeCopyManagedTemplateAsync(VolumeInfo volumeInfo, PrimaryDataStore destPrimaryDataStore, TemplateInfo srcTemplateOnPrimary, Host destHost, AsyncCallFuture future) throws StorageAccessException { + VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), srcTemplateOnPrimary.getId(), null); + + if (templatePoolRef == null) { + throw new CloudRuntimeException("Failed to find template " + srcTemplateOnPrimary.getUniqueName() + " in storage pool " + srcTemplateOnPrimary.getId()); + } + + if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { + throw new CloudRuntimeException("Template " + srcTemplateOnPrimary.getUniqueName() + " has not been downloaded to primary storage."); + } + + String volumeDetailKey = "POOL_TEMPLATE_ID_COPY_ON_HOST_" + destHost.getId(); + + try { + try { + grantAccess(srcTemplateOnPrimary, destHost, destPrimaryDataStore); + } catch (Exception e) { + throw new StorageAccessException("Unable to grant access to src template: " + srcTemplateOnPrimary.getId() + " on host: " + destHost.getId()); + } + + _volumeDetailsDao.addDetail(volumeInfo.getId(), volumeDetailKey, String.valueOf(templatePoolRef.getId()), false); + + // Create a volume on managed storage. 
+ AsyncCallFuture createVolumeFuture = createVolumeAsync(volumeInfo, destPrimaryDataStore); + VolumeApiResult createVolumeResult = createVolumeFuture.get(); + + if (createVolumeResult.isFailed()) { + throw new CloudRuntimeException("Creation of a volume failed: " + createVolumeResult.getResult()); + } + + // Refresh the volume info from the DB. + volumeInfo = volFactory.getVolume(volumeInfo.getId(), destPrimaryDataStore); + + volumeInfo.processEvent(Event.CreateRequested); + CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext<>(null, volumeInfo, destPrimaryDataStore, srcTemplateOnPrimary, future, null, null); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().createVolumeFromBaseManagedImageCallBack(null, null)); + caller.setContext(context); + + Map details = new HashMap(); + details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); + details.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress()); + details.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort())); + details.put(PrimaryDataStore.MANAGED_STORE_TARGET, volumeInfo.get_iScsiName()); + details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, volumeInfo.getName()); + details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(volumeInfo.getSize())); + details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(destPrimaryDataStore.getId()))); + destPrimaryDataStore.setDetails(details); + + grantAccess(volumeInfo, destHost, destPrimaryDataStore); + + try { + motionSrv.copyAsync(srcTemplateOnPrimary, volumeInfo, destHost, caller); + } finally { + revokeAccess(volumeInfo, destHost, destPrimaryDataStore); + } + } catch (StorageAccessException e) { + throw e; + } catch (Throwable e) { + s_logger.debug("Failed to copy managed template on primary storage", e); + String errMsg = "Failed due to " + 
e.toString(); + + try { + destroyAndReallocateManagedVolume(volumeInfo); + } catch (CloudRuntimeException ex) { + s_logger.warn("Failed to destroy managed volume: " + volumeInfo.getId()); + errMsg += " : " + ex.getMessage(); + } + + VolumeApiResult result = new VolumeApiResult(volumeInfo); + result.setResult(errMsg); + future.complete(result); + } finally { + _volumeDetailsDao.removeDetail(volumeInfo.getId(), volumeDetailKey); + + List volumeDetails = _volumeDetailsDao.findDetails(volumeDetailKey, String.valueOf(templatePoolRef.getId()), false); + if (volumeDetails == null || volumeDetails.isEmpty()) { + revokeAccess(srcTemplateOnPrimary, destHost, destPrimaryDataStore); + } + } + } + + private void destroyAndReallocateManagedVolume(VolumeInfo volumeInfo) { + if (volumeInfo == null) { + return; + } + + VolumeVO volume = volDao.findById(volumeInfo.getId()); + if (volume == null) { + return; + } + + if (volume.getState() == State.Allocated) { // Possible states here: Allocated, Ready & Creating + return; + } + + volumeInfo.processEvent(Event.DestroyRequested); + + Volume newVol = _volumeMgr.allocateDuplicateVolume(volume, null); + VolumeVO newVolume = (VolumeVO) newVol; + newVolume.set_iScsiName(null); + volDao.update(newVolume.getId(), newVolume); + s_logger.debug("Allocated new volume: " + newVolume.getId() + " for the VM: " + volume.getInstanceId()); + + try { + AsyncCallFuture expungeVolumeFuture = expungeVolumeAsync(volumeInfo); + VolumeApiResult expungeVolumeResult = expungeVolumeFuture.get(); + if (expungeVolumeResult.isFailed()) { + s_logger.warn("Failed to expunge volume: " + volumeInfo.getId() + " that was created"); + throw new CloudRuntimeException("Failed to expunge volume: " + volumeInfo.getId() + " that was created"); + } + } catch (Exception ex) { + if (canVolumeBeRemoved(volumeInfo.getId())) { + volDao.remove(volumeInfo.getId()); + } + s_logger.warn("Unable to expunge volume: " + volumeInfo.getId() + " due to: " + ex.getMessage()); + throw new 
CloudRuntimeException("Unable to expunge volume: " + volumeInfo.getId() + " due to: " + ex.getMessage()); + } + } + private void createManagedVolumeCopyTemplateAsync(VolumeInfo volumeInfo, PrimaryDataStore primaryDataStore, TemplateInfo srcTemplateInfo, Host destHost, AsyncCallFuture future) { try { // Create a volume on managed storage. @@ -1061,6 +1233,7 @@ private void createManagedVolumeCopyTemplateAsync(VolumeInfo volumeInfo, Primary details.put(PrimaryDataStore.MANAGED_STORE_TARGET, volumeInfo.get_iScsiName()); details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, volumeInfo.getName()); details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(volumeInfo.getSize())); + details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(primaryDataStore.getId()))); ChapInfo chapInfo = getChapInfo(volumeInfo, primaryDataStore); @@ -1106,7 +1279,109 @@ private void createManagedVolumeCopyTemplateAsync(VolumeInfo volumeInfo, Primary } @Override - public AsyncCallFuture createManagedStorageVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId, TemplateInfo srcTemplateInfo, long destHostId) { + public TemplateInfo createManagedStorageTemplate(long srcTemplateId, long destDataStoreId, long destHostId) throws StorageAccessException { + Host destHost = _hostDao.findById(destHostId); + if (destHost == null) { + throw new CloudRuntimeException("Destination host should not be null."); + } + + TemplateInfo srcTemplateInfo = tmplFactory.getTemplate(srcTemplateId); + if (srcTemplateInfo == null) { + throw new CloudRuntimeException("Failed to get info of template: " + srcTemplateId); + } + + if (Storage.ImageFormat.ISO.equals(srcTemplateInfo.getFormat())) { + throw new CloudRuntimeException("Unsupported format: " + Storage.ImageFormat.ISO.toString() + " for managed storage template"); + } + + GlobalLock lock = null; + TemplateInfo templateOnPrimary = null; + try { + String 
templateIdManagedPoolIdLockString = "templateId:" + srcTemplateId + "managedPoolId:" + destDataStoreId; + lock = GlobalLock.getInternLock(templateIdManagedPoolIdLockString); + if (lock == null) { + throw new CloudRuntimeException("Unable to create managed storage template, couldn't get global lock on " + templateIdManagedPoolIdLockString); + } + + int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); + if (!lock.lock(storagePoolMaxWaitSeconds)) { + s_logger.debug("Unable to create managed storage template, couldn't lock on " + templateIdManagedPoolIdLockString); + throw new CloudRuntimeException("Unable to create managed storage template, couldn't lock on " + templateIdManagedPoolIdLockString); + } + + PrimaryDataStore destPrimaryDataStore = dataStoreMgr.getPrimaryDataStore(destDataStoreId); + + // Check if template exists on the storage pool. If not, download and copy to managed storage pool + VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destDataStoreId, srcTemplateId, null); + if (templatePoolRef != null && templatePoolRef.getDownloadState() == Status.DOWNLOADED) { + return tmplFactory.getTemplate(srcTemplateId, destPrimaryDataStore); + } + + templateOnPrimary = createManagedTemplateVolume(srcTemplateInfo, destPrimaryDataStore); + if (templateOnPrimary == null) { + throw new CloudRuntimeException("Failed to create template " + srcTemplateInfo.getUniqueName() + " on primary storage: " + destDataStoreId); + } + + templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), null); + if (templatePoolRef == null) { + throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + } + + if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { + // Populate details which will be later read by the storage subsystem. 
+ Map details = new HashMap<>(); + + details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); + details.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress()); + details.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort())); + details.put(PrimaryDataStore.MANAGED_STORE_TARGET, templateOnPrimary.getInstallPath()); + details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, srcTemplateInfo.getUniqueName()); + details.put(PrimaryDataStore.REMOVE_AFTER_COPY, Boolean.TRUE.toString()); + details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(templateOnPrimary.getSize())); + details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(destPrimaryDataStore.getId()))); + destPrimaryDataStore.setDetails(details); + + try { + grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); + } catch (Exception e) { + throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId()); + } + + templateOnPrimary.processEvent(Event.CopyingRequested); + + try { + //Download and copy template to the managed volume + TemplateInfo templateOnPrimaryNow = tmplFactory.getReadyBypassedTemplateOnManagedStorage(srcTemplateId, templateOnPrimary, destDataStoreId, destHostId); + if (templateOnPrimaryNow == null) { + s_logger.debug("Failed to prepare ready bypassed template: " + srcTemplateId + " on primary storage: " + templateOnPrimary.getId()); + throw new CloudRuntimeException("Failed to prepare ready bypassed template: " + srcTemplateId + " on primary storage: " + templateOnPrimary.getId()); + } + templateOnPrimary.processEvent(Event.OperationSuccessed); + return templateOnPrimaryNow; + } finally { + revokeAccess(templateOnPrimary, destHost, destPrimaryDataStore); + } + } + return null; + } catch (StorageAccessException e) { + throw e; + } catch (Throwable e) { + s_logger.debug("Failed to create 
template on managed primary storage", e); + if (templateOnPrimary != null) { + templateOnPrimary.processEvent(Event.OperationFailed); + } + + throw new CloudRuntimeException(e.getMessage()); + } finally { + if (lock != null) { + lock.unlock(); + lock.releaseRef(); + } + } + } + + @Override + public AsyncCallFuture createManagedStorageVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId, TemplateInfo srcTemplateInfo, long destHostId) throws StorageAccessException { PrimaryDataStore destPrimaryDataStore = dataStoreMgr.getPrimaryDataStore(destDataStoreId); Host destHost = _hostDao.findById(destHostId); @@ -1144,10 +1419,16 @@ public AsyncCallFuture createManagedStorageVolumeFromTemplateAs copyTemplateToManagedTemplateVolume(srcTemplateInfo, templateOnPrimary, templatePoolRef, destPrimaryDataStore, destHost); } - // We have a template on primary storage. Clone it to new volume. - s_logger.debug("Creating a clone from template on primary storage " + destDataStoreId); + if (destPrimaryDataStore.getPoolType() != StoragePoolType.PowerFlex) { + // We have a template on primary storage. Clone it to new volume. + s_logger.debug("Creating a clone from template on primary storage " + destDataStoreId); - createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future); + createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future); + } else { + // We have a template on PowerFlex primary storage. Create new volume and copy to it. 
+ s_logger.debug("Copying the template to the volume on primary storage"); + createManagedVolumeCopyManagedTemplateAsync(volumeInfo, destPrimaryDataStore, templateOnPrimary, destHost, future); + } } else { s_logger.debug("Primary storage does not support cloning or no support for UUID resigning on the host side; copying the template normally"); @@ -1300,6 +1581,8 @@ protected VolumeVO duplicateVolumeOnAnotherStorage(Volume volume, StoragePool po // part here to make sure the credentials do not get stored in the db unencrypted. if (pool.getPoolType() == StoragePoolType.SMB && folder != null && folder.contains("?")) { folder = folder.substring(0, folder.indexOf("?")); + } else if (pool.getPoolType() == StoragePoolType.PowerFlex) { + folder = volume.getFolder(); } VolumeVO newVol = new VolumeVO(volume); @@ -1309,6 +1592,7 @@ protected VolumeVO duplicateVolumeOnAnotherStorage(Volume volume, StoragePool po newVol.setFolder(folder); newVol.setPodId(pool.getPodId()); newVol.setPoolId(pool.getId()); + newVol.setPoolType(pool.getPoolType()); newVol.setLastPoolId(lastPoolId); newVol.setPodId(pool.getPodId()); return volDao.persist(newVol); @@ -1502,6 +1786,10 @@ protected Void copyVolumeCallBack(AsyncCallbackDispatcher destroyFuture = expungeVolumeAsync(destVolume); destroyFuture.get(); @@ -1512,6 +1800,10 @@ protected Void copyVolumeCallBack(AsyncCallbackDispatcher destroyFuture = expungeVolumeAsync(srcVolume); // If volume destroy fails, this could be because of vdi is still in use state, so wait and retry. 
@@ -1569,7 +1861,7 @@ public AsyncCallFuture migrateVolume(VolumeInfo srcVolume, Data caller.setCallback(caller.getTarget().migrateVolumeCallBack(null, null)).setContext(context); motionSrv.copyAsync(srcVolume, destVolume, caller); } catch (Exception e) { - s_logger.debug("Failed to copy volume", e); + s_logger.debug("Failed to migrate volume", e); res.setResult(e.toString()); future.complete(res); } @@ -1588,6 +1880,10 @@ protected Void migrateVolumeCallBack(AsyncCallbackDispatcherjna-platform ${cs.jna.version} + + org.apache.cloudstack + cloud-plugin-storage-volume-scaleio + ${project.version} + compile + diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index d6d078ea3558..00545cff9870 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -46,9 +46,7 @@ import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; -import com.cloud.hypervisor.kvm.resource.rolling.maintenance.RollingMaintenanceAgentExecutor; -import com.cloud.hypervisor.kvm.resource.rolling.maintenance.RollingMaintenanceExecutor; -import com.cloud.hypervisor.kvm.resource.rolling.maintenance.RollingMaintenanceServiceExecutor; +import org.apache.cloudstack.storage.configdrive.ConfigDrive; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; @@ -88,6 +86,7 @@ import com.cloud.agent.api.PingCommand; import com.cloud.agent.api.PingRoutingCommand; import com.cloud.agent.api.PingRoutingWithNwGroupsCommand; +import com.cloud.agent.api.SecurityGroupRulesCmd; import com.cloud.agent.api.SetupGuestNetworkCommand; 
import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; @@ -110,7 +109,6 @@ import com.cloud.agent.resource.virtualnetwork.VRScripts; import com.cloud.agent.resource.virtualnetwork.VirtualRouterDeployer; import com.cloud.agent.resource.virtualnetwork.VirtualRoutingResource; -import com.cloud.agent.api.SecurityGroupRulesCmd; import com.cloud.dc.Vlan; import com.cloud.exception.InternalErrorException; import com.cloud.host.Host.Type; @@ -143,6 +141,9 @@ import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.WatchDogDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.WatchDogDef.WatchDogAction; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.WatchDogDef.WatchDogModel; +import com.cloud.hypervisor.kvm.resource.rolling.maintenance.RollingMaintenanceAgentExecutor; +import com.cloud.hypervisor.kvm.resource.rolling.maintenance.RollingMaintenanceExecutor; +import com.cloud.hypervisor.kvm.resource.rolling.maintenance.RollingMaintenanceServiceExecutor; import com.cloud.hypervisor.kvm.resource.wrapper.LibvirtRequestWrapper; import com.cloud.hypervisor.kvm.resource.wrapper.LibvirtUtilitiesHelper; import com.cloud.hypervisor.kvm.storage.IscsiStorageCleanupMonitor; @@ -236,6 +237,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv public static final String SSHPUBKEYPATH = SSHKEYSPATH + File.separator + "id_rsa.pub.cloud"; public static final String DEFAULTDOMRSSHPORT = "3922"; + public final static String HOST_CACHE_PATH_PARAMETER = "host.cache.location"; + public final static String CONFIG_DIR = "config"; + public static final String BASH_SCRIPT_PATH = "/bin/bash"; private String _mountPoint = "/mnt"; @@ -515,6 +519,14 @@ public String getDirectDownloadTemporaryDownloadPath() { return directDownloadTemporaryDownloadPath; } + public String getConfigPath() { + return getCachePath() + "/" + CONFIG_DIR; + } + + public String getCachePath() { + return cachePath; + } + public String 
getResizeVolumePath() { return _resizeVolumePath; } @@ -567,6 +579,7 @@ protected enum BridgeType { protected boolean dpdkSupport = false; protected String dpdkOvsPath; protected String directDownloadTemporaryDownloadPath; + protected String cachePath; private String getEndIpFromStartIp(final String startIp, final int numIps) { final String[] tokens = startIp.split("[.]"); @@ -618,6 +631,10 @@ private String getDefaultDirectDownloadTemporaryPath() { return "/var/lib/libvirt/images"; } + private String getDefaultCachePath() { + return "/var/cache/cloud"; + } + protected String getDefaultNetworkScriptsDir() { return "scripts/vm/network/vnet"; } @@ -707,6 +724,11 @@ public boolean configure(final String name, final Map params) th directDownloadTemporaryDownloadPath = getDefaultDirectDownloadTemporaryPath(); } + cachePath = (String) params.get(HOST_CACHE_PATH_PARAMETER); + if (org.apache.commons.lang.StringUtils.isBlank(cachePath)) { + cachePath = getDefaultCachePath(); + } + params.put("domr.scripts.dir", domrScriptsDir); _virtRouterResource = new VirtualRoutingResource(this); @@ -2458,11 +2480,21 @@ public void createVifs(final VirtualMachineTO vmSpec, final LibvirtVMDef vm) thr } public String getVolumePath(final Connect conn, final DiskTO volume) throws LibvirtException, URISyntaxException { + return getVolumePath(conn, volume, false); + } + + public String getVolumePath(final Connect conn, final DiskTO volume, boolean diskOnHostCache) throws LibvirtException, URISyntaxException { final DataTO data = volume.getData(); final DataStoreTO store = data.getDataStore(); if (volume.getType() == Volume.Type.ISO && data.getPath() != null && (store instanceof NfsTO || store instanceof PrimaryDataStoreTO && data instanceof TemplateObjectTO && !((TemplateObjectTO) data).isDirectDownload())) { + + if (data.getPath().startsWith(ConfigDrive.CONFIGDRIVEDIR) && diskOnHostCache) { + String configDrivePath = getConfigPath() + "/" + data.getPath(); + return configDrivePath; + } + 
final String isoPath = store.getUrl().split("\\?")[0] + File.separator + data.getPath(); final int index = isoPath.lastIndexOf("/"); final String path = isoPath.substring(0, index); @@ -2500,7 +2532,11 @@ public int compare(final DiskTO arg0, final DiskTO arg1) { if (volume.getType() == Volume.Type.ISO && data.getPath() != null) { DataStoreTO dataStore = data.getDataStore(); String dataStoreUrl = null; - if (dataStore instanceof NfsTO) { + if (data.getPath().startsWith(ConfigDrive.CONFIGDRIVEDIR) && vmSpec.isConfigDriveOnHostCache() && data instanceof TemplateObjectTO) { + String configDrivePath = getConfigPath() + "/" + data.getPath(); + physicalDisk = new KVMPhysicalDisk(configDrivePath, ((TemplateObjectTO) data).getUuid(), null); + physicalDisk.setFormat(PhysicalDiskFormat.FILE); + } else if (dataStore instanceof NfsTO) { NfsTO nfsStore = (NfsTO)data.getDataStore(); dataStoreUrl = nfsStore.getUrl(); physicalDisk = getPhysicalDiskFromNfsStore(dataStoreUrl, data); @@ -2580,6 +2616,8 @@ public int compare(final DiskTO arg0, final DiskTO arg1) { */ disk.defNetworkBasedDisk(physicalDisk.getPath().replace("rbd:", ""), pool.getSourceHost(), pool.getSourcePort(), pool.getAuthUserName(), pool.getUuid(), devId, diskBusType, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW); + } else if (pool.getType() == StoragePoolType.PowerFlex) { + disk.defBlockBasedDisk(physicalDisk.getPath(), devId, diskBusTypeData); } else if (pool.getType() == StoragePoolType.Gluster) { final String mountpoint = pool.getLocalPath(); final String path = physicalDisk.getPath(); @@ -2663,7 +2701,6 @@ public int compare(final DiskTO arg0, final DiskTO arg1) { } } } - } private KVMPhysicalDisk getPhysicalDiskPrimaryStore(PrimaryDataStoreTO primaryDataStoreTO, DataTO data) { @@ -2825,6 +2862,8 @@ public synchronized String attachOrDetachDisk(final Connect conn, if (attachingPool.getType() == StoragePoolType.RBD) { diskdef.defNetworkBasedDisk(attachingDisk.getPath(), attachingPool.getSourceHost(), 
attachingPool.getSourcePort(), attachingPool.getAuthUserName(), attachingPool.getUuid(), devId, busT, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW); + } else if (attachingPool.getType() == StoragePoolType.PowerFlex) { + diskdef.defBlockBasedDisk(attachingDisk.getPath(), devId, busT); } else if (attachingPool.getType() == StoragePoolType.Gluster) { diskdef.defNetworkBasedDisk(attachingDisk.getPath(), attachingPool.getSourceHost(), attachingPool.getSourcePort(), null, null, devId, busT, DiskProtocol.GLUSTER, DiskDef.DiskFmtType.QCOW2); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java index 56519aed3a41..1bdf2db8c4f4 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java @@ -18,7 +18,7 @@ public class LibvirtStoragePoolDef { public enum PoolType { - ISCSI("iscsi"), NETFS("netfs"), LOGICAL("logical"), DIR("dir"), RBD("rbd"), GLUSTERFS("glusterfs"); + ISCSI("iscsi"), NETFS("netfs"), LOGICAL("logical"), DIR("dir"), RBD("rbd"), GLUSTERFS("glusterfs"), POWERFLEX("powerflex"); String _poolType; PoolType(String poolType) { @@ -178,7 +178,7 @@ public String toString() { storagePoolBuilder.append("'/>\n"); storagePoolBuilder.append("\n"); } - if (_poolType != PoolType.RBD) { + if (_poolType != PoolType.RBD && _poolType != PoolType.POWERFLEX) { storagePoolBuilder.append("\n"); storagePoolBuilder.append("" + _targetPath + "\n"); storagePoolBuilder.append("\n"); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java index 7b70c37ab191..bd7deaa0eb41 100644 --- 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java @@ -55,7 +55,7 @@ public LibvirtStoragePoolDef parseStoragePoolXML(String poolXML) { String host = getAttrValue("host", "name", source); String format = getAttrValue("format", "type", source); - if (type.equalsIgnoreCase("rbd")) { + if (type.equalsIgnoreCase("rbd") || type.equalsIgnoreCase("powerflex")) { int port = 0; String xmlPort = getAttrValue("host", "port", source); if (StringUtils.isNotBlank(xmlPort)) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java index efc009037b9c..2618f20fae11 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java @@ -18,13 +18,15 @@ // package com.cloud.hypervisor.kvm.resource.wrapper; +import org.apache.cloudstack.agent.directdownload.CheckUrlAnswer; +import org.apache.cloudstack.agent.directdownload.CheckUrlCommand; +import org.apache.log4j.Logger; + import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; import com.cloud.utils.UriUtils; -import org.apache.cloudstack.agent.directdownload.CheckUrlAnswer; -import org.apache.cloudstack.agent.directdownload.CheckUrlCommand; -import org.apache.log4j.Logger; +import com.cloud.utils.storage.QCOW2Utils; @ResourceWrapper(handles = CheckUrlCommand.class) public class LibvirtCheckUrlCommand extends CommandWrapper { @@ -39,7 +41,12 @@ public CheckUrlAnswer execute(CheckUrlCommand cmd, LibvirtComputingResource serv Long remoteSize 
= null; try { UriUtils.checkUrlExistence(url); - remoteSize = UriUtils.getRemoteSize(url); + + if ("qcow2".equalsIgnoreCase(cmd.getFormat())) { + remoteSize = QCOW2Utils.getVirtualSize(url); + } else { + remoteSize = UriUtils.getRemoteSize(url); + } } catch (IllegalArgumentException e) { s_logger.warn(e.getMessage()); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumeStatsCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumeStatsCommandWrapper.java index 00bdfcd49d73..a2f50ac6555f 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumeStatsCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumeStatsCommandWrapper.java @@ -50,7 +50,12 @@ public Answer execute(final GetVolumeStatsCommand cmd, final LibvirtComputingRes StoragePoolType poolType = cmd.getPoolType(); HashMap statEntry = new HashMap(); for (String volumeUuid : cmd.getVolumeUuids()) { - statEntry.put(volumeUuid, getVolumeStat(libvirtComputingResource, conn, volumeUuid, storeUuid, poolType)); + VolumeStatsEntry volumeStatsEntry = getVolumeStat(libvirtComputingResource, conn, volumeUuid, storeUuid, poolType); + if (volumeStatsEntry == null) { + String msg = "Can't get disk stats as pool or disk details unavailable for volume: " + volumeUuid + " on the storage pool: " + storeUuid; + return new GetVolumeStatsAnswer(cmd, msg, null); + } + statEntry.put(volumeUuid, volumeStatsEntry); } return new GetVolumeStatsAnswer(cmd, "", statEntry); } catch (LibvirtException | CloudRuntimeException e) { @@ -58,10 +63,17 @@ public Answer execute(final GetVolumeStatsCommand cmd, final LibvirtComputingRes } } - private VolumeStatsEntry getVolumeStat(final LibvirtComputingResource libvirtComputingResource, final Connect conn, final String volumeUuid, final String storeUuid, final 
StoragePoolType poolType) throws LibvirtException { KVMStoragePool sourceKVMPool = libvirtComputingResource.getStoragePoolMgr().getStoragePool(poolType, storeUuid); + if (sourceKVMPool == null) { + return null; + } + KVMPhysicalDisk sourceKVMVolume = sourceKVMPool.getPhysicalDisk(volumeUuid); + if (sourceKVMVolume == null) { + return null; + } + return new VolumeStatsEntry(volumeUuid, sourceKVMVolume.getSize(), sourceKVMVolume.getVirtualSize()); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtHandleConfigDriveCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtHandleConfigDriveCommandWrapper.java index 6baae85e2214..6067150df0f4 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtHandleConfigDriveCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtHandleConfigDriveCommandWrapper.java @@ -24,16 +24,21 @@ import java.nio.file.Paths; import org.apache.cloudstack.storage.configdrive.ConfigDriveBuilder; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.HandleConfigDriveIsoAnswer; import com.cloud.agent.api.HandleConfigDriveIsoCommand; +import com.cloud.agent.api.to.DataStoreTO; import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; import com.cloud.hypervisor.kvm.storage.KVMStoragePool; import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.network.element.NetworkElement; import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; import com.cloud.storage.Storage; +import com.cloud.utils.exception.CloudRuntimeException; @ResourceWrapper(handles = HandleConfigDriveIsoCommand.class) public final class LibvirtHandleConfigDriveCommandWrapper extends CommandWrapper { @@ -41,38 +46,103 @@ 
public final class LibvirtHandleConfigDriveCommandWrapper extends CommandWrapper @Override public Answer execute(final HandleConfigDriveIsoCommand command, final LibvirtComputingResource libvirtComputingResource) { - final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); - final KVMStoragePool pool = storagePoolMgr.getStoragePool(Storage.StoragePoolType.NetworkFilesystem, command.getDestStore().getUuid()); - if (pool == null) { - return new Answer(command, false, "Pool not found, config drive for KVM is only supported for NFS"); - } + String mountPoint = null; + + try { + if (command.isCreate()) { + LOG.debug("Creating config drive: " + command.getIsoFile()); + + NetworkElement.Location location = NetworkElement.Location.PRIMARY; + if (command.isHostCachePreferred()) { + LOG.debug("Using the KVM host for config drive"); + mountPoint = libvirtComputingResource.getConfigPath(); + location = NetworkElement.Location.HOST; + } else { + final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); + KVMStoragePool pool = null; + String poolUuid = null; + Storage.StoragePoolType poolType = null; + DataStoreTO dataStoreTO = command.getDestStore(); + if (dataStoreTO != null) { + if (dataStoreTO instanceof PrimaryDataStoreTO) { + PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) dataStoreTO; + poolType = primaryDataStoreTO.getPoolType(); + } else { + poolType = Storage.StoragePoolType.NetworkFilesystem; + } + poolUuid = command.getDestStore().getUuid(); + pool = storagePoolMgr.getStoragePool(poolType, poolUuid); + } + + if (pool == null || poolType == null) { + return new HandleConfigDriveIsoAnswer(command, "Unable to create config drive, Pool " + (poolUuid != null ? 
poolUuid : "") + " not found"); + } + + if (pool.supportsConfigDriveIso()) { + LOG.debug("Using the pool: " + poolUuid + " for config drive"); + mountPoint = pool.getLocalPath(); + } else if (command.getUseHostCacheOnUnsupportedPool()) { + LOG.debug("Config drive for KVM is not supported for pool type: " + poolType.toString() + ", using the KVM host"); + mountPoint = libvirtComputingResource.getConfigPath(); + location = NetworkElement.Location.HOST; + } else { + LOG.debug("Config drive for KVM is not supported for pool type: " + poolType.toString()); + return new HandleConfigDriveIsoAnswer(command, "Config drive for KVM is not supported for pool type: " + poolType.toString()); + } + } + + Path isoPath = Paths.get(mountPoint, command.getIsoFile()); + File isoFile = new File(mountPoint, command.getIsoFile()); + + if (command.getIsoData() == null) { + return new HandleConfigDriveIsoAnswer(command, "Invalid config drive ISO data received"); + } + if (isoFile.exists()) { + LOG.debug("An old config drive iso already exists"); + } - final String mountPoint = pool.getLocalPath(); - final Path isoPath = Paths.get(mountPoint, command.getIsoFile()); - final File isoFile = new File(mountPoint, command.getIsoFile()); - if (command.isCreate()) { - LOG.debug("Creating config drive: " + command.getIsoFile()); - if (command.getIsoData() == null) { - return new Answer(command, false, "Invalid config drive ISO data received"); - } - if (isoFile.exists()) { - LOG.debug("An old config drive iso already exists"); - } - try { Files.createDirectories(isoPath.getParent()); ConfigDriveBuilder.base64StringToFile(command.getIsoData(), mountPoint, command.getIsoFile()); - } catch (IOException e) { - return new Answer(command, false, "Failed due to exception: " + e.getMessage()); - } - } else { - try { - Files.deleteIfExists(isoPath); - } catch (IOException e) { - LOG.warn("Failed to delete config drive: " + isoPath.toAbsolutePath().toString()); - return new Answer(command, false, "Failed due 
to exception: " + e.getMessage()); + + return new HandleConfigDriveIsoAnswer(command, location); + } else { + LOG.debug("Deleting config drive: " + command.getIsoFile()); + Path configDrivePath = null; + + if (command.isHostCachePreferred()) { + // Check and delete config drive in host storage if exists + mountPoint = libvirtComputingResource.getConfigPath(); + configDrivePath = Paths.get(mountPoint, command.getIsoFile()); + Files.deleteIfExists(configDrivePath); + } else { + final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); + KVMStoragePool pool = null; + DataStoreTO dataStoreTO = command.getDestStore(); + if (dataStoreTO != null) { + if (dataStoreTO instanceof PrimaryDataStoreTO) { + PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) dataStoreTO; + Storage.StoragePoolType poolType = primaryDataStoreTO.getPoolType(); + pool = storagePoolMgr.getStoragePool(poolType, command.getDestStore().getUuid()); + } else { + pool = storagePoolMgr.getStoragePool(Storage.StoragePoolType.NetworkFilesystem, command.getDestStore().getUuid()); + } + } + + if (pool != null && pool.supportsConfigDriveIso()) { + mountPoint = pool.getLocalPath(); + configDrivePath = Paths.get(mountPoint, command.getIsoFile()); + Files.deleteIfExists(configDrivePath); + } + } + + return new HandleConfigDriveIsoAnswer(command); } + } catch (final IOException e) { + LOG.debug("Failed to handle config drive due to " + e.getMessage(), e); + return new HandleConfigDriveIsoAnswer(command, "Failed due to exception: " + e.getMessage()); + } catch (final CloudRuntimeException e) { + LOG.debug("Failed to handle config drive due to " + e.getMessage(), e); + return new HandleConfigDriveIsoAnswer(command, "Failed due to exception: " + e.toString()); } - - return new Answer(command); } } \ No newline at end of file diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java index f3f50aa61f06..38cd9958d7c9 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java @@ -19,11 +19,22 @@ package com.cloud.hypervisor.kvm.resource.wrapper; +import java.net.URISyntaxException; +import java.util.HashMap; +import java.util.Map; + +import org.apache.cloudstack.storage.configdrive.ConfigDrive; +import org.apache.commons.collections.MapUtils; +import org.apache.log4j.Logger; +import org.libvirt.Connect; +import org.libvirt.LibvirtException; + import com.cloud.agent.api.Answer; import com.cloud.agent.api.PrepareForMigrationAnswer; import com.cloud.agent.api.PrepareForMigrationCommand; -import com.cloud.agent.api.to.DpdkTO; +import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DiskTO; +import com.cloud.agent.api.to.DpdkTO; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.exception.InternalErrorException; @@ -36,14 +47,6 @@ import com.cloud.storage.Volume; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; -import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; -import org.libvirt.Connect; -import org.libvirt.LibvirtException; - -import java.net.URISyntaxException; -import java.util.HashMap; -import java.util.Map; @ResourceWrapper(handles = PrepareForMigrationCommand.class) public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapper { @@ -86,7 +89,12 @@ public Answer execute(final PrepareForMigrationCommand command, final LibvirtCom final DiskTO[] volumes = vm.getDisks(); for (final DiskTO volume : volumes) { if (volume.getType() == Volume.Type.ISO) { - 
libvirtComputingResource.getVolumePath(conn, volume); + final DataTO data = volume.getData(); + if (data != null && data.getPath() != null && data.getPath().startsWith(ConfigDrive.CONFIGDRIVEDIR)) { + libvirtComputingResource.getVolumePath(conn, volume, vm.isConfigDriveOnHostCache()); + } else { + libvirtComputingResource.getVolumePath(conn, volume); + } } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java index 0418dbbb0000..7684789c3d2a 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java @@ -330,6 +330,12 @@ public boolean disconnectPhysicalDisk(String volumeUuid, KVMStoragePool pool) { @Override public boolean disconnectPhysicalDisk(Map volumeToDisconnect) { + String poolType = volumeToDisconnect.get(DiskTO.PROTOCOL_TYPE); + // Unsupported pool types + if (poolType != null && poolType.equalsIgnoreCase(StoragePoolType.PowerFlex.toString())) { + return false; + } + String host = volumeToDisconnect.get(DiskTO.STORAGE_HOST); String port = volumeToDisconnect.get(DiskTO.STORAGE_PORT); String path = volumeToDisconnect.get(DiskTO.IQN); @@ -447,7 +453,7 @@ public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, S } @Override - public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, KVMStoragePool destPool, boolean isIso) { + public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { return null; } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStoragePool.java 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStoragePool.java index 865dfab58ff5..8e4af764cd60 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStoragePool.java @@ -19,9 +19,9 @@ import java.util.List; import java.util.Map; -import com.cloud.storage.Storage; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.StoragePoolType; public class IscsiAdmStoragePool implements KVMStoragePool { @@ -165,4 +165,9 @@ public String getSourceDir() { public String getLocalPath() { return _localPath; } + + @Override + public boolean supportsConfigDriveIso() { + return false; + } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java index be7a8b05184a..46d78e5f6b3a 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java @@ -19,9 +19,9 @@ import java.util.List; import java.util.Map; -import com.cloud.storage.Storage; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.StoragePoolType; public interface KVMStoragePool { @@ -70,4 +70,6 @@ public interface KVMStoragePool { PhysicalDiskFormat getDefaultFormat(); public boolean createFolder(String path); + + public boolean supportsConfigDriveIso(); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java index 544c47f07e57..e747093ec671 100644 --- 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java @@ -22,15 +22,15 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.UUID; import java.util.Set; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; -import org.apache.log4j.Logger; - import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.apache.log4j.Logger; +import org.reflections.Reflections; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.VirtualMachineTO; @@ -44,8 +44,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine; -import org.reflections.Reflections; - public class KVMStoragePoolManager { private static final Logger s_logger = Logger.getLogger(KVMStoragePoolManager.class); @@ -100,6 +98,7 @@ public KVMStoragePoolManager(StorageLayer storagelayer, KVMHAMonitor monitor) { // add other storage adaptors here // this._storageMapper.put("newadaptor", new NewStorageAdaptor(storagelayer)); this._storageMapper.put(StoragePoolType.ManagedNFS.toString(), new ManagedNfsStorageAdaptor(storagelayer)); + this._storageMapper.put(StoragePoolType.PowerFlex.toString(), new ScaleIOStorageAdaptor(storagelayer)); // add any adaptors that wish to register themselves via annotation Reflections reflections = new Reflections("com.cloud.hypervisor.kvm.storage"); @@ -253,7 +252,7 @@ public KVMStoragePool getStoragePool(StoragePoolType type, String uuid, boolean if (info != null) { pool = createStoragePool(info.name, info.host, info.port, info.path, info.userInfo, info.poolType, info.type); } else { - throw new CloudRuntimeException("Could not fetch storage pool " + uuid + " from libvirt"); + throw new 
CloudRuntimeException("Could not fetch storage pool " + uuid + " from libvirt due to " + e.getMessage()); } } return pool; @@ -286,36 +285,38 @@ public KVMStoragePool getStoragePoolByURI(String uri) { public KVMPhysicalDisk getPhysicalDisk(StoragePoolType type, String poolUuid, String volName) { int cnt = 0; - int retries = 10; + int retries = 100; KVMPhysicalDisk vol = null; //harden get volume, try cnt times to get volume, in case volume is created on other host + //Poll more frequently and return immediately once disk is found String errMsg = ""; while (cnt < retries) { try { KVMStoragePool pool = getStoragePool(type, poolUuid); vol = pool.getPhysicalDisk(volName); if (vol != null) { - break; + return vol; } } catch (Exception e) { - s_logger.debug("Failed to find volume:" + volName + " due to" + e.toString() + ", retry:" + cnt); + s_logger.debug("Failed to find volume:" + volName + " due to " + e.toString() + ", retry:" + cnt); errMsg = e.toString(); } try { - Thread.sleep(30000); + Thread.sleep(3000); } catch (InterruptedException e) { s_logger.debug("[ignored] interupted while trying to get storage pool."); } cnt++; } + KVMStoragePool pool = getStoragePool(type, poolUuid); + vol = pool.getPhysicalDisk(volName); if (vol == null) { throw new CloudRuntimeException(errMsg); } else { return vol; } - } public KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo, StoragePoolType type) { @@ -377,6 +378,10 @@ public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String n return adaptor.createDiskFromTemplate(template, name, PhysicalDiskFormat.DIR, provisioningType, size, destPool, timeout); + } else if (destPool.getType() == StoragePoolType.PowerFlex) { + return adaptor.createDiskFromTemplate(template, name, + PhysicalDiskFormat.RAW, provisioningType, + size, destPool, timeout); } else { return adaptor.createDiskFromTemplate(template, name, PhysicalDiskFormat.QCOW2, provisioningType, @@ -405,9 +410,9 @@ 
public KVMPhysicalDisk createDiskWithTemplateBacking(KVMPhysicalDisk template, S return adaptor.createDiskFromTemplateBacking(template, name, format, size, destPool, timeout); } - public KVMPhysicalDisk createPhysicalDiskFromDirectDownloadTemplate(String templateFilePath, KVMStoragePool destPool, boolean isIso) { + public KVMPhysicalDisk createPhysicalDiskFromDirectDownloadTemplate(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { StorageAdaptor adaptor = getStorageAdaptor(destPool.getType()); - return adaptor.createTemplateFromDirectDownloadFile(templateFilePath, destPool, isIso); + return adaptor.createTemplateFromDirectDownloadFile(templateFilePath, destTemplatePath, destPool, format, timeout); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index cc47c5532834..f8ce8117dea6 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -37,7 +37,6 @@ import javax.naming.ConfigurationException; -import com.cloud.utils.Pair; import org.apache.cloudstack.agent.directdownload.DirectDownloadAnswer; import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand; import org.apache.cloudstack.agent.directdownload.HttpDirectDownloadCommand; @@ -117,6 +116,7 @@ import com.cloud.storage.template.QCOW2Processor; import com.cloud.storage.template.TemplateLocation; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; import com.cloud.utils.storage.S3.S3Utils; @@ -255,11 +255,15 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { String path = details != null ? 
details.get("managedStoreTarget") : null; - storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details); + if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { + s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + } primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); - storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path); + if (!storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path)) { + s_logger.warn("Failed to disconnect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + } } else { primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, UUID.randomUUID().toString(), primaryPool, cmd.getWaitInMillSeconds()); } @@ -273,7 +277,7 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { final TemplateObjectTO newTemplate = new TemplateObjectTO(); newTemplate.setPath(primaryVol.getName()); newTemplate.setSize(primaryVol.getSize()); - if (primaryPool.getType() == StoragePoolType.RBD) { + if (primaryPool.getType() == StoragePoolType.RBD || primaryPool.getType() == StoragePoolType.PowerFlex) { newTemplate.setFormat(ImageFormat.RAW); } else { newTemplate.setFormat(ImageFormat.QCOW2); @@ -381,6 +385,27 @@ public Answer cloneVolumeFromBaseTemplate(final CopyCommand cmd) { if (primaryPool.getType() == StoragePoolType.CLVM) { templatePath = ((NfsTO)imageStore).getUrl() + File.separator + templatePath; vol = templateToPrimaryDownload(templatePath, primaryPool, volume.getUuid(), volume.getSize(), cmd.getWaitInMillSeconds()); + } else if (primaryPool.getType() == StoragePoolType.PowerFlex) { + Map details = primaryStore.getDetails(); + String path = details != null ?
details.get("managedStoreTarget") : null; + + if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath, details)) { + s_logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid()); + } + + BaseVol = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath); + if (BaseVol == null) { + s_logger.debug("Failed to get the base template volume: " + templatePath); + throw new CloudRuntimeException("Failed to get the base template volume: " + templatePath); + } + + if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { + s_logger.warn("Failed to connect new volume at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + } + + vol = storagePoolMgr.copyPhysicalDisk(BaseVol, path != null ? path : volume.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); + + storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path); } else { if (templatePath.contains("/mnt")) { //upgrade issue, if the path contains path, need to extract the volume uuid from path @@ -1344,6 +1369,9 @@ public Answer attachVolume(final AttachCommand cmd) { } catch (final InternalErrorException e) { s_logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); return new AttachAnswer(e.toString()); + } catch (final CloudRuntimeException e) { + s_logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); + return new AttachAnswer(e.toString()); + } } @@ -1375,6 +1403,9 @@ public Answer dettachVolume(final DettachCommand cmd) { } catch (final InternalErrorException e) { s_logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); return new DettachAnswer(e.toString()); + } catch (final CloudRuntimeException e) { + s_logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); + return new DettachAnswer(e.toString()); + } } @@ -1728,6 +1759,7 @@
public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) final PrimaryDataStoreTO pool = cmd.getDestPool(); DirectTemplateDownloader downloader; KVMPhysicalDisk template; + KVMStoragePool destPool = null; try { s_logger.debug("Verifying temporary location for downloading the template exists on the host"); @@ -1746,7 +1778,7 @@ public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) return new DirectDownloadAnswer(false, msg, true); } - KVMStoragePool destPool = storagePoolMgr.getStoragePool(pool.getPoolType(), pool.getUuid()); + destPool = storagePoolMgr.getStoragePool(pool.getPoolType(), pool.getUuid()); downloader = getDirectTemplateDownloaderFromCommand(cmd, destPool, temporaryDownloadPath); s_logger.debug("Trying to download template"); Pair result = downloader.downloadTemplate(); @@ -1759,7 +1791,19 @@ public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) s_logger.warn("Couldn't validate template checksum"); return new DirectDownloadAnswer(false, "Checksum validation failed", false); } - template = storagePoolMgr.createPhysicalDiskFromDirectDownloadTemplate(tempFilePath, destPool, cmd.isIso()); + + final TemplateObjectTO destTemplate = cmd.getDestData(); + String destTemplatePath = (destTemplate != null) ? 
destTemplate.getPath() : null; + + if (!storagePoolMgr.connectPhysicalDisk(pool.getPoolType(), pool.getUuid(), destTemplatePath, null)) { + s_logger.warn("Unable to connect physical disk at path: " + destTemplatePath + ", in storage pool id: " + pool.getUuid()); + } + + template = storagePoolMgr.createPhysicalDiskFromDirectDownloadTemplate(tempFilePath, destTemplatePath, destPool, cmd.getFormat(), cmd.getWaitInMillSeconds()); + + if (!storagePoolMgr.disconnectPhysicalDisk(pool.getPoolType(), pool.getUuid(), destTemplatePath)) { + s_logger.warn("Unable to disconnect physical disk at path: " + destTemplatePath + ", in storage pool id: " + pool.getUuid()); + } } catch (CloudRuntimeException e) { s_logger.warn("Error downloading template " + cmd.getTemplateId() + " due to: " + e.getMessage()); return new DirectDownloadAnswer(false, "Unable to download template: " + e.getMessage(), true); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java index f9c627b82b45..630b98855149 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java @@ -24,6 +24,10 @@ import java.util.Map; import java.util.UUID; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.commons.codec.binary.Base64; import org.apache.log4j.Logger; import org.libvirt.Connect; @@ -42,12 +46,6 @@ import com.ceph.rbd.RbdImage; import com.ceph.rbd.jna.RbdImageInfo; import com.ceph.rbd.jna.RbdSnapInfo; - -import org.apache.cloudstack.utils.qemu.QemuImg; -import 
org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; -import org.apache.cloudstack.utils.qemu.QemuImgException; -import org.apache.cloudstack.utils.qemu.QemuImgFile; - import com.cloud.exception.InternalErrorException; import com.cloud.hypervisor.kvm.resource.LibvirtConnection; import com.cloud.hypervisor.kvm.resource.LibvirtSecretDef; @@ -160,20 +158,20 @@ private void extractDownloadedTemplate(String downloadedTemplateFile, KVMStorage } @Override - public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, KVMStoragePool destPool, boolean isIso) { + public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { File sourceFile = new File(templateFilePath); if (!sourceFile.exists()) { throw new CloudRuntimeException("Direct download template file " + sourceFile + " does not exist on this host"); } String templateUuid = UUID.randomUUID().toString(); - if (isIso) { + if (Storage.ImageFormat.ISO.equals(format)) { templateUuid += ".iso"; } String destinationFile = destPool.getLocalPath() + File.separator + templateUuid; if (destPool.getType() == StoragePoolType.NetworkFilesystem || destPool.getType() == StoragePoolType.Filesystem || destPool.getType() == StoragePoolType.SharedMountPoint) { - if (!isIso && isTemplateExtractable(templateFilePath)) { + if (!Storage.ImageFormat.ISO.equals(format) && isTemplateExtractable(templateFilePath)) { extractDownloadedTemplate(templateFilePath, destPool, destinationFile); } else { Script.runSimpleBashScript("mv " + templateFilePath + " " + destinationFile); @@ -451,11 +449,13 @@ public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { type = StoragePoolType.CLVM; } else if (spd.getPoolType() == LibvirtStoragePoolDef.PoolType.GLUSTERFS) { type = StoragePoolType.Gluster; + } else if (spd.getPoolType() == LibvirtStoragePoolDef.PoolType.POWERFLEX) { + type = 
StoragePoolType.PowerFlex; } LibvirtStoragePool pool = new LibvirtStoragePool(uuid, storage.getName(), type, this, storage); - if (pool.getType() != StoragePoolType.RBD) + if (pool.getType() != StoragePoolType.RBD && pool.getType() != StoragePoolType.PowerFlex) pool.setLocalPath(spd.getTargetPath()); else pool.setLocalPath(""); @@ -545,7 +545,6 @@ public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) { s_logger.debug("Failed to get physical disk:", e); throw new CloudRuntimeException(e.toString()); } - } @Override @@ -1022,7 +1021,6 @@ public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, } } - return disk; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java index 1b554f7037f7..b2e8decfcb13 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java @@ -45,6 +45,7 @@ public class LibvirtStoragePool implements KVMStoragePool { protected String authSecret; protected String sourceHost; protected int sourcePort; + protected String sourceDir; public LibvirtStoragePool(String uuid, String name, StoragePoolType type, StorageAdaptor adaptor, StoragePool pool) { @@ -56,7 +57,6 @@ public LibvirtStoragePool(String uuid, String name, StoragePoolType type, Storag this.used = 0; this.available = 0; this._pool = pool; - } public void setCapacity(long capacity) { @@ -101,7 +101,7 @@ public String getUuid() { @Override public PhysicalDiskFormat getDefaultFormat() { - if (getStoragePoolType() == StoragePoolType.CLVM || getStoragePoolType() == StoragePoolType.RBD) { + if (getStoragePoolType() == StoragePoolType.CLVM || getStoragePoolType() == StoragePoolType.RBD || getStoragePoolType() == StoragePoolType.PowerFlex) { return 
PhysicalDiskFormat.RAW; } else { return PhysicalDiskFormat.QCOW2; @@ -271,4 +271,12 @@ public boolean delete() { public boolean createFolder(String path) { return this._storageAdaptor.createFolder(this.uuid, path); } + + @Override + public boolean supportsConfigDriveIso() { + if (this.type == StoragePoolType.NetworkFilesystem) { + return true; + } + return false; + } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java index 1ea4f6262263..6db2f82beb48 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java @@ -35,6 +35,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtStoragePoolDef.PoolType; import com.cloud.hypervisor.kvm.resource.LibvirtStorageVolumeDef; import com.cloud.hypervisor.kvm.resource.LibvirtStorageVolumeXMLParser; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.ProvisioningType; import com.cloud.storage.Storage.StoragePoolType; @@ -319,7 +320,7 @@ public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, S } @Override - public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, KVMStoragePool destPool, boolean isIso) { + public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { return null; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java new file mode 100644 index 000000000000..419fa0cb2d43 --- /dev/null +++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java @@ -0,0 +1,389 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.hypervisor.kvm.storage; + +import java.io.File; +import java.io.FileFilter; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.commons.io.filefilter.WildcardFileFilter; +import org.apache.log4j.Logger; + +import com.cloud.storage.Storage; +import com.cloud.storage.StorageLayer; +import com.cloud.storage.StorageManager; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.OutputInterpreter; +import com.cloud.utils.script.Script; +import com.google.common.base.Strings; + +@StorageAdaptorInfo(storagePoolType= Storage.StoragePoolType.PowerFlex) +public class ScaleIOStorageAdaptor implements StorageAdaptor { + private static final Logger LOGGER = Logger.getLogger(ScaleIOStorageAdaptor.class); + private static 
final Map MapStorageUuidToStoragePool = new HashMap<>(); + private static final int DEFAULT_DISK_WAIT_TIME_IN_SECS = 60; + private StorageLayer storageLayer; + + public ScaleIOStorageAdaptor(StorageLayer storagelayer) { + storageLayer = storagelayer; + } + + @Override + public KVMStoragePool getStoragePool(String uuid) { + KVMStoragePool pool = MapStorageUuidToStoragePool.get(uuid); + if (pool == null) { + LOGGER.error("Pool: " + uuid + " not found, probably sdc not connected on agent start"); + throw new CloudRuntimeException("Pool: " + uuid + " not found, reconnect sdc and restart agent if sdc not connected on agent start"); + } + + return pool; + } + + @Override + public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { + return getStoragePool(uuid); + } + + @Override + public KVMPhysicalDisk getPhysicalDisk(String volumeId, KVMStoragePool pool) { + if (Strings.isNullOrEmpty(volumeId) || pool == null) { + LOGGER.error("Unable to get physical disk, unspecified volumeid or pool"); + return null; + } + + try { + String diskFilePath = null; + String systemId = ScaleIOUtil.getSystemIdForVolume(volumeId); + if (!Strings.isNullOrEmpty(systemId) && systemId.length() == ScaleIOUtil.IDENTIFIER_LENGTH) { + // Disk path format: /dev/disk/by-id/emc-vol-- + final String diskFileName = ScaleIOUtil.DISK_NAME_PREFIX + systemId + "-" + volumeId; + diskFilePath = ScaleIOUtil.DISK_PATH + File.separator + diskFileName; + final File diskFile = new File(diskFilePath); + if (!diskFile.exists()) { + LOGGER.debug("Physical disk file: " + diskFilePath + " doesn't exists on the storage pool: " + pool.getUuid()); + return null; + } + } else { + LOGGER.debug("Try with wildcard filter to get the physical disk: " + volumeId + " on the storage pool: " + pool.getUuid()); + final File dir = new File(ScaleIOUtil.DISK_PATH); + final FileFilter fileFilter = new WildcardFileFilter(ScaleIOUtil.DISK_NAME_PREFIX_FILTER + volumeId); + final File[] files = dir.listFiles(fileFilter); + if 
(files != null && files.length == 1) { + diskFilePath = files[0].getAbsolutePath(); + } else { + LOGGER.debug("Unable to find the physical disk: " + volumeId + " on the storage pool: " + pool.getUuid()); + return null; + } + } + + KVMPhysicalDisk disk = new KVMPhysicalDisk(diskFilePath, volumeId, pool); + disk.setFormat(QemuImg.PhysicalDiskFormat.RAW); + + long diskSize = getPhysicalDiskSize(diskFilePath); + disk.setSize(diskSize); + disk.setVirtualSize(diskSize); + + return disk; + } catch (Exception e) { + LOGGER.error("Failed to get the physical disk: " + volumeId + " on the storage pool: " + pool.getUuid() + " due to " + e.getMessage()); + throw new CloudRuntimeException("Failed to get the physical disk: " + volumeId + " on the storage pool: " + pool.getUuid()); + } + } + + @Override + public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, Storage.StoragePoolType type) { + ScaleIOStoragePool storagePool = new ScaleIOStoragePool(uuid, host, port, path, type, this); + MapStorageUuidToStoragePool.put(uuid, storagePool); + return storagePool; + } + + @Override + public boolean deleteStoragePool(String uuid) { + return MapStorageUuidToStoragePool.remove(uuid) != null; + } + + @Override + public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) { + return null; + } + + @Override + public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map details) { + if (Strings.isNullOrEmpty(volumePath) || pool == null) { + LOGGER.error("Unable to connect physical disk due to insufficient data"); + throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data"); + } + + int waitTimeInSec = DEFAULT_DISK_WAIT_TIME_IN_SECS; + if (details != null && details.containsKey(StorageManager.STORAGE_POOL_DISK_WAIT.toString())) { + String waitTime = 
details.get(StorageManager.STORAGE_POOL_DISK_WAIT.toString()); + if (!Strings.isNullOrEmpty(waitTime)) { + waitTimeInSec = Integer.valueOf(waitTime).intValue(); + } + } + return waitForDiskToBecomeAvailable(volumePath, pool, waitTimeInSec); + } + + private boolean waitForDiskToBecomeAvailable(String volumePath, KVMStoragePool pool, int waitTimeInSec) { + LOGGER.debug("Waiting for the volume with id: " + volumePath + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " secs"); + int timeBetweenTries = 1000; // Try more frequently (every sec) and return early if disk is found + KVMPhysicalDisk physicalDisk = null; + + // Rescan before checking for the physical disk + ScaleIOUtil.rescanForNewVolumes(); + + while (waitTimeInSec > 0) { + physicalDisk = getPhysicalDisk(volumePath, pool); + if (physicalDisk != null && physicalDisk.getSize() > 0) { + LOGGER.debug("Found the volume with id: " + volumePath + " of the storage pool: " + pool.getUuid()); + return true; + } + + waitTimeInSec--; + + try { + Thread.sleep(timeBetweenTries); + } catch (Exception ex) { + // don't do anything + } + } + + physicalDisk = getPhysicalDisk(volumePath, pool); + if (physicalDisk != null && physicalDisk.getSize() > 0) { + LOGGER.debug("Found the volume using id: " + volumePath + " of the storage pool: " + pool.getUuid()); + return true; + } + + LOGGER.debug("Unable to find the volume with id: " + volumePath + " of the storage pool: " + pool.getUuid()); + return false; + } + + private long getPhysicalDiskSize(String diskPath) { + if (Strings.isNullOrEmpty(diskPath)) { + return 0; + } + + Script diskCmd = new Script("blockdev", LOGGER); + diskCmd.add("--getsize64", diskPath); + + OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); + String result = diskCmd.execute(parser); + + if (result != null) { + LOGGER.warn("Unable to get the disk size at path: " + diskPath); + return 0; + } else { + LOGGER.info("Able to retrieve the disk 
size at path:" + diskPath); + } + + return Long.parseLong(parser.getLine()); + } + + @Override + public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool) { + return true; + } + + @Override + public boolean disconnectPhysicalDisk(Map volumeToDisconnect) { + return true; + } + + @Override + public boolean disconnectPhysicalDiskByPath(String localPath) { + return true; + } + + @Override + public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format) { + return true; + } + + @Override + public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, KVMStoragePool destPool, int timeout) { + return null; + } + + @Override + public KVMPhysicalDisk createTemplateFromDisk(KVMPhysicalDisk disk, String name, QemuImg.PhysicalDiskFormat format, long size, KVMStoragePool destPool) { + return null; + } + + @Override + public List listPhysicalDisks(String storagePoolUuid, KVMStoragePool pool) { + return null; + } + + @Override + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) { + if (Strings.isNullOrEmpty(name) || disk == null || destPool == null) { + LOGGER.error("Unable to copy physical disk due to insufficient data"); + throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data"); + } + + LOGGER.debug("Copy physical disk with size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat()); + + KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(name); + if (destDisk == null) { + LOGGER.error("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid()); + throw new CloudRuntimeException("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid()); + } + + destDisk.setFormat(QemuImg.PhysicalDiskFormat.RAW); + 
destDisk.setSize(disk.getVirtualSize()); + destDisk.setVirtualSize(disk.getVirtualSize()); + + QemuImg qemu = new QemuImg(timeout); + QemuImgFile srcFile = null; + QemuImgFile destFile = null; + + try { + srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); + destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); + + LOGGER.debug("Starting copy from source image " + srcFile.getFileName() + " to PowerFlex volume: " + destDisk.getPath()); + qemu.convert(srcFile, destFile); + LOGGER.debug("Succesfully converted source image " + srcFile.getFileName() + " to PowerFlex volume: " + destDisk.getPath()); + } catch (QemuImgException e) { + LOGGER.error("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + e.getMessage()); + destDisk = null; + } + + return destDisk; + } + + @Override + public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool, int timeout) { + return null; + } + + @Override + public boolean refresh(KVMStoragePool pool) { + return true; + } + + @Override + public boolean deleteStoragePool(KVMStoragePool pool) { + return deleteStoragePool(pool.getUuid()); + } + + @Override + public boolean createFolder(String uuid, String path) { + return true; + } + + @Override + public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, QemuImg.PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout) { + return null; + } + + @Override + public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { + if (Strings.isNullOrEmpty(templateFilePath) || Strings.isNullOrEmpty(destTemplatePath) || destPool == null) { + LOGGER.error("Unable to create template from direct download template file due to insufficient data"); + throw new CloudRuntimeException("Unable to create template
from direct download template file due to insufficient data"); + } + + LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString()); + + File sourceFile = new File(templateFilePath); + if (!sourceFile.exists()) { + throw new CloudRuntimeException("Direct download template file " + templateFilePath + " does not exist on this host"); + } + + if (destTemplatePath == null || destTemplatePath.isEmpty()) { + LOGGER.error("Failed to create template, target template disk path not provided"); + throw new CloudRuntimeException("Target template disk path not provided"); + } + + if (destPool.getType() != Storage.StoragePoolType.PowerFlex) { + throw new CloudRuntimeException("Unsupported storage pool type: " + destPool.getType().toString()); + } + + if (Storage.ImageFormat.RAW.equals(format) && Storage.ImageFormat.QCOW2.equals(format)) { + LOGGER.error("Failed to create template, unsupported template format: " + format.toString()); + throw new CloudRuntimeException("Unsupported template format: " + format.toString()); + } + + String srcTemplateFilePath = templateFilePath; + KVMPhysicalDisk destDisk = null; + QemuImgFile srcFile = null; + QemuImgFile destFile = null; + try { + destDisk = destPool.getPhysicalDisk(destTemplatePath); + if (destDisk == null) { + LOGGER.error("Failed to find the disk: " + destTemplatePath + " of the storage pool: " + destPool.getUuid()); + throw new CloudRuntimeException("Failed to find the disk: " + destTemplatePath + " of the storage pool: " + destPool.getUuid()); + } + + if (isTemplateExtractable(templateFilePath)) { + srcTemplateFilePath = sourceFile.getParent() + "/" + UUID.randomUUID().toString(); + LOGGER.debug("Extract the downloaded template " + templateFilePath + " to " + srcTemplateFilePath); + String extractCommand = getExtractCommandForDownloadedFile(templateFilePath, srcTemplateFilePath); + 
Script.runSimpleBashScript(extractCommand); + Script.runSimpleBashScript("rm -f " + templateFilePath); + } + + QemuImg.PhysicalDiskFormat srcFileFormat = QemuImg.PhysicalDiskFormat.RAW; + if (format == Storage.ImageFormat.RAW) { + srcFileFormat = QemuImg.PhysicalDiskFormat.RAW; + } else if (format == Storage.ImageFormat.QCOW2) { + srcFileFormat = QemuImg.PhysicalDiskFormat.QCOW2; + } + + srcFile = new QemuImgFile(srcTemplateFilePath, srcFileFormat); + destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); + + LOGGER.debug("Starting copy from source downloaded template " + srcFile.getFileName() + " to PowerFlex template volume: " + destDisk.getPath()); + QemuImg qemu = new QemuImg(timeout); + qemu.convert(srcFile, destFile); + LOGGER.debug("Successfully converted source downloaded template " + srcFile.getFileName() + " to PowerFlex template volume: " + destDisk.getPath()); + } catch (QemuImgException e) { + LOGGER.error("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + e.getMessage()); + destDisk = null; + } finally { + Script.runSimpleBashScript("rm -f " + srcTemplateFilePath); + } + + return destDisk; + } + + private boolean isTemplateExtractable(String templatePath) { + // "file" may be unavailable or fail, in which case runSimpleBashScript returns null + String type = Script.runSimpleBashScript("file " + templatePath + " | awk -F' ' '{print $2}'"); + return type != null && (type.equalsIgnoreCase("bzip2") || type.equalsIgnoreCase("gzip") || type.equalsIgnoreCase("zip")); + } + + private String getExtractCommandForDownloadedFile(String downloadedTemplateFile, String templateFile) { + if (downloadedTemplateFile.endsWith(".zip")) { + return "unzip -p " + downloadedTemplateFile + " | cat > " + templateFile; + } else if (downloadedTemplateFile.endsWith(".bz2")) { + return "bunzip2 -c " + downloadedTemplateFile + " > " + templateFile; + } else if (downloadedTemplateFile.endsWith(".gz")) { + return "gunzip -c " + downloadedTemplateFile + " > " + templateFile; + } else { + throw new 
CloudRuntimeException("Unable to extract template " + downloadedTemplateFile); + } + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePool.java new file mode 100644 index 000000000000..4ead92d6a0dd --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePool.java @@ -0,0 +1,181 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.hypervisor.kvm.storage; + +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.utils.qemu.QemuImg; + +import com.cloud.storage.Storage; + +public class ScaleIOStoragePool implements KVMStoragePool { + private String uuid; + private String sourceHost; + private int sourcePort; + private String sourceDir; + private Storage.StoragePoolType storagePoolType; + private StorageAdaptor storageAdaptor; + private long capacity; + private long used; + private long available; + + public ScaleIOStoragePool(String uuid, String host, int port, String path, Storage.StoragePoolType poolType, StorageAdaptor adaptor) { + this.uuid = uuid; + sourceHost = host; + sourcePort = port; + sourceDir = path; + storagePoolType = poolType; + storageAdaptor = adaptor; + capacity = 0; + used = 0; + available = 0; + } + + @Override + public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) { + return null; + } + + @Override + public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, Storage.ProvisioningType provisioningType, long size) { + return null; + } + + @Override + public boolean connectPhysicalDisk(String volumeUuid, Map details) { + return storageAdaptor.connectPhysicalDisk(volumeUuid, this, details); + } + + @Override + public KVMPhysicalDisk getPhysicalDisk(String volumeId) { + return storageAdaptor.getPhysicalDisk(volumeId, this); + } + + @Override + public boolean disconnectPhysicalDisk(String volumeUuid) { + return storageAdaptor.disconnectPhysicalDisk(volumeUuid, this); + } + + @Override + public boolean deletePhysicalDisk(String volumeUuid, Storage.ImageFormat format) { + return true; + } + + @Override + public List listPhysicalDisks() { + return null; + } + + @Override + public String getUuid() { + return uuid; + } + + public void setCapacity(long capacity) { + this.capacity = capacity; + } + + @Override + public long getCapacity() { + 
return this.capacity; + } + + public void setUsed(long used) { + this.used = used; + } + + @Override + public long getUsed() { + return this.used; + } + + public void setAvailable(long available) { + this.available = available; + } + + @Override + public long getAvailable() { + return this.available; + } + + @Override + public boolean refresh() { + return false; + } + + @Override + public boolean isExternalSnapshot() { + return true; + } + + @Override + public String getLocalPath() { + return null; + } + + @Override + public String getSourceHost() { + return this.sourceHost; + } + + @Override + public String getSourceDir() { + return this.sourceDir; + } + + @Override + public int getSourcePort() { + return this.sourcePort; + } + + @Override + public String getAuthUserName() { + return null; + } + + @Override + public String getAuthSecret() { + return null; + } + + @Override + public Storage.StoragePoolType getType() { + return storagePoolType; + } + + @Override + public boolean delete() { + return false; + } + + @Override + public QemuImg.PhysicalDiskFormat getDefaultFormat() { + return QemuImg.PhysicalDiskFormat.RAW; + } + + @Override + public boolean createFolder(String path) { + return false; + } + + @Override + public boolean supportsConfigDriveIso() { + return false; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java index 99f2876915c0..570c2070c75f 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java @@ -86,7 +86,8 @@ KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, * Create physical disk on Primary Storage from direct download template on the host (in temporary location) * @param templateFilePath * @param destPool - * @param isIso + * 
@param format + * @param timeout */ - KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, KVMStoragePool destPool, boolean isIso); + KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout); } diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePoolTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePoolTest.java new file mode 100644 index 000000000000..cb9ffaee531f --- /dev/null +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePoolTest.java @@ -0,0 +1,152 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.hypervisor.kvm.storage; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +import java.io.File; +import java.io.FileFilter; + +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StorageLayer; + +@PrepareForTest(ScaleIOUtil.class) +@RunWith(PowerMockRunner.class) +public class ScaleIOStoragePoolTest { + + ScaleIOStoragePool pool; + + StorageAdaptor adapter; + + @Mock + StorageLayer storageLayer; + + @Before + public void setUp() throws Exception { + final String uuid = "345fc603-2d7e-47d2-b719-a0110b3732e6"; + final StoragePoolType type = StoragePoolType.PowerFlex; + + adapter = spy(new ScaleIOStorageAdaptor(storageLayer)); + pool = new ScaleIOStoragePool(uuid, "192.168.1.19", 443, "a519be2f00000000", type, adapter); + } + + @After + public void tearDown() throws Exception { + } + + @Test + public void testAttributes() { + assertEquals(pool.getCapacity(), 0); + assertEquals(pool.getUsed(), 0); + assertEquals(pool.getAvailable(), 0); + assertEquals(pool.getUuid(), "345fc603-2d7e-47d2-b719-a0110b3732e6"); + assertEquals(pool.getSourceHost(), "192.168.1.19"); + assertEquals(pool.getSourcePort(), 443); + assertEquals(pool.getSourceDir(), "a519be2f00000000"); + assertEquals(pool.getType(), 
StoragePoolType.PowerFlex); + + pool.setCapacity(131072); + pool.setUsed(24576); + pool.setAvailable(106496); + + assertEquals(pool.getCapacity(), 131072); + assertEquals(pool.getUsed(), 24576); + assertEquals(pool.getAvailable(), 106496); + } + + @Test + public void testDefaults() { + assertEquals(pool.getDefaultFormat(), PhysicalDiskFormat.RAW); + assertEquals(pool.getType(), StoragePoolType.PowerFlex); + + assertNull(pool.getAuthUserName()); + assertNull(pool.getAuthSecret()); + + Assert.assertFalse(pool.supportsConfigDriveIso()); + assertTrue(pool.isExternalSnapshot()); + } + + public void testGetPhysicalDiskWithWildcardFileFilter() throws Exception { + final String volumePath = "6c3362b500000001"; + final String systemId = "218ce1797566a00f"; + + File dir = PowerMockito.mock(File.class); + PowerMockito.whenNew(File.class).withAnyArguments().thenReturn(dir); + + // TODO: Mock file in dir + File[] files = new File[1]; + String diskFilePath = ScaleIOUtil.DISK_PATH + File.separator + ScaleIOUtil.DISK_NAME_PREFIX + systemId + "-" + volumePath; + files[0] = new File(diskFilePath); + PowerMockito.when(dir.listFiles(any(FileFilter.class))).thenReturn(files); + + KVMPhysicalDisk disk = adapter.getPhysicalDisk(volumePath, pool); + assertNull(disk); + } + + @Test + public void testGetPhysicalDiskWithSystemId() throws Exception { + final String volumePath = "6c3362b500000001"; + final String systemId = "218ce1797566a00f"; + PowerMockito.mockStatic(ScaleIOUtil.class); + when(ScaleIOUtil.getSystemIdForVolume(volumePath)).thenReturn(systemId); + + // TODO: Mock file exists + File file = PowerMockito.mock(File.class); + PowerMockito.whenNew(File.class).withAnyArguments().thenReturn(file); + PowerMockito.when(file.exists()).thenReturn(true); + + KVMPhysicalDisk disk = adapter.getPhysicalDisk(volumePath, pool); + assertNull(disk); + } + + @Test + public void testConnectPhysicalDisk() { + final String volumePath = "6c3362b500000001"; + final String systemId = "218ce1797566a00f"; 
+ final String diskFilePath = ScaleIOUtil.DISK_PATH + File.separator + ScaleIOUtil.DISK_NAME_PREFIX + systemId + "-" + volumePath; + KVMPhysicalDisk disk = new KVMPhysicalDisk(diskFilePath, volumePath, pool); + disk.setFormat(QemuImg.PhysicalDiskFormat.RAW); + disk.setSize(8192); + disk.setVirtualSize(8192); + + assertEquals(disk.getPath(), "/dev/disk/by-id/emc-vol-218ce1797566a00f-6c3362b500000001"); + + when(adapter.getPhysicalDisk(volumePath, pool)).thenReturn(disk); + + final boolean result = adapter.connectPhysicalDisk(volumePath, pool, null); + assertTrue(result); + } +} \ No newline at end of file diff --git a/plugins/pom.xml b/plugins/pom.xml index 4dcc3f91b009..29cfbc18503b 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -121,6 +121,7 @@ storage/volume/nexenta storage/volume/sample storage/volume/solidfire + storage/volume/scaleio storage-allocators/random diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java index 89e8c4fc1e41..f9e614692338 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java @@ -48,6 +48,7 @@ import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; +import com.cloud.host.Host; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.ResizeVolumePayload; import com.cloud.storage.Storage.StoragePoolType; @@ -59,6 +60,7 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDetailsDao; import com.cloud.user.AccountManager; +import com.cloud.utils.Pair; import 
com.cloud.utils.exception.CloudRuntimeException; /** @@ -259,7 +261,11 @@ public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncComplet @Override public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback) { throw new UnsupportedOperationException(); + } + @Override + public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { + throw new UnsupportedOperationException(); } @Override @@ -409,4 +415,28 @@ public Map getCapabilities() { return mapCapabilities; } + @Override + public boolean canProvideStorageStats() { + return false; + } + + @Override + public Pair getStorageStats(StoragePool storagePool) { + return null; + } + + @Override + public boolean canProvideVolumeStats() { + return false; + } + + @Override + public Pair getVolumeStats(StoragePool storagePool, String volumeId) { + return null; + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + return true; + } } diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java index fa1f3d4a6467..49559d209d6a 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java @@ -17,6 +17,37 @@ package org.apache.cloudstack.storage.datastore.driver; +import java.io.UnsupportedEncodingException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; +import 
org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.command.CreateObjectAnswer; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.util.DateraObject; +import org.apache.cloudstack.storage.datastore.util.DateraUtil; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.log4j.Logger; + import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; @@ -44,40 +75,12 @@ import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.utils.Pair; import com.cloud.utils.StringUtils; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; import com.google.common.base.Preconditions; import com.google.common.primitives.Ints; -import 
org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.storage.command.CommandResult; -import org.apache.cloudstack.storage.command.CreateObjectAnswer; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.datastore.util.DateraObject; -import org.apache.cloudstack.storage.datastore.util.DateraUtil; -import org.apache.cloudstack.storage.to.SnapshotObjectTO; -import org.apache.log4j.Logger; - -import javax.inject.Inject; -import java.io.UnsupportedEncodingException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; import static com.cloud.utils.NumbersUtil.toHumanReadableSize; @@ -1254,6 +1257,12 @@ public void copyAsync(DataObject srcData, DataObject destData, throw new UnsupportedOperationException(); } + @Override + public void copyAsync(DataObject srcData, DataObject destData, + Host destHost, 
AsyncCompletionCallback callback) { + throw new UnsupportedOperationException(); + } + @Override public boolean canCopy(DataObject srcData, DataObject destData) { return false; @@ -1825,6 +1834,30 @@ private long getVolumeSizeIncludingHypervisorSnapshotReserve(long volumeSize, In @Override public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) { + } + + @Override + public boolean canProvideStorageStats() { + return false; + } + + @Override + public Pair getStorageStats(StoragePool storagePool) { + return null; + } + @Override + public boolean canProvideVolumeStats() { + return false; + } + + @Override + public Pair getVolumeStats(StoragePool storagePool, String volumeId) { + return null; + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + return true; } } diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java index 6ce874107b32..3cbcc8541ad7 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java @@ -76,6 +76,7 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.template.TemplateManager; +import com.cloud.utils.Pair; import com.cloud.vm.dao.VMInstanceDao; import static com.cloud.utils.NumbersUtil.toHumanReadableSize; @@ -277,6 +278,11 @@ public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCa } } + @Override + public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { + 
copyAsync(srcData, destData, callback); + } + @Override public boolean canCopy(DataObject srcData, DataObject destData) { //BUG fix for CLOUDSTACK-4618 @@ -389,4 +395,29 @@ public void resize(DataObject data, AsyncCompletionCallback cal @Override public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) {} + + @Override + public boolean canProvideStorageStats() { + return false; + } + + @Override + public Pair getStorageStats(StoragePool storagePool) { + return null; + } + + @Override + public boolean canProvideVolumeStats() { + return false; + } + + @Override + public Pair getVolumeStats(StoragePool storagePool, String volumeId) { + return null; + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + return true; + } } diff --git a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java index d59fce4b68c7..92f8938060be 100644 --- a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java @@ -53,6 +53,7 @@ import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.dao.AccountDao; +import com.cloud.utils.Pair; public class NexentaPrimaryDataStoreDriver implements PrimaryDataStoreDriver { private static final Logger logger = Logger.getLogger(NexentaPrimaryDataStoreDriver.class); @@ -199,6 +200,10 @@ public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallbac @Override public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback) {} + @Override + public void copyAsync(DataObject 
srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { + } + @Override public boolean canCopy(DataObject srcData, DataObject destData) { return false; @@ -209,4 +214,29 @@ public void resize(DataObject data, AsyncCompletionCallback cal @Override public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) {} + + @Override + public boolean canProvideStorageStats() { + return false; + } + + @Override + public boolean canProvideVolumeStats() { + return false; + } + + @Override + public Pair getVolumeStats(StoragePool storagePool, String volumeId) { + return null; + } + + @Override + public Pair getStorageStats(StoragePool storagePool) { + return null; + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + return true; + } } diff --git a/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java b/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java index fc0186f15381..a41627723437 100644 --- a/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java @@ -46,6 +46,7 @@ import com.cloud.host.Host; import com.cloud.storage.StoragePool; import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { @@ -224,6 +225,10 @@ public boolean canCopy(DataObject srcData, DataObject destData) { public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback) { } + @Override + public void copyAsync(DataObject srcData, DataObject 
destData, Host destHost, AsyncCompletionCallback callback) { + } + @Override public void resize(DataObject data, AsyncCompletionCallback callback) { } @@ -236,4 +241,28 @@ public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, Qual public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { } + @Override + public boolean canProvideStorageStats() { + return false; + } + + @Override + public boolean canProvideVolumeStats() { + return false; + } + + @Override + public Pair getVolumeStats(StoragePool storagePool, String volumeId) { + return null; + } + + @Override + public Pair getStorageStats(StoragePool storagePool) { + return null; + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + return true; + } } diff --git a/plugins/storage/volume/scaleio/pom.xml b/plugins/storage/volume/scaleio/pom.xml new file mode 100644 index 000000000000..859b2868235a --- /dev/null +++ b/plugins/storage/volume/scaleio/pom.xml @@ -0,0 +1,55 @@ + + + 4.0.0 + cloud-plugin-storage-volume-scaleio + Apache CloudStack Plugin - Storage Volume Dell-EMC ScaleIO/PowerFlex Provider + + org.apache.cloudstack + cloudstack-plugins + 4.15.0.0-SNAPSHOT + ../../../pom.xml + + + + org.apache.cloudstack + cloud-engine-storage-volume + ${project.version} + + + + + + maven-surefire-plugin + + true + + + + integration-test + + test + + + + + + + diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/ProtectionDomain.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/ProtectionDomain.java new file mode 100644 index 000000000000..5d260e0fd0c6 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/ProtectionDomain.java @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.datastore.api; + +public class ProtectionDomain { + String id; + String name; + String protectionDomainState; + String systemId; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getProtectionDomainState() { + return protectionDomainState; + } + + public void setProtectionDomainState(String protectionDomainState) { + this.protectionDomainState = protectionDomainState; + } + + public String getSystemId() { + return systemId; + } + + public void setSystemId(String systemId) { + this.systemId = systemId; + } +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/Sdc.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/Sdc.java new file mode 100644 index 000000000000..71e4077d6d0b --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/Sdc.java @@ -0,0 +1,138 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
/**
 * Model for an SDC (Storage Data Client) as returned by the PowerFlex/ScaleIO
 * Gateway REST API. An SDC is a host-side client that consumes PowerFlex
 * volumes. Field names mirror the gateway's JSON attributes (presumably
 * mapped by a JSON deserializer on field/property name — TODO confirm).
 */
public class Sdc {
    String id;
    String name;
    String mdmConnectionState;
    Boolean sdcApproved;
    String perfProfile;
    String sdcGuid;
    String sdcIp;
    String[] sdcIps;
    String systemId;
    String osType;
    String kernelVersion;
    String softwareVersionInfo;
    String versionInfo;

    /** @return unique id of this SDC */
    public String getId() {
        return id;
    }

    public void setId(final String value) {
        id = value;
    }

    /** @return display name of this SDC */
    public String getName() {
        return name;
    }

    public void setName(final String value) {
        name = value;
    }

    /** @return MDM connection state string (e.g. "Connected") */
    public String getMdmConnectionState() {
        return mdmConnectionState;
    }

    public void setMdmConnectionState(final String value) {
        mdmConnectionState = value;
    }

    /** @return whether the SDC is approved on the PowerFlex system; may be null */
    public Boolean getSdcApproved() {
        return sdcApproved;
    }

    public void setSdcApproved(final Boolean value) {
        sdcApproved = value;
    }

    /** @return performance profile name assigned to this SDC */
    public String getPerfProfile() {
        return perfProfile;
    }

    public void setPerfProfile(final String value) {
        perfProfile = value;
    }

    /** @return GUID of the SDC installation on the host */
    public String getSdcGuid() {
        return sdcGuid;
    }

    public void setSdcGuid(final String value) {
        sdcGuid = value;
    }

    /** @return primary IP address of the SDC host */
    public String getSdcIp() {
        return sdcIp;
    }

    public void setSdcIp(final String value) {
        sdcIp = value;
    }

    /** @return all IP addresses of the SDC host */
    public String[] getSdcIps() {
        return sdcIps;
    }

    public void setSdcIps(final String[] value) {
        sdcIps = value;
    }

    /** @return id of the PowerFlex system this SDC belongs to */
    public String getSystemId() {
        return systemId;
    }

    public void setSystemId(final String value) {
        systemId = value;
    }

    /** @return OS type string of the SDC host */
    public String getOsType() {
        return osType;
    }

    public void setOsType(final String value) {
        osType = value;
    }

    /** @return kernel version of the SDC host */
    public String getKernelVersion() {
        return kernelVersion;
    }

    public void setKernelVersion(final String value) {
        kernelVersion = value;
    }

    /** @return SDC software version details */
    public String getSoftwareVersionInfo() {
        return softwareVersionInfo;
    }

    public void setSoftwareVersionInfo(final String value) {
        softwareVersionInfo = value;
    }

    /** @return SDC version info string */
    public String getVersionInfo() {
        return versionInfo;
    }

    public void setVersionInfo(final String value) {
        versionInfo = value;
    }
}
/**
 * A single SDC-to-volume mapping entry as reported by the PowerFlex/ScaleIO
 * Gateway REST API: which SDC (by id and IP) a volume is mapped to.
 */
public class SdcMappingInfo {
    String sdcId;
    String sdcIp;

    /** @return id of the mapped SDC */
    public String getSdcId() {
        return sdcId;
    }

    public void setSdcId(final String value) {
        sdcId = value;
    }

    /** @return IP address of the mapped SDC */
    public String getSdcIp() {
        return sdcIp;
    }

    public void setSdcIp(final String value) {
        sdcIp = value;
    }
}
/**
 * Request payload element for the PowerFlex/ScaleIO snapshot API: one source
 * volume and the name its snapshot should be given. Field names mirror the
 * gateway's expected JSON attributes.
 */
public class SnapshotDef {
    String volumeId;
    String snapshotName;
    // Sent as a string flag ("TRUE"/"FALSE") rather than a boolean —
    // presumably the gateway API expects it that way; confirm against the client.
    String allowOnExtManagedVol;

    /** @return id of the source volume to snapshot */
    public String getVolumeId() {
        return volumeId;
    }

    public void setVolumeId(final String value) {
        volumeId = value;
    }

    /** @return name to assign to the snapshot volume */
    public String getSnapshotName() {
        return snapshotName;
    }

    public void setSnapshotName(final String value) {
        snapshotName = value;
    }

    /** @return flag allowing snapshots on externally-managed volumes */
    public String getAllowOnExtManagedVol() {
        return allowOnExtManagedVol;
    }

    public void setAllowOnExtManagedVol(final String value) {
        allowOnExtManagedVol = value;
    }
}
/**
 * Wrapper for a batch of {@link SnapshotDef} entries, matching the JSON shape
 * the PowerFlex/ScaleIO snapshot API expects for a consistent snapshot of
 * several volumes at once.
 */
public class SnapshotDefs {
    SnapshotDef[] snapshotDefs;

    /** @return the snapshot definitions in this batch */
    public SnapshotDef[] getSnapshotDefs() {
        return snapshotDefs;
    }

    public void setSnapshotDefs(final SnapshotDef[] value) {
        snapshotDefs = value;
    }
}
/**
 * Result of a PowerFlex/ScaleIO group-snapshot operation: the consistency
 * group id and the ids of the snapshot volumes that were created.
 */
public class SnapshotGroup {
    String snapshotGroupId;
    String[] volumeIdList;

    /** @return id of the snapshot consistency group */
    public String getSnapshotGroupId() {
        return snapshotGroupId;
    }

    public void setSnapshotGroupId(String snapshotGroupId) {
        this.snapshotGroupId = snapshotGroupId;
    }

    /**
     * Returns the snapshot volume ids as a list.
     * <p>
     * Fix: previously this threw a {@link NullPointerException} when the
     * volume id array had not been set (e.g. an empty or partial gateway
     * response); it now returns an empty list in that case.
     *
     * @return snapshot volume ids, never {@code null}
     */
    public List<String> getVolumeIds() {
        if (volumeIdList == null) {
            return Collections.emptyList();
        }
        return Arrays.asList(volumeIdList);
    }

    /** @return the raw volume id array, may be {@code null} */
    public String[] getVolumeIdList() {
        return volumeIdList;
    }

    public void setVolumeIdList(String[] volumeIdList) {
        this.volumeIdList = volumeIdList;
    }
}
/**
 * Model for a PowerFlex/ScaleIO storage pool as returned by the Gateway REST
 * API, optionally carrying its capacity statistics. Field names mirror the
 * gateway's JSON attributes.
 */
public class StoragePool {
    String id;
    String name;
    String mediaType;
    String protectionDomainId;
    String systemId;
    StoragePoolStatistics statistics;

    /** @return unique id of the storage pool */
    public String getId() {
        return id;
    }

    public void setId(final String value) {
        id = value;
    }

    /** @return display name of the storage pool */
    public String getName() {
        return name;
    }

    public void setName(final String value) {
        name = value;
    }

    /** @return media type string (e.g. HDD/SSD) as reported by the gateway */
    public String getMediaType() {
        return mediaType;
    }

    public void setMediaType(final String value) {
        mediaType = value;
    }

    /** @return id of the protection domain containing this pool */
    public String getProtectionDomainId() {
        return protectionDomainId;
    }

    public void setProtectionDomainId(final String value) {
        protectionDomainId = value;
    }

    /** @return id of the owning PowerFlex system */
    public String getSystemId() {
        return systemId;
    }

    public void setSystemId(final String value) {
        systemId = value;
    }

    /** @return capacity statistics for this pool; may be null when not fetched */
    public StoragePoolStatistics getStatistics() {
        return statistics;
    }

    public void setStatistics(final StoragePoolStatistics value) {
        statistics = value;
    }
}
/**
 * Capacity statistics of a PowerFlex/ScaleIO storage pool, as reported by the
 * Gateway REST API. The gateway returns capacities as decimal strings in KB;
 * null/empty values are treated as zero.
 * <p>
 * Improvement: the repeated null/empty-then-parse pattern is consolidated into
 * a single helper, and the Guava {@code Strings.isNullOrEmpty} dependency is
 * replaced with an equivalent stdlib check (behavior unchanged).
 */
public class StoragePoolStatistics {
    String maxCapacityInKb;       // total raw capacity
    String spareCapacityInKb;     // spare capacity, not used for volume creation/allocation
    String netCapacityInUseInKb;  // user data capacity in use
    String netUnusedCapacityInKb; // capacity available for volume creation (volume space to write)

    // Parses a capacity string, treating null/empty as zero (the gateway may
    // omit fields). Malformed values still throw NumberFormatException so a
    // bad response is not silently zeroed, matching the original behavior.
    private static Long parseKb(String value) {
        if (value == null || value.isEmpty()) {
            return Long.valueOf(0);
        }
        return Long.valueOf(value);
    }

    /** @return total raw capacity in KB, zero when unreported */
    public Long getMaxCapacityInKb() {
        return parseKb(maxCapacityInKb);
    }

    public void setMaxCapacityInKb(String maxCapacityInKb) {
        this.maxCapacityInKb = maxCapacityInKb;
    }

    /** @return spare (reserved) capacity in KB, zero when unreported */
    public Long getSpareCapacityInKb() {
        return parseKb(spareCapacityInKb);
    }

    public void setSpareCapacityInKb(String spareCapacityInKb) {
        this.spareCapacityInKb = spareCapacityInKb;
    }

    /** @return user-data capacity in use in KB, zero when unreported */
    public Long getNetCapacityInUseInKb() {
        return parseKb(netCapacityInUseInKb);
    }

    public void setNetCapacityInUseInKb(String netCapacityInUseInKb) {
        this.netCapacityInUseInKb = netCapacityInUseInKb;
    }

    /** @return capacity still available for volume creation, in KB */
    public Long getNetUnusedCapacityInKb() {
        return parseKb(netUnusedCapacityInKb);
    }

    /** @return capacity still available for volume creation, in bytes */
    public Long getNetUnusedCapacityInBytes() {
        return (getNetUnusedCapacityInKb() * 1024);
    }

    public void setNetUnusedCapacityInKb(String netUnusedCapacityInKb) {
        this.netUnusedCapacityInKb = netUnusedCapacityInKb;
    }

    /**
     * Net usable capacity in bytes: ("maxCapacityInKb" - "spareCapacityInKb") / 2.
     * The division by 2 presumably accounts for PowerFlex keeping two copies
     * of every chunk — TODO confirm against PowerFlex documentation.
     * Uses integer division, as in the original formula.
     *
     * @return total usable capacity in bytes
     */
    public Long getNetMaxCapacityInBytes() {
        Long netMaxCapacityInKb = getMaxCapacityInKb() - getSpareCapacityInKb();
        return ((netMaxCapacityInKb / 2) * 1024);
    }

    /** @return used capacity in bytes (usable total minus unused) */
    public Long getNetUsedCapacityInBytes() {
        return (getNetMaxCapacityInBytes() - getNetUnusedCapacityInBytes());
    }
}
+ +package org.apache.cloudstack.storage.datastore.api; + +import java.util.Arrays; +import java.util.List; + +public class Volume { + public enum VolumeType { + ThickProvisioned, + ThinProvisioned, + Snapshot + } + String id; + String name; + String ancestorVolumeId; + String consistencyGroupId; + Long creationTime; + Long sizeInKb; + String sizeInGB; + String storagePoolId; + VolumeType volumeType; + String volumeSizeInGb; + String vtreeId; + SdcMappingInfo[] mappedSdcInfo; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getAncestorVolumeId() { + return ancestorVolumeId; + } + + public void setAncestorVolumeId(String ancestorVolumeId) { + this.ancestorVolumeId = ancestorVolumeId; + } + + public String getConsistencyGroupId() { + return consistencyGroupId; + } + + public void setConsistencyGroupId(String consistencyGroupId) { + this.consistencyGroupId = consistencyGroupId; + } + + public Long getCreationTime() { + return creationTime; + } + + public void setCreationTime(Long creationTime) { + this.creationTime = creationTime; + } + + public Long getSizeInKb() { + return sizeInKb; + } + + public void setSizeInKb(Long sizeInKb) { + this.sizeInKb = sizeInKb; + } + + public String getSizeInGB() { + return sizeInGB; + } + + public void setSizeInGB(Integer sizeInGB) { + this.sizeInGB = sizeInGB.toString(); + } + + public void setVolumeSizeInGb(String volumeSizeInGb) { + this.volumeSizeInGb = volumeSizeInGb; + } + + public String getStoragePoolId() { + return storagePoolId; + } + + public void setStoragePoolId(String storagePoolId) { + this.storagePoolId = storagePoolId; + } + + public String getVolumeSizeInGb() { + return volumeSizeInGb; + } + + public void setVolumeSizeInGb(Integer volumeSizeInGb) { + this.volumeSizeInGb = volumeSizeInGb.toString(); + } + + public VolumeType getVolumeType() 
{ + return volumeType; + } + + public void setVolumeType(String volumeType) { + this.volumeType = Enum.valueOf(VolumeType.class, volumeType); + } + + public void setVolumeType(VolumeType volumeType) { + this.volumeType = volumeType; + } + + public String getVtreeId() { + return vtreeId; + } + + public void setVtreeId(String vtreeId) { + this.vtreeId = vtreeId; + } + + public List getMappedSdcList() { + if (mappedSdcInfo != null) { + return Arrays.asList(mappedSdcInfo); + } + return null; + } + + public SdcMappingInfo[] getMappedSdcInfo() { + return mappedSdcInfo; + } + + public void setMappedSdcInfo(SdcMappingInfo[] mappedSdcInfo) { + this.mappedSdcInfo = mappedSdcInfo; + } +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/VolumeStatistics.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/VolumeStatistics.java new file mode 100644 index 000000000000..6f48e1721ad5 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/VolumeStatistics.java @@ -0,0 +1,53 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
/**
 * Usage statistics of a single PowerFlex/ScaleIO volume, in kilobytes as
 * reported by the Gateway REST API. Unset counters are exposed as zero so
 * callers never see {@code null}.
 */
public class VolumeStatistics {
    Long allocatedSizeInKb;           // virtual size
    Long netProvisionedAddressesInKb; // physical size

    /** @return virtual (provisioned) size in KB; zero when unreported */
    public Long getAllocatedSizeInKb() {
        return allocatedSizeInKb == null ? Long.valueOf(0) : allocatedSizeInKb;
    }

    /** @return virtual size converted to bytes */
    public Long getAllocatedSizeInBytes() {
        return getAllocatedSizeInKb() * 1024;
    }

    public void setAllocatedSizeInKb(final Long value) {
        allocatedSizeInKb = value;
    }

    /** @return physically written size in KB; zero when unreported */
    public Long getNetProvisionedAddressesInKb() {
        return netProvisionedAddressesInKb == null ? Long.valueOf(0) : netProvisionedAddressesInKb;
    }

    /** @return physically written size converted to bytes */
    public Long getNetProvisionedAddressesInBytes() {
        return getNetProvisionedAddressesInKb() * 1024;
    }

    public void setNetProvisionedAddressesInKb(final Long value) {
        netProvisionedAddressesInKb = value;
    }
}
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.datastore.client; + +import java.net.URISyntaxException; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.storage.datastore.api.Sdc; +import org.apache.cloudstack.storage.datastore.api.SnapshotGroup; +import org.apache.cloudstack.storage.datastore.api.StoragePool; +import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; +import org.apache.cloudstack.storage.datastore.api.Volume; +import org.apache.cloudstack.storage.datastore.api.VolumeStatistics; + +import com.cloud.storage.Storage; + +public interface ScaleIOGatewayClient { + String GATEWAY_API_ENDPOINT = "powerflex.gw.url"; + String GATEWAY_API_USERNAME = "powerflex.gw.username"; + String GATEWAY_API_PASSWORD = "powerflex.gw.password"; + String STORAGE_POOL_NAME = "powerflex.storagepool.name"; + String STORAGE_POOL_SYSTEM_ID = "powerflex.storagepool.system.id"; + + static ScaleIOGatewayClient getClient(final String url, final String username, final String password, + final boolean validateCertificate, final int timeout) throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException { + return new ScaleIOGatewayClientImpl(url, username, password, validateCertificate, timeout); + } + + // Volume APIs + Volume createVolume(final String name, final String storagePoolId, + final Integer sizeInGb, final Storage.ProvisioningType volumeType); + List listVolumes(); + List listSnapshotVolumes(); + Volume 
getVolume(String volumeId); + Volume getVolumeByName(String name); + boolean renameVolume(final String volumeId, final String newName); + Volume resizeVolume(final String volumeId, final Integer sizeInGb); + Volume cloneVolume(final String sourceVolumeId, final String destVolumeName); + boolean deleteVolume(final String volumeId); + boolean migrateVolume(final String srcVolumeId, final String destPoolId); + + boolean mapVolumeToSdc(final String volumeId, final String sdcId); + boolean mapVolumeToSdcWithLimits(final String volumeId, final String sdcId, final Long iopsLimit, final Long bandwidthLimitInKbps); + boolean unmapVolumeFromSdc(final String volumeId, final String sdcId); + boolean unmapVolumeFromAllSdcs(final String volumeId); + boolean isVolumeMappedToSdc(final String volumeId, final String sdcId); + + // Snapshot APIs + SnapshotGroup takeSnapshot(final Map srcVolumeDestSnapshotMap); + boolean revertSnapshot(final String systemId, final Map srcSnapshotDestVolumeMap); + int deleteSnapshotGroup(final String systemId, final String snapshotGroupId); + Volume takeSnapshot(final String volumeId, final String snapshotVolumeName); + boolean revertSnapshot(final String sourceSnapshotVolumeId, final String destVolumeId); + + // Storage Pool APIs + List listStoragePools(); + StoragePool getStoragePool(String poolId); + StoragePoolStatistics getStoragePoolStatistics(String poolId); + VolumeStatistics getVolumeStatistics(String volumeId); + String getSystemId(String protectionDomainId); + List listVolumesInStoragePool(String poolId); + + // SDC APIs + List listSdcs(); + Sdc getSdc(String sdcId); + Sdc getSdcByIp(String ipAddress); + Sdc getConnectedSdcByIp(String ipAddress); + List listConnectedSdcIps(); + boolean isSdcConnected(String ipAddress); +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java 
b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java new file mode 100644 index 000000000000..6baf46532290 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java @@ -0,0 +1,1021 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.storage.datastore.client; + +import java.io.IOException; +import java.net.SocketTimeoutException; +import java.net.URI; +import java.net.URISyntaxException; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.X509TrustManager; + +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.storage.datastore.api.ProtectionDomain; +import org.apache.cloudstack.storage.datastore.api.Sdc; +import org.apache.cloudstack.storage.datastore.api.SdcMappingInfo; +import org.apache.cloudstack.storage.datastore.api.SnapshotDef; +import org.apache.cloudstack.storage.datastore.api.SnapshotDefs; +import org.apache.cloudstack.storage.datastore.api.SnapshotGroup; +import org.apache.cloudstack.storage.datastore.api.StoragePool; +import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; +import org.apache.cloudstack.storage.datastore.api.Volume; +import org.apache.cloudstack.storage.datastore.api.VolumeStatistics; +import org.apache.cloudstack.utils.security.SSLUtils; +import org.apache.http.HttpHeaders; +import org.apache.http.HttpResponse; +import org.apache.http.HttpStatus; +import org.apache.http.client.HttpClient; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.conn.ConnectTimeoutException; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.util.EntityUtils; +import 
org.apache.log4j.Logger; + +import com.cloud.storage.Storage; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.nio.TrustAllManager; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.json.JsonMapper; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; + +public class ScaleIOGatewayClientImpl implements ScaleIOGatewayClient { + private static final Logger LOG = Logger.getLogger(ScaleIOGatewayClientImpl.class); + + private final URI apiURI; + private final HttpClient httpClient; + private static final String SESSION_HEADER = "X-RestSvcSessionId"; + private static final String MDM_CONNECTED_STATE = "Connected"; + + private String host; + private String username; + private String password; + private String sessionKey = null; + + public ScaleIOGatewayClientImpl(final String url, final String username, final String password, + final boolean validateCertificate, final int timeout) + throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException { + Preconditions.checkArgument(!Strings.isNullOrEmpty(url), "Gateway client url cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(username) && !Strings.isNullOrEmpty(password), "Gateway client credentials cannot be null"); + + final RequestConfig config = RequestConfig.custom() + .setConnectTimeout(timeout * 1000) + .setConnectionRequestTimeout(timeout * 1000) + .setSocketTimeout(timeout * 1000) + .build(); + + if (!validateCertificate) { + final SSLContext sslcontext = SSLUtils.getSSLContext(); + sslcontext.init(null, new X509TrustManager[]{new TrustAllManager()}, new SecureRandom()); + final SSLConnectionSocketFactory factory = new SSLConnectionSocketFactory(sslcontext, NoopHostnameVerifier.INSTANCE); + this.httpClient = 
HttpClientBuilder.create() + .setDefaultRequestConfig(config) + .setSSLSocketFactory(factory) + .build(); + } else { + this.httpClient = HttpClientBuilder.create() + .setDefaultRequestConfig(config) + .build(); + } + + this.apiURI = new URI(url); + this.host = apiURI.getHost(); + this.username = username; + this.password = password; + + authenticate(username, password); + } + + ///////////////////////////////////////////////////////////// + //////////////// Private Helper Methods ///////////////////// + ///////////////////////////////////////////////////////////// + + private void authenticate(final String username, final String password) { + final HttpGet request = new HttpGet(apiURI.toString() + "/login"); + request.setHeader(HttpHeaders.AUTHORIZATION, "Basic " + Base64.getEncoder().encodeToString((username + ":" + password).getBytes())); + try { + final HttpResponse response = httpClient.execute(request); + checkAuthFailure(response); + this.sessionKey = EntityUtils.toString(response.getEntity()); + if (Strings.isNullOrEmpty(this.sessionKey)) { + throw new CloudRuntimeException("Failed to create a valid PowerFlex Gateway Session to perform API requests"); + } + this.sessionKey = this.sessionKey.replace("\"", ""); + if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { + throw new CloudRuntimeException("PowerFlex Gateway login failed, please check the provided settings"); + } + } catch (final IOException e) { + throw new CloudRuntimeException("Failed to authenticate PowerFlex API Gateway due to:" + e.getMessage()); + } + } + + private void checkAuthFailure(final HttpResponse response) { + if (response != null && response.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED) { + throw new ServerApiException(ApiErrorCode.UNAUTHORIZED, "PowerFlex Gateway API call unauthorized, please check the provided settings"); + } + } + + private void checkResponseOK(final HttpResponse response) { + if (response.getStatusLine().getStatusCode() == 
HttpStatus.SC_NO_CONTENT) { + LOG.debug("Requested resource does not exist"); + return; + } + if (response.getStatusLine().getStatusCode() == HttpStatus.SC_BAD_REQUEST) { + throw new ServerApiException(ApiErrorCode.MALFORMED_PARAMETER_ERROR, "Bad API request"); + } + if (!(response.getStatusLine().getStatusCode() == HttpStatus.SC_OK || + response.getStatusLine().getStatusCode() == HttpStatus.SC_ACCEPTED)) { + String responseBody = response.toString(); + try { + responseBody = EntityUtils.toString(response.getEntity()); + } catch (IOException ignored) { + } + LOG.debug("HTTP request failed, status code is " + response.getStatusLine().getStatusCode() + ", response is: " + responseBody); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "API failed due to: " + responseBody); + } + } + + private void checkResponseTimeOut(final Exception e) { + if (e instanceof ConnectTimeoutException || e instanceof SocketTimeoutException) { + throw new ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, "API operation timed out, please try again."); + } + } + + private HttpResponse get(final String path) throws IOException { + final HttpGet request = new HttpGet(apiURI.toString() + path); + request.setHeader(HttpHeaders.AUTHORIZATION, "Basic " + Base64.getEncoder().encodeToString((this.username + ":" + this.sessionKey).getBytes())); + final HttpResponse response = httpClient.execute(request); + String responseStatus = (response != null) ? 
(response.getStatusLine().getStatusCode() + " " + response.getStatusLine().getReasonPhrase()) : "nil"; + LOG.debug("GET request path: " + path + ", response: " + responseStatus); + checkAuthFailure(response); + return response; + } + + private HttpResponse post(final String path, final Object obj) throws IOException { + final HttpPost request = new HttpPost(apiURI.toString() + path); + request.setHeader(HttpHeaders.AUTHORIZATION, "Basic " + Base64.getEncoder().encodeToString((this.username + ":" + this.sessionKey).getBytes())); + request.setHeader("Content-type", "application/json"); + if (obj != null) { + if (obj instanceof String) { + request.setEntity(new StringEntity((String) obj)); + } else { + JsonMapper mapper = new JsonMapper(); + mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL); + String json = mapper.writer().writeValueAsString(obj); + request.setEntity(new StringEntity(json)); + } + } + final HttpResponse response = httpClient.execute(request); + String responseStatus = (response != null) ? 
(response.getStatusLine().getStatusCode() + " " + response.getStatusLine().getReasonPhrase()) : "nil"; + LOG.debug("POST request path: " + path + ", response: " + responseStatus); + checkAuthFailure(response); + return response; + } + + ////////////////////////////////////////////////// + //////////////// Volume APIs ///////////////////// + ////////////////////////////////////////////////// + + @Override + public Volume createVolume(final String name, final String storagePoolId, + final Integer sizeInGb, final Storage.ProvisioningType volumeType) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(name), "Volume name cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(storagePoolId), "Storage pool id cannot be null"); + Preconditions.checkArgument(sizeInGb != null && sizeInGb > 0, "Size(GB) must be greater than 0"); + + HttpResponse response = null; + try { + Volume newVolume = new Volume(); + newVolume.setName(name); + newVolume.setStoragePoolId(storagePoolId); + newVolume.setVolumeSizeInGb(sizeInGb); + if (Storage.ProvisioningType.FAT.equals(volumeType)) { + newVolume.setVolumeType(Volume.VolumeType.ThickProvisioned); + } else { + newVolume.setVolumeType(Volume.VolumeType.ThinProvisioned); + } + // The basic allocation granularity is 8GB. The volume size will be rounded up. 
+ response = post("/types/Volume/instances", newVolume); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + Volume newVolumeObject = mapper.readValue(response.getEntity().getContent(), Volume.class); + return getVolume(newVolumeObject.getId()); + } catch (final IOException e) { + LOG.error("Failed to create PowerFlex volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public List listVolumes() { + HttpResponse response = null; + try { + response = get("/types/Volume/instances"); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + Volume[] volumes = mapper.readValue(response.getEntity().getContent(), Volume[].class); + return Arrays.asList(volumes); + } catch (final IOException e) { + LOG.error("Failed to list PowerFlex volumes due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return new ArrayList<>(); + } + + @Override + public List listSnapshotVolumes() { + List volumes = listVolumes(); + List snapshotVolumes = new ArrayList<>(); + if (volumes != null && !volumes.isEmpty()) { + for (Volume volume : volumes) { + if (volume != null && volume.getVolumeType() == Volume.VolumeType.Snapshot) { + snapshotVolumes.add(volume); + } + } + } + + return snapshotVolumes; + } + + @Override + public Volume getVolume(String volumeId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + + HttpResponse response = null; + try { + response = get("/instances/Volume::" + volumeId); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + return 
mapper.readValue(response.getEntity().getContent(), Volume.class); + } catch (final IOException e) { + LOG.error("Failed to get volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public Volume getVolumeByName(String name) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(name), "Volume name cannot be null"); + + HttpResponse response = null; + try { + Volume searchVolume = new Volume(); + searchVolume.setName(name); + response = post("/types/Volume/instances/action/queryIdByKey", searchVolume); + checkResponseOK(response); + String volumeId = EntityUtils.toString(response.getEntity()); + if (!Strings.isNullOrEmpty(volumeId)) { + return getVolume(volumeId.replace("\"", "")); + } + } catch (final IOException e) { + LOG.error("Failed to get volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public boolean renameVolume(final String volumeId, final String newName) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(newName), "New name for volume cannot be null"); + + HttpResponse response = null; + try { + response = post( + "/instances/Volume::" + volumeId + "/action/setVolumeName", + String.format("{\"newName\":\"%s\"}", newName)); + checkResponseOK(response); + return true; + } catch (final IOException e) { + LOG.error("Failed to rename PowerFlex volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + + @Override + public Volume resizeVolume(final String volumeId, final Integer sizeInGB) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + 
Preconditions.checkArgument(sizeInGB != null && (sizeInGB > 0 && sizeInGB % 8 == 0), + "Size(GB) must be greater than 0 and in granularity of 8"); + + HttpResponse response = null; + try { + // Volume capacity can only be increased. sizeInGB must be a positive number in granularity of 8 GB. + response = post( + "/instances/Volume::" + volumeId + "/action/setVolumeSize", + String.format("{\"sizeInGB\":\"%s\"}", sizeInGB.toString())); + checkResponseOK(response); + return getVolume(volumeId); + } catch (final IOException e) { + LOG.error("Failed to resize PowerFlex volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public Volume cloneVolume(final String sourceVolumeId, final String destVolumeName) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(sourceVolumeId), "Source volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(destVolumeName), "Dest volume name cannot be null"); + + Map snapshotMap = new HashMap<>(); + snapshotMap.put(sourceVolumeId, destVolumeName); + takeSnapshot(snapshotMap); + return getVolumeByName(destVolumeName); + } + + @Override + public SnapshotGroup takeSnapshot(final Map srcVolumeDestSnapshotMap) { + Preconditions.checkArgument(srcVolumeDestSnapshotMap != null && !srcVolumeDestSnapshotMap.isEmpty(), "srcVolumeDestSnapshotMap cannot be null"); + + HttpResponse response = null; + try { + final List defs = new ArrayList<>(); + for (final String volumeId : srcVolumeDestSnapshotMap.keySet()) { + final SnapshotDef snapshotDef = new SnapshotDef(); + snapshotDef.setVolumeId(volumeId); + String snapshotName = srcVolumeDestSnapshotMap.get(volumeId); + if (!Strings.isNullOrEmpty(snapshotName)) { + snapshotDef.setSnapshotName(srcVolumeDestSnapshotMap.get(volumeId)); + } + defs.add(snapshotDef); + } + final SnapshotDefs snapshotDefs = new SnapshotDefs(); + 
snapshotDefs.setSnapshotDefs(defs.toArray(new SnapshotDef[0])); + response = post("/instances/System/action/snapshotVolumes", snapshotDefs); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + return mapper.readValue(response.getEntity().getContent(), SnapshotGroup.class); + } catch (final IOException e) { + LOG.error("Failed to take snapshot due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public boolean revertSnapshot(final String systemId, final Map srcSnapshotDestVolumeMap) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(systemId), "System id cannot be null"); + Preconditions.checkArgument(srcSnapshotDestVolumeMap != null && !srcSnapshotDestVolumeMap.isEmpty(), "srcSnapshotDestVolumeMap cannot be null"); + + // Take group snapshot (needs additional storage pool capacity till revert operation) to keep the last state of all volumes ??? 
+ // and delete the group snapshot after revert operation + // If revert snapshot failed for any volume, use the group snapshot, to revert volumes to last state + Map srcVolumeDestSnapshotMap = new HashMap<>(); + List originalVolumeIds = new ArrayList<>(); + for (final String sourceSnapshotVolumeId : srcSnapshotDestVolumeMap.keySet()) { + String destVolumeId = srcSnapshotDestVolumeMap.get(sourceSnapshotVolumeId); + srcVolumeDestSnapshotMap.put(destVolumeId, ""); + originalVolumeIds.add(destVolumeId); + } + SnapshotGroup snapshotGroup = takeSnapshot(srcVolumeDestSnapshotMap); + if (snapshotGroup == null) { + throw new CloudRuntimeException("Failed to snapshot the last vm state"); + } + + boolean revertSnapshotResult = true; + int revertStatusIndex = -1; + + try { + // non-atomic operation, try revert each volume + for (final String sourceSnapshotVolumeId : srcSnapshotDestVolumeMap.keySet()) { + String destVolumeId = srcSnapshotDestVolumeMap.get(sourceSnapshotVolumeId); + boolean revertStatus = revertSnapshot(sourceSnapshotVolumeId, destVolumeId); + if (!revertStatus) { + revertSnapshotResult = false; + LOG.warn("Failed to revert snapshot for volume id: " + sourceSnapshotVolumeId); + throw new CloudRuntimeException("Failed to revert snapshot for volume id: " + sourceSnapshotVolumeId); + } else { + revertStatusIndex++; + } + } + } catch (final Exception e) { + LOG.error("Failed to revert vm snapshot due to: " + e.getMessage(), e); + throw new CloudRuntimeException("Failed to revert vm snapshot due to: " + e.getMessage()); + } finally { + if (!revertSnapshotResult) { + //revert to volume with last state and delete the snapshot group, for already reverted volumes + List volumesWithLastState = snapshotGroup.getVolumeIds(); + for (int index = revertStatusIndex; index >= 0; index--) { + // Handling failure for revert again will become recursive ??? 
+ revertSnapshot(volumesWithLastState.get(index), originalVolumeIds.get(index)); + } + } + deleteSnapshotGroup(systemId, snapshotGroup.getSnapshotGroupId()); + } + + return revertSnapshotResult; + } + + @Override + public int deleteSnapshotGroup(final String systemId, final String snapshotGroupId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(systemId), "System id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(snapshotGroupId), "Snapshot group id cannot be null"); + + HttpResponse response = null; + try { + response = post( + "/instances/System::" + systemId + "/action/removeConsistencyGroupSnapshots", + String.format("{\"snapGroupId\":\"%s\"}", snapshotGroupId)); + checkResponseOK(response); + JsonNode node = new ObjectMapper().readTree(response.getEntity().getContent()); + JsonNode noOfVolumesNode = node.get("numberOfVolumes"); + return noOfVolumesNode.asInt(); + } catch (final IOException e) { + LOG.error("Failed to delete PowerFlex snapshot group due to: " + e.getMessage(), e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return -1; + } + + @Override + public Volume takeSnapshot(final String volumeId, final String snapshotVolumeName) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(snapshotVolumeName), "Snapshot name cannot be null"); + + HttpResponse response = null; + try { + final SnapshotDef[] snapshotDef = new SnapshotDef[1]; + snapshotDef[0] = new SnapshotDef(); + snapshotDef[0].setVolumeId(volumeId); + snapshotDef[0].setSnapshotName(snapshotVolumeName); + final SnapshotDefs snapshotDefs = new SnapshotDefs(); + snapshotDefs.setSnapshotDefs(snapshotDef); + + response = post("/instances/System/action/snapshotVolumes", snapshotDefs); + checkResponseOK(response); + ObjectMapper mapper = new 
ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + SnapshotGroup snapshotGroup = mapper.readValue(response.getEntity().getContent(), SnapshotGroup.class); + if (snapshotGroup != null) { + List volumeIds = snapshotGroup.getVolumeIds(); + if (volumeIds != null && !volumeIds.isEmpty()) { + return getVolume(volumeIds.get(0)); + } + } + } catch (final IOException e) { + LOG.error("Failed to take snapshot due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public boolean revertSnapshot(final String sourceSnapshotVolumeId, final String destVolumeId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(sourceSnapshotVolumeId), "Source snapshot volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(destVolumeId), "Destination volume id cannot be null"); + + HttpResponse response = null; + try { + Volume sourceSnapshotVolume = getVolume(sourceSnapshotVolumeId); + if (sourceSnapshotVolume == null) { + throw new CloudRuntimeException("Source snapshot volume: " + sourceSnapshotVolumeId + " doesn't exists"); + } + + Volume destVolume = getVolume(destVolumeId); + if (sourceSnapshotVolume == null) { + throw new CloudRuntimeException("Destination volume: " + destVolumeId + " doesn't exists"); + } + + if (!sourceSnapshotVolume.getVtreeId().equals(destVolume.getVtreeId())) { + throw new CloudRuntimeException("Unable to revert, source snapshot volume and destination volume doesn't belong to same volume tree"); + } + + response = post( + "/instances/Volume::" + destVolumeId + "/action/overwriteVolumeContent", + String.format("{\"srcVolumeId\":\"%s\",\"allowOnExtManagedVol\":\"TRUE\"}", sourceSnapshotVolumeId)); + checkResponseOK(response); + return true; + } catch (final IOException e) { + LOG.error("Failed to map PowerFlex volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != 
null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + + @Override + public boolean mapVolumeToSdc(final String volumeId, final String sdcId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(sdcId), "Sdc Id cannot be null"); + + HttpResponse response = null; + try { + if (isVolumeMappedToSdc(volumeId, sdcId)) { + return true; + } + + response = post( + "/instances/Volume::" + volumeId + "/action/addMappedSdc", + String.format("{\"sdcId\":\"%s\",\"allowMultipleMappings\":\"TRUE\"}", sdcId)); + checkResponseOK(response); + return true; + } catch (final IOException e) { + LOG.error("Failed to map PowerFlex volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + + @Override + public boolean mapVolumeToSdcWithLimits(final String volumeId, final String sdcId, final Long iopsLimit, final Long bandwidthLimitInKbps) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(sdcId), "Sdc Id cannot be null"); + Preconditions.checkArgument(iopsLimit != null && (iopsLimit == 0 || iopsLimit > 10), + "IOPS limit must be 0 (unlimited) or greater than 10"); + Preconditions.checkArgument(bandwidthLimitInKbps != null && (bandwidthLimitInKbps == 0 || (bandwidthLimitInKbps > 0 && bandwidthLimitInKbps % 1024 == 0)), + "Bandwidth limit(Kbps) must be 0 (unlimited) or in granularity of 1024"); + + HttpResponse response = null; + try { + if (mapVolumeToSdc(volumeId, sdcId)) { + long iopsLimitVal = 0; + if (iopsLimit != null && iopsLimit.longValue() > 0) { + iopsLimitVal = iopsLimit.longValue(); + } + + long bandwidthLimitInKbpsVal = 0; + if (bandwidthLimitInKbps != null && bandwidthLimitInKbps.longValue() > 0) { + bandwidthLimitInKbpsVal = 
bandwidthLimitInKbps.longValue(); + } + + response = post( + "/instances/Volume::" + volumeId + "/action/setMappedSdcLimits", + String.format("{\"sdcId\":\"%s\",\"bandwidthLimitInKbps\":\"%d\",\"iopsLimit\":\"%d\"}", sdcId, bandwidthLimitInKbpsVal, iopsLimitVal)); + checkResponseOK(response); + return true; + } + } catch (final IOException e) { + LOG.error("Failed to map PowerFlex volume with limits due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + + @Override + public boolean unmapVolumeFromSdc(final String volumeId, final String sdcId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(sdcId), "Sdc Id cannot be null"); + + HttpResponse response = null; + try { + if (isVolumeMappedToSdc(volumeId, sdcId)) { + response = post( + "/instances/Volume::" + volumeId + "/action/removeMappedSdc", + String.format("{\"sdcId\":\"%s\",\"skipApplianceValidation\":\"TRUE\"}", sdcId)); + checkResponseOK(response); + return true; + } + } catch (final IOException e) { + LOG.error("Failed to unmap PowerFlex volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + + @Override + public boolean unmapVolumeFromAllSdcs(final String volumeId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + + HttpResponse response = null; + try { + Volume volume = getVolume(volumeId); + if (volume == null) { + return false; + } + + List mappedSdcList = volume.getMappedSdcList(); + if (mappedSdcList == null || mappedSdcList.isEmpty()) { + return true; + } + + response = post( + "/instances/Volume::" + volumeId + "/action/removeMappedSdc", + "{\"allSdcs\": \"\"}"); + checkResponseOK(response); + return true; + } catch (final IOException e) { + 
LOG.error("Failed to unmap PowerFlex volume from all SDCs due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + + @Override + public boolean isVolumeMappedToSdc(final String volumeId, final String sdcId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(sdcId), "Sdc Id cannot be null"); + + if (Strings.isNullOrEmpty(volumeId) || Strings.isNullOrEmpty(sdcId)) { + return false; + } + + Volume volume = getVolume(volumeId); + if (volume == null) { + return false; + } + + List mappedSdcList = volume.getMappedSdcList(); + if (mappedSdcList != null && !mappedSdcList.isEmpty()) { + for (SdcMappingInfo mappedSdc : mappedSdcList) { + if (sdcId.equalsIgnoreCase(mappedSdc.getSdcId())) { + return true; + } + } + } + + return false; + } + + @Override + public boolean deleteVolume(final String volumeId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + + HttpResponse response = null; + try { + try { + unmapVolumeFromAllSdcs(volumeId); + } catch (Exception ignored) {} + response = post( + "/instances/Volume::" + volumeId + "/action/removeVolume", + "{\"removeMode\":\"ONLY_ME\"}"); + checkResponseOK(response); + return true; + } catch (final IOException e) { + LOG.error("Failed to delete PowerFlex volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + + @Override + public boolean migrateVolume(final String srcVolumeId, final String destPoolId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(srcVolumeId), "src volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(destPoolId), "dest pool id cannot be null"); + + HttpResponse response = null; + try { + response = post( + 
"/instances/Volume::" + srcVolumeId + "/action/migrateVTree", + String.format("{\"destSPId\":\"%s\"}", destPoolId)); + checkResponseOK(response); + return true; + } catch (final IOException e) { + LOG.error("Failed to migrate PowerFlex volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + + /////////////////////////////////////////////////////// + //////////////// StoragePool APIs ///////////////////// + /////////////////////////////////////////////////////// + + @Override + public List listStoragePools() { + HttpResponse response = null; + try { + response = get("/types/StoragePool/instances"); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + StoragePool[] pools = mapper.readValue(response.getEntity().getContent(), StoragePool[].class); + return Arrays.asList(pools); + } catch (final IOException e) { + LOG.error("Failed to list PowerFlex storage pools due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return new ArrayList<>(); + } + + @Override + public StoragePool getStoragePool(String poolId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(poolId), "Storage pool id cannot be null"); + + HttpResponse response = null; + try { + response = get("/instances/StoragePool::" + poolId); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + return mapper.readValue(response.getEntity().getContent(), StoragePool.class); + } catch (final IOException e) { + LOG.error("Failed to get storage pool due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public 
StoragePoolStatistics getStoragePoolStatistics(String poolId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(poolId), "Storage pool id cannot be null"); + + HttpResponse response = null; + try { + response = get("/instances/StoragePool::" + poolId + "/relationships/Statistics"); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + return mapper.readValue(response.getEntity().getContent(), StoragePoolStatistics.class); + } catch (final IOException e) { + LOG.error("Failed to get storage pool due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public VolumeStatistics getVolumeStatistics(String volumeId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + + HttpResponse response = null; + try { + Volume volume = getVolume(volumeId); + if (volume != null) { + String volumeTreeId = volume.getVtreeId(); + if (!Strings.isNullOrEmpty(volumeTreeId)) { + response = get("/instances/VTree::" + volumeTreeId + "/relationships/Statistics"); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + VolumeStatistics volumeStatistics = mapper.readValue(response.getEntity().getContent(), VolumeStatistics.class); + if (volumeStatistics != null) { + volumeStatistics.setAllocatedSizeInKb(volume.getSizeInKb()); + return volumeStatistics; + } + } + } + } catch (final IOException e) { + LOG.error("Failed to get volume stats due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + + return null; + } + + @Override + public String getSystemId(String protectionDomainId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(protectionDomainId), "Protection domain 
id cannot be null"); + + HttpResponse response = null; + try { + response = get("/instances/ProtectionDomain::" + protectionDomainId); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + ProtectionDomain protectionDomain = mapper.readValue(response.getEntity().getContent(), ProtectionDomain.class); + if (protectionDomain != null) { + return protectionDomain.getSystemId(); + } + } catch (final IOException e) { + LOG.error("Failed to get protection domain details due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public List listVolumesInStoragePool(String poolId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(poolId), "Storage pool id cannot be null"); + + HttpResponse response = null; + try { + response = get("/instances/StoragePool::" + poolId + "/relationships/Volume"); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + Volume[] volumes = mapper.readValue(response.getEntity().getContent(), Volume[].class); + return Arrays.asList(volumes); + } catch (final IOException e) { + LOG.error("Failed to list volumes in storage pool due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return new ArrayList<>(); + } + + /////////////////////////////////////////////// + //////////////// SDC APIs ///////////////////// + /////////////////////////////////////////////// + + @Override + public List listSdcs() { + HttpResponse response = null; + try { + response = get("/types/Sdc/instances"); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + Sdc[] sdcs = 
mapper.readValue(response.getEntity().getContent(), Sdc[].class); + return Arrays.asList(sdcs); + } catch (final IOException e) { + LOG.error("Failed to list SDCs due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return new ArrayList<>(); + } + + @Override + public Sdc getSdc(String sdcId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(sdcId), "Sdc id cannot be null"); + + HttpResponse response = null; + try { + response = get("/instances/Sdc::" + sdcId); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + return mapper.readValue(response.getEntity().getContent(), Sdc.class); + } catch (final IOException e) { + LOG.error("Failed to get volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public Sdc getSdcByIp(String ipAddress) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(ipAddress), "IP address cannot be null"); + + HttpResponse response = null; + try { + response = post("/types/Sdc/instances/action/queryIdByKey", String.format("{\"ip\":\"%s\"}", ipAddress)); + checkResponseOK(response); + String sdcId = EntityUtils.toString(response.getEntity()); + if (!Strings.isNullOrEmpty(sdcId)) { + return getSdc(sdcId.replace("\"", "")); + } + } catch (final IOException e) { + LOG.error("Failed to get volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public Sdc getConnectedSdcByIp(String ipAddress) { + Sdc sdc = getSdcByIp(ipAddress); + if (sdc != null && MDM_CONNECTED_STATE.equalsIgnoreCase(sdc.getMdmConnectionState())) { + return sdc; + } + + return null; + } + + @Override + public List listConnectedSdcIps() { + List 
sdcIps = new ArrayList<>(); + List sdcs = listSdcs(); + if(sdcs != null) { + for (Sdc sdc : sdcs) { + if (MDM_CONNECTED_STATE.equalsIgnoreCase(sdc.getMdmConnectionState())) { + sdcIps.add(sdc.getSdcIp()); + } + } + } + + return sdcIps; + } + + @Override + public boolean isSdcConnected(String ipAddress) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(ipAddress), "IP address cannot be null"); + + List sdcs = listSdcs(); + if(sdcs != null) { + for (Sdc sdc : sdcs) { + if (ipAddress.equalsIgnoreCase(sdc.getSdcIp()) && MDM_CONNECTED_STATE.equalsIgnoreCase(sdc.getMdmConnectionState())) { + return true; + } + } + } + + return false; + } +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java new file mode 100644 index 000000000000..d64c4088d7a7 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java @@ -0,0 +1,898 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.driver; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.RemoteHostEndPoint; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.command.CopyCommand; +import org.apache.cloudstack.storage.command.CreateObjectAnswer; +import org.apache.cloudstack.storage.datastore.api.Sdc; +import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; +import org.apache.cloudstack.storage.datastore.api.VolumeStatistics; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import 
org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.commons.collections.CollectionUtils; +import org.apache.log4j.Logger; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.DataObjectType; +import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.agent.api.to.DataTO; +import com.cloud.alert.AlertManager; +import com.cloud.host.Host; +import com.cloud.server.ManagementServerImpl; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.ResizeVolumePayload; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeDetailVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.utils.Pair; +import com.cloud.utils.crypt.DBEncryptionUtil; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VirtualMachineManager; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; + +public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { + private static final Logger LOGGER = Logger.getLogger(ScaleIOPrimaryDataStoreDriver.class); + + @Inject + EndPointSelector selector; + @Inject + private PrimaryDataStoreDao storagePoolDao; + @Inject + private StoragePoolDetailsDao storagePoolDetailsDao; + @Inject + private VolumeDao volumeDao; + @Inject + private VolumeDetailsDao volumeDetailsDao; + @Inject + 
private VMTemplatePoolDao vmTemplatePoolDao; + @Inject + private SnapshotDataStoreDao snapshotDataStoreDao; + @Inject + protected SnapshotDao snapshotDao; + @Inject + private AlertManager alertMgr; + + public ScaleIOPrimaryDataStoreDriver() { + + } + + private ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception { + final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.valueIn(storagePoolId); + final String url = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_ENDPOINT).getValue(); + final String encryptedUsername = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_USERNAME).getValue(); + final String username = DBEncryptionUtil.decrypt(encryptedUsername); + final String encryptedPassword = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_PASSWORD).getValue(); + final String password = DBEncryptionUtil.decrypt(encryptedPassword); + return ScaleIOGatewayClient.getClient(url, username, password, false, clientTimeout); + } + + @Override + public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) { + try { + if (DataObjectType.VOLUME.equals(dataObject.getType())) { + final VolumeVO volume = volumeDao.findById(dataObject.getId()); + LOGGER.debug("Granting access for PowerFlex volume: " + volume.getPath()); + + Long bandwidthLimitInKbps = Long.valueOf(0); // Unlimited + // Check Bandwidht Limit parameter in volume details + final VolumeDetailVO bandwidthVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS); + if (bandwidthVolumeDetail != null && bandwidthVolumeDetail.getValue() != null) { + bandwidthLimitInKbps = Long.parseLong(bandwidthVolumeDetail.getValue()) * 1024; + } + + Long iopsLimit = Long.valueOf(0); // Unlimited + // Check IOPS Limit parameter in volume details, else try MaxIOPS + final VolumeDetailVO iopsVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), 
Volume.IOPS_LIMIT); + if (iopsVolumeDetail != null && iopsVolumeDetail.getValue() != null) { + iopsLimit = Long.parseLong(iopsVolumeDetail.getValue()); + } else if (volume.getMaxIops() != null) { + iopsLimit = volume.getMaxIops(); + } + if (iopsLimit > 0 && iopsLimit < ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT) { + iopsLimit = ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT; + } + + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress()); + if (sdc == null) { + alertHostSdcDisconnection(host); + throw new CloudRuntimeException("Unable to grant access to volume: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); + } + + return client.mapVolumeToSdcWithLimits(volume.getPath(), sdc.getId(), iopsLimit, bandwidthLimitInKbps); + } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) { + final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null); + LOGGER.debug("Granting access for PowerFlex template volume: " + templatePoolRef.getInstallPath()); + + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress()); + if (sdc == null) { + alertHostSdcDisconnection(host); + throw new CloudRuntimeException("Unable to grant access to template: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); + } + + return client.mapVolumeToSdc(templatePoolRef.getInstallPath(), sdc.getId()); + } else if (DataObjectType.SNAPSHOT.equals(dataObject.getType())) { + SnapshotInfo snapshot = (SnapshotInfo) dataObject; + LOGGER.debug("Granting access for PowerFlex volume snapshot: " + snapshot.getPath()); + + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress()); + if (sdc == null) { + 
alertHostSdcDisconnection(host); + throw new CloudRuntimeException("Unable to grant access to snapshot: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); + } + + return client.mapVolumeToSdc(snapshot.getPath(), sdc.getId()); + } + + return false; + } catch (Exception e) { + throw new CloudRuntimeException(e); + } + } + + @Override + public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { + try { + if (DataObjectType.VOLUME.equals(dataObject.getType())) { + final VolumeVO volume = volumeDao.findById(dataObject.getId()); + LOGGER.debug("Revoking access for PowerFlex volume: " + volume.getPath()); + + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress()); + if (sdc == null) { + throw new CloudRuntimeException("Unable to revoke access for volume: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); + } + + client.unmapVolumeFromSdc(volume.getPath(), sdc.getId()); + } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) { + final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId(), null); + LOGGER.debug("Revoking access for PowerFlex template volume: " + templatePoolRef.getInstallPath()); + + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress()); + if (sdc == null) { + throw new CloudRuntimeException("Unable to revoke access for template: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); + } + + client.unmapVolumeFromSdc(templatePoolRef.getInstallPath(), sdc.getId()); + } else if (DataObjectType.SNAPSHOT.equals(dataObject.getType())) { + SnapshotInfo snapshot = (SnapshotInfo) dataObject; + LOGGER.debug("Revoking access for PowerFlex volume snapshot: " + 
snapshot.getPath()); + + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress()); + if (sdc == null) { + throw new CloudRuntimeException("Unable to revoke access for snapshot: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); + } + + client.unmapVolumeFromSdc(snapshot.getPath(), sdc.getId()); + } + } catch (Exception e) { + LOGGER.warn("Failed to revoke access due to: " + e.getMessage(), e); + } + } + + @Override + public long getUsedBytes(StoragePool storagePool) { + long usedSpaceBytes = 0; + // Volumes + List volumes = volumeDao.findByPoolIdAndState(storagePool.getId(), Volume.State.Ready); + if (volumes != null) { + for (VolumeVO volume : volumes) { + usedSpaceBytes += volume.getSize(); + + long vmSnapshotChainSize = volume.getVmSnapshotChainSize() == null ? 0 : volume.getVmSnapshotChainSize(); + usedSpaceBytes += vmSnapshotChainSize; + } + } + + //Snapshots + List snapshots = snapshotDataStoreDao.listByStoreIdAndState(storagePool.getId(), ObjectInDataStoreStateMachine.State.Ready); + if (snapshots != null) { + for (SnapshotDataStoreVO snapshot : snapshots) { + usedSpaceBytes += snapshot.getSize(); + } + } + + // Templates + List templates = vmTemplatePoolDao.listByPoolIdAndState(storagePool.getId(), ObjectInDataStoreStateMachine.State.Ready); + if (templates != null) { + for (VMTemplateStoragePoolVO template : templates) { + usedSpaceBytes += template.getTemplateSize(); + } + } + + LOGGER.debug("Used/Allocated storage space (in bytes): " + String.valueOf(usedSpaceBytes)); + + return usedSpaceBytes; + } + + @Override + public long getUsedIops(StoragePool storagePool) { + return 0; + } + + @Override + public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) { + return ((dataObject != null && dataObject.getSize() != null) ? 
dataObject.getSize() : 0); + } + + @Override + public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool) { + if (templateInfo == null || storagePool == null) { + return 0; + } + + VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(), templateInfo.getId(), null); + if (templatePoolRef != null) { + // Template exists on this primary storage, do not require additional space + return 0; + } + + return getDataObjectSizeIncludingHypervisorSnapshotReserve(templateInfo, storagePool); + } + + @Override + public Map getCapabilities() { + Map mapCapabilities = new HashMap<>(); + mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); + mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString()); + mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString()); + mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString()); + return mapCapabilities; + } + + @Override + public ChapInfo getChapInfo(DataObject dataObject) { + return null; + } + + @Override + public DataTO getTO(DataObject data) { + return null; + } + + @Override + public DataStoreTO getStoreTO(DataStore store) { + return null; + } + + @Override + public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback callback) { + LOGGER.debug("Taking PowerFlex volume snapshot"); + + Preconditions.checkArgument(snapshotInfo != null, "snapshotInfo cannot be null"); + + VolumeInfo volumeInfo = snapshotInfo.getBaseVolume(); + Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null"); + + VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId()); + + long storagePoolId = volumeVO.getPoolId(); + Preconditions.checkArgument(storagePoolId > 0, "storagePoolId should be > 0"); + + StoragePoolVO storagePool = 
storagePoolDao.findById(storagePoolId); + Preconditions.checkArgument(storagePool != null && storagePool.getHostAddress() != null, "storagePool and host address should not be null"); + + CreateCmdResult result; + + try { + SnapshotObjectTO snapshotObjectTo = (SnapshotObjectTO)snapshotInfo.getTO(); + + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final String scaleIOVolumeId = volumeVO.getPath(); + String snapshotName = String.format("%s-%s-%s-%s", ScaleIOUtil.SNAPSHOT_PREFIX, snapshotInfo.getId(), + storagePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); + + org.apache.cloudstack.storage.datastore.api.Volume scaleIOVolume = null; + scaleIOVolume = client.takeSnapshot(scaleIOVolumeId, snapshotName); + + if (scaleIOVolume == null) { + throw new CloudRuntimeException("Failed to take snapshot on PowerFlex cluster"); + } + + snapshotObjectTo.setPath(scaleIOVolume.getId()); + CreateObjectAnswer createObjectAnswer = new CreateObjectAnswer(snapshotObjectTo); + result = new CreateCmdResult(null, createObjectAnswer); + result.setResult(null); + } catch (Exception e) { + String errMsg = "Unable to take PowerFlex volume snapshot for volume: " + volumeInfo.getId() + " due to " + e.getMessage(); + LOGGER.warn(errMsg); + result = new CreateCmdResult(null, new CreateObjectAnswer(e.toString())); + result.setResult(e.toString()); + } + + callback.complete(result); + } + + @Override + public void revertSnapshot(SnapshotInfo snapshot, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback callback) { + LOGGER.debug("Reverting to PowerFlex volume snapshot"); + + Preconditions.checkArgument(snapshot != null, "snapshotInfo cannot be null"); + + VolumeInfo volumeInfo = snapshot.getBaseVolume(); + Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null"); + + VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId()); + + try { + if (volumeVO == null || volumeVO.getRemoved() != null) { + String 
errMsg = "The volume that the snapshot belongs to no longer exists."; + CommandResult commandResult = new CommandResult(); + commandResult.setResult(errMsg); + callback.complete(commandResult); + return; + } + + long storagePoolId = volumeVO.getPoolId(); + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + String snapshotVolumeId = snapshot.getPath(); + final String destVolumeId = volumeVO.getPath(); + client.revertSnapshot(snapshotVolumeId, destVolumeId); + + CommandResult commandResult = new CommandResult(); + callback.complete(commandResult); + } catch (Exception ex) { + LOGGER.debug("Unable to revert to PowerFlex snapshot: " + snapshot.getId(), ex); + throw new CloudRuntimeException(ex.getMessage()); + } + } + + private String createVolume(VolumeInfo volumeInfo, long storagePoolId) { + LOGGER.debug("Creating PowerFlex volume"); + + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + + Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null"); + Preconditions.checkArgument(storagePoolId > 0, "storagePoolId should be > 0"); + Preconditions.checkArgument(storagePool != null && storagePool.getHostAddress() != null, "storagePool and host address should not be null"); + + try { + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final String scaleIOStoragePoolId = storagePool.getPath(); + final Long sizeInBytes = volumeInfo.getSize(); + final long sizeInGb = (long) Math.ceil(sizeInBytes / (1024.0 * 1024.0 * 1024.0)); + final String scaleIOVolumeName = String.format("%s-%s-%s-%s", ScaleIOUtil.VOLUME_PREFIX, volumeInfo.getId(), + storagePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); + + org.apache.cloudstack.storage.datastore.api.Volume scaleIOVolume = null; + scaleIOVolume = client.createVolume(scaleIOVolumeName, scaleIOStoragePoolId, (int) sizeInGb, volumeInfo.getProvisioningType()); + + if (scaleIOVolume == null) { + throw new 
CloudRuntimeException("Failed to create volume on PowerFlex cluster"); + } + + VolumeVO volume = volumeDao.findById(volumeInfo.getId()); + volume.set_iScsiName(scaleIOVolume.getId()); + volume.setPath(scaleIOVolume.getId()); + volume.setFolder(scaleIOVolume.getVtreeId()); + volume.setSize(scaleIOVolume.getSizeInKb() * 1024); + volume.setPoolType(Storage.StoragePoolType.PowerFlex); + volume.setFormat(Storage.ImageFormat.RAW); + volume.setPoolId(storagePoolId); + volumeDao.update(volume.getId(), volume); + + long capacityBytes = storagePool.getCapacityBytes(); + long usedBytes = storagePool.getUsedBytes(); + usedBytes += volume.getSize(); + storagePool.setUsedBytes(usedBytes > capacityBytes ? capacityBytes : usedBytes); + storagePoolDao.update(storagePoolId, storagePool); + + return volume.getPath(); + } catch (Exception e) { + String errMsg = "Unable to create PowerFlex Volume due to " + e.getMessage(); + LOGGER.warn(errMsg); + throw new CloudRuntimeException(errMsg, e); + } + } + + private String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId) { + LOGGER.debug("Creating PowerFlex template volume"); + + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + Preconditions.checkArgument(templateInfo != null, "templateInfo cannot be null"); + Preconditions.checkArgument(storagePoolId > 0, "storagePoolId should be > 0"); + Preconditions.checkArgument(storagePool != null && storagePool.getHostAddress() != null, "storagePool and host address should not be null"); + + try { + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final String scaleIOStoragePoolId = storagePool.getPath(); + final Long sizeInBytes = templateInfo.getSize(); + final long sizeInGb = (long) Math.ceil(sizeInBytes / (1024.0 * 1024.0 * 1024.0)); + final String scaleIOVolumeName = String.format("%s-%s-%s-%s", ScaleIOUtil.TEMPLATE_PREFIX, templateInfo.getId(), + storagePool.getUuid().split("-")[0].substring(4), 
ManagementServerImpl.customCsIdentifier.value()); + + org.apache.cloudstack.storage.datastore.api.Volume scaleIOVolume = null; + scaleIOVolume = client.createVolume(scaleIOVolumeName, scaleIOStoragePoolId, (int) sizeInGb, Storage.ProvisioningType.THIN); + + if (scaleIOVolume == null) { + throw new CloudRuntimeException("Failed to create template volume on PowerFlex cluster"); + } + + VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(storagePoolId, templateInfo.getId(), null); + templatePoolRef.setInstallPath(scaleIOVolume.getId()); + templatePoolRef.setLocalDownloadPath(scaleIOVolume.getId()); + templatePoolRef.setTemplateSize(scaleIOVolume.getSizeInKb() * 1024); + vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef); + + long capacityBytes = storagePool.getCapacityBytes(); + long usedBytes = storagePool.getUsedBytes(); + usedBytes += templatePoolRef.getTemplateSize(); + storagePool.setUsedBytes(usedBytes > capacityBytes ? capacityBytes : usedBytes); + storagePoolDao.update(storagePoolId, storagePool); + + return scaleIOVolume.getId(); + } catch (Exception e) { + String errMsg = "Unable to create PowerFlex template volume due to " + e.getMessage(); + LOGGER.warn(errMsg); + throw new CloudRuntimeException(errMsg, e); + } + } + + @Override + public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { + String scaleIOVolId = null; + String errMsg = null; + try { + if (dataObject.getType() == DataObjectType.VOLUME) { + LOGGER.debug("createAsync - creating volume"); + scaleIOVolId = createVolume((VolumeInfo) dataObject, dataStore.getId()); + } else if (dataObject.getType() == DataObjectType.TEMPLATE) { + LOGGER.debug("createAsync - creating template"); + scaleIOVolId = createTemplateVolume((TemplateInfo)dataObject, dataStore.getId()); + } else { + errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; + LOGGER.error(errMsg); + } + } catch (Exception 
ex) { + errMsg = ex.getMessage(); + LOGGER.error(errMsg); + if (callback == null) { + throw ex; + } + } + + if (callback != null) { + CreateCmdResult result = new CreateCmdResult(scaleIOVolId, new Answer(null, errMsg == null, errMsg)); + result.setResult(errMsg); + callback.complete(result); + } + } + + @Override + public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { + Preconditions.checkArgument(dataObject != null, "dataObject cannot be null"); + + long storagePoolId = dataStore.getId(); + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + Preconditions.checkArgument(storagePoolId > 0, "storagePoolId should be > 0"); + Preconditions.checkArgument(storagePool != null && storagePool.getHostAddress() != null, "storagePool and host address should not be null"); + + String errMsg = null; + String scaleIOVolumeId = null; + try { + boolean deleteResult = false; + if (dataObject.getType() == DataObjectType.VOLUME) { + LOGGER.debug("deleteAsync - deleting volume"); + scaleIOVolumeId = ((VolumeInfo) dataObject).getPath(); + } else if (dataObject.getType() == DataObjectType.SNAPSHOT) { + LOGGER.debug("deleteAsync - deleting snapshot"); + scaleIOVolumeId = ((SnapshotInfo) dataObject).getPath(); + } else if (dataObject.getType() == DataObjectType.TEMPLATE) { + LOGGER.debug("deleteAsync - deleting template"); + scaleIOVolumeId = ((TemplateInfo) dataObject).getInstallPath(); + } else { + errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to deleteAsync"; + } + + try { + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + deleteResult = client.deleteVolume(scaleIOVolumeId); + if (!deleteResult) { + errMsg = "Failed to delete PowerFlex volume with id: " + scaleIOVolumeId; + } + + long usedBytes = storagePool.getUsedBytes(); + usedBytes -= dataObject.getSize(); + storagePool.setUsedBytes(usedBytes < 0 ? 
0 : usedBytes); + storagePoolDao.update(storagePoolId, storagePool); + } catch (Exception e) { + errMsg = "Unable to delete PowerFlex volume: " + scaleIOVolumeId + " due to " + e.getMessage(); + LOGGER.warn(errMsg); + throw new CloudRuntimeException(errMsg, e); + } + } catch (Exception ex) { + errMsg = ex.getMessage(); + LOGGER.error(errMsg); + if (callback == null) { + throw ex; + } + } + + if (callback != null) { + CommandResult result = new CommandResult(); + result.setResult(errMsg); + callback.complete(result); + } + } + + @Override + public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback callback) { + copyAsync(srcData, destData, null, callback); + } + + @Override + public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { + Answer answer = null; + String errMsg = null; + + try { + DataStore srcStore = srcData.getDataStore(); + DataStore destStore = destData.getDataStore(); + if (srcStore.getRole() == DataStoreRole.Primary && (destStore.getRole() == DataStoreRole.Primary && destData.getType() == DataObjectType.VOLUME)) { + if (srcData.getType() == DataObjectType.TEMPLATE) { + answer = copyTemplateToVolume(srcData, destData, destHost); + if (answer == null) { + errMsg = "No answer for copying template to PowerFlex volume"; + } else if (!answer.getResult()) { + errMsg = answer.getDetails(); + } + } else if (srcData.getType() == DataObjectType.VOLUME) { + answer = migrateVolume(srcData, destData); + if (answer == null) { + errMsg = "No answer for migrate PowerFlex volume"; + } else if (!answer.getResult()) { + errMsg = answer.getDetails(); + } + } else { + errMsg = "Unsupported copy operation from src object: (" + srcData.getType() + ", " + srcData.getDataStore() + "), dest object: (" + + destData.getType() + ", " + destData.getDataStore() + ")"; + LOGGER.warn(errMsg); + } + } else { + errMsg = "Unsupported copy operation"; + } + } catch (Exception e) { + LOGGER.debug("Failed 
to copy due to " + e.getMessage(), e); + errMsg = e.toString(); + } + + CopyCommandResult result = new CopyCommandResult(null, answer); + result.setResult(errMsg); + callback.complete(result); + } + + private Answer copyTemplateToVolume(DataObject srcData, DataObject destData, Host destHost) { + // Copy PowerFlex/ScaleIO template to volume + LOGGER.debug("Initiating copy from PowerFlex template volume on host " + (destHost != null ? destHost.getId() : "")); /* fixed: parenthesize the ternary — '+' binds tighter than '!=', so the old form evaluated ("..." + destHost) != null (always true) and threw NPE on destHost.getId() when destHost was null */ + int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); + CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + + Answer answer = null; + EndPoint ep = destHost != null ? RemoteHostEndPoint.getHypervisorHostEndPoint(destHost) : selector.select(srcData.getDataStore()); + if (ep == null) { + String errorMsg = "No remote endpoint to send command, check if host or ssvm is down?"; + LOGGER.error(errorMsg); + answer = new Answer(cmd, false, errorMsg); + } else { + answer = ep.sendMessage(cmd); + } + + return answer; + } + + private Answer migrateVolume(DataObject srcData, DataObject destData) { + // Volume migration within same PowerFlex/ScaleIO cluster (with same System ID) + DataStore srcStore = srcData.getDataStore(); + DataStore destStore = destData.getDataStore(); + Answer answer = null; + try { + long srcPoolId = srcStore.getId(); + String srcPoolSystemId = null; + StoragePoolDetailVO srcPoolSystemIdDetail = storagePoolDetailsDao.findDetail(srcPoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID); + if (srcPoolSystemIdDetail != null) { + srcPoolSystemId = srcPoolSystemIdDetail.getValue(); + } + + long destPoolId = destStore.getId(); + String destPoolSystemId = null; + StoragePoolDetailVO destPoolSystemIdDetail = storagePoolDetailsDao.findDetail(destPoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID); + if (destPoolSystemIdDetail != null) { + destPoolSystemId = 
destPoolSystemIdDetail.getValue(); + } + + if (Strings.isNullOrEmpty(srcPoolSystemId) || Strings.isNullOrEmpty(destPoolSystemId)) { + throw new CloudRuntimeException("Failed to validate PowerFlex pools compatibilty for migration"); + } + + if (!srcPoolSystemId.equals(destPoolSystemId)) { + throw new CloudRuntimeException("Volume migration across different PowerFlex clusters is not supported"); + } + + final ScaleIOGatewayClient client = getScaleIOClient(srcPoolId); + final String srcVolumeId = ((VolumeInfo) srcData).getPath(); + final StoragePoolVO destStoragePool = storagePoolDao.findById(destPoolId); + final String destStoragePoolId = destStoragePool.getPath(); + boolean migrateStatus = client.migrateVolume(srcVolumeId, destStoragePoolId); + if (migrateStatus) { + if (srcData.getId() != destData.getId()) { + VolumeVO destVolume = volumeDao.findById(destData.getId()); + destVolume.set_iScsiName(srcVolumeId); + destVolume.setPath(srcVolumeId); + volumeDao.update(destData.getId(), destVolume); + + VolumeVO srcVolume = volumeDao.findById(srcData.getId()); + srcVolume.set_iScsiName(null); + srcVolume.setPath(null); + srcVolume.setFolder(null); + volumeDao.update(srcData.getId(), srcVolume); + } else { + // Live migrate volume + VolumeVO volume = volumeDao.findById(srcData.getId()); + Long oldPoolId = volume.getPoolId(); + volume.setPoolId(destPoolId); + volume.setLastPoolId(oldPoolId); + volumeDao.update(srcData.getId(), volume); + } + + String newVolumeName = String.format("%s-%s-%s-%s", ScaleIOUtil.VOLUME_PREFIX, destData.getId(), + destStoragePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); + client.renameVolume(srcVolumeId, newVolumeName); + + List snapshots = snapshotDao.listByVolumeId(srcData.getId()); + if (CollectionUtils.isNotEmpty(snapshots)) { + for (SnapshotVO snapshot : snapshots) { + SnapshotDataStoreVO snapshotStore = snapshotDataStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Primary); + if 
(snapshotStore == null) { + continue; + } + + snapshotStore.setDataStoreId(destPoolId); + snapshotDataStoreDao.update(snapshotStore.getId(), snapshotStore); + + String snapshotVolumeId = snapshotStore.getInstallPath(); + String newSnapshotName = String.format("%s-%s-%s-%s", ScaleIOUtil.SNAPSHOT_PREFIX, snapshot.getId(), + destStoragePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); + client.renameVolume(snapshotVolumeId, newSnapshotName); + } + } + + answer = new Answer(null, true, null); + } else { + String errorMsg = "Failed to migrate PowerFlex volume: " + srcData.getId() + " to storage pool " + destPoolId; + LOGGER.debug(errorMsg); + answer = new Answer(null, false, errorMsg); + } + } catch (Exception e) { + LOGGER.error("Failed to migrate PowerFlex volume: " + srcData.getId() + " due to: " + e.getMessage()); + answer = new Answer(null, false, e.getMessage()); + } + + return answer; + } + + @Override + public boolean canCopy(DataObject srcData, DataObject destData) { + DataStore srcStore = srcData.getDataStore(); /* fixed: was destData.getDataStore() — the source store must come from srcData, otherwise the src-pool PowerFlex check below inspects the destination pool twice */ + DataStore destStore = destData.getDataStore(); + if ((srcStore.getRole() == DataStoreRole.Primary && (srcData.getType() == DataObjectType.TEMPLATE || srcData.getType() == DataObjectType.VOLUME)) + && (destStore.getRole() == DataStoreRole.Primary && destData.getType() == DataObjectType.VOLUME)) { + StoragePoolVO srcPoolVO = storagePoolDao.findById(srcStore.getId()); + StoragePoolVO destPoolVO = storagePoolDao.findById(destStore.getId()); + if (srcPoolVO != null && srcPoolVO.getPoolType() == Storage.StoragePoolType.PowerFlex + && destPoolVO != null && destPoolVO.getPoolType() == Storage.StoragePoolType.PowerFlex) { + return true; + } + } + return false; + } + + private void resizeVolume(VolumeInfo volumeInfo) { + LOGGER.debug("Resizing PowerFlex volume"); + + Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null"); + + try { + String scaleIOVolumeId = volumeInfo.getPath(); + Long 
storagePoolId = volumeInfo.getPoolId(); + + ResizeVolumePayload payload = (ResizeVolumePayload)volumeInfo.getpayload(); + long newSizeInBytes = payload.newSize != null ? payload.newSize : volumeInfo.getSize(); + // Only increase size is allowed and size should be specified in granularity of 8 GB + if (newSizeInBytes <= volumeInfo.getSize()) { + throw new CloudRuntimeException("Only increase size is allowed for volume: " + volumeInfo.getName()); + } + + org.apache.cloudstack.storage.datastore.api.Volume scaleIOVolume = null; + long newSizeInGB = newSizeInBytes / (1024 * 1024 * 1024); + long newSizeIn8gbBoundary = (long) (Math.ceil(newSizeInGB / 8.0) * 8.0); + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + scaleIOVolume = client.resizeVolume(scaleIOVolumeId, (int) newSizeIn8gbBoundary); + if (scaleIOVolume == null) { + throw new CloudRuntimeException("Failed to resize volume: " + volumeInfo.getName()); + } + + VolumeVO volume = volumeDao.findById(volumeInfo.getId()); + long oldVolumeSize = volume.getSize(); + volume.setSize(scaleIOVolume.getSizeInKb() * 1024); + volumeDao.update(volume.getId(), volume); + + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + long capacityBytes = storagePool.getCapacityBytes(); + long usedBytes = storagePool.getUsedBytes(); + + long newVolumeSize = volume.getSize(); + usedBytes += newVolumeSize - oldVolumeSize; + storagePool.setUsedBytes(usedBytes > capacityBytes ? 
capacityBytes : usedBytes); + storagePoolDao.update(storagePoolId, storagePool); + } catch (Exception e) { + String errMsg = "Unable to resize PowerFlex volume: " + volumeInfo.getId() + " due to " + e.getMessage(); + LOGGER.warn(errMsg); + throw new CloudRuntimeException(errMsg, e); + } + } + + @Override + public void resize(DataObject dataObject, AsyncCompletionCallback callback) { + String scaleIOVolumeId = null; + String errMsg = null; + try { + if (dataObject.getType() == DataObjectType.VOLUME) { + scaleIOVolumeId = ((VolumeInfo) dataObject).getPath(); + resizeVolume((VolumeInfo) dataObject); + } else { + errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to resize"; + } + } catch (Exception ex) { + errMsg = ex.getMessage(); + LOGGER.error(errMsg); + if (callback == null) { + throw ex; + } + } + + if (callback != null) { + CreateCmdResult result = new CreateCmdResult(scaleIOVolumeId, new Answer(null, errMsg == null, errMsg)); + result.setResult(errMsg); + callback.complete(result); + } + } + + @Override + public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) { + } + + @Override + public boolean canProvideStorageStats() { + return true; + } + + @Override + public Pair getStorageStats(StoragePool storagePool) { + Preconditions.checkArgument(storagePool != null, "storagePool cannot be null"); + + try { + final ScaleIOGatewayClient client = getScaleIOClient(storagePool.getId()); + StoragePoolStatistics poolStatistics = client.getStoragePoolStatistics(storagePool.getPath()); + if (poolStatistics != null && poolStatistics.getNetMaxCapacityInBytes() != null && poolStatistics.getNetUsedCapacityInBytes() != null) { + Long capacityBytes = poolStatistics.getNetMaxCapacityInBytes(); + Long usedBytes = poolStatistics.getNetUsedCapacityInBytes(); + return new Pair(capacityBytes, usedBytes); + } + } catch (Exception e) { + String errMsg = "Unable to get storage stats for the pool: " + 
storagePool.getId() + " due to " + e.getMessage(); + LOGGER.warn(errMsg); + throw new CloudRuntimeException(errMsg, e); + } + + return null; + } + + @Override + public boolean canProvideVolumeStats() { + return true; + } + + @Override + public Pair getVolumeStats(StoragePool storagePool, String volumeId) { + Preconditions.checkArgument(storagePool != null, "storagePool cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "volumeId cannot be null"); + + try { + final ScaleIOGatewayClient client = getScaleIOClient(storagePool.getId()); + VolumeStatistics volumeStatistics = client.getVolumeStatistics(volumeId); + if (volumeStatistics != null) { + Long provisionedSizeInBytes = volumeStatistics.getNetProvisionedAddressesInBytes(); + Long allocatedSizeInBytes = volumeStatistics.getAllocatedSizeInBytes(); + return new Pair(provisionedSizeInBytes, allocatedSizeInBytes); + } + } catch (Exception e) { + String errMsg = "Unable to get stats for the volume: " + volumeId + " in the pool: " + storagePool.getId() + " due to " + e.getMessage(); + LOGGER.warn(errMsg); + throw new CloudRuntimeException(errMsg, e); + } + + return null; + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + if (host == null || pool == null) { + return false; + } + + try { + final ScaleIOGatewayClient client = getScaleIOClient(pool.getId()); + return client.isSdcConnected(host.getPrivateIpAddress()); + } catch (Exception e) { + LOGGER.warn("Unable to check the host: " + host.getId() + " access to storage pool: " + pool.getId() + " due to " + e.getMessage(), e); + return false; + } + } + + private void alertHostSdcDisconnection(Host host) { + if (host == null) { + return; + } + + LOGGER.warn("SDC not connected on the host: " + host.getId()); + String msg = "SDC not connected on the host: " + host.getId() + ", reconnect the SDC to MDM"; + alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), 
"SDC disconnected on host: " + host.getUuid(), msg); + } +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java new file mode 100644 index 000000000000..a8a3b3dd6a5e --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java @@ -0,0 +1,460 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.lifecycle; + +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URLDecoder; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import javax.inject.Inject; + +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.commons.collections.CollectionUtils; +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.apache.log4j.Logger; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.capacity.CapacityManager; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.resource.ResourceManager; +import 
com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolAutomation; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.template.TemplateManager; +import com.cloud.utils.UriUtils; +import com.cloud.utils.crypt.DBEncryptionUtil; +import com.cloud.utils.exception.CloudRuntimeException; + +public class ScaleIOPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle { + private static final Logger LOGGER = Logger.getLogger(ScaleIOPrimaryDataStoreLifeCycle.class); + + @Inject + private ClusterDao clusterDao; + @Inject + private PrimaryDataStoreDao primaryDataStoreDao; + @Inject + private StoragePoolHostDao storagePoolHostDao; + @Inject + private PrimaryDataStoreHelper dataStoreHelper; + @Inject + private ResourceManager resourceManager; + @Inject + private StorageManager storageMgr; + @Inject + private StoragePoolAutomation storagePoolAutomation; + @Inject + private CapacityManager capacityMgr; + @Inject + private TemplateManager templateMgr; + @Inject + private AgentManager agentMgr; + + public ScaleIOPrimaryDataStoreLifeCycle() { + } + + private org.apache.cloudstack.storage.datastore.api.StoragePool findStoragePool(String url, String username, String password, String storagePoolName) { + try { + final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.value(); + ScaleIOGatewayClient client = ScaleIOGatewayClient.getClient(url, username, password, false, clientTimeout); + List storagePools = client.listStoragePools(); + for (org.apache.cloudstack.storage.datastore.api.StoragePool pool : storagePools) { + if (pool.getName().equals(storagePoolName)) { + LOGGER.info("Found PowerFlex storage pool: " + storagePoolName); + final org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics 
poolStatistics = client.getStoragePoolStatistics(pool.getId()); + pool.setStatistics(poolStatistics); + + String systemId = client.getSystemId(pool.getProtectionDomainId()); + pool.setSystemId(systemId); + return pool; + } + } + } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { + LOGGER.error("Failed to add storage pool", e); + throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to validate storage pool"); + } + throw new CloudRuntimeException("Failed to find the provided storage pool name in discovered PowerFlex storage pools"); + } + + @SuppressWarnings("unchecked") + @Override + public DataStore initialize(Map dsInfos) { + String url = (String) dsInfos.get("url"); + Long zoneId = (Long) dsInfos.get("zoneId"); + Long podId = (Long)dsInfos.get("podId"); + Long clusterId = (Long)dsInfos.get("clusterId"); + String dataStoreName = (String) dsInfos.get("name"); + String providerName = (String) dsInfos.get("providerName"); + Long capacityBytes = (Long)dsInfos.get("capacityBytes"); + Long capacityIops = (Long)dsInfos.get("capacityIops"); + String tags = (String)dsInfos.get("tags"); + Map details = (Map) dsInfos.get("details"); + + if (zoneId == null) { + throw new CloudRuntimeException("Zone Id must be specified."); + } + + PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters(); + if (clusterId != null) { + // Primary datastore is cluster-wide, check and set the podId and clusterId parameters + if (podId == null) { + throw new CloudRuntimeException("Pod Id must also be specified when the Cluster Id is specified for Cluster-wide primary storage."); + } + + Hypervisor.HypervisorType hypervisorType = getHypervisorTypeForCluster(clusterId); + if (!isSupportedHypervisorType(hypervisorType)) { + throw new CloudRuntimeException("Unsupported hypervisor type: " + hypervisorType.toString()); + } + + parameters.setPodId(podId); + parameters.setClusterId(clusterId); + } else if (podId != null) { 
+ throw new CloudRuntimeException("Cluster Id must also be specified when the Pod Id is specified for Cluster-wide primary storage."); + } + + URI uri = null; + try { + uri = new URI(UriUtils.encodeURIComponent(url)); + if (uri.getScheme() == null || !uri.getScheme().equalsIgnoreCase("powerflex")) { + throw new InvalidParameterValueException("scheme is invalid for url: " + url + ", should be powerflex://username:password@gatewayhost/pool"); + } + } catch (Exception ignored) { + throw new InvalidParameterValueException(url + " is not a valid uri"); + } + + String storagePoolName = null; + try { + storagePoolName = URLDecoder.decode(uri.getPath(), "UTF-8"); + } catch (UnsupportedEncodingException e) { + LOGGER.error("[ignored] we are on a platform not supporting \"UTF-8\"!?!", e); + } + if (storagePoolName == null) { // if decoding fails, use getPath() anyway + storagePoolName = uri.getPath(); + } + storagePoolName = storagePoolName.replaceFirst("/", ""); + + final String storageHost = uri.getHost(); + final int port = uri.getPort(); + String gatewayApiURL = null; + if (port == -1) { + gatewayApiURL = String.format("https://%s/api", storageHost); + } else { + gatewayApiURL = String.format("https://%s:%d/api", storageHost, port); + } + + final String userInfo = uri.getUserInfo(); + final String gatewayUsername = userInfo.split(":")[0]; + final String gatewayPassword = userInfo.split(":")[1]; + + List storagePoolVO = primaryDataStoreDao.findPoolsByProvider(ScaleIOUtil.PROVIDER_NAME); + if (CollectionUtils.isNotEmpty(storagePoolVO)) { + for (StoragePoolVO poolVO : storagePoolVO) { + Map poolDetails = primaryDataStoreDao.getDetails(poolVO.getId()); + String poolUrl = poolDetails.get(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT); + String poolName = poolDetails.get(ScaleIOGatewayClient.STORAGE_POOL_NAME); + + if (gatewayApiURL.equals(poolUrl) && storagePoolName.equals(poolName)) { + throw new IllegalArgumentException("PowerFlex storage pool: " + storagePoolName + " already 
exists, please specify other storage pool."); + } + } + } + + final org.apache.cloudstack.storage.datastore.api.StoragePool scaleIOPool = this.findStoragePool(gatewayApiURL, + gatewayUsername, gatewayPassword, storagePoolName); + + parameters.setZoneId(zoneId); + parameters.setName(dataStoreName); + parameters.setProviderName(providerName); + parameters.setManaged(true); + parameters.setHost(storageHost); + parameters.setPath(scaleIOPool.getId()); + parameters.setUserInfo(userInfo); + parameters.setType(Storage.StoragePoolType.PowerFlex); + parameters.setHypervisorType(Hypervisor.HypervisorType.KVM); + parameters.setUuid(UUID.randomUUID().toString()); + parameters.setTags(tags); + + StoragePoolStatistics poolStatistics = scaleIOPool.getStatistics(); + if (poolStatistics != null) { + if (capacityBytes == null) { + parameters.setCapacityBytes(poolStatistics.getNetMaxCapacityInBytes()); + } + parameters.setUsedBytes(poolStatistics.getNetUsedCapacityInBytes()); + } + + if (capacityBytes != null) { + parameters.setCapacityBytes(capacityBytes); + } + + if (capacityIops != null) { + parameters.setCapacityIops(capacityIops); + } + + details.put(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT, gatewayApiURL); + details.put(ScaleIOGatewayClient.GATEWAY_API_USERNAME, DBEncryptionUtil.encrypt(gatewayUsername)); + details.put(ScaleIOGatewayClient.GATEWAY_API_PASSWORD, DBEncryptionUtil.encrypt(gatewayPassword)); + details.put(ScaleIOGatewayClient.STORAGE_POOL_NAME, storagePoolName); + details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, scaleIOPool.getSystemId()); + parameters.setDetails(details); + + return dataStoreHelper.createPrimaryDataStore(parameters); + } + + @Override + public boolean attachCluster(DataStore dataStore, ClusterScope scope) { + final ClusterVO cluster = clusterDao.findById(scope.getScopeId()); + if (!isSupportedHypervisorType(cluster.getHypervisorType())) { + throw new CloudRuntimeException("Unsupported hypervisor type: " + 
cluster.getHypervisorType().toString()); + } + + List connectedSdcIps = null; + try { + Map dataStoreDetails = primaryDataStoreDao.getDetails(dataStore.getId()); + final String url = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT); + final String encryptedUsername = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_USERNAME); + final String username = DBEncryptionUtil.decrypt(encryptedUsername); + final String encryptedPassword = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_PASSWORD); + final String password = DBEncryptionUtil.decrypt(encryptedPassword); + final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.value(); + ScaleIOGatewayClient client = ScaleIOGatewayClient.getClient(url, username, password, false, clientTimeout); + connectedSdcIps = client.listConnectedSdcIps(); + } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { + LOGGER.error("Failed to create storage pool", e); + throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to create storage pool"); + } + + if (connectedSdcIps == null || connectedSdcIps.isEmpty()) { + LOGGER.debug("No connected SDCs found for the PowerFlex storage pool"); + throw new CloudRuntimeException("Failed to create storage pool as connected SDCs not found"); + } + + PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) dataStore; + + List hostsInCluster = resourceManager.listAllUpAndEnabledHosts(Host.Type.Routing, primaryDataStoreInfo.getClusterId(), + primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getDataCenterId()); + if (hostsInCluster.isEmpty()) { + primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); + throw new CloudRuntimeException("No hosts are Up to associate a storage pool with in cluster: " + primaryDataStoreInfo.getClusterId()); + } + + LOGGER.debug("Attaching the pool to each of the hosts in the cluster: " + primaryDataStoreInfo.getClusterId()); + List poolHosts = new ArrayList(); + 
for (HostVO host : hostsInCluster) { + try { + if (connectedSdcIps.contains(host.getPrivateIpAddress())) { + storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId()); + poolHosts.add(host); + } + } catch (Exception e) { + LOGGER.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e); + } + } + + if (poolHosts.isEmpty()) { + LOGGER.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'."); + primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); + throw new CloudRuntimeException("Failed to create storage pool in the cluster: " + primaryDataStoreInfo.getClusterId() + " as it is not accessible to hosts"); + } + + dataStoreHelper.attachCluster(dataStore); + return true; + } + + @Override + public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { + return true; + } + + @Override + public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) { + if (!isSupportedHypervisorType(hypervisorType)) { + throw new CloudRuntimeException("Unsupported hypervisor type: " + hypervisorType.toString()); + } + + List connectedSdcIps = null; + try { + Map dataStoreDetails = primaryDataStoreDao.getDetails(dataStore.getId()); + String url = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT); + String encryptedUsername = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_USERNAME); + final String username = DBEncryptionUtil.decrypt(encryptedUsername); + String encryptedPassword = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_PASSWORD); + final String password = DBEncryptionUtil.decrypt(encryptedPassword); + final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.value(); + ScaleIOGatewayClient client = ScaleIOGatewayClient.getClient(url, username, password, false, clientTimeout); + connectedSdcIps = client.listConnectedSdcIps(); + } catch 
(NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { + LOGGER.error("Failed to create storage pool", e); + throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to create storage pool"); + } + + if (connectedSdcIps == null || connectedSdcIps.isEmpty()) { + LOGGER.debug("No connected SDCs found for the PowerFlex storage pool"); + throw new CloudRuntimeException("Failed to create storage pool as connected SDCs not found"); + } + + LOGGER.debug("Attaching the pool to each of the hosts in the zone: " + scope.getScopeId()); + List hosts = resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); + List poolHosts = new ArrayList(); + for (HostVO host : hosts) { + try { + if (connectedSdcIps.contains(host.getPrivateIpAddress())) { + storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + poolHosts.add(host); + } + } catch (Exception e) { + LOGGER.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + } + } + if (poolHosts.isEmpty()) { + LOGGER.warn("No host can access storage pool " + dataStore + " in this zone."); + primaryDataStoreDao.expunge(dataStore.getId()); + throw new CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts."); + } + + dataStoreHelper.attachZone(dataStore); + return true; + } + + @Override + public boolean maintain(DataStore store) { + storagePoolAutomation.maintain(store); + dataStoreHelper.maintain(store); + return true; + } + + @Override + public boolean cancelMaintain(DataStore store) { + dataStoreHelper.cancelMaintain(store); + storagePoolAutomation.cancelMaintain(store); + return true; + } + + @Override + public void enableStoragePool(DataStore dataStore) { + dataStoreHelper.enable(dataStore); + } + + @Override + public void disableStoragePool(DataStore dataStore) { + dataStoreHelper.disable(dataStore); + } + + @Override + public boolean deleteDataStore(DataStore 
dataStore) { + StoragePool storagePool = (StoragePool)dataStore; + StoragePoolVO storagePoolVO = primaryDataStoreDao.findById(storagePool.getId()); + if (storagePoolVO == null) { + return false; + } + + List unusedTemplatesInPool = templateMgr.getUnusedTemplatesInPool(storagePoolVO); + for (VMTemplateStoragePoolVO templatePoolVO : unusedTemplatesInPool) { + if (templatePoolVO.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { + templateMgr.evictTemplateFromStoragePool(templatePoolVO); + } + } + + List poolHostVOs = storagePoolHostDao.listByPoolId(dataStore.getId()); + for (StoragePoolHostVO poolHostVO : poolHostVOs) { + DeleteStoragePoolCommand deleteStoragePoolCommand = new DeleteStoragePoolCommand(storagePool); + final Answer answer = agentMgr.easySend(poolHostVO.getHostId(), deleteStoragePoolCommand); + if (answer != null && answer.getResult()) { + LOGGER.info("Successfully deleted storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId()); + } else { + if (answer != null) { + LOGGER.error("Failed to delete storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId() + " , result: " + answer.getResult()); + } else { + LOGGER.error("Failed to delete storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId()); + } + } + } + + return dataStoreHelper.deletePrimaryDataStore(dataStore); + } + + @Override + public boolean migrateToObjectStore(DataStore store) { + return false; + } + + @Override + public void updateStoragePool(StoragePool storagePool, Map details) { + String capacityBytes = details.get(PrimaryDataStoreLifeCycle.CAPACITY_BYTES); + StoragePoolVO storagePoolVO = primaryDataStoreDao.findById(storagePool.getId()); + + try { + if (capacityBytes == null || capacityBytes.isBlank()) { + return; + } + + long usedBytes = capacityMgr.getUsedBytes(storagePoolVO); + if (Long.parseLong(capacityBytes) < usedBytes) { + throw new CloudRuntimeException("Cannot reduce the number of bytes 
for this storage pool as it would lead to an insufficient number of bytes"); + } + + primaryDataStoreDao.updateCapacityBytes(storagePool.getId(), Long.parseLong(capacityBytes)); + LOGGER.info("Storage pool successfully updated"); + } catch (Throwable e) { + throw new CloudRuntimeException("Failed to update the storage pool" + e); + } + } + + private Hypervisor.HypervisorType getHypervisorTypeForCluster(long clusterId) { + ClusterVO cluster = clusterDao.findById(clusterId); + if (cluster == null) { + throw new CloudRuntimeException("Unable to locate the specified cluster: " + clusterId); + } + + return cluster.getHypervisorType(); + } + + private static boolean isSupportedHypervisorType(Hypervisor.HypervisorType hypervisorType) { + return Hypervisor.HypervisorType.KVM.equals(hypervisorType); + } +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java new file mode 100644 index 000000000000..e27f8bd2608a --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
package org.apache.cloudstack.storage.datastore.provider;

import java.net.URISyntaxException;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.util.Map;

import javax.inject.Inject;

import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.log4j.Logger;

import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.ModifyStoragePoolAnswer;
import com.cloud.agent.api.ModifyStoragePoolCommand;
import com.cloud.alert.AlertManager;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.crypt.DBEncryptionUtil;
import com.cloud.utils.exception.CloudRuntimeException;

/**
 * Host listener for PowerFlex/ScaleIO pools: on host connect it verifies the
 * host's SDC is connected to the MDM (alerting otherwise), records the
 * pool-host association, and sends a ModifyStoragePoolCommand to the agent.
 */
public class ScaleIOHostListener implements HypervisorHostListener {
    private static final Logger s_logger = Logger.getLogger(ScaleIOHostListener.class);

    @Inject private AgentManager _agentMgr;
    @Inject private AlertManager _alertMgr;
    @Inject private DataStoreManager _dataStoreMgr;
    @Inject private HostDao _hostDao;
    @Inject private StoragePoolHostDao _storagePoolHostDao;
    @Inject private PrimaryDataStoreDao _primaryDataStoreDao;

    @Override
    public boolean hostAdded(long hostId) {
        return true;
    }

    @Override
    public boolean hostConnect(long hostId, long poolId) {
        HostVO host = _hostDao.findById(hostId);
        if (host == null) {
            s_logger.error("Failed to add host by HostListener as host was not found with id : " + hostId);
            return false;
        }

        // A disconnected SDC means the host cannot see PowerFlex volumes at
        // all - raise an alert and refuse the connection.
        if (!isHostSdcConnected(host.getPrivateIpAddress(), poolId)) {
            s_logger.warn("SDC not connected on the host: " + hostId);
            String msg = "SDC not connected on the host: " + hostId + ", reconnect the SDC to MDM and restart agent";
            _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC disconnected on host: " + host.getUuid(), msg);
            return false;
        }

        StoragePoolHostVO storagePoolHost = _storagePoolHostDao.findByPoolHost(poolId, hostId);
        if (storagePoolHost == null) {
            storagePoolHost = new StoragePoolHostVO(poolId, hostId, "");
            _storagePoolHostDao.persist(storagePoolHost);
        }

        StoragePool storagePool = (StoragePool) _dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
        ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool);
        sendModifyStoragePoolCommand(cmd, storagePool, hostId);
        return true;
    }

    /**
     * Asks the pool's PowerFlex gateway whether the SDC at the given host IP
     * is connected to the MDM.
     *
     * @throws CloudRuntimeException if the gateway cannot be reached
     */
    private boolean isHostSdcConnected(String hostIpAddress, long poolId) {
        try {
            Map<String, String> dataStoreDetails = _primaryDataStoreDao.getDetails(poolId);
            final String url = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT);
            final String encryptedUsername = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_USERNAME);
            final String username = DBEncryptionUtil.decrypt(encryptedUsername);
            final String encryptedPassword = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_PASSWORD);
            final String password = DBEncryptionUtil.decrypt(encryptedPassword);
            final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.valueIn(poolId);
            ScaleIOGatewayClient client = ScaleIOGatewayClient.getClient(url, username, password, false, clientTimeout);
            return client.isSdcConnected(hostIpAddress);
        } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) {
            s_logger.error("Failed to check host sdc connection", e);
            // Preserve the cause so the gateway failure is diagnosable.
            throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to check host sdc connection", e);
        }
    }

    // Sends the ModifyStoragePoolCommand to the host agent, alerting and
    // failing loudly when the agent cannot attach the pool.
    private void sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) {
        Answer answer = _agentMgr.easySend(hostId, cmd);

        if (answer == null) {
            throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getId() + ")");
        }

        if (!answer.getResult()) {
            String msg = "Unable to attach storage pool " + storagePool.getId() + " to host " + hostId;

            _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg);

            throw new CloudRuntimeException("Unable to establish a connection from agent to storage pool " + storagePool.getId() + " due to " + answer.getDetails() +
                    " (" + storagePool.getId() + ")");
        }

        // Only enforced when -ea is enabled; a wrong answer type indicates an
        // agent-side programming error, not an operational failure.
        assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; Pool = " + storagePool.getId() + " Host = " + hostId;

        s_logger.info("Connection established between storage pool " + storagePool + " and host: " + hostId);
    }

    @Override
    public boolean hostDisconnected(long hostId, long poolId) {
        StoragePoolHostVO storagePoolHost = _storagePoolHostDao.findByPoolHost(poolId, hostId);
        if (storagePoolHost != null) {
            _storagePoolHostDao.deleteStoragePoolHostDetails(hostId, poolId);
        }

        return true;
    }

    @Override
    public boolean hostAboutToBeRemoved(long hostId) {
        return true;
    }

    @Override
    public boolean hostRemoved(long hostId, long clusterId) {
        return true;
    }
}
package org.apache.cloudstack.storage.datastore.provider;

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
import org.apache.cloudstack.storage.datastore.driver.ScaleIOPrimaryDataStoreDriver;
import org.apache.cloudstack.storage.datastore.lifecycle.ScaleIOPrimaryDataStoreLifeCycle;
import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil;
import org.apache.log4j.Logger;

import com.cloud.utils.component.ComponentContext;

/**
 * Primary data store provider for Dell EMC PowerFlex/ScaleIO. Wires together
 * the lifecycle, driver and host-listener components, all of which are
 * instantiated via Spring injection in {@link #configure(Map)}.
 */
public class ScaleIOPrimaryDatastoreProvider implements PrimaryDataStoreProvider {
    private static final Logger LOGGER = Logger.getLogger(ScaleIOPrimaryDatastoreProvider.class);

    private DataStoreLifeCycle lifeCycle;
    private PrimaryDataStoreDriver driver;
    private HypervisorHostListener listener;

    @Override
    public DataStoreLifeCycle getDataStoreLifeCycle() {
        return lifeCycle;
    }

    @Override
    public DataStoreDriver getDataStoreDriver() {
        return driver;
    }

    @Override
    public HypervisorHostListener getHostListener() {
        return listener;
    }

    @Override
    public String getName() {
        // Registered under the "PowerFlex" provider name.
        return ScaleIOUtil.PROVIDER_NAME;
    }

    @Override
    public boolean configure(Map<String, Object> params) {
        // Instantiate the plugin components through the Spring context so
        // their @Inject dependencies are resolved.
        lifeCycle = ComponentContext.inject(ScaleIOPrimaryDataStoreLifeCycle.class);
        driver = ComponentContext.inject(ScaleIOPrimaryDataStoreDriver.class);
        listener = ComponentContext.inject(ScaleIOHostListener.class);

        return true;
    }

    @Override
    public Set<DataStoreProviderType> getTypes() {
        // This provider only serves primary storage.
        final Set<DataStoreProviderType> supportedTypes = new HashSet<>();
        supportedTypes.add(DataStoreProviderType.PRIMARY);
        return supportedTypes;
    }
}
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.datastore.util; + +import org.apache.log4j.Logger; + +import com.cloud.utils.script.Script; + +public class ScaleIOUtil { + private static final Logger LOGGER = Logger.getLogger(ScaleIOUtil.class); + + public static final String PROVIDER_NAME = "PowerFlex"; + + // Use prefix for CloudStack resources + public static final String VOLUME_PREFIX = "vol"; + public static final String TEMPLATE_PREFIX = "tmpl"; + public static final String SNAPSHOT_PREFIX = "snap"; + public static final String VMSNAPSHOT_PREFIX = "vmsnap"; + + public static final int IDENTIFIER_LENGTH = 16; + public static final Long MINIMUM_ALLOWED_IOPS_LIMIT = Long.valueOf(10); + + public static final String DISK_PATH = "/dev/disk/by-id"; + public static final String DISK_NAME_PREFIX = "emc-vol-"; + public static final String DISK_NAME_PREFIX_FILTER = DISK_NAME_PREFIX + "*-"; + + private static final String AGENT_PROPERTIES_FILE = "/etc/cloudstack/agent/agent.properties"; + + private static final String DEFAULT_SDC_HOME_PATH = "/opt/emc/scaleio/sdc"; + private static final String SDC_HOME_PARAMETER = "powerflex.sdc.home.dir"; + private static final String SDC_HOME_PATH = getSdcHomePath(); + + private static final String RESCAN_CMD = "drv_cfg --rescan"; + private static final String QUERY_VOLUMES_CMD = "drv_cfg --query_vols"; + // Sample output for cmd: drv_cfg --query_vols: + // Retrieved 2 volume(s) + // VOL-ID 6c33633100000009 MDM-ID 218ce1797566a00f + // VOL-ID 6c3362a30000000a MDM-ID 218ce1797566a00f + + public static String getSdcHomePath() 
{ + String sdcHomePath = DEFAULT_SDC_HOME_PATH; + String sdcHomePropertyCmdFormat = "sed -n '/%s/p' '%s' 2>/dev/null | sed 's/%s=//g' 2>/dev/null"; + String sdcHomeCmd = String.format(sdcHomePropertyCmdFormat, SDC_HOME_PARAMETER, AGENT_PROPERTIES_FILE, SDC_HOME_PARAMETER); + + String result = Script.runSimpleBashScript(sdcHomeCmd); + if (result == null) { + LOGGER.warn("Failed to get sdc home path from agent.properties, fallback to default path"); + } else { + sdcHomePath = result; + } + + return sdcHomePath; + } + + public static final void rescanForNewVolumes() { + // Detecting new volumes + String rescanCmd = ScaleIOUtil.SDC_HOME_PATH + "/bin/" + ScaleIOUtil.RESCAN_CMD; + + String result = Script.runSimpleBashScript(rescanCmd); + if (result == null) { + LOGGER.warn("Failed to rescan for new volumes"); + } + } + + public static final String getSystemIdForVolume(String volumeId) { + //query_vols outputs "VOL-ID MDM-ID " for a volume with ID: + String queryDiskCmd = SDC_HOME_PATH + "/bin/" + ScaleIOUtil.QUERY_VOLUMES_CMD; + queryDiskCmd += "|grep " + volumeId + "|awk '{print $4}'"; + + String result = Script.runSimpleBashScript(queryDiskCmd); + if (result == null) { + LOGGER.warn("Query volumes failed to get volume: " + volumeId + " details for system id"); + return null; + } + + if (result.isEmpty()) { + LOGGER.warn("Query volumes doesn't list volume: " + volumeId + ", probably volume is not mapped yet, or sdc not connected"); + return null; + } + + return result; + } +} diff --git a/plugins/storage/volume/scaleio/src/main/resources/META-INF/cloudstack/storage-volume-scaleio/module.properties b/plugins/storage/volume/scaleio/src/main/resources/META-INF/cloudstack/storage-volume-scaleio/module.properties new file mode 100755 index 000000000000..5bf9aa0172e2 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/resources/META-INF/cloudstack/storage-volume-scaleio/module.properties @@ -0,0 +1,21 @@ +# +# Licensed to the Apache Software Foundation (ASF) under 
one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +name=storage-volume-scaleio +parent=storage diff --git a/plugins/storage/volume/scaleio/src/main/resources/META-INF/cloudstack/storage-volume-scaleio/spring-storage-volume-scaleio-context.xml b/plugins/storage/volume/scaleio/src/main/resources/META-INF/cloudstack/storage-volume-scaleio/spring-storage-volume-scaleio-context.xml new file mode 100755 index 000000000000..8b86e212e299 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/resources/META-INF/cloudstack/storage-volume-scaleio/spring-storage-volume-scaleio-context.xml @@ -0,0 +1,35 @@ + + + + + + diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImplTest.java b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImplTest.java new file mode 100644 index 000000000000..10823102cf8d --- /dev/null +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImplTest.java @@ -0,0 +1,48 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.storage.datastore.client; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.utils.exception.CloudRuntimeException; + +@RunWith(MockitoJUnitRunner.class) +public class ScaleIOGatewayClientImplTest { + + ScaleIOGatewayClientImpl client; + + @Before + public void setUp() throws Exception { + } + + @After + public void tearDown() throws Exception { + } + + @Test(expected = CloudRuntimeException.class) + public void testClient() throws Exception { + client = (ScaleIOGatewayClientImpl) ScaleIOGatewayClient.getClient("https://10.2.3.149/api", + "admin", "P@ssword123", false, 60); + } +} \ No newline at end of file diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java new file mode 100644 index 000000000000..c62371f4c246 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java @@ -0,0 +1,259 
@@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.storage.datastore.lifecycle; + +import static com.google.common.truth.Truth.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.mockito.MockitoAnnotations.initMocks; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; +import 
org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientImpl; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.provider.ScaleIOHostListener; +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.ModifyStoragePoolAnswer; +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.resource.ResourceManager; +import com.cloud.resource.ResourceState; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StorageManagerImpl; +import com.cloud.storage.StoragePoolAutomation; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.template.TemplateManager; +import com.cloud.utils.crypt.DBEncryptionUtil; +import com.cloud.utils.exception.CloudRuntimeException; + +@PrepareForTest(ScaleIOGatewayClient.class) +@RunWith(PowerMockRunner.class) +public class ScaleIOPrimaryDataStoreLifeCycleTest { + + @Mock 
+ private PrimaryDataStoreDao primaryDataStoreDao; + @Mock + private PrimaryDataStoreHelper dataStoreHelper; + @Mock + private ResourceManager resourceManager; + @Mock + private StoragePoolAutomation storagePoolAutomation; + @Mock + private HostDao hostDao; + @Mock + private StoragePoolHostDao storagePoolHostDao; + @Mock + private DataStoreProviderManager dataStoreProviderMgr; + @Mock + private DataStoreProvider dataStoreProvider; + @Mock + private DataStoreManager dataStoreMgr; + @Mock + private PrimaryDataStore store; + @Mock + private TemplateManager templateMgr; + @Mock + private AgentManager agentMgr; + @Mock + ModifyStoragePoolAnswer answer; + + @Spy + @InjectMocks + private StorageManager storageMgr = new StorageManagerImpl(); + + @Spy + @InjectMocks + private HypervisorHostListener hostListener = new ScaleIOHostListener(); + + @InjectMocks + private ScaleIOPrimaryDataStoreLifeCycle scaleIOPrimaryDataStoreLifeCycleTest; + + @Before + public void setUp() { + initMocks(this); + } + + @Test + public void testAttachZone() throws Exception { + final DataStore dataStore = mock(DataStore.class); + when(dataStore.getId()).thenReturn(1L); + + Map mockDataStoreDetails = new HashMap<>(); + mockDataStoreDetails.put(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT, "https://192.168.1.19/api"); + String encryptedUsername = DBEncryptionUtil.encrypt("root"); + mockDataStoreDetails.put(ScaleIOGatewayClient.GATEWAY_API_USERNAME, encryptedUsername); + String encryptedPassword = DBEncryptionUtil.encrypt("Password@123"); + mockDataStoreDetails.put(ScaleIOGatewayClient.GATEWAY_API_PASSWORD, encryptedPassword); + when(primaryDataStoreDao.getDetails(1L)).thenReturn(mockDataStoreDetails); + + PowerMockito.mockStatic(ScaleIOGatewayClient.class); + ScaleIOGatewayClientImpl client = mock(ScaleIOGatewayClientImpl.class); + String username = DBEncryptionUtil.decrypt(encryptedUsername); + String password = DBEncryptionUtil.decrypt(encryptedPassword); + 
when(ScaleIOGatewayClient.getClient("https://192.168.1.19/api", username, password, false, 60)).thenReturn(client); + + List connectedSdcIps = new ArrayList<>(); + connectedSdcIps.add("192.168.1.1"); + connectedSdcIps.add("192.168.1.2"); + when(client.listConnectedSdcIps()).thenReturn(connectedSdcIps); + when(client.isSdcConnected(anyString())).thenReturn(true); + + final ZoneScope scope = new ZoneScope(1L); + + List hostList = new ArrayList(); + HostVO host1 = new HostVO(1L, "host01", Host.Type.Routing, "192.168.1.1", "255.255.255.0", null, null, null, null, null, null, null, null, null, null, + UUID.randomUUID().toString(), Status.Up, "1.0", null, null, 1L, null, 0, 0, "aa", 0, Storage.StoragePoolType.PowerFlex); + HostVO host2 = new HostVO(2L, "host02", Host.Type.Routing, "192.168.1.2", "255.255.255.0", null, null, null, null, null, null, null, null, null, null, + UUID.randomUUID().toString(), Status.Up, "1.0", null, null, 1L, null, 0, 0, "aa", 0, Storage.StoragePoolType.PowerFlex); + + host1.setResourceState(ResourceState.Enabled); + host2.setResourceState(ResourceState.Enabled); + hostList.add(host1); + hostList.add(host2); + when(resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.KVM, 1L)).thenReturn(hostList); + + when(dataStoreMgr.getDataStore(anyLong(), eq(DataStoreRole.Primary))).thenReturn(store); + when(store.getId()).thenReturn(1L); + when(store.getPoolType()).thenReturn(Storage.StoragePoolType.PowerFlex); + when(store.isShared()).thenReturn(true); + when(store.getName()).thenReturn("ScaleIOPool"); + when(store.getStorageProviderName()).thenReturn(ScaleIOUtil.PROVIDER_NAME); + + when(dataStoreProviderMgr.getDataStoreProvider(ScaleIOUtil.PROVIDER_NAME)).thenReturn(dataStoreProvider); + when(dataStoreProvider.getName()).thenReturn(ScaleIOUtil.PROVIDER_NAME); + storageMgr.registerHostListener(ScaleIOUtil.PROVIDER_NAME, hostListener); + + when(agentMgr.easySend(anyLong(), 
Mockito.any(ModifyStoragePoolCommand.class))).thenReturn(answer); + when(answer.getResult()).thenReturn(true); + + when(storagePoolHostDao.findByPoolHost(anyLong(), anyLong())).thenReturn(null); + + when(hostDao.findById(1L)).thenReturn(host1); + when(hostDao.findById(2L)).thenReturn(host2); + + when(dataStoreHelper.attachZone(Mockito.any(DataStore.class))).thenReturn(null); + + scaleIOPrimaryDataStoreLifeCycleTest.attachZone(dataStore, scope, Hypervisor.HypervisorType.KVM); + verify(storageMgr,times(2)).connectHostToSharedPool(Mockito.any(Long.class), Mockito.any(Long.class)); + verify(storagePoolHostDao,times(2)).persist(Mockito.any(StoragePoolHostVO.class)); + } + + @Test(expected = CloudRuntimeException.class) + public void testAttachZone_UnsupportedHypervisor() throws Exception { + final DataStore dataStore = mock(DataStore.class); + final ZoneScope scope = new ZoneScope(1L); + scaleIOPrimaryDataStoreLifeCycleTest.attachZone(dataStore, scope, Hypervisor.HypervisorType.VMware); + } + + @Test + public void testMaintain() { + final DataStore store = mock(DataStore.class); + when(storagePoolAutomation.maintain(any(DataStore.class))).thenReturn(true); + when(dataStoreHelper.maintain(any(DataStore.class))).thenReturn(true); + final boolean result = scaleIOPrimaryDataStoreLifeCycleTest.maintain(store); + assertThat(result).isTrue(); + } + + @Test + public void testCancelMaintain() { + final DataStore store = mock(DataStore.class); + when(dataStoreHelper.cancelMaintain(any(DataStore.class))).thenReturn(true); + when(storagePoolAutomation.cancelMaintain(any(DataStore.class))).thenReturn(true); + final boolean result = scaleIOPrimaryDataStoreLifeCycleTest.cancelMaintain(store); + assertThat(result).isTrue(); + } + + @Test + public void testEnableStoragePool() { + final DataStore dataStore = mock(DataStore.class); + when(dataStoreHelper.enable(any(DataStore.class))).thenReturn(true); + scaleIOPrimaryDataStoreLifeCycleTest.enableStoragePool(dataStore); + } + + @Test + 
public void testDisableStoragePool() { + final DataStore dataStore = mock(DataStore.class); + when(dataStoreHelper.disable(any(DataStore.class))).thenReturn(true); + scaleIOPrimaryDataStoreLifeCycleTest.disableStoragePool(dataStore); + } + + @Test + public void testDeleteDataStoreWithStoragePoolNull() { + final PrimaryDataStore store = mock(PrimaryDataStore.class); + when(primaryDataStoreDao.findById(anyLong())).thenReturn(null); + when(dataStoreHelper.deletePrimaryDataStore(any(DataStore.class))).thenReturn(true); + final boolean result = scaleIOPrimaryDataStoreLifeCycleTest.deleteDataStore(store); + assertThat(result).isFalse(); + } + + @Test + public void testDeleteDataStore() { + final PrimaryDataStore store = mock(PrimaryDataStore.class); + final StoragePoolVO storagePoolVO = mock(StoragePoolVO.class); + when(primaryDataStoreDao.findById(anyLong())).thenReturn(storagePoolVO); + List unusedTemplates = new ArrayList<>(); + when(templateMgr.getUnusedTemplatesInPool(storagePoolVO)).thenReturn(unusedTemplates); + List poolHostVOs = new ArrayList<>(); + when(storagePoolHostDao.listByPoolId(anyLong())).thenReturn(poolHostVOs); + when(dataStoreHelper.deletePrimaryDataStore(any(DataStore.class))).thenReturn(true); + final boolean result = scaleIOPrimaryDataStoreLifeCycleTest.deleteDataStore(store); + assertThat(result).isTrue(); + } +} diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java index 0651f2ea8560..f73a8fdbe0fb 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java @@ -82,6 +82,7 @@ import 
com.cloud.user.AccountDetailsDao; import com.cloud.user.AccountVO; import com.cloud.user.dao.AccountDao; +import com.cloud.utils.Pair; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; import com.google.common.base.Preconditions; @@ -830,6 +831,11 @@ public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCa throw new UnsupportedOperationException(); } + @Override + public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { + throw new UnsupportedOperationException(); + } + @Override public boolean canCopy(DataObject srcData, DataObject destData) { return false; @@ -1588,4 +1594,29 @@ private List getNonDestroyedSnapshots(long csVolumeId) { return lstSnapshots2; } + + @Override + public boolean canProvideStorageStats() { + return false; + } + + @Override + public Pair getStorageStats(StoragePool storagePool) { + return null; + } + + @Override + public boolean canProvideVolumeStats() { + return false; + } + + @Override + public Pair getVolumeStats(StoragePool storagePool, String volumeId) { + return null; + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + return true; + } } diff --git a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java index 75b8cc7f3b4e..d48f2a362a4e 100644 --- a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java @@ -769,7 +769,8 @@ public void sendAlert(AlertType alertType, long dataCenterId, Long podId, Long c (alertType != AlertManager.AlertType.ALERT_TYPE_UPLOAD_FAILED) && (alertType != AlertManager.AlertType.ALERT_TYPE_OOBM_AUTH_ERROR) && (alertType != AlertManager.AlertType.ALERT_TYPE_HA_ACTION) && - (alertType != AlertManager.AlertType.ALERT_TYPE_CA_CERT)) { + (alertType != AlertManager.AlertType.ALERT_TYPE_CA_CERT) && + (alertType != 
AlertManager.AlertType.ALERT_TYPE_VM_SNAPSHOT)) { alert = _alertDao.getLastAlert(alertType.getType(), dataCenterId, podId, clusterId); } diff --git a/server/src/main/java/com/cloud/api/ApiDBUtils.java b/server/src/main/java/com/cloud/api/ApiDBUtils.java index 9fade85cf5b8..0d396c6b598f 100644 --- a/server/src/main/java/com/cloud/api/ApiDBUtils.java +++ b/server/src/main/java/com/cloud/api/ApiDBUtils.java @@ -1209,7 +1209,7 @@ public static HypervisorType getHypervisorTypeFromFormat(long dcId, ImageFormat type = HypervisorType.Hyperv; } } if (format == ImageFormat.RAW) { - // Currently, KVM only suppoorts RBD images of type RAW. + // Currently, KVM only supports RBD and PowerFlex images of type RAW. // This results in a weird collision with OVM volumes which // can only be raw, thus making KVM RBD volumes show up as OVM // rather than RBD. This block of code can (hopefuly) by checking to @@ -1221,7 +1221,7 @@ public static HypervisorType getHypervisorTypeFromFormat(long dcId, ImageFormat ListIterator itr = pools.listIterator(); while(itr.hasNext()) { StoragePoolVO pool = itr.next(); - if(pool.getPoolType() == StoragePoolType.RBD || pool.getPoolType() == StoragePoolType.CLVM) { + if(pool.getPoolType() == StoragePoolType.RBD || pool.getPoolType() == StoragePoolType.PowerFlex || pool.getPoolType() == StoragePoolType.CLVM) { // This case will note the presence of non-qcow2 primary stores, suggesting KVM without NFS. Otherwse, // If this check is not passed, the hypervisor type will remain OVM. 
type = HypervisorType.KVM; diff --git a/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java b/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java index a4de3663cf94..1067ff2ed472 100644 --- a/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java +++ b/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java @@ -288,16 +288,17 @@ public static List createVolumeResponse(ResponseView view, Volum vrDataList.put(vr.getId(), vrData); VolumeStats vs = null; - if (vr.getFormat() == ImageFormat.VHD || vr.getFormat() == ImageFormat.QCOW2) { - vs = ApiDBUtils.getVolumeStatistics(vrData.getPath()); - } - else if (vr.getFormat() == ImageFormat.OVA) { + if (vr.getFormat() == ImageFormat.VHD || vr.getFormat() == ImageFormat.QCOW2 || vr.getFormat() == ImageFormat.RAW) { + if (vrData.getPath() != null) { + vs = ApiDBUtils.getVolumeStatistics(vrData.getPath()); + } + } else if (vr.getFormat() == ImageFormat.OVA) { if (vrData.getChainInfo() != null) { vs = ApiDBUtils.getVolumeStatistics(vrData.getChainInfo()); } } - if (vs != null){ + if (vs != null) { long vsz = vs.getVirtualSize(); long psz = vs.getPhysicalSize() ; double util = (double)psz/vsz; diff --git a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index 8e489f89eca0..ff2b66cfc7e2 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -336,6 +336,11 @@ public UserVmResponse newUserVmResponse(ResponseView view, String objectName, Us userVmResponse.setBootType("Bios"); userVmResponse.setBootMode("legacy"); } + + if (userVm.getPoolType() != null) { + userVmResponse.setPoolType(userVm.getPoolType().toString()); + } + // Remove blacklisted settings if user is not admin if (caller.getType() != Account.ACCOUNT_TYPE_ADMIN) { String[] userVmSettingsToHide = 
QueryService.UserVMBlacklistedDetails.value().split(","); diff --git a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java index fba24e0a8c7d..543c9772d7c6 100644 --- a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java @@ -585,9 +585,19 @@ public long getAllocatedPoolCapacity(StoragePoolVO pool, VMTemplateVO templateFo // if the storage pool is managed, the used bytes can be larger than the sum of the sizes of all of the non-destroyed volumes // in this case, call getUsedBytes(StoragePoolVO) if (pool.isManaged()) { - return getUsedBytes(pool); - } - else { + totalAllocatedSize = getUsedBytes(pool); + + if (templateForVmCreation != null) { + VMTemplateStoragePoolVO templatePoolVO = _templatePoolDao.findByPoolTemplate(pool.getId(), templateForVmCreation.getId(), null); + if (templatePoolVO == null) { + // template is not installed in the pool, consider the template size for allocation + long templateForVmCreationSize = templateForVmCreation.getSize() != null ? templateForVmCreation.getSize() : 0; + totalAllocatedSize += templateForVmCreationSize; + } + } + + return totalAllocatedSize; + } else { // Get size for all the non-destroyed volumes. 
Pair sizes = _volumeDao.getNonDestroyedCountAndTotalByPool(pool.getId()); diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index 0ad2dbf7dc8d..f5de35af3ed2 100755 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -209,6 +209,7 @@ import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.Storage.ProvisioningType; import com.cloud.storage.StorageManager; +import com.cloud.storage.Volume; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.VMTemplateZoneDao; import com.cloud.storage.dao.VolumeDao; @@ -2600,6 +2601,10 @@ protected ServiceOfferingVO createServiceOffering(final long userId, final boole continue; } } + if (detailEntry.getKey().equalsIgnoreCase(Volume.BANDWIDTH_LIMIT_IN_MBPS) || detailEntry.getKey().equalsIgnoreCase(Volume.IOPS_LIMIT)) { + // Add in disk offering details + continue; + } detailsVO.add(new ServiceOfferingDetailsVO(offering.getId(), detailEntry.getKey(), detailEntryValue, true)); } } @@ -2623,6 +2628,21 @@ protected ServiceOfferingVO createServiceOffering(final long userId, final boole } _serviceOfferingDetailsDao.saveDetails(detailsVO); } + + if (details != null && !details.isEmpty()) { + List diskDetailsVO = new ArrayList(); + // Support disk offering details for below parameters + if (details.containsKey(Volume.BANDWIDTH_LIMIT_IN_MBPS)) { + diskDetailsVO.add(new DiskOfferingDetailVO(offering.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS, details.get(Volume.BANDWIDTH_LIMIT_IN_MBPS), false)); + } + if (details.containsKey(Volume.IOPS_LIMIT)) { + diskDetailsVO.add(new DiskOfferingDetailVO(offering.getId(), Volume.IOPS_LIMIT, details.get(Volume.IOPS_LIMIT), false)); + } + if (!diskDetailsVO.isEmpty()) { + diskOfferingDetailsDao.saveDetails(diskDetailsVO); + } + } + 
CallContext.current().setEventDetails("Service offering id=" + offering.getId()); return offering; } else { @@ -2868,7 +2888,7 @@ protected DiskOfferingVO createDiskOffering(final Long userId, final List Long bytesWriteRate, Long bytesWriteRateMax, Long bytesWriteRateMaxLength, Long iopsReadRate, Long iopsReadRateMax, Long iopsReadRateMaxLength, Long iopsWriteRate, Long iopsWriteRateMax, Long iopsWriteRateMaxLength, - final Integer hypervisorSnapshotReserve, String cacheMode, final Long storagePolicyID) { + final Integer hypervisorSnapshotReserve, String cacheMode, final Map details, final Long storagePolicyID) { long diskSize = 0;// special case for custom disk offerings if (numGibibytes != null && numGibibytes <= 0) { throw new InvalidParameterValueException("Please specify a disk size of at least 1 Gb."); @@ -2963,6 +2983,15 @@ protected DiskOfferingVO createDiskOffering(final Long userId, final List detailsVO.add(new DiskOfferingDetailVO(offering.getId(), ApiConstants.ZONE_ID, String.valueOf(zoneId), false)); } } + if (details != null && !details.isEmpty()) { + // Support disk offering details for below parameters + if (details.containsKey(Volume.BANDWIDTH_LIMIT_IN_MBPS)) { + detailsVO.add(new DiskOfferingDetailVO(offering.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS, details.get(Volume.BANDWIDTH_LIMIT_IN_MBPS), false)); + } + if (details.containsKey(Volume.IOPS_LIMIT)) { + detailsVO.add(new DiskOfferingDetailVO(offering.getId(), Volume.IOPS_LIMIT, details.get(Volume.IOPS_LIMIT), false)); + } + } if (storagePolicyID != null) { detailsVO.add(new DiskOfferingDetailVO(offering.getId(), ApiConstants.STORAGE_POLICY, String.valueOf(storagePolicyID), false)); } @@ -2987,6 +3016,7 @@ public DiskOffering createDiskOffering(final CreateDiskOfferingCmd cmd) { final String tags = cmd.getTags(); final List domainIds = cmd.getDomainIds(); final List zoneIds = cmd.getZoneIds(); + final Map details = cmd.getDetails(); final Long storagePolicyId = cmd.getStoragePolicy(); // check if 
valid domain @@ -3063,7 +3093,7 @@ public DiskOffering createDiskOffering(final CreateDiskOfferingCmd cmd) { localStorageRequired, isDisplayOfferingEnabled, isCustomizedIops, minIops, maxIops, bytesReadRate, bytesReadRateMax, bytesReadRateMaxLength, bytesWriteRate, bytesWriteRateMax, bytesWriteRateMaxLength, iopsReadRate, iopsReadRateMax, iopsReadRateMaxLength, iopsWriteRate, iopsWriteRateMax, iopsWriteRateMaxLength, - hypervisorSnapshotReserve, cacheMode, storagePolicyId); + hypervisorSnapshotReserve, cacheMode, details, storagePolicyId); } /** diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index 6c00709e1734..fed29aa5a50d 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -30,24 +30,14 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.dao.VMTemplateDao; -import com.cloud.utils.StringUtils; -import com.cloud.exception.StorageUnavailableException; -import com.cloud.utils.db.Filter; -import com.cloud.utils.fsm.StateMachine2; - -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; +import org.apache.cloudstack.affinity.AffinityGroupDomainMapVO; import org.apache.cloudstack.affinity.AffinityGroupProcessor; import org.apache.cloudstack.affinity.AffinityGroupService; import org.apache.cloudstack.affinity.AffinityGroupVMMapVO; import org.apache.cloudstack.affinity.AffinityGroupVO; -import org.apache.cloudstack.affinity.AffinityGroupDomainMapVO; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; -import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; import org.apache.cloudstack.affinity.dao.AffinityGroupDomainMapDao; +import 
org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; import org.apache.cloudstack.engine.cloud.entity.api.db.VMReservationVO; import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMReservationDao; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -60,6 +50,9 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.identity.ManagementServerNode; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; +import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -91,6 +84,7 @@ import com.cloud.exception.AffinityConflictException; import com.cloud.exception.ConnectionException; import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.exception.StorageUnavailableException; import com.cloud.gpu.GPU; import com.cloud.host.DetailVO; import com.cloud.host.Host; @@ -113,26 +107,31 @@ import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSCategoryDao; import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.AccountManager; import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; +import com.cloud.utils.StringUtils; import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; import 
com.cloud.utils.db.TransactionCallback; import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.StateListener; +import com.cloud.utils.fsm.StateMachine2; import com.cloud.vm.DiskProfile; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; @@ -1435,7 +1434,7 @@ protected boolean hostCanAccessSPool(Host host, StoragePool pool) { boolean hostCanAccessSPool = false; StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId()); - if (hostPoolLinkage != null) { + if (hostPoolLinkage != null && _storageMgr.canHostAccessStoragePool(host, pool)) { hostCanAccessSPool = true; } diff --git a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java index c320a7a55377..1d3f34ee1581 100644 --- a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java +++ b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java @@ -249,6 +249,7 @@ protected VirtualMachineTO toVirtualMachineTO(VirtualMachineProfile vmProfile) { to.setConfigDriveLabel(vmProfile.getConfigDriveLabel()); to.setConfigDriveIsoRootFolder(vmProfile.getConfigDriveIsoRootFolder()); to.setConfigDriveIsoFile(vmProfile.getConfigDriveIsoFile()); + to.setConfigDriveLocation(vmProfile.getConfigDriveLocation()); to.setState(vm.getState()); return to; diff --git a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java index eaa5fea183b4..e63a992ec4b7 100644 --- a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java +++ b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.agent.lb.IndirectAgentLB; import org.apache.cloudstack.ca.CAManager; import 
org.apache.cloudstack.ca.SetupCertificateCommand; +import org.apache.cloudstack.direct.download.DirectDownloadManager; import org.apache.cloudstack.framework.ca.Certificate; import org.apache.cloudstack.utils.security.KeyStoreUtils; import org.apache.log4j.Logger; @@ -53,6 +54,7 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.PhysicalNetworkSetupInfo; @@ -80,7 +82,11 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements @Inject private CAManager caManager; @Inject + DirectDownloadManager directDownloadManager; + @Inject private IndirectAgentLB indirectAgentLB; + @Inject + private HostDao hostDao; @Override public abstract Hypervisor.HypervisorType getHypervisorType(); @@ -105,6 +111,10 @@ public AgentControlAnswer processControlCommand(long agentId, AgentControlComman @Override public void processHostAdded(long hostId) { + HostVO host = hostDao.findById(hostId); + if (host != null) { + directDownloadManager.syncCertificatesToHost(hostId, host.getDataCenterId()); + } } @Override @@ -405,6 +415,7 @@ public boolean configure(String name, Map params) throws Configu _kvmGuestNic = _kvmPrivateNic; } + agentMgr.registerForHostEvents(this, true, false, false); _resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); return true; } diff --git a/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java b/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java index 7482eca27a89..60e4deca217e 100644 --- a/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java +++ b/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java @@ -36,7 +36,7 @@ import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; -import com.cloud.agent.api.Answer; 
+import com.cloud.agent.api.HandleConfigDriveIsoAnswer; import com.cloud.agent.api.HandleConfigDriveIsoCommand; import com.cloud.agent.api.to.DiskTO; import com.cloud.configuration.ConfigurationManager; @@ -338,7 +338,16 @@ public boolean prepareMigration(NicProfile nic, Network network, VirtualMachineP if (_networkModel.getUserDataUpdateProvider(network).getProvider().equals(Provider.ConfigDrive)) { LOG.trace(String.format("[prepareMigration] for vm: %s", vm.getInstanceName())); try { - addPasswordAndUserdata(network, nic, vm, dest, context); + if (isConfigDriveIsoOnHostCache(vm.getId())) { + vm.setConfigDriveLocation(Location.HOST); + configureConfigDriveData(vm, nic, dest); + + // Create the config drive on dest host cache + createConfigDriveIsoOnHostCache(vm, dest.getHost().getId()); + } else { + vm.setConfigDriveLocation(getConfigDriveLocation(vm.getId())); + addPasswordAndUserdata(network, nic, vm, dest, context); + } } catch (InsufficientCapacityException | ResourceUnavailableException e) { LOG.error("Failed to add config disk drive due to: ", e); return false; @@ -349,10 +358,28 @@ public boolean prepareMigration(NicProfile nic, Network network, VirtualMachineP @Override public void rollbackMigration(NicProfile nic, Network network, VirtualMachineProfile vm, ReservationContext src, ReservationContext dst) { + try { + if (isConfigDriveIsoOnHostCache(vm.getId())) { + vm.setConfigDriveLocation(Location.HOST); + // Delete the config drive on dest host cache + deleteConfigDriveIsoOnHostCache(vm.getVirtualMachine(), vm.getHostId()); + } + } catch (ConcurrentOperationException | ResourceUnavailableException e) { + LOG.error("rollbackMigration failed.", e); + } } @Override public void commitMigration(NicProfile nic, Network network, VirtualMachineProfile vm, ReservationContext src, ReservationContext dst) { + try { + if (isConfigDriveIsoOnHostCache(vm.getId())) { + vm.setConfigDriveLocation(Location.HOST); + // Delete the config drive on src host cache + 
deleteConfigDriveIsoOnHostCache(vm.getVirtualMachine(), vm.getHostId()); + } + } catch (ConcurrentOperationException | ResourceUnavailableException e) { + LOG.error("commitMigration failed.", e); + } } private void recreateConfigDriveIso(NicProfile nic, Network network, VirtualMachineProfile vm, DeployDestination dest) throws ResourceUnavailableException { @@ -383,7 +410,8 @@ private boolean isWindows(long guestOSId) { private DataStore findDataStore(VirtualMachineProfile profile, DeployDestination dest) { DataStore dataStore = null; - if (VirtualMachineManager.VmConfigDriveOnPrimaryPool.value()) { + if (VirtualMachineManager.VmConfigDriveOnPrimaryPool.valueIn(dest.getDataCenter().getId()) || + VirtualMachineManager.VmConfigDriveForceHostCacheUse.valueIn(dest.getDataCenter().getId())) { if(MapUtils.isNotEmpty(dest.getStorageForDisks())) { dataStore = getPlannedDataStore(dest, dataStore); } @@ -472,12 +500,86 @@ private Long findAgentId(VirtualMachineProfile profile, DeployDestination dest, } else { agentId = dest.getHost().getId(); } - if (!VirtualMachineManager.VmConfigDriveOnPrimaryPool.value()) { + if (!VirtualMachineManager.VmConfigDriveOnPrimaryPool.valueIn(dest.getDataCenter().getId()) && + !VirtualMachineManager.VmConfigDriveForceHostCacheUse.valueIn(dest.getDataCenter().getId())) { agentId = findAgentIdForImageStore(dataStore); } return agentId; } + private Location getConfigDriveLocation(long vmId) { + final UserVmDetailVO vmDetailConfigDriveLocation = _userVmDetailsDao.findDetail(vmId, VmDetailConstants.CONFIG_DRIVE_LOCATION); + if (vmDetailConfigDriveLocation != null) { + if (Location.HOST.toString().equalsIgnoreCase(vmDetailConfigDriveLocation.getValue())) { + return Location.HOST; + } else if (Location.PRIMARY.toString().equalsIgnoreCase(vmDetailConfigDriveLocation.getValue())) { + return Location.PRIMARY; + } else { + return Location.SECONDARY; + } + } + return Location.SECONDARY; + } + + private boolean isConfigDriveIsoOnHostCache(long vmId) { + 
final UserVmDetailVO vmDetailConfigDriveLocation = _userVmDetailsDao.findDetail(vmId, VmDetailConstants.CONFIG_DRIVE_LOCATION); + if (vmDetailConfigDriveLocation != null && Location.HOST.toString().equalsIgnoreCase(vmDetailConfigDriveLocation.getValue())) { + return true; + } + return false; + } + + private boolean createConfigDriveIsoOnHostCache(VirtualMachineProfile profile, Long hostId) throws ResourceUnavailableException { + if (hostId == null) { + throw new ResourceUnavailableException("Config drive iso creation failed, dest host not available", + ConfigDriveNetworkElement.class, 0L); + } + + LOG.debug("Creating config drive ISO for vm: " + profile.getInstanceName() + " on host: " + hostId); + + final String isoFileName = ConfigDrive.configIsoFileName(profile.getInstanceName()); + final String isoPath = ConfigDrive.createConfigDrivePath(profile.getInstanceName()); + final String isoData = ConfigDriveBuilder.buildConfigDrive(profile.getVmData(), isoFileName, profile.getConfigDriveLabel()); + final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, isoData, null, false, true, true); + + final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(hostId, configDriveIsoCommand); + if (answer == null) { + throw new CloudRuntimeException("Unable to get an answer to handle config drive creation for vm: " + profile.getInstanceName() + " on host: " + hostId); + } + + if (!answer.getResult()) { + throw new ResourceUnavailableException(String.format("Config drive iso creation failed, details: %s", + answer.getDetails()), ConfigDriveNetworkElement.class, 0L); + } + + profile.setConfigDriveLocation(answer.getConfigDriveLocation()); + _userVmDetailsDao.addDetail(profile.getId(), VmDetailConstants.CONFIG_DRIVE_LOCATION, answer.getConfigDriveLocation().toString(), false); + addConfigDriveDisk(profile, null); + return true; + } + + private boolean deleteConfigDriveIsoOnHostCache(final VirtualMachine vm, 
final Long hostId) throws ResourceUnavailableException { + if (hostId == null) { + throw new ResourceUnavailableException("Config drive iso deletion failed, host not available", + ConfigDriveNetworkElement.class, 0L); + } + + LOG.debug("Deleting config drive ISO for vm: " + vm.getInstanceName() + " on host: " + hostId); + final String isoPath = ConfigDrive.createConfigDrivePath(vm.getInstanceName()); + final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, null, null, false, true, false); + + final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(hostId, configDriveIsoCommand); + if (answer == null) { + throw new CloudRuntimeException("Unable to get an answer to handle config drive deletion for vm: " + vm.getInstanceName() + " on host: " + hostId); + } + + if (!answer.getResult()) { + LOG.error("Failed to remove config drive for instance: " + vm.getInstanceName()); + return false; + } + return true; + } + private boolean createConfigDriveIso(VirtualMachineProfile profile, DeployDestination dest, DiskTO disk) throws ResourceUnavailableException { DataStore dataStore = getDatastoreForConfigDriveIso(disk, profile, dest); @@ -492,13 +594,17 @@ private boolean createConfigDriveIso(VirtualMachineProfile profile, DeployDestin final String isoFileName = ConfigDrive.configIsoFileName(profile.getInstanceName()); final String isoPath = ConfigDrive.createConfigDrivePath(profile.getInstanceName()); final String isoData = ConfigDriveBuilder.buildConfigDrive(profile.getVmData(), isoFileName, profile.getConfigDriveLabel()); - final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, isoData, dataStore.getTO(), true); + boolean useHostCacheOnUnsupportedPool = VirtualMachineManager.VmConfigDriveUseHostCacheOnUnsupportedPool.valueIn(dest.getDataCenter().getId()); + boolean preferHostCache = 
VirtualMachineManager.VmConfigDriveForceHostCacheUse.valueIn(dest.getDataCenter().getId()); + final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, isoData, dataStore.getTO(), useHostCacheOnUnsupportedPool, preferHostCache, true); - final Answer answer = agentManager.easySend(agentId, configDriveIsoCommand); + final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(agentId, configDriveIsoCommand); if (!answer.getResult()) { throw new ResourceUnavailableException(String.format("Config drive iso creation failed, details: %s", answer.getDetails()), ConfigDriveNetworkElement.class, 0L); } + profile.setConfigDriveLocation(answer.getConfigDriveLocation()); + _userVmDetailsDao.addDetail(profile.getId(), VmDetailConstants.CONFIG_DRIVE_LOCATION, answer.getConfigDriveLocation().toString(), false); addConfigDriveDisk(profile, dataStore); return true; } @@ -526,28 +632,37 @@ private DataStore getDatastoreForConfigDriveIso(DiskTO disk, VirtualMachineProfi } private boolean deleteConfigDriveIso(final VirtualMachine vm) throws ResourceUnavailableException { - DataStore dataStore = _dataStoreMgr.getImageStoreWithFreeCapacity(vm.getDataCenterId()); - Long agentId = findAgentIdForImageStore(dataStore); + Long hostId = (vm.getHostId() != null) ? 
vm.getHostId() : vm.getLastHostId(); + Location location = getConfigDriveLocation(vm.getId()); + if (location == Location.HOST) { + return deleteConfigDriveIsoOnHostCache(vm, hostId); + } + + Long agentId = null; + DataStore dataStore = null; - if (VirtualMachineManager.VmConfigDriveOnPrimaryPool.value()) { + if (location == Location.SECONDARY) { + dataStore = _dataStoreMgr.getImageStoreWithFreeCapacity(vm.getDataCenterId()); + agentId = findAgentIdForImageStore(dataStore); + } else if (location == Location.PRIMARY) { List volumes = _volumeDao.findByInstanceAndType(vm.getId(), Volume.Type.ROOT); if (volumes != null && volumes.size() > 0) { dataStore = _dataStoreMgr.getDataStore(volumes.get(0).getPoolId(), DataStoreRole.Primary); } - agentId = (vm.getHostId() != null) ? vm.getHostId() : vm.getLastHostId(); + agentId = hostId; } if (agentId == null || dataStore == null) { - throw new ResourceUnavailableException("Config drive iso creation failed, agent or datastore not available", + throw new ResourceUnavailableException("Config drive iso deletion failed, agent or datastore not available", ConfigDriveNetworkElement.class, 0L); } LOG.debug("Deleting config drive ISO for vm: " + vm.getInstanceName()); final String isoPath = ConfigDrive.createConfigDrivePath(vm.getInstanceName()); - final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, null, dataStore.getTO(), false); + final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, null, dataStore.getTO(), false, false, false); - final Answer answer = agentManager.easySend(agentId, configDriveIsoCommand); + final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(agentId, configDriveIsoCommand); if (!answer.getResult()) { LOG.error("Failed to remove config drive for instance: " + vm.getInstanceName()); return false; @@ -566,11 +681,13 @@ private void addConfigDriveDisk(final VirtualMachineProfile profile, 
final DataS } if (!isoAvailable) { TemplateObjectTO dataTO = new TemplateObjectTO(); - if (dataStore == null) { + if (dataStore == null && !isConfigDriveIsoOnHostCache(profile.getId())) { throw new ResourceUnavailableException("Config drive disk add failed, datastore not available", ConfigDriveNetworkElement.class, 0L); + } else if (dataStore != null) { + dataTO.setDataStore(dataStore.getTO()); } - dataTO.setDataStore(dataStore.getTO()); + dataTO.setUuid(profile.getUuid()); dataTO.setPath(isoPath); dataTO.setFormat(Storage.ImageFormat.ISO); diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManager.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManager.java index 26b2045e13a3..18f669ee91b3 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManager.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManager.java @@ -98,7 +98,7 @@ public interface VirtualNetworkApplianceManager extends Manager, VirtualNetworkA false, ConfigKey.Scope.Global, null); static final ConfigKey RouterHealthChecksFailuresToRecreateVr = new ConfigKey(String.class, RouterHealthChecksFailuresToRecreateVrCK, "Advanced", "", "Health checks failures defined by this config are the checks that should cause router recreation. If empty the recreate is not attempted for any health check failure. 
Possible values are comma separated script names " + - "from systemvm’s /root/health_scripts/ (namely - cpu_usage_check.py, dhcp_check.py, disk_space_check.py, dns_check.py, gateways_check.py, haproxy_check.py, iptables_check.py, memory_usage_check.py, router_version_check.py), connectivity.test " + + "from systemvm’s /root/health_scripts/ (namely - cpu_usage_check.py, dhcp_check.py, disk_space_check.py, dns_check.py, gateways_check.py, haproxy_check.py, iptables_check.py, memory_usage_check.py, router_version_check.py), connectivity.test, filesystem.writable.test " + " or services (namely - loadbalancing.service, webserver.service, dhcp.service) ", true, ConfigKey.Scope.Zone, null); static final ConfigKey RouterHealthChecksToExclude = new ConfigKey(String.class, "router.health.checks.to.exclude", "Advanced", "", diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 1103ff9c0589..9e0a2bfd190b 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -280,6 +280,8 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V Configurable, StateListener { private static final Logger s_logger = Logger.getLogger(VirtualNetworkApplianceManagerImpl.class); private static final String CONNECTIVITY_TEST = "connectivity.test"; + private static final String FILESYSTEM_WRITABLE_TEST = "filesystem.writable.test"; + private static final String READONLY_FILESYSTEM_ERROR = "Read-only file system"; private static final String BACKUP_ROUTER_EXCLUDED_TESTS = "gateways_check.py"; @Inject private EntityManager _entityMgr; @@ -1274,14 +1276,19 @@ private List getFailingChecks(DomainRouterVO router, GetRouterMonitorRes if (answer == null) { s_logger.warn("Unable to fetch monitor results for 
router " + router); - resetRouterHealthChecksAndConnectivity(router.getId(), false, "Communication failed"); + resetRouterHealthChecksAndConnectivity(router.getId(), false, false, "Communication failed"); return Arrays.asList(CONNECTIVITY_TEST); } else if (!answer.getResult()) { s_logger.warn("Failed to fetch monitor results from router " + router + " with details: " + answer.getDetails()); - resetRouterHealthChecksAndConnectivity(router.getId(), false, "Failed to fetch results with details: " + answer.getDetails()); - return Arrays.asList(CONNECTIVITY_TEST); + if (StringUtils.isNotBlank(answer.getDetails()) && answer.getDetails().equalsIgnoreCase(READONLY_FILESYSTEM_ERROR)) { + resetRouterHealthChecksAndConnectivity(router.getId(), true, false, "Failed to write: " + answer.getDetails()); + return Arrays.asList(FILESYSTEM_WRITABLE_TEST); + } else { + resetRouterHealthChecksAndConnectivity(router.getId(), false, false, "Failed to fetch results with details: " + answer.getDetails()); + return Arrays.asList(CONNECTIVITY_TEST); + } } else { - resetRouterHealthChecksAndConnectivity(router.getId(), true, "Successfully fetched data"); + resetRouterHealthChecksAndConnectivity(router.getId(), true, true, "Successfully fetched data"); updateDbHealthChecksFromRouterResponse(router.getId(), answer.getMonitoringResults()); return answer.getFailingChecks(); } @@ -1418,28 +1425,31 @@ private Map> getHealthChecksFromD return healthCheckResults; } - private RouterHealthCheckResultVO resetRouterHealthChecksAndConnectivity(final long routerId, boolean connected, String message) { + private void resetRouterHealthChecksAndConnectivity(final long routerId, boolean connected, boolean writable, String message) { routerHealthCheckResultDao.expungeHealthChecks(routerId); - boolean newEntry = false; - RouterHealthCheckResultVO connectivityVO = routerHealthCheckResultDao.getRouterHealthCheckResult(routerId, CONNECTIVITY_TEST, "basic"); + updateRouterHealthCheckResult(routerId, 
CONNECTIVITY_TEST, "basic", connected, connected ? "Successfully connected to router" : message); + updateRouterHealthCheckResult(routerId, FILESYSTEM_WRITABLE_TEST, "basic", writable, writable ? "Successfully written to file system" : message); + } + + private void updateRouterHealthCheckResult(final long routerId, String checkName, String checkType, boolean checkResult, String checkMessage) { + boolean newHealthCheckEntry = false; + RouterHealthCheckResultVO connectivityVO = routerHealthCheckResultDao.getRouterHealthCheckResult(routerId, checkName, checkType); if (connectivityVO == null) { - connectivityVO = new RouterHealthCheckResultVO(routerId, CONNECTIVITY_TEST, "basic"); - newEntry = true; + connectivityVO = new RouterHealthCheckResultVO(routerId, checkName, checkType); + newHealthCheckEntry = true; } - connectivityVO.setCheckResult(connected); + connectivityVO.setCheckResult(checkResult); connectivityVO.setLastUpdateTime(new Date()); - if (StringUtils.isNotEmpty(message)) { - connectivityVO.setCheckDetails(message.getBytes(com.cloud.utils.StringUtils.getPreferredCharset())); + if (StringUtils.isNotEmpty(checkMessage)) { + connectivityVO.setCheckDetails(checkMessage.getBytes(com.cloud.utils.StringUtils.getPreferredCharset())); } - if (newEntry) { + if (newHealthCheckEntry) { routerHealthCheckResultDao.persist(connectivityVO); } else { routerHealthCheckResultDao.update(connectivityVO.getId(), connectivityVO); } - - return routerHealthCheckResultDao.getRouterHealthCheckResult(routerId, CONNECTIVITY_TEST, "basic"); } private RouterHealthCheckResultVO parseHealthCheckVOFromJson(final long routerId, @@ -1596,12 +1606,18 @@ public boolean performRouterHealthChecks(long routerId) { } // Step 2: Update health checks values in database. We do this irrespective of new health check config. - if (answer == null || !answer.getResult()) { + if (answer == null) { success = false; - resetRouterHealthChecksAndConnectivity(routerId, false, - answer == null ? 
"Communication failed " : "Failed to fetch results with details: " + answer.getDetails()); + resetRouterHealthChecksAndConnectivity(routerId, false, false, "Communication failed"); + } else if (!answer.getResult()) { + success = false; + if (StringUtils.isNotBlank(answer.getDetails()) && answer.getDetails().equalsIgnoreCase(READONLY_FILESYSTEM_ERROR)) { + resetRouterHealthChecksAndConnectivity(routerId, true, false, "Failed to write: " + answer.getDetails()); + } else { + resetRouterHealthChecksAndConnectivity(routerId, false, false, "Failed to fetch results with details: " + answer.getDetails()); + } } else { - resetRouterHealthChecksAndConnectivity(routerId, true, "Successfully fetched data"); + resetRouterHealthChecksAndConnectivity(routerId, true, true, "Successfully fetched data"); updateDbHealthChecksFromRouterResponse(routerId, answer.getMonitoringResults()); } diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index f2a3caa60004..f2455020c5a2 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ -3030,12 +3030,15 @@ public HashMap> getGPUStatistics(final Ho } @Override - public HostVO findOneRandomRunningHostByHypervisor(HypervisorType type) { + public HostVO findOneRandomRunningHostByHypervisor(final HypervisorType type, final Long dcId) { final QueryBuilder sc = QueryBuilder.create(HostVO.class); sc.and(sc.entity().getHypervisorType(), Op.EQ, type); sc.and(sc.entity().getType(),Op.EQ, Type.Routing); sc.and(sc.entity().getStatus(), Op.EQ, Status.Up); sc.and(sc.entity().getResourceState(), Op.EQ, ResourceState.Enabled); + if (dcId != null) { + sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId); + } sc.and(sc.entity().getRemoved(), Op.NULL); List hosts = sc.list(); if (CollectionUtils.isEmpty(hosts)) { diff --git 
a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 99d922181bbb..aae32925ca9e 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -29,10 +29,13 @@ import java.util.Map; import java.util.Set; import java.util.TimeZone; +import java.util.UUID; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.function.Predicate; + + import java.util.stream.Collectors; import javax.crypto.Mac; @@ -729,6 +732,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe static final ConfigKey vmPasswordLength = new ConfigKey("Advanced", Integer.class, "vm.password.length", "6", "Specifies the length of a randomly generated password", false); static final ConfigKey sshKeyLength = new ConfigKey("Advanced", Integer.class, "ssh.key.length", "2048", "Specifies custom SSH key length (bit)", true, ConfigKey.Scope.Global); static final ConfigKey humanReadableSizes = new ConfigKey("Advanced", Boolean.class, "display.human.readable.sizes", "true", "Enables outputting human readable byte sizes to logs and usage records.", false, ConfigKey.Scope.Global); + public static final ConfigKey customCsIdentifier = new ConfigKey("Advanced", String.class, "custom.cs.identifier", UUID.randomUUID().toString().split("-")[0].substring(4), "Custom identifier for the cloudstack installation", true, ConfigKey.Scope.Global); @Inject public AccountManager _accountMgr; @@ -1335,6 +1339,11 @@ public Ternary, Integer>, List, Map, List> listStorag StoragePool srcVolumePool = _poolDao.findById(volume.getPoolId()); allPools = getAllStoragePoolCompatileWithVolumeSourceStoragePool(srcVolumePool); - allPools.remove(srcVolumePool); if (vm != null) { suitablePools = findAllSuitableStoragePoolsForVm(volume, vm, srcVolumePool); 
} else { @@ -1540,14 +1548,21 @@ private void abstractDataStoreClustersList(List storagePools, List< */ private List getAllStoragePoolCompatileWithVolumeSourceStoragePool(StoragePool srcVolumePool) { List storagePools = new ArrayList<>(); - List zoneWideStoragePools = _poolDao.findZoneWideStoragePoolsByTags(srcVolumePool.getDataCenterId(), null); - if (CollectionUtils.isNotEmpty(zoneWideStoragePools)) { - storagePools.addAll(zoneWideStoragePools); - } List clusterAndLocalStoragePools = _poolDao.listBy(srcVolumePool.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null); if (CollectionUtils.isNotEmpty(clusterAndLocalStoragePools)) { + clusterAndLocalStoragePools.remove(srcVolumePool); storagePools.addAll(clusterAndLocalStoragePools); } + if (srcVolumePool.getClusterId() == null) { + // Return the pools as the above storage pools list would also contain zone wide pools when srcVolumePool is a zone wide pool + return storagePools; + } + + List zoneWideStoragePools = _poolDao.findZoneWideStoragePoolsByTags(srcVolumePool.getDataCenterId(), null); + if (CollectionUtils.isNotEmpty(zoneWideStoragePools)) { + zoneWideStoragePools.remove(srcVolumePool); + storagePools.addAll(zoneWideStoragePools); + } return storagePools; } @@ -1589,7 +1604,6 @@ private List findAllSuitableStoragePoolsForVm(final VolumeVO volume if (isLocalPoolSameHostAsSourcePool || pool.isShared()) { suitablePools.add(pool); } - } } return suitablePools; @@ -3236,7 +3250,7 @@ public String getConfigComponentName() { @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] {vmPasswordLength, sshKeyLength, humanReadableSizes}; + return new ConfigKey[] {vmPasswordLength, sshKeyLength, humanReadableSizes, customCsIdentifier}; } protected class EventPurgeTask extends ManagedContextRunnable { diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 86db544a196d..d78a10607baa 100644 --- 
a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -110,6 +110,7 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.ImageStoreDetailsUtil; import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageManager; import com.cloud.storage.StorageStats; @@ -931,7 +932,7 @@ protected void runInContext() { for (StoragePoolVO pool : pools) { List volumes = _volsDao.findByPoolId(pool.getId(), null); for (VolumeVO volume : volumes) { - if (volume.getFormat() != ImageFormat.QCOW2 && volume.getFormat() != ImageFormat.VHD && volume.getFormat() != ImageFormat.OVA) { + if (volume.getFormat() != ImageFormat.QCOW2 && volume.getFormat() != ImageFormat.VHD && volume.getFormat() != ImageFormat.OVA && (volume.getFormat() != ImageFormat.RAW || pool.getPoolType() != Storage.StoragePoolType.PowerFlex)) { s_logger.warn("Volume stats not implemented for this format type " + volume.getFormat()); break; } diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 96589f163715..2a3a45c7d154 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -96,6 +96,7 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; @@ -111,7 +112,12 @@ import com.cloud.agent.api.Answer; import 
com.cloud.agent.api.Command; import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.agent.api.GetStorageStatsAnswer; +import com.cloud.agent.api.GetStorageStatsCommand; +import com.cloud.agent.api.GetVolumeStatsAnswer; +import com.cloud.agent.api.GetVolumeStatsCommand; import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.agent.api.VolumeStatsEntry; import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.manager.Commands; @@ -204,6 +210,7 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.dao.VMInstanceDao; +import com.google.common.base.Strings; import static com.cloud.utils.NumbersUtil.toHumanReadableSize; @@ -442,6 +449,12 @@ public Answer sendToPool(StoragePool pool, long[] hostIdsToTryFirst, Command cmd @Override public Answer sendToPool(StoragePool pool, Command cmd) throws StorageUnavailableException { + if (cmd instanceof GetStorageStatsCommand && pool.getPoolType() == StoragePoolType.PowerFlex) { + // Get stats from the pool directly instead of sending cmd to host + // Added support for ScaleIO/PowerFlex pool only + return getStoragePoolStats(pool, (GetStorageStatsCommand) cmd); + } + Answer[] answers = sendToPool(pool, new Commands(cmd)); if (answers == null) { return null; @@ -449,6 +462,52 @@ public Answer sendToPool(StoragePool pool, Command cmd) throws StorageUnavailabl return answers[0]; } + private GetStorageStatsAnswer getStoragePoolStats(StoragePool pool, GetStorageStatsCommand cmd) { + DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); + DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); + GetStorageStatsAnswer answer = null; + + if (storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canProvideStorageStats()) { + PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver; + Pair storageStats = 
primaryStoreDriver.getStorageStats(pool); + if (storageStats == null) { + answer = new GetStorageStatsAnswer((GetStorageStatsCommand) cmd, "Failed to get storage stats for pool: " + pool.getId()); + } else { + answer = new GetStorageStatsAnswer((GetStorageStatsCommand) cmd, storageStats.first(), storageStats.second()); + } + } + + return answer; + } + + @Override + public Answer getVolumeStats(StoragePool pool, Command cmd) { + if (!(cmd instanceof GetVolumeStatsCommand)) { + return null; + } + + DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); + DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); + + if (storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canProvideVolumeStats()) { + PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver; + HashMap statEntry = new HashMap(); + GetVolumeStatsCommand getVolumeStatsCommand = (GetVolumeStatsCommand) cmd; + for (String volumeUuid : getVolumeStatsCommand.getVolumeUuids()) { + Pair volumeStats = primaryStoreDriver.getVolumeStats(pool, volumeUuid); + if (volumeStats == null) { + return new GetVolumeStatsAnswer(getVolumeStatsCommand, "Failed to get stats for volume: " + volumeUuid, null); + } else { + VolumeStatsEntry volumeStatsEntry = new VolumeStatsEntry(volumeUuid, volumeStats.first(), volumeStats.second()); + statEntry.put(volumeUuid, volumeStatsEntry); + } + } + return new GetVolumeStatsAnswer(getVolumeStatsCommand, "", statEntry); + } + + return null; + } + public Long chooseHostForStoragePool(StoragePoolVO poolVO, List avoidHosts, boolean sendToVmResidesOn, Long vmId) { if (sendToVmResidesOn) { if (vmId != null) { @@ -1028,6 +1087,17 @@ public void connectHostToSharedPool(long hostId, long poolId) throws StorageUnav listener.hostConnect(hostId, pool.getId()); } + @Override + public void disconnectHostFromSharedPool(long hostId, long poolId) throws StorageUnavailableException, 
StorageConflictException { + StoragePool pool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); + assert (pool.isShared()) : "Now, did you actually read the name of this method?"; + s_logger.debug("Removing pool " + pool.getName() + " from host " + hostId); + + DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); + HypervisorHostListener listener = hostListeners.get(provider.getName()); + listener.hostDisconnected(hostId, pool.getId()); + } + @Override public BigDecimal getStorageOverProvisioningFactor(Long poolId) { return new BigDecimal(CapacityManager.StorageOverprovisioningFactor.valueIn(poolId)); @@ -1215,6 +1285,7 @@ public void cleanupStorage(boolean recurring) { try { VolumeInfo volumeInfo = volFactory.getVolume(vol.getId()); if (volumeInfo != null) { + volService.ensureVolumeIsExpungeReady(vol.getId()); volService.expungeVolumeAsync(volumeInfo); } else { s_logger.debug("Volume " + vol.getUuid() + " is already destroyed"); @@ -1350,6 +1421,9 @@ private void handleManagedStorage(Volume volume) { if (storagePool != null && storagePool.isManaged()) { VMInstanceVO vmInstanceVO = _vmInstanceDao.findById(instanceId); + if (vmInstanceVO == null) { + return; + } Long lastHostId = vmInstanceVO.getLastHostId(); @@ -1772,6 +1846,38 @@ public StoragePoolVO findLocalStorageOnHost(long hostId) { } } + @Override + @DB + public List findStoragePoolsConnectedToHost(long hostId) { + return _storagePoolHostDao.listByHostId(hostId); + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + if (host == null || pool == null) { + return false; + } + + if (!pool.isManaged()) { + return true; + } + + DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); + DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); + + if (storeDriver instanceof PrimaryDataStoreDriver && 
((PrimaryDataStoreDriver)storeDriver).canHostAccessStoragePool(host, pool)) { + return true; + } + + return false; + } + + @Override + @DB + public Host getHost(long hostId) { + return _hostDao.findById(hostId); + } + @Override public Host updateSecondaryStorage(long secStorageId, String newUrl) { HostVO secHost = _hostDao.findById(secStorageId); @@ -1847,7 +1953,8 @@ public HypervisorType getHypervisorTypeFromFormat(ImageFormat format) { private boolean checkUsagedSpace(StoragePool pool) { // Managed storage does not currently deal with accounting for physically used space (only provisioned space). Just return true if "pool" is managed. - if (pool.isManaged()) { + // StatsCollector gets the storage stats from the ScaleIO/PowerFlex pool directly, limit the usage based on the capacity disable threshold + if (pool.isManaged() && pool.getPoolType() != StoragePoolType.PowerFlex) { return true; } @@ -1965,14 +2072,14 @@ public boolean storagePoolHasEnoughSpace(List volumes, StoragePool pool, } @Override - public boolean storagePoolHasEnoughSpaceForResize(StoragePool pool, long currentSize, long newSiz) { + public boolean storagePoolHasEnoughSpaceForResize(StoragePool pool, long currentSize, long newSize) { if (!checkUsagedSpace(pool)) { return false; } if (s_logger.isDebugEnabled()) { s_logger.debug("Destination pool id: " + pool.getId()); } - long totalAskingSize = newSiz - currentSize; + long totalAskingSize = newSize - currentSize; if (totalAskingSize <= 0) { return true; @@ -2147,6 +2254,10 @@ private HypervisorType getHypervisorType(Volume volume) { } private long getBytesRequiredForTemplate(VMTemplateVO tmpl, StoragePool pool) { + if (tmplFactory.isTemplateMarkedForDirectDownload(tmpl.getId())) { + return tmpl.getSize(); + } + DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); @@ -2161,6 +2272,59 @@ private long 
getBytesRequiredForTemplate(VMTemplateVO tmpl, StoragePool pool) { return tmpl.getSize(); } + @Override + public boolean storagePoolCompatibleWithVolumePool(StoragePool pool, Volume volume) { + if (pool == null || volume == null) { + return false; + } + + if (!pool.isManaged()) { + return true; + } + + if (volume.getPoolId() == null) { + // Volume is not allocated to any pool. Not possible to check compatibility with other pool + return true; + } + + StoragePool volumePool = _storagePoolDao.findById(volume.getPoolId()); + if (volumePool == null) { + // Volume pool doesn't exist. Not possible to check compatibility with other pool + return true; + } + + if (volume.getState() == Volume.State.Ready && volumePool.getPoolType() == Storage.StoragePoolType.PowerFlex) { + if (pool.getPoolType() != Storage.StoragePoolType.PowerFlex) { + return false; + } + + final String STORAGE_POOL_SYSTEM_ID = "powerflex.storagepool.system.id"; + String srcPoolSystemId = null; + StoragePoolDetailVO srcPoolSystemIdDetail = _storagePoolDetailsDao.findDetail(volume.getPoolId(), STORAGE_POOL_SYSTEM_ID); + if (srcPoolSystemIdDetail != null) { + srcPoolSystemId = srcPoolSystemIdDetail.getValue(); + } + + String destPoolSystemId = null; + StoragePoolDetailVO destPoolSystemIdDetail = _storagePoolDetailsDao.findDetail(pool.getId(), STORAGE_POOL_SYSTEM_ID); + if (destPoolSystemIdDetail != null) { + destPoolSystemId = destPoolSystemIdDetail.getValue(); + } + + if (Strings.isNullOrEmpty(srcPoolSystemId) || Strings.isNullOrEmpty(destPoolSystemId)) { + s_logger.debug("Unable to check PowerFlex pool: " + pool.getId() + " compatibilty for the volume: " + volume.getId()); + return false; + } + + if (!srcPoolSystemId.equals(destPoolSystemId)) { + s_logger.debug("PowerFlex pool: " + pool.getId() + " is not compatible for the volume: " + volume.getId()); + return false; + } + } + + return true; + } + @Override public void createCapacityEntry(long poolId) { StoragePoolVO storage = 
_storagePoolDao.findById(poolId); @@ -2685,6 +2849,8 @@ public ConfigKey[] getConfigKeys() { KvmStorageOnlineMigrationWait, KvmAutoConvergence, MaxNumberOfManagedClusteredFileSystems, + STORAGE_POOL_DISK_WAIT, + STORAGE_POOL_CLIENT_TIMEOUT, PRIMARY_STORAGE_DOWNLOAD_WAIT, SecStorageMaxMigrateSessions, MaxDataMigrationWaitTime diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index dfca5cd70d39..fb50b25e81ba 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -70,11 +70,15 @@ import org.apache.cloudstack.framework.jobs.impl.OutcomeImpl; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; import org.apache.cloudstack.jobs.JobInfo; +import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO; +import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; import org.apache.cloudstack.storage.command.AttachAnswer; import org.apache.cloudstack.storage.command.AttachCommand; import org.apache.cloudstack.storage.command.DettachCommand; import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; @@ -129,6 +133,7 @@ import com.cloud.storage.dao.StoragePoolTagsDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; import com.cloud.storage.snapshot.SnapshotApiService; import com.cloud.storage.snapshot.SnapshotManager; import 
com.cloud.template.TemplateManager; @@ -211,10 +216,14 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic @Inject private VolumeDao _volsDao; @Inject + private VolumeDetailsDao _volsDetailsDao; + @Inject private HostDao _hostDao; @Inject private SnapshotDao _snapshotDao; @Inject + private SnapshotDataStoreDao _snapshotDataStoreDao; + @Inject private ServiceOfferingDetailsDao _serviceOfferingDetailsDao; @Inject private UserVmDao _userVmDao; @@ -229,6 +238,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic @Inject private DiskOfferingDao _diskOfferingDao; @Inject + private DiskOfferingDetailsDao _diskOfferingDetailsDao; + @Inject private AccountDao _accountDao; @Inject private DataCenterDao _dcDao; @@ -590,6 +601,7 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept parentVolume = null; } + Map details = new HashMap<>(); if (cmd.getDiskOfferingId() != null) { // create a new volume diskOfferingId = cmd.getDiskOfferingId(); @@ -631,6 +643,15 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept size = diskOffering.getDiskSize(); } + DiskOfferingDetailVO bandwidthLimitDetail = _diskOfferingDetailsDao.findDetail(diskOfferingId, Volume.BANDWIDTH_LIMIT_IN_MBPS); + if (bandwidthLimitDetail != null) { + details.put(Volume.BANDWIDTH_LIMIT_IN_MBPS, bandwidthLimitDetail.getValue()); + } + DiskOfferingDetailVO iopsLimitDetail = _diskOfferingDetailsDao.findDetail(diskOfferingId, Volume.IOPS_LIMIT); + if (iopsLimitDetail != null) { + details.put(Volume.IOPS_LIMIT, iopsLimitDetail.getValue()); + } + Boolean isCustomizedIops = diskOffering.isCustomizedIops(); if (isCustomizedIops != null) { @@ -658,6 +679,9 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept minIops = diskOffering.getMinIops(); maxIops = diskOffering.getMaxIops(); } + } else { + minIops = diskOffering.getMinIops(); + maxIops = diskOffering.getMaxIops(); } 
if (!validateVolumeSizeRange(size)) {// convert size from mb to gb @@ -676,6 +700,15 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept if (snapshotCheck.getState() != Snapshot.State.BackedUp) { throw new InvalidParameterValueException("Snapshot id=" + snapshotId + " is not in " + Snapshot.State.BackedUp + " state yet and can't be used for volume creation"); } + + SnapshotDataStoreVO snapshotStore = _snapshotDataStoreDao.findBySnapshot(snapshotId, DataStoreRole.Primary); + if (snapshotStore != null) { + StoragePoolVO storagePoolVO = _storagePoolDao.findById(snapshotStore.getDataStoreId()); + if (storagePoolVO.getPoolType() == Storage.StoragePoolType.PowerFlex) { + throw new InvalidParameterValueException("Create volume from snapshot is not supported for PowerFlex volume snapshots"); + } + } + parentVolume = _volsDao.findByIdIncludingRemoved(snapshotCheck.getVolumeId()); if (zoneId == null) { @@ -747,11 +780,11 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept String userSpecifiedName = getVolumeNameFromCommand(cmd); return commitVolume(cmd, caller, owner, displayVolume, zoneId, diskOfferingId, provisioningType, size, minIops, maxIops, parentVolume, userSpecifiedName, - _uuidMgr.generateUuid(Volume.class, cmd.getCustomId())); + _uuidMgr.generateUuid(Volume.class, cmd.getCustomId()), details); } private VolumeVO commitVolume(final CreateVolumeCmd cmd, final Account caller, final Account owner, final Boolean displayVolume, final Long zoneId, final Long diskOfferingId, - final Storage.ProvisioningType provisioningType, final Long size, final Long minIops, final Long maxIops, final VolumeVO parentVolume, final String userSpecifiedName, final String uuid) { + final Storage.ProvisioningType provisioningType, final Long size, final Long minIops, final Long maxIops, final VolumeVO parentVolume, final String userSpecifiedName, final String uuid, final Map details) { return Transaction.execute(new 
TransactionCallback() { @Override public VolumeVO doInTransaction(TransactionStatus status) { @@ -783,6 +816,19 @@ public VolumeVO doInTransaction(TransactionStatus status) { Volume.class.getName(), volume.getUuid(), displayVolume); } + if (volume != null && details != null) { + List volumeDetailsVO = new ArrayList(); + if (details.containsKey(Volume.BANDWIDTH_LIMIT_IN_MBPS)) { + volumeDetailsVO.add(new VolumeDetailVO(volume.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS, details.get(Volume.BANDWIDTH_LIMIT_IN_MBPS), false)); + } + if (details.containsKey(Volume.IOPS_LIMIT)) { + volumeDetailsVO.add(new VolumeDetailVO(volume.getId(), Volume.IOPS_LIMIT, details.get(Volume.IOPS_LIMIT), false)); + } + if (!volumeDetailsVO.isEmpty()) { + _volsDetailsDao.saveDetails(volumeDetailsVO); + } + } + CallContext.current().setEventDetails("Volume Id: " + volume.getUuid()); // Increment resource count during allocation; if actual creation fails, @@ -962,7 +1008,7 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep newMaxIops = volume.getMaxIops(); } - validateIops(newMinIops, newMaxIops); + validateIops(newMinIops, newMaxIops, volume.getPoolType()); } else { if (newDiskOffering.getRemoved() != null) { throw new InvalidParameterValueException("Requested disk offering has been removed."); @@ -1005,7 +1051,7 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep newMinIops = cmd.getMinIops() != null ? cmd.getMinIops() : volume.getMinIops(); newMaxIops = cmd.getMaxIops() != null ? 
cmd.getMaxIops() : volume.getMaxIops(); - validateIops(newMinIops, newMaxIops); + validateIops(newMinIops, newMaxIops, volume.getPoolType()); } else { newMinIops = newDiskOffering.getMinIops(); newMaxIops = newDiskOffering.getMaxIops(); @@ -1155,7 +1201,12 @@ private void checkIfVolumeIsRootAndVmIsRunning(Long newSize, VolumeVO volume, VM } } - private void validateIops(Long minIops, Long maxIops) { + private void validateIops(Long minIops, Long maxIops, Storage.StoragePoolType poolType) { + if (poolType == Storage.StoragePoolType.PowerFlex) { + // PowerFlex takes iopsLimit as input, skip minIops validation + minIops = (maxIops != null) ? Long.valueOf(0) : null; + } + if ((minIops == null && maxIops != null) || (minIops != null && maxIops == null)) { throw new InvalidParameterValueException("Either 'miniops' and 'maxiops' must both be provided or neither must be provided."); } @@ -1266,7 +1317,9 @@ private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long n volume.setDiskOfferingId(newDiskOfferingId); } - if (currentSize != newSize) { + // Update size if volume has same size as before, else it is already updated + final VolumeVO volumeNow = _volsDao.findById(volumeId); + if (currentSize == volumeNow.getSize() && currentSize != newSize) { volume.setSize(newSize); } @@ -2259,6 +2312,10 @@ public Volume migrateVolume(MigrateVolumeCmd cmd) { destPool = _volumeMgr.findChildDataStoreInDataStoreCluster(dc, destPoolPod, destPool.getClusterId(), null, null, destPool.getId()); } + if (!storageMgr.storagePoolCompatibleWithVolumePool(destPool, (Volume) vol)) { + throw new CloudRuntimeException("Storage pool " + destPool.getName() + " is not suitable to migrate volume " + vol.getName()); + } + if (!storageMgr.storagePoolHasEnoughSpace(Collections.singletonList(vol), destPool)) { throw new CloudRuntimeException("Storage pool " + destPool.getName() + " does not have enough space to migrate volume " + vol.getName()); } @@ -3145,6 +3202,7 @@ private VolumeVO 
sendAttachVolumeCommand(UserVmVO vm, VolumeVO volumeToAttach, L details.put(DiskTO.IQN, volumeToAttach.get_iScsiName()); details.put(DiskTO.MOUNT_POINT, volumeToAttach.get_iScsiName()); details.put(DiskTO.PROTOCOL_TYPE, (volumeToAttach.getPoolType() != null) ? volumeToAttach.getPoolType().toString() : null); + details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(volumeToAttachStoragePool.getId()))); if (chapInfo != null) { details.put(DiskTO.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername()); @@ -3210,6 +3268,11 @@ private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volumeToAttach, L volumeToAttach.setPath(volumeToAttach.get_iScsiName()); _volsDao.update(volumeToAttach.getId(), volumeToAttach); } + + if (host != null && volumeToAttachStoragePool.getPoolType() == Storage.StoragePoolType.PowerFlex) { + // Unmap the volume on PowerFlex/ScaleIO pool for stopped VM + volService.revokeAccess(volFactory.getVolume(volumeToAttach.getId()), host, dataStore); + } } // insert record for disk I/O statistics diff --git a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java index 0b149189f419..eca96efff3e1 100644 --- a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java +++ b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java @@ -20,14 +20,13 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; - import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; 
+import org.apache.log4j.Logger; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; @@ -44,6 +43,7 @@ import com.cloud.storage.ScopeType; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageManagerImpl; +import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; public class StoragePoolMonitor implements Listener { @@ -137,7 +137,49 @@ public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) @Override public synchronized boolean processDisconnect(long agentId, Status state) { - return true; + Host host = _storageManager.getHost(agentId); + if (host == null) { + s_logger.warn("Agent: " + agentId + " not found, not disconnecting pools"); + return false; + } + + if (host.getType() != Host.Type.Routing) { + return false; + } + + List storagePoolHosts = _storageManager.findStoragePoolsConnectedToHost(host.getId()); + if (storagePoolHosts == null) { + if (s_logger.isTraceEnabled()) { + s_logger.trace("No pools to disconnect for host: " + host.getId()); + } + return true; + } + + boolean disconnectResult = true; + for (StoragePoolHostVO storagePoolHost : storagePoolHosts) { + StoragePoolVO pool = _poolDao.findById(storagePoolHost.getPoolId()); + if (pool == null) { + continue; + } + + if (!pool.isShared()) { + continue; + } + + // Handle only PowerFlex pool for now, not to impact other pools behavior + if (pool.getPoolType() != StoragePoolType.PowerFlex) { + continue; + } + + try { + _storageManager.disconnectHostFromSharedPool(host.getId(), pool.getId()); + } catch (Exception e) { + s_logger.error("Unable to disconnect host " + host.getId() + " from storage pool id " + pool.getId() + " due to " + e.toString()); + disconnectResult = false; + } + } + + return disconnectResult; } @Override diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 
f46367835ee0..88e87ec819ce 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -1288,7 +1288,7 @@ private DataStoreRole getDataStoreRole(Snapshot snapshot, SnapshotDataStoreDao s } StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId); - if (storagePoolVO.getPoolType() == StoragePoolType.RBD && !BackupSnapshotAfterTakingSnapshot.value()) { + if ((storagePoolVO.getPoolType() == StoragePoolType.RBD || storagePoolVO.getPoolType() == StoragePoolType.PowerFlex) && !BackupSnapshotAfterTakingSnapshot.value()) { return DataStoreRole.Primary; } diff --git a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java index c080ffd2f25c..a4d134032e85 100644 --- a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java @@ -16,6 +16,7 @@ // under the License. 
package com.cloud.template; +import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.LinkedList; @@ -142,12 +143,23 @@ public String getName() { * Validate on random running KVM host that URL is reachable * @param url url */ - private Long performDirectDownloadUrlValidation(final String url) { - HostVO host = resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM); + private Long performDirectDownloadUrlValidation(final String format, final String url, final List zoneIds) { + HostVO host = null; + if (zoneIds != null && !zoneIds.isEmpty()) { + for (Long zoneId : zoneIds) { + host = resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, zoneId); + if (host != null) { + break; + } + } + } else { + host = resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, null); + } + if (host == null) { throw new CloudRuntimeException("Couldn't find a host to validate URL " + url); } - CheckUrlCommand cmd = new CheckUrlCommand(url); + CheckUrlCommand cmd = new CheckUrlCommand(format, url); s_logger.debug("Performing URL " + url + " validation on host " + host.getId()); Answer answer = _agentMgr.easySend(host.getId(), cmd); if (answer == null || !answer.getResult()) { @@ -164,7 +176,12 @@ public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationExce UriUtils.validateUrl(ImageFormat.ISO.getFileExtension(), url); if (cmd.isDirectDownload()) { DigestHelper.validateChecksumString(cmd.getChecksum()); - Long templateSize = performDirectDownloadUrlValidation(url); + List zoneIds = null; + if (cmd.getZoneId() != null) { + zoneIds = new ArrayList<>(); + zoneIds.add(cmd.getZoneId()); + } + Long templateSize = performDirectDownloadUrlValidation(ImageFormat.ISO.getFileExtension(), url, zoneIds); profile.setSize(templateSize); } profile.setUrl(url); @@ -189,7 +206,7 @@ public TemplateProfile prepare(RegisterTemplateCmd cmd) throws 
ResourceAllocatio UriUtils.validateUrl(cmd.getFormat(), url); if (cmd.isDirectDownload()) { DigestHelper.validateChecksumString(cmd.getChecksum()); - Long templateSize = performDirectDownloadUrlValidation(url); + Long templateSize = performDirectDownloadUrlValidation(cmd.getFormat(), url, cmd.getZoneIds()); profile.setSize(templateSize); } profile.setUrl(url); @@ -583,6 +600,14 @@ public boolean delete(TemplateProfile profile) { // find all eligible image stores for this template List iStores = templateMgr.getImageStoreByTemplate(template.getId(), null); if (iStores == null || iStores.size() == 0) { + // remove any references from template_zone_ref + List templateZones = templateZoneDao.listByTemplateId(template.getId()); + if (templateZones != null) { + for (VMTemplateZoneVO templateZone : templateZones) { + templateZoneDao.remove(templateZone.getId()); + } + } + // Mark template as Inactive. template.setState(VirtualMachineTemplate.State.Inactive); _tmpltDao.update(template.getId(), template); @@ -606,7 +631,6 @@ public boolean delete(TemplateProfile profile) { } return success; - } @Override diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 8a363c296b70..98ecd6d5a3a1 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -512,7 +512,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @Inject private UserVmDeployAsIsDetailsDao userVmDeployAsIsDetailsDao; @Inject - private StorageManager storageMgr; + private StorageManager _storageManager; private ScheduledExecutorService _executor = null; private ScheduledExecutorService _vmIpFetchExecutor = null; @@ -2019,14 +2019,21 @@ public HashMap getVolumeStatistics(long clusterId, Str if (!CollectionUtils.isEmpty(volumeLocators)) { GetVolumeStatsCommand cmd = new GetVolumeStatsCommand(poolType, poolUuid, volumeLocators); + 
Answer answer = null; - if (timeout > 0) { - cmd.setWait(timeout/1000); - } + if (poolType == StoragePoolType.PowerFlex) { + // Get volume stats from the pool directly instead of sending cmd to host + // Added support for ScaleIO/PowerFlex pool only + answer = _storageManager.getVolumeStats(storagePool, cmd); + } else { + if (timeout > 0) { + cmd.setWait(timeout/1000); + } - Answer answer = _agentMgr.easySend(neighbor.getId(), cmd); + answer = _agentMgr.easySend(neighbor.getId(), cmd); + } - if (answer instanceof GetVolumeStatsAnswer){ + if (answer != null && answer instanceof GetVolumeStatsAnswer){ GetVolumeStatsAnswer volstats = (GetVolumeStatsAnswer)answer; if (volstats.getVolumeStats() != null) { volumeStatsByUuid.putAll(volstats.getVolumeStats()); @@ -6295,7 +6302,7 @@ public VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinatio HypervisorType hypervisorType = _volsDao.getHypervisorType(volume.getId()); if (hypervisorType.equals(HypervisorType.VMware)) { try { - boolean isStoragePoolStoragepolicyComplaince = storageMgr.isStoragePoolComplaintWithStoragePolicy(Arrays.asList(volume), pool); + boolean isStoragePoolStoragepolicyComplaince = _storageManager.isStoragePoolComplaintWithStoragePolicy(Arrays.asList(volume), pool); if (!isStoragePoolStoragepolicyComplaince) { throw new CloudRuntimeException(String.format("Storage pool %s is not storage policy compliance with the volume %s", pool.getUuid(), volume.getUuid())); } diff --git a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java index a117af2bbab2..4a7840eb784f 100644 --- a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java @@ -50,6 +50,8 @@ import org.apache.cloudstack.framework.jobs.impl.OutcomeImpl; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; import org.apache.cloudstack.jobs.JobInfo; 
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.utils.identity.ManagementServerNode; @@ -76,6 +78,7 @@ import com.cloud.storage.GuestOSVO; import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Volume; import com.cloud.storage.Volume.Type; @@ -109,12 +112,12 @@ import com.cloud.vm.UserVmDetailVO; import com.cloud.vm.UserVmManager; import com.cloud.vm.UserVmVO; -import com.cloud.vm.VmDetailConstants; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.VmDetailConstants; import com.cloud.vm.VmWork; import com.cloud.vm.VmWorkConstants; import com.cloud.vm.VmWorkJobHandler; @@ -166,6 +169,8 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme protected UserVmDetailsDao _userVmDetailsDao; @Inject protected VMSnapshotDetailsDao _vmSnapshotDetailsDao; + @Inject + PrimaryDataStoreDao _storagePoolDao; VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this); @@ -358,9 +363,33 @@ public VMSnapshot allocVMSnapshot(Long vmId, String vsDisplayName, String vsDesc throw new InvalidParameterValueException("Can not snapshot memory when VM is not in Running state"); } + List rootVolumes = _volumeDao.findReadyRootVolumesByInstance(userVmVo.getId()); + if (rootVolumes == null || rootVolumes.isEmpty()) { + throw new CloudRuntimeException("Unable to find root volume for the user vm:" + userVmVo.getUuid()); + } + + VolumeVO rootVolume = rootVolumes.get(0); + StoragePoolVO rootVolumePool = _storagePoolDao.findById(rootVolume.getPoolId()); + if (rootVolumePool == null) { + throw new 
CloudRuntimeException("Unable to find root volume storage pool for the user vm:" + userVmVo.getUuid()); + } + // for KVM, only allow snapshot with memory when VM is in running state - if (userVmVo.getHypervisorType() == HypervisorType.KVM && userVmVo.getState() == State.Running && !snapshotMemory) { - throw new InvalidParameterValueException("KVM VM does not allow to take a disk-only snapshot when VM is in running state"); + if (userVmVo.getHypervisorType() == HypervisorType.KVM) { + if (rootVolumePool.getPoolType() != Storage.StoragePoolType.PowerFlex) { + if (userVmVo.getState() == State.Running && !snapshotMemory) { + throw new InvalidParameterValueException("KVM VM does not allow to take a disk-only snapshot when VM is in running state"); + } + } else { + if (snapshotMemory) { + throw new InvalidParameterValueException("Can not snapshot memory for PowerFlex storage pool"); + } + + // All volumes should be on the same PowerFlex storage pool for VM Snapshot + if (!isVolumesOfUserVmOnSameStoragePool(userVmVo.getId(), rootVolumePool.getId())) { + throw new InvalidParameterValueException("All volumes of the VM: " + userVmVo.getUuid() + " should be on the same PowerFlex storage pool"); + } + } } // check access @@ -379,8 +408,14 @@ public VMSnapshot allocVMSnapshot(Long vmId, String vsDisplayName, String vsDesc if (activeSnapshots.size() > 0) { throw new CloudRuntimeException("There is other active volume snapshot tasks on the instance to which the volume is attached, please try again later."); } - if (userVmVo.getHypervisorType() == HypervisorType.KVM && volume.getFormat() != ImageFormat.QCOW2) { - throw new CloudRuntimeException("We only support create vm snapshots from vm with QCOW2 image"); + if (userVmVo.getHypervisorType() == HypervisorType.KVM) { + if (volume.getPoolType() != Storage.StoragePoolType.PowerFlex) { + if (volume.getFormat() != ImageFormat.QCOW2) { + throw new CloudRuntimeException("We only support create vm snapshots from vm with QCOW2 image"); + 
} + } else if (volume.getFormat() != ImageFormat.RAW) { + throw new CloudRuntimeException("Only support create vm snapshots for volumes on PowerFlex with RAW image"); + } } } @@ -393,6 +428,10 @@ public VMSnapshot allocVMSnapshot(Long vmId, String vsDisplayName, String vsDesc if (snapshotMemory && userVmVo.getState() == VirtualMachine.State.Running) vmSnapshotType = VMSnapshot.Type.DiskAndMemory; + if (rootVolumePool.getPoolType() == Storage.StoragePoolType.PowerFlex) { + vmSnapshotType = VMSnapshot.Type.Disk; + } + try { return createAndPersistVMSnapshot(userVmVo, vsDescription, vmSnapshotName, vsDisplayName, vmSnapshotType); } catch (Exception e) { @@ -402,6 +441,21 @@ public VMSnapshot allocVMSnapshot(Long vmId, String vsDisplayName, String vsDesc return null; } + private boolean isVolumesOfUserVmOnSameStoragePool(Long userVmId, Long poolId) { + List volumesOfVm = _volumeDao.findCreatedByInstance(userVmId); + if (volumesOfVm == null || volumesOfVm.isEmpty()) { + throw new CloudRuntimeException("Unable to find volumes for the user vm:" + userVmId); + } + + for (VolumeVO volume : volumesOfVm) { + if (volume == null || volume.getPoolId() != poolId) { + return false; + } + } + + return true; + } + /** * Create, persist and return vm snapshot for userVmVo with given parameters. 
* Persistence and support for custom service offerings are done on the same transaction diff --git a/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java b/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java index dcbc9656448c..8efe8654026e 100644 --- a/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java @@ -20,32 +20,6 @@ import static com.cloud.storage.Storage.ImageFormat; -import com.cloud.agent.AgentManager; -import com.cloud.agent.api.Answer; -import com.cloud.dc.DataCenterVO; -import com.cloud.dc.dao.DataCenterDao; -import com.cloud.event.ActionEventUtils; -import com.cloud.event.EventTypes; -import com.cloud.event.EventVO; -import com.cloud.exception.AgentUnavailableException; -import com.cloud.exception.OperationTimedoutException; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.Status; -import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.DataStoreRole; -import com.cloud.storage.ScopeType; -import com.cloud.storage.Storage; -import com.cloud.storage.VMTemplateStoragePoolVO; -import com.cloud.storage.VMTemplateStorageResourceAssoc; -import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.dao.VMTemplateDao; -import com.cloud.storage.dao.VMTemplatePoolDao; -import com.cloud.utils.component.ManagerBase; -import com.cloud.utils.concurrency.NamedThreadFactory; -import com.cloud.utils.exception.CloudRuntimeException; - import java.net.URI; import java.net.URISyntaxException; import java.security.cert.Certificate; @@ -79,6 +53,9 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.managed.context.ManagedContextRunnable; @@ -86,12 +63,39 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.log4j.Logger; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.event.ActionEventUtils; +import com.cloud.event.EventTypes; +import com.cloud.event.EventVO; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.utils.component.ManagerBase; +import 
com.cloud.utils.concurrency.NamedThreadFactory; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.security.CertificateHelper; import sun.security.x509.X509CertImpl; @@ -126,6 +130,10 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown private DataCenterDao dataCenterDao; @Inject private ConfigurationDao configDao; + @Inject + private TemplateDataFactory tmplFactory; + @Inject + private VolumeService volService; protected ScheduledExecutorService executorService; @@ -259,7 +267,14 @@ public void downloadTemplate(long templateId, long poolId, long hostId) { DownloadProtocol protocol = getProtocolFromUrl(url); DirectDownloadCommand cmd = getDirectDownloadCommandFromProtocol(protocol, url, templateId, to, checksum, headers); cmd.setTemplateSize(template.getSize()); - cmd.setIso(template.getFormat() == ImageFormat.ISO); + cmd.setFormat(template.getFormat()); + + if (tmplFactory.getTemplate(templateId, store) != null) { + cmd.setDestData((TemplateObjectTO) tmplFactory.getTemplate(templateId, store).getTO()); + } + + int cmdTimeOut = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); + cmd.setWait(cmdTimeOut); Answer answer = sendDirectDownloadCommand(cmd, template, poolId, host); @@ -277,6 +292,16 @@ public void downloadTemplate(long templateId, long poolId, long hostId) { sPoolRef.setLocalDownloadPath(ans.getInstallPath()); sPoolRef.setInstallPath(ans.getInstallPath()); vmTemplatePoolDao.persist(sPoolRef); + } else { + // For managed storage, update after template downloaded and copied to the disk + DirectDownloadAnswer ans = (DirectDownloadAnswer) answer; + sPoolRef.setDownloadPercent(100); + sPoolRef.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + sPoolRef.setState(ObjectInDataStoreStateMachine.State.Ready); + sPoolRef.setTemplateSize(ans.getTemplateSize()); + sPoolRef.setLocalDownloadPath(ans.getInstallPath()); + sPoolRef.setInstallPath(ans.getInstallPath()); + 
vmTemplatePoolDao.update(sPoolRef.getId(), sPoolRef); } } @@ -294,20 +319,39 @@ private Answer sendDirectDownloadCommand(DirectDownloadCommand cmd, VMTemplateVO int retry = 3; StoragePoolVO storagePoolVO = primaryDataStoreDao.findById(poolId); + // TODO: Move the host retry attempts to upper layer Long[] hostsToRetry = getHostsToRetryOn(host, storagePoolVO); int hostIndex = 0; Answer answer = null; Long hostToSendDownloadCmd = hostsToRetry[hostIndex]; boolean continueRetrying = true; while (!downloaded && retry > 0 && continueRetrying) { - s_logger.debug("Sending Direct download command to host " + hostToSendDownloadCmd); - answer = agentManager.easySend(hostToSendDownloadCmd, cmd); - if (answer != null) { - DirectDownloadAnswer ans = (DirectDownloadAnswer)answer; - downloaded = answer.getResult(); - continueRetrying = ans.isRetryOnOtherHosts(); + PrimaryDataStore primaryDataStore = null; + TemplateInfo templateOnPrimary = null; + + try { + if (hostToSendDownloadCmd != host.getId() && storagePoolVO.isManaged()) { + primaryDataStore = (PrimaryDataStore) dataStoreManager.getPrimaryDataStore(poolId); + templateOnPrimary = primaryDataStore.getTemplate(template.getId(), null); + if (templateOnPrimary != null) { + volService.grantAccess(templateOnPrimary, host, primaryDataStore); + } + } + + s_logger.debug("Sending Direct download command to host " + hostToSendDownloadCmd); + answer = agentManager.easySend(hostToSendDownloadCmd, cmd); + if (answer != null) { + DirectDownloadAnswer ans = (DirectDownloadAnswer)answer; + downloaded = answer.getResult(); + continueRetrying = ans.isRetryOnOtherHosts(); + } + hostToSendDownloadCmd = hostsToRetry[(hostIndex + 1) % hostsToRetry.length]; + } finally { + if (templateOnPrimary != null) { + volService.revokeAccess(templateOnPrimary, host, primaryDataStore); + } } - hostToSendDownloadCmd = hostsToRetry[(hostIndex + 1) % hostsToRetry.length]; + retry --; } if (!downloaded) { @@ -488,6 +532,39 @@ public boolean uploadCertificate(long 
certificateId, long hostId) { return true; } + @Override + public boolean syncCertificatesToHost(long hostId, long zoneId) { + List zoneCertificates = directDownloadCertificateDao.listByZone(zoneId); + if (CollectionUtils.isEmpty(zoneCertificates)) { + if (s_logger.isTraceEnabled()) { + s_logger.trace("No certificates to sync on host: " + hostId); + } + return true; + } + + boolean syncCertificatesResult = true; + int certificatesSyncCount = 0; + s_logger.debug("Syncing certificates on host: " + hostId); + for (DirectDownloadCertificateVO certificateVO : zoneCertificates) { + DirectDownloadCertificateHostMapVO mapping = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificateVO.getId(), hostId); + if (mapping == null) { + s_logger.debug("Syncing certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") on host: " + hostId + ", uploading it"); + if (!uploadCertificate(certificateVO.getId(), hostId)) { + String msg = "Could not sync certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") on host: " + hostId + ", upload failed"; + s_logger.error(msg); + syncCertificatesResult = false; + } else { + certificatesSyncCount++; + } + } else { + s_logger.debug("Certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") already synced on host: " + hostId); + } + } + + s_logger.debug("Synced " + certificatesSyncCount + " out of " + zoneCertificates.size() + " certificates on host: " + hostId); + return syncCertificatesResult; + } + @Override public boolean revokeCertificateAlias(String certificateAlias, String hypervisor, Long zoneId, Long hostId) { HypervisorType hypervisorType = HypervisorType.getType(hypervisor); diff --git a/server/src/test/java/com/cloud/network/element/ConfigDriveNetworkElementTest.java b/server/src/test/java/com/cloud/network/element/ConfigDriveNetworkElementTest.java index 5d206f4c16d6..ab3489f36b83 100644 --- 
a/server/src/test/java/com/cloud/network/element/ConfigDriveNetworkElementTest.java +++ b/server/src/test/java/com/cloud/network/element/ConfigDriveNetworkElementTest.java @@ -60,6 +60,7 @@ import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.HandleConfigDriveIsoAnswer; import com.cloud.agent.api.HandleConfigDriveIsoCommand; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; @@ -228,7 +229,7 @@ public void testExpunge() throws NoTransitionException, NoSuchFieldException, Il when(virtualMachine.getState()).thenReturn(VirtualMachine.State.Stopped); when(_vmInstanceDao.updateState(VirtualMachine.State.Stopped, VirtualMachine.Event.ExpungeOperation, VirtualMachine.State.Expunging, virtualMachine, null)).thenReturn(true); - final Answer answer = mock(Answer.class); + final HandleConfigDriveIsoAnswer answer = mock(HandleConfigDriveIsoAnswer.class); when(agentManager.easySend(anyLong(), any(HandleConfigDriveIsoCommand.class))).thenReturn(answer); when(answer.getResult()).thenReturn(true); @@ -267,10 +268,11 @@ public void testAddPasswordAndUserData() throws Exception { Method method = ReflectionUtils.getMethods(ConfigDriveBuilder.class, ReflectionUtils.withName("buildConfigDrive")).iterator().next(); PowerMockito.when(ConfigDriveBuilder.class, method).withArguments(Mockito.anyListOf(String[].class), Mockito.anyString(), Mockito.anyString()).thenReturn("content"); - final Answer answer = mock(Answer.class); + final HandleConfigDriveIsoAnswer answer = mock(HandleConfigDriveIsoAnswer.class); final UserVmDetailVO userVmDetailVO = mock(UserVmDetailVO.class); when(agentManager.easySend(anyLong(), any(HandleConfigDriveIsoCommand.class))).thenReturn(answer); when(answer.getResult()).thenReturn(true); + when(answer.getConfigDriveLocation()).thenReturn(NetworkElement.Location.PRIMARY); when(network.getTrafficType()).thenReturn(Networks.TrafficType.Guest); 
when(virtualMachine.getState()).thenReturn(VirtualMachine.State.Stopped); when(virtualMachine.getUuid()).thenReturn("vm-uuid"); diff --git a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java index 5fc9a4dcfdb8..4e1daa87c346 100755 --- a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java +++ b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java @@ -617,7 +617,7 @@ public HashMap> getGPUStatistics(final Ho } @Override - public HostVO findOneRandomRunningHostByHypervisor(HypervisorType type) { + public HostVO findOneRandomRunningHostByHypervisor(HypervisorType type, Long dcId) { // TODO Auto-generated method stub return null; } diff --git a/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java b/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java index 2cf763fe93c8..91cdbb5863ca 100644 --- a/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java +++ b/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java @@ -23,9 +23,9 @@ import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.util.ArrayList; import java.util.Arrays; @@ -33,6 +33,12 @@ import java.util.List; import java.util.Map; +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.acl.SecurityChecker.AccessType; +import org.apache.cloudstack.api.ResourceDetail; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.junit.Before; import org.junit.Test; 
import org.mockito.ArgumentCaptor; @@ -42,12 +48,6 @@ import org.mockito.MockitoAnnotations; import org.mockito.Spy; -import org.apache.cloudstack.acl.ControlledEntity; -import org.apache.cloudstack.acl.SecurityChecker.AccessType; -import org.apache.cloudstack.api.ResourceDetail; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; - import com.cloud.agent.AgentManager; import com.cloud.exception.AgentUnavailableException; import com.cloud.exception.ConcurrentOperationException; @@ -183,6 +183,7 @@ public void setup() { _vmSnapshotMgr._userVMDao = _userVMDao; _vmSnapshotMgr._vmSnapshotDao = _vmSnapshotDao; _vmSnapshotMgr._volumeDao = _volumeDao; + _vmSnapshotMgr._storagePoolDao = _storagePoolDao; _vmSnapshotMgr._accountMgr = _accountMgr; _vmSnapshotMgr._snapshotDao = _snapshotDao; _vmSnapshotMgr._guestOSDao = _guestOSDao; @@ -208,6 +209,8 @@ public void setup() { mockVolumeList.add(volumeMock); when(volumeMock.getInstanceId()).thenReturn(TEST_VM_ID); when(_volumeDao.findByInstance(anyLong())).thenReturn(mockVolumeList); + when(_volumeDao.findReadyRootVolumesByInstance(anyLong())).thenReturn(mockVolumeList); + when(_storagePoolDao.findById(anyLong())).thenReturn(mock(StoragePoolVO.class)); when(vmMock.getId()).thenReturn(TEST_VM_ID); when(vmMock.getServiceOfferingId()).thenReturn(SERVICE_OFFERING_ID); @@ -299,7 +302,6 @@ public void testAllocVMSnapshotF5() throws ResourceAllocationException { public void testCreateVMSnapshot() throws AgentUnavailableException, OperationTimedoutException, ResourceAllocationException, NoTransitionException { when(vmMock.getState()).thenReturn(State.Running); _vmSnapshotMgr.allocVMSnapshot(TEST_VM_ID, "", "", true); - } @Test diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java 
b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index 636a336d1ff5..0db53018b8a2 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -101,6 +101,7 @@ import com.cloud.agent.api.DeleteSnapshotsDirCommand; import com.cloud.agent.api.GetStorageStatsAnswer; import com.cloud.agent.api.GetStorageStatsCommand; +import com.cloud.agent.api.HandleConfigDriveIsoAnswer; import com.cloud.agent.api.HandleConfigDriveIsoCommand; import com.cloud.agent.api.PingCommand; import com.cloud.agent.api.PingStorageCommand; @@ -139,6 +140,7 @@ import com.cloud.host.Host; import com.cloud.host.Host.Type; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.network.element.NetworkElement; import com.cloud.resource.ServerResourceBase; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Storage; @@ -320,7 +322,7 @@ public Answer executeRequest(Command cmd) { private Answer execute(HandleConfigDriveIsoCommand cmd) { if (cmd.isCreate()) { if (cmd.getIsoData() == null) { - return new Answer(cmd, false, "Invalid config drive ISO data"); + return new HandleConfigDriveIsoAnswer(cmd, "Invalid config drive ISO data"); } String nfsMountPoint = getRootDir(cmd.getDestStore().getUrl(), _nfsVersion); File isoFile = new File(nfsMountPoint, cmd.getIsoFile()); @@ -333,7 +335,7 @@ private Answer execute(HandleConfigDriveIsoCommand cmd) { File tmpIsoFile = ConfigDriveBuilder.base64StringToFile(cmd.getIsoData(), tempDir.toAbsolutePath().toString(), cmd.getIsoFile()); copyLocalToNfs(tmpIsoFile, new File(cmd.getIsoFile()), cmd.getDestStore()); } catch (IOException | ConfigurationException e) { - return new Answer(cmd, false, "Failed due to exception: " + e.getMessage()); + return new 
HandleConfigDriveIsoAnswer(cmd, "Failed due to exception: " + e.getMessage()); } finally { try { if (tempDir != null) { @@ -343,7 +345,7 @@ private Answer execute(HandleConfigDriveIsoCommand cmd) { s_logger.warn("Failed to delete ConfigDrive temporary directory: " + tempDir.toString(), ioe); } } - return new Answer(cmd, true, "Successfully saved config drive at secondary storage"); + return new HandleConfigDriveIsoAnswer(cmd, NetworkElement.Location.SECONDARY, "Successfully saved config drive at secondary storage"); } else { DataStoreTO dstore = cmd.getDestStore(); if (dstore instanceof NfsTO) { @@ -354,11 +356,11 @@ private Answer execute(HandleConfigDriveIsoCommand cmd) { try { Files.deleteIfExists(tmpltPath.toPath()); } catch (IOException e) { - return new Answer(cmd, e); + return new HandleConfigDriveIsoAnswer(cmd, e); } - return new Answer(cmd); + return new HandleConfigDriveIsoAnswer(cmd); } else { - return new Answer(cmd, false, "Not implemented yet"); + return new HandleConfigDriveIsoAnswer(cmd, "Not implemented yet"); } } } diff --git a/systemvm/debian/opt/cloud/bin/filesystem_writable_check.py b/systemvm/debian/opt/cloud/bin/filesystem_writable_check.py new file mode 100644 index 000000000000..eac7d9c75536 --- /dev/null +++ b/systemvm/debian/opt/cloud/bin/filesystem_writable_check.py @@ -0,0 +1,46 @@ +#!/usr/bin/python +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import os + + +def check_filesystem(): + ST_RDONLY = 1 + if os.ST_RDONLY is not None: + ST_RDONLY = os.ST_RDONLY + + stat1 = os.statvfs('/root') + readOnly1 = bool(stat1.f_flag & ST_RDONLY) + + if (readOnly1): + print "Read-only file system : monitor results (/root) file system is mounted as read-only" + exit(1) + + stat2 = os.statvfs('/var/cache/cloud') + readOnly2 = bool(stat2.f_flag & ST_RDONLY) + + if (readOnly2): + print "Read-only file system : config info (/var/cache/cloud) file system is mounted as read-only" + exit(1) + + print "file system is writable" + exit(0) + + +if __name__ == "__main__": + check_filesystem() diff --git a/test/integration/plugins/scaleio/README.md b/test/integration/plugins/scaleio/README.md new file mode 100644 index 000000000000..4a980d86cf71 --- /dev/null +++ b/test/integration/plugins/scaleio/README.md @@ -0,0 +1,37 @@ +# PowerFlex/ScaleIO storage plugin +================================== +This directory contains the basic VM, Volume life cycle tests for PowerFlex/ScaleIO storage pool (in KVM hypervisor). 
+ +# Running tests +=============== +To run these tests, first update the below test data of the CloudStack environment + +```` +TestData.zoneId: <zone_id> +TestData.clusterId: <cluster_id> +TestData.domainId: <domain_id> +TestData.url: <mgmt_server_ip> +```` + +and PowerFlex/ScaleIO storage pool url at TestData.primaryStorage in the below format + +```` +powerflex://<api_user>:<api_password>@<gateway>/<storagepool> + + where, + - <api_user> : user name for API access + - <api_password> : url-encoded password for API access + - <gateway> : scaleio gateway host + - <storagepool> : storage pool name (case sensitive) + + +For example: "powerflex://admin:P%40ssword123@10.10.2.130/cspool" +```` + +Then run the tests using python unittest runner: nosetests + +```` +nosetests --with-marvin --marvin-config=<marvin-cfg-file> <cloudstack-dir>/test/integration/plugins/scaleio/test_scaleio_volumes.py --zone=<zone_name> --hypervisor=kvm +```` + +You can also run these tests out of the box with PyDev or PyCharm or whatever. diff --git a/test/integration/plugins/scaleio/test_scaleio_volumes.py b/test/integration/plugins/scaleio/test_scaleio_volumes.py new file mode 100644 index 000000000000..28d591ae70e9 --- /dev/null +++ b/test/integration/plugins/scaleio/test_scaleio_volumes.py @@ -0,0 +1,834 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License.
+ +import logging +import random +import time + +# All tests inherit from cloudstackTestCase +from marvin.cloudstackTestCase import cloudstackTestCase + +# Import Integration Libraries +# base - contains all resources as entities and defines create, delete, list operations on them +from marvin.lib.base import Account, DiskOffering, ServiceOffering, Snapshot, StoragePool, Template, User, VirtualMachine, Volume + +# common - commonly used methods for all tests are listed here +from marvin.lib.common import get_domain, get_template, get_zone, list_clusters, list_hosts, list_virtual_machines, \ + list_volumes + +# utils - utility classes for common cleanup, external library wrappers, etc. +from marvin.lib.utils import cleanup_resources + +# Prerequisites: +# Only one zone +# Only one pod +# Only one cluster +# +# One ScaleIO storage pool +# Only KVM hypervisor is supported for ScaleIO storage pool +# KVM host(s) with ScaleIO Data Client (SDC) installed and connected to Metadata Manager (MDM) +# + +class TestData(): + # constants + account = "account" + clusterId = "clusterId" + computeOffering = "computeoffering" + diskName = "diskname" + diskOffering = "diskoffering" + domainId = "domainId" + hypervisor = "hypervisor" + kvm = "kvm" + login = "login" + gatewayip = "gatewayip" + one_GB_in_bytes = 1073741824 + password = "password" + port = "port" + primaryStorage = "primarystorage" + provider = "provider" + scope = "scope" + powerFlex = "powerflex" + storageTag = "pflex" + tags = "tags" + templateCacheNameKvm = "centos55-x86-64" + testAccount = "testaccount" + url = "url" + user = "user" + username = "username" + virtualMachine = "virtualmachine" + virtualMachine2 = "virtualmachine2" + volume_1 = "volume_1" + volume_2 = "volume_2" + kvm = "kvm" + zoneId = "zoneId" + + # hypervisor type to test + hypervisor_type = kvm + + def __init__(self): + self.testdata = { + TestData.kvm: { + TestData.username: "root", + TestData.password: "P@ssword123" + }, + TestData.account: { + 
"email": "test1@test1.com", + "firstname": "John", + "lastname": "Doe", + "username": "test1", + "password": "test" + }, + TestData.testAccount: { + "email": "test2@test2.com", + "firstname": "Jane", + "lastname": "Doe", + "username": "test2", + "password": "test" + }, + TestData.user: { + "email": "user@test1.com", + "firstname": "Jane", + "lastname": "Doe", + "username": "test1user", + "password": "password" + }, + TestData.primaryStorage: { + "name": "PowerFlexPool-%d" % random.randint(0, 100), + TestData.scope: "ZONE", + "url": "powerflex://admin:P%40ssword123@10.10.2.130/cspool", + TestData.provider: "PowerFlex", + TestData.tags: TestData.storageTag, + TestData.hypervisor: "KVM" + }, + TestData.virtualMachine: { + "name": "TestVM1", + "displayname": "Test VM1" + }, + TestData.virtualMachine2: { + "name": "TestVM2", + "displayname": "Test VM 2" + }, + TestData.computeOffering: { + "name": "PowerFlex_Compute", + "displaytext": "PowerFlex_Compute", + "cpunumber": 1, + "cpuspeed": 500, + "memory": 512, + "storagetype": "shared", + TestData.tags: TestData.storageTag + }, + TestData.diskOffering: { + "name": "PowerFlex_Disk", + "displaytext": "PowerFlex_Disk", + "disksize": 8, + TestData.tags: TestData.storageTag, + "storagetype": "shared" + }, + TestData.volume_1: { + TestData.diskName: "test-volume-1", + }, + TestData.volume_2: { + TestData.diskName: "test-volume-2", + }, + TestData.zoneId: 1, + TestData.clusterId: 1, + TestData.domainId: 1, + TestData.url: "10.10.3.226" + } + + +class TestScaleIOVolumes(cloudstackTestCase): + _volume_vm_id_and_vm_id_do_not_match_err_msg = "The volume's VM ID and the VM's ID do not match." + _vm_not_in_running_state_err_msg = "The VM is not in the 'Running' state." + _vm_not_in_stopped_state_err_msg = "The VM is not in the 'Stopped' state." 
+ + @classmethod + def setUpClass(cls): + # Set up API client + testclient = super(TestScaleIOVolumes, cls).getClsTestClient() + + cls.apiClient = testclient.getApiClient() + cls.configData = testclient.getParsedTestDataConfig() + cls.dbConnection = testclient.getDbConnection() + cls.testdata = TestData().testdata + + # Get Resources from Cloud Infrastructure + cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]) + cls.cluster = list_clusters(cls.apiClient)[0] + cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type) + cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId]) + + # Create test account + cls.account = Account.create( + cls.apiClient, + cls.testdata["account"], + admin=1 + ) + + # Set up connection to make customized API calls + cls.user = User.create( + cls.apiClient, + cls.testdata["user"], + account=cls.account.name, + domainid=cls.domain.id + ) + + url = cls.testdata[TestData.url] + + primarystorage = cls.testdata[TestData.primaryStorage] + + cls.primary_storage = StoragePool.create( + cls.apiClient, + primarystorage, + scope=primarystorage[TestData.scope], + zoneid=cls.zone.id, + provider=primarystorage[TestData.provider], + tags=primarystorage[TestData.tags], + hypervisor=primarystorage[TestData.hypervisor] + ) + + cls.compute_offering = ServiceOffering.create( + cls.apiClient, + cls.testdata[TestData.computeOffering] + ) + + cls.disk_offering = DiskOffering.create( + cls.apiClient, + cls.testdata[TestData.diskOffering] + ) + + # Create VM and volume for tests + cls.virtual_machine = VirtualMachine.create( + cls.apiClient, + cls.testdata[TestData.virtualMachine], + accountid=cls.account.name, + zoneid=cls.zone.id, + serviceofferingid=cls.compute_offering.id, + templateid=cls.template.id, + domainid=cls.domain.id, + startvm=False + ) + + TestScaleIOVolumes._start_vm(cls.virtual_machine) + + cls.volume = Volume.create( + cls.apiClient, + cls.testdata[TestData.volume_1], + 
account=cls.account.name, + domainid=cls.domain.id, + zoneid=cls.zone.id, + diskofferingid=cls.disk_offering.id + ) + + # Resources that are to be destroyed + cls._cleanup = [ + cls.volume, + cls.virtual_machine, + cls.compute_offering, + cls.disk_offering, + cls.user, + cls.account + ] + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.apiClient, cls._cleanup) + + cls.primary_storage.delete(cls.apiClient) + + except Exception as e: + logging.debug("Exception in tearDownClass(cls): %s" % e) + + def setUp(self): + self.attached = False + self.cleanup = [] + + def tearDown(self): + if self.attached: + self.virtual_machine.detach_volume(self.apiClient, self.volume) + + cleanup_resources(self.apiClient, self.cleanup) + + def test_01_create_vm_with_volume(self): + '''Create VM with attached volume and expunge VM''' + + ####################################### + # STEP 1: Create VM and attach volume # + ####################################### + + test_virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine2], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=False + ) + + TestScaleIOVolumes._start_vm(test_virtual_machine) + + self.volume = test_virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vm = self._get_vm(test_virtual_machine.id) + + self.assertEqual( + self.volume.virtualmachineid, + vm.id, + TestScaleIOVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestScaleIOVolumes._vm_not_in_running_state_err_msg + ) + + ####################################### + # STEP 2: Destroy and Expunge VM # + ####################################### + + test_virtual_machine.delete(self.apiClient, True) + + self.attached = False + + vol = self._get_volume(self.volume.id) + + self.assertEqual( + 
vol.virtualmachineid, + None, + "Check if attached to virtual machine" + ) + + self.assertEqual( + vol.vmname, + None, + "Check if VM was expunged" + ) + + list_virtual_machine_response = list_virtual_machines( + self.apiClient, + id=test_virtual_machine.id + ) + + self.assertEqual( + list_virtual_machine_response, + None, + "Check if VM was actually expunged" + ) + + def test_02_attach_new_volume_to_stopped_vm(self): + '''Attach a volume to a stopped virtual machine, then start VM''' + + self.virtual_machine.stop(self.apiClient) + + new_volume = Volume.create( + self.apiClient, + self.testdata[TestData.volume_2], + account=self.account.name, + domainid=self.domain.id, + zoneid=self.zone.id, + diskofferingid=self.disk_offering.id + ) + + self.cleanup.append(new_volume) + + new_volume = self.virtual_machine.attach_volume( + self.apiClient, + new_volume + ) + + TestScaleIOVolumes._start_vm(self.virtual_machine) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vm.state.lower(), + "running", + TestScaleIOVolumes._vm_not_in_running_state_err_msg + ) + + # Detach volume + new_volume = self.virtual_machine.detach_volume( + self.apiClient, + new_volume + ) + + self.assertEqual( + new_volume.virtualmachineid, + None, + "The volume should not be attached to a VM." 
+ ) + + def test_03_attach_detach_attach_volume_to_vm(self): + '''Attach, detach, and attach volume to a running VM''' + + TestScaleIOVolumes._start_vm(self.virtual_machine) + + ####################################### + # STEP 1: Attach volume to running VM # + ####################################### + + self.volume = self.virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + self.volume.virtualmachineid, + vm.id, + TestScaleIOVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestScaleIOVolumes._vm_not_in_running_state_err_msg + ) + + ######################################### + # STEP 2: Detach volume from running VM # + ######################################### + + self.volume = self.virtual_machine.detach_volume( + self.apiClient, + self.volume + ) + + self.attached = False + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + self.volume.virtualmachineid, + None, + "The volume should not be attached to a VM." 
+ ) + + self.assertEqual( + vm.state.lower(), + 'running', + str(vm.state) + ) + + ####################################### + # STEP 3: Attach volume to running VM # + ####################################### + + self.volume = self.virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + self.volume.virtualmachineid, + vm.id, + TestScaleIOVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestScaleIOVolumes._vm_not_in_running_state_err_msg + ) + + def test_04_detach_vol_stopped_vm_start(self): + '''Detach volume from a stopped VM, then start.''' + + TestScaleIOVolumes._start_vm(self.virtual_machine) + + ####################################### + # STEP 1: Attach volume to running VM # + ####################################### + + self.volume = self.virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + self.volume.virtualmachineid, + vm.id, + TestScaleIOVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestScaleIOVolumes._vm_not_in_running_state_err_msg + ) + + ######################################### + # STEP 2: Detach volume from stopped VM # + ######################################### + + self.virtual_machine.stop(self.apiClient) + + self.volume = self.virtual_machine.detach_volume( + self.apiClient, + self.volume + ) + + self.attached = False + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + self.volume.virtualmachineid, + None, + "The volume should not be attached to a VM." 
+ ) + + self.assertEqual( + vm.state.lower(), + 'stopped', + TestScaleIOVolumes._vm_not_in_stopped_state_err_msg + ) + + ####################################### + # STEP 3: Start VM with detached vol # + ####################################### + + TestScaleIOVolumes._start_vm(self.virtual_machine) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vm.state.lower(), + 'running', + TestScaleIOVolumes._vm_not_in_running_state_err_msg + ) + + def test_05_attach_volume_to_stopped_vm(self): + '''Attach a volume to a stopped virtual machine, then start VM''' + + self.virtual_machine.stop(self.apiClient) + + ####################################### + # STEP 1: Attach volume to stopped VM # + ####################################### + + self.volume = self.virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + self.volume.virtualmachineid, + vm.id, + TestScaleIOVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'stopped', + TestScaleIOVolumes._vm_not_in_stopped_state_err_msg + ) + + TestScaleIOVolumes._start_vm(self.virtual_machine) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + self.volume.virtualmachineid, + vm.id, + TestScaleIOVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestScaleIOVolumes._vm_not_in_running_state_err_msg + ) + + def test_06_attached_volume_reboot_vm(self): + '''Attach volume to running VM, then reboot.''' + + TestScaleIOVolumes._start_vm(self.virtual_machine) + + ####################################### + # STEP 1: Attach volume to running VM # + ####################################### + + self.volume = self.virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + 
self.volume.virtualmachineid, + vm.id, + TestScaleIOVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestScaleIOVolumes._vm_not_in_running_state_err_msg + ) + + ####################################### + # STEP 2: Reboot VM with attached vol # + ####################################### + TestScaleIOVolumes._reboot_vm(self.virtual_machine) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vm.state.lower(), + 'running', + TestScaleIOVolumes._vm_not_in_running_state_err_msg + ) + + def test_07_detach_volume_reboot_vm(self): + '''Detach volume from a running VM, then reboot.''' + + TestScaleIOVolumes._start_vm(self.virtual_machine) + + ####################################### + # STEP 1: Attach volume to running VM # + ####################################### + + self.volume = self.virtual_machine.attach_volume( + self.apiClient, + self.volume + ) + + self.attached = True + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + self.volume.virtualmachineid, + vm.id, + TestScaleIOVolumes._volume_vm_id_and_vm_id_do_not_match_err_msg + ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestScaleIOVolumes._vm_not_in_running_state_err_msg + ) + + ######################################### + # STEP 2: Detach volume from running VM # + ######################################### + + self.volume = self.virtual_machine.detach_volume( + self.apiClient, + self.volume + ) + + self.attached = False + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + self.volume.virtualmachineid, + None, + "The volume should not be attached to a VM." 
+ ) + + self.assertEqual( + vm.state.lower(), + 'running', + TestScaleIOVolumes._vm_not_in_running_state_err_msg + ) + + ####################################### + # STEP 3: Reboot VM with detached vol # + ####################################### + + self.virtual_machine.reboot(self.apiClient) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + vm.state.lower(), + 'running', + TestScaleIOVolumes._vm_not_in_running_state_err_msg + ) + + def test_08_delete_volume_was_attached(self): + '''Delete volume that was attached to a VM and is detached now''' + + TestScaleIOVolumes._start_vm(self.virtual_machine) + + ####################################### + # STEP 1: Create vol and attach to VM # + ####################################### + + new_volume = Volume.create( + self.apiClient, + self.testdata[TestData.volume_2], + account=self.account.name, + domainid=self.domain.id, + zoneid=self.zone.id, + diskofferingid=self.disk_offering.id + ) + + volume_to_delete_later = new_volume + + new_volume = self.virtual_machine.attach_volume( + self.apiClient, + new_volume + ) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + new_volume.virtualmachineid, + vm.id, + "Check if attached to virtual machine" + ) + + self.assertEqual( + vm.state.lower(), + 'running', + str(vm.state) + ) + + ####################################### + # STEP 2: Detach and delete volume # + ####################################### + + new_volume = self.virtual_machine.detach_volume( + self.apiClient, + new_volume + ) + + vm = self._get_vm(self.virtual_machine.id) + + self.assertEqual( + new_volume.virtualmachineid, + None, + "Check if attached to virtual machine" + ) + + self.assertEqual( + vm.state.lower(), + 'running', + str(vm.state) + ) + + volume_to_delete_later.delete(self.apiClient) + + list_volumes_response = list_volumes( + self.apiClient, + id=new_volume.id + ) + + self.assertEqual( + list_volumes_response, + None, + "Check volume was deleted" + ) + + + def 
_create_vm_using_template_and_destroy_vm(self, template): + vm_name = "VM-%d" % random.randint(0, 100) + + virtual_machine_dict = {"name": vm_name, "displayname": vm_name} + + virtual_machine = VirtualMachine.create( + self.apiClient, + virtual_machine_dict, + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=template.id, + domainid=self.domain.id, + startvm=True + ) + + list_volumes_response = list_volumes( + self.apiClient, + virtualmachineid=virtual_machine.id, + listall=True + ) + + vm_root_volume = list_volumes_response[0] + vm_root_volume_name = vm_root_volume.name + + virtual_machine.delete(self.apiClient, True) + + def _get_bytes_from_gb(self, number_in_gb): + return number_in_gb * 1024 * 1024 * 1024 + + def _get_volume(self, volume_id): + list_vols_response = list_volumes(self.apiClient, id=volume_id) + return list_vols_response[0] + + def _get_vm(self, vm_id): + list_vms_response = list_virtual_machines(self.apiClient, id=vm_id) + return list_vms_response[0] + + def _get_template_cache_name(self): + if TestData.hypervisor_type == TestData.kvm: + return TestData.templateCacheNameKvm + + self.assert_(False, "Invalid hypervisor type") + + + @classmethod + def _start_vm(cls, vm): + vm_for_check = list_virtual_machines( + cls.apiClient, + id=vm.id + )[0] + + if vm_for_check.state == VirtualMachine.STOPPED: + vm.start(cls.apiClient) + + # For KVM, just give it 90 seconds to boot up. + if TestData.hypervisor_type == TestData.kvm: + time.sleep(90) + + @classmethod + def _reboot_vm(cls, vm): + vm.reboot(cls.apiClient) + + # For KVM, just give it 90 seconds to boot up. 
+ if TestData.hypervisor_type == TestData.kvm: + time.sleep(90) diff --git a/utils/pom.xml b/utils/pom.xml index 8e84f9951be6..4eee66dc65aa 100755 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -181,6 +181,7 @@ org.apache.commons commons-compress + ${cs.commons-compress.version} diff --git a/utils/src/main/java/com/cloud/utils/SerialVersionUID.java b/utils/src/main/java/com/cloud/utils/SerialVersionUID.java index 21fdbb4cf111..363248c99a96 100644 --- a/utils/src/main/java/com/cloud/utils/SerialVersionUID.java +++ b/utils/src/main/java/com/cloud/utils/SerialVersionUID.java @@ -70,4 +70,5 @@ public interface SerialVersionUID { public static final long SnapshotBackupException = Base | 0x2e; public static final long UnavailableCommandException = Base | 0x2f; public static final long OriginDeniedException = Base | 0x30; + public static final long StorageAccessException = Base | 0x31; } diff --git a/utils/src/main/java/com/cloud/utils/storage/QCOW2Utils.java b/utils/src/main/java/com/cloud/utils/storage/QCOW2Utils.java index 3e08bd6634ed..b45d5b4e3835 100644 --- a/utils/src/main/java/com/cloud/utils/storage/QCOW2Utils.java +++ b/utils/src/main/java/com/cloud/utils/storage/QCOW2Utils.java @@ -19,14 +19,27 @@ package com.cloud.utils.storage; +import java.io.BufferedInputStream; import java.io.IOException; import java.io.InputStream; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; + +import org.apache.commons.compress.compressors.CompressorException; +import org.apache.commons.compress.compressors.CompressorInputStream; +import org.apache.commons.compress.compressors.CompressorStreamFactory; +import org.apache.log4j.Logger; import com.cloud.utils.NumbersUtil; public final class QCOW2Utils { + public static final Logger LOGGER = Logger.getLogger(QCOW2Utils.class.getName()); + private static final int VIRTUALSIZE_HEADER_LOCATION = 24; private static final int VIRTUALSIZE_HEADER_LENGTH = 8; + private 
static final int MAGIC_HEADER_LENGTH = 4; /** * Private constructor -> This utility class cannot be instantiated. @@ -57,4 +70,55 @@ public static long getVirtualSize(InputStream inputStream) throws IOException { return NumbersUtil.bytesToLong(bytes); } + + public static long getVirtualSize(String urlStr) { + InputStream inputStream = null; + + try { + URL url = new URL(urlStr); + BufferedInputStream bufferedInputStream = new BufferedInputStream(url.openStream()); + inputStream = bufferedInputStream; + + try { + CompressorInputStream compressorInputStream = new CompressorStreamFactory().createCompressorInputStream(bufferedInputStream); + inputStream = compressorInputStream; + } catch (CompressorException e) { + LOGGER.warn(e.getMessage()); + inputStream = bufferedInputStream; + } + + byte[] inputBytes = inputStream.readNBytes(VIRTUALSIZE_HEADER_LOCATION + VIRTUALSIZE_HEADER_LENGTH); + + ByteBuffer inputMagicBytes = ByteBuffer.allocate(MAGIC_HEADER_LENGTH); + inputMagicBytes.put(inputBytes, 0, MAGIC_HEADER_LENGTH); + + ByteBuffer qcow2MagicBytes = ByteBuffer.allocate(MAGIC_HEADER_LENGTH); + qcow2MagicBytes.put("QFI".getBytes(Charset.forName("UTF-8"))); + qcow2MagicBytes.put((byte)0xfb); + + long virtualSize = 0L; + // Validate the header magic bytes + if (qcow2MagicBytes.compareTo(inputMagicBytes) == 0) { + ByteBuffer virtualSizeBytes = ByteBuffer.allocate(VIRTUALSIZE_HEADER_LENGTH); + virtualSizeBytes.put(inputBytes, VIRTUALSIZE_HEADER_LOCATION, VIRTUALSIZE_HEADER_LENGTH); + virtualSize = virtualSizeBytes.getLong(0); + } + + return virtualSize; + } catch (MalformedURLException e) { + LOGGER.warn("Failed to validate for qcow2, malformed URL: " + urlStr + ", error: " + e.getMessage()); + throw new IllegalArgumentException("Invalid URL: " + urlStr); + } catch (IOException e) { + LOGGER.warn("Failed to validate for qcow2, error: " + e.getMessage()); + throw new IllegalArgumentException("Failed to connect URL: " + urlStr); + } finally { + if (inputStream != null) { + 
try { + inputStream.close(); + } catch (final IOException e) { + LOGGER.warn("Failed to close input stream due to: " + e.getMessage()); + } + } + } + } } \ No newline at end of file From 6085c064c1f6dbf5f5d828fc2d5ca166ed2e2a9e Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Mon, 7 Dec 2020 19:33:17 +0530 Subject: [PATCH 02/12] Addressed some issues for few operations on PowerFlex storage pool. - Updated migration volume operation to sync the status and wait for migration to complete. - Updated VM Snapshot naming, for uniqueness in ScaleIO volume name when more than one volume exists in the VM. - Added sync lock while spooling managed storage template before volume creation from the template (non-direct download). - Updated resize volume error message string. - Blocked the below operations on PowerFlex storage pool: -> Extract Volume -> Create Snapshot for VMSnapshot --- .../vmsnapshot/ScaleIOVMSnapshotStrategy.java | 2 +- .../storage/volume/VolumeServiceImpl.java | 46 ++++++++--- .../storage/datastore/api/VTree.java | 39 ++++++++++ .../datastore/api/VTreeMigrationInfo.java | 76 +++++++++++++++++++ .../client/ScaleIOGatewayClient.java | 2 +- .../client/ScaleIOGatewayClientImpl.java | 57 +++++++++++++- .../driver/ScaleIOPrimaryDataStoreDriver.java | 3 +- .../cloud/storage/VolumeApiServiceImpl.java | 13 +++- 8 files changed, 219 insertions(+), 19 deletions(-) create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/VTree.java create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/VTreeMigrationInfo.java diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java index 985eeedb205b..0708f3df3415 100644 --- 
a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java @@ -140,7 +140,7 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { long prev_chain_size = 0; long virtual_size=0; for (VolumeObjectTO volume : volumeTOs) { - String volumeSnapshotName = String.format("%s-%s-%s-%s", ScaleIOUtil.VMSNAPSHOT_PREFIX, vmSnapshotVO.getId(), + String volumeSnapshotName = String.format("%s-%s-%s-%s-%s", ScaleIOUtil.VMSNAPSHOT_PREFIX, vmSnapshotVO.getId(), volume.getId(), storagePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); srcVolumeDestSnapshotMap.put(volume.getPath(), volumeSnapshotName); diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 9f5fcac7d927..35df85fbbc84 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -1398,25 +1398,47 @@ public AsyncCallFuture createManagedStorageVolumeFromTemplateAs if (storageCanCloneVolume && computeSupportsVolumeClone) { s_logger.debug("Storage " + destDataStoreId + " can support cloning using a cached template and compute side is OK with volume cloning."); - TemplateInfo templateOnPrimary = destPrimaryDataStore.getTemplate(srcTemplateInfo.getId(), null); + GlobalLock lock = null; + TemplateInfo templateOnPrimary = null; - if (templateOnPrimary == null) { - templateOnPrimary = createManagedTemplateVolume(srcTemplateInfo, destPrimaryDataStore); + try { + String tmplIdManagedPoolIdLockString = "tmplId:" + srcTemplateInfo.getId() + "managedPoolId:" + destDataStoreId; + lock = 
GlobalLock.getInternLock(tmplIdManagedPoolIdLockString); + if (lock == null) { + throw new CloudRuntimeException("Unable to create managed storage template/volume, couldn't get global lock on " + tmplIdManagedPoolIdLockString); + } + + int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); + if (!lock.lock(storagePoolMaxWaitSeconds)) { + s_logger.debug("Unable to create managed storage template/volume, couldn't lock on " + tmplIdManagedPoolIdLockString); + throw new CloudRuntimeException("Unable to create managed storage template/volume, couldn't lock on " + tmplIdManagedPoolIdLockString); + } + + templateOnPrimary = destPrimaryDataStore.getTemplate(srcTemplateInfo.getId(), null); if (templateOnPrimary == null) { - throw new CloudRuntimeException("Failed to create template " + srcTemplateInfo.getUniqueName() + " on primary storage: " + destDataStoreId); + templateOnPrimary = createManagedTemplateVolume(srcTemplateInfo, destPrimaryDataStore); + + if (templateOnPrimary == null) { + throw new CloudRuntimeException("Failed to create template " + srcTemplateInfo.getUniqueName() + " on primary storage: " + destDataStoreId); + } } - } - // Copy the template to the template volume. - VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), null); + // Copy the template to the template volume. 
+ VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId(), null); - if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); - } + if (templatePoolRef == null) { + throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + } - if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { - copyTemplateToManagedTemplateVolume(srcTemplateInfo, templateOnPrimary, templatePoolRef, destPrimaryDataStore, destHost); + if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { + copyTemplateToManagedTemplateVolume(srcTemplateInfo, templateOnPrimary, templatePoolRef, destPrimaryDataStore, destHost); + } + } finally { + if (lock != null) { + lock.unlock(); + lock.releaseRef(); + } } if (destPrimaryDataStore.getPoolType() != StoragePoolType.PowerFlex) { diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/VTree.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/VTree.java new file mode 100644 index 000000000000..824a4c5496c1 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/VTree.java @@ -0,0 +1,39 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.datastore.api; + +public class VTree { + String storagePoolId; + VTreeMigrationInfo vtreeMigrationInfo; + + public String getStoragePoolId() { + return storagePoolId; + } + + public void setStoragePoolId(String storagePoolId) { + this.storagePoolId = storagePoolId; + } + + public VTreeMigrationInfo getVTreeMigrationInfo() { + return vtreeMigrationInfo; + } + + public void setVTreeMigrationInfo(VTreeMigrationInfo vtreeMigrationInfo) { + this.vtreeMigrationInfo = vtreeMigrationInfo; + } +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/VTreeMigrationInfo.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/VTreeMigrationInfo.java new file mode 100644 index 000000000000..f4e926bfd33f --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/VTreeMigrationInfo.java @@ -0,0 +1,76 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.datastore.api; + +import com.cloud.utils.EnumUtils; + +public class VTreeMigrationInfo { + public enum MigrationStatus { + NotInMigration, + MigrationNormal, + PendingRetry, + InternalPausing, + GracefullyPausing, + ForcefullyPausing, + Paused, + PendingMigration, + PendingRebalance, + None + } + + String sourceStoragePoolId; + String destinationStoragePoolId; + MigrationStatus migrationStatus; + Long migrationQueuePosition; + + public String getSourceStoragePoolId() { + return sourceStoragePoolId; + } + + public void setSourceStoragePoolId(String sourceStoragePoolId) { + this.sourceStoragePoolId = sourceStoragePoolId; + } + + public String getDestinationStoragePoolId() { + return destinationStoragePoolId; + } + + public void setDestinationStoragePoolId(String destinationStoragePoolId) { + this.destinationStoragePoolId = destinationStoragePoolId; + } + + public MigrationStatus getMigrationStatus() { + return migrationStatus; + } + + public void setMigrationStatus(String migrationStatus) { + this.migrationStatus = EnumUtils.fromString(MigrationStatus.class, migrationStatus, MigrationStatus.None); + } + + public void setMigrationStatus(MigrationStatus migrationStatus) { + this.migrationStatus = migrationStatus; + } + + public Long getMigrationQueuePosition() { + return migrationQueuePosition; + } + + public void setMigrationQueuePosition(Long migrationQueuePosition) { + this.migrationQueuePosition = migrationQueuePosition; + } +} diff --git 
a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClient.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClient.java index a1e69bae8c26..f6b10f888320 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClient.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClient.java @@ -55,7 +55,7 @@ Volume createVolume(final String name, final String storagePoolId, Volume resizeVolume(final String volumeId, final Integer sizeInGb); Volume cloneVolume(final String sourceVolumeId, final String destVolumeName); boolean deleteVolume(final String volumeId); - boolean migrateVolume(final String srcVolumeId, final String destPoolId); + boolean migrateVolume(final String srcVolumeId, final String destPoolId, final int timeoutInSecs); boolean mapVolumeToSdc(final String volumeId, final String sdcId); boolean mapVolumeToSdcWithLimits(final String volumeId, final String sdcId, final Long iopsLimit, final Long bandwidthLimitInKbps); diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java index 6baf46532290..5cc37699aa3e 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java @@ -44,6 +44,8 @@ import org.apache.cloudstack.storage.datastore.api.SnapshotGroup; import org.apache.cloudstack.storage.datastore.api.StoragePool; import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; +import 
org.apache.cloudstack.storage.datastore.api.VTree; +import org.apache.cloudstack.storage.datastore.api.VTreeMigrationInfo; import org.apache.cloudstack.storage.datastore.api.Volume; import org.apache.cloudstack.storage.datastore.api.VolumeStatistics; import org.apache.cloudstack.utils.security.SSLUtils; @@ -744,9 +746,10 @@ public boolean deleteVolume(final String volumeId) { } @Override - public boolean migrateVolume(final String srcVolumeId, final String destPoolId) { + public boolean migrateVolume(final String srcVolumeId, final String destPoolId, final int timeoutInSecs) { Preconditions.checkArgument(!Strings.isNullOrEmpty(srcVolumeId), "src volume id cannot be null"); Preconditions.checkArgument(!Strings.isNullOrEmpty(destPoolId), "dest pool id cannot be null"); + Preconditions.checkArgument(timeoutInSecs > 0, "timeout must be greater than 0"); HttpResponse response = null; try { @@ -754,7 +757,7 @@ public boolean migrateVolume(final String srcVolumeId, final String destPoolId) "/instances/Volume::" + srcVolumeId + "/action/migrateVTree", String.format("{\"destSPId\":\"%s\"}", destPoolId)); checkResponseOK(response); - return true; + return waitForVolumeMigrationToComplete(srcVolumeId, timeoutInSecs); } catch (final IOException e) { LOG.error("Failed to migrate PowerFlex volume due to:", e); checkResponseTimeOut(e); @@ -766,6 +769,56 @@ public boolean migrateVolume(final String srcVolumeId, final String destPoolId) return false; } + private boolean waitForVolumeMigrationToComplete(final String volumeId, int waitTimeInSec) { + LOG.debug("Waiting for the migration to complete for the volume " + volumeId); + Volume volume = getVolume(volumeId); + if (volume == null || Strings.isNullOrEmpty(volume.getVtreeId())) { + LOG.warn("Failed to get volume details, unable to check the migration status for the volume " + volumeId); + return false; + } + + String volumeTreeId = volume.getVtreeId(); + while (waitTimeInSec > 0) { + VTreeMigrationInfo.MigrationStatus 
migrationStatus = getVolumeTreeMigrationStatus(volumeTreeId); + if (migrationStatus != null && migrationStatus == VTreeMigrationInfo.MigrationStatus.NotInMigration) { + LOG.debug("Migration completed for the volume " + volumeId); + return true; + } + + waitTimeInSec--; + + try { + Thread.sleep(1000); // Try every sec and return after migration is complete + } catch (Exception ex) { + // don't do anything + } + } + + LOG.debug("Unable to complete the migration for the volume " + volumeId); + return false; + } + + private VTreeMigrationInfo.MigrationStatus getVolumeTreeMigrationStatus(final String volumeTreeId) { + HttpResponse response = null; + try { + response = get("/instances/VTree::" + volumeTreeId); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + VTree volumeTree = mapper.readValue(response.getEntity().getContent(), VTree.class); + if (volumeTree != null && volumeTree.getVTreeMigrationInfo() != null) { + return volumeTree.getVTreeMigrationInfo().getMigrationStatus(); + } + } catch (final IOException e) { + LOG.error("Failed to migrate PowerFlex volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + /////////////////////////////////////////////////////// //////////////// StoragePool APIs ///////////////////// /////////////////////////////////////////////////////// diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java index d64c4088d7a7..f31d62b4f8f4 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java +++ 
b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java @@ -669,7 +669,8 @@ private Answer migrateVolume(DataObject srcData, DataObject destData) { final String srcVolumeId = ((VolumeInfo) srcData).getPath(); final StoragePoolVO destStoragePool = storagePoolDao.findById(destPoolId); final String destStoragePoolId = destStoragePool.getPath(); - boolean migrateStatus = client.migrateVolume(srcVolumeId, destStoragePoolId); + int migrationTimeout = StorageManager.KvmStorageOfflineMigrationWait.value(); + boolean migrateStatus = client.migrateVolume(srcVolumeId, destStoragePoolId, migrationTimeout); if (migrateStatus) { if (srcData.getId() != destData.getId()) { VolumeVO destVolume = volumeDao.findById(destData.getId()); diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index fb50b25e81ba..7d7c849bb28e 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -1336,9 +1336,8 @@ private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long n volume.getDiskOfferingId(), volume.getTemplateId(), volume.getSize(), Volume.class.getName(), volume.getUuid()); return volume; - } catch (Exception e) { - throw new CloudRuntimeException("Exception caught during resize volume operation of volume UUID: " + volume.getUuid(), e); + throw new CloudRuntimeException("Couldn't resize volume: " + volume.getName() + ", " + e.getMessage(), e); } } @@ -2790,6 +2789,10 @@ public Snapshot allocSnapshotForVm(Long vmId, Long volumeId, String snapshotName throw new InvalidParameterValueException("VolumeId: " + volumeId + " please attach this volume to a VM before create snapshot for it"); } + if (storagePool.getPoolType() == Storage.StoragePoolType.PowerFlex) { + throw new InvalidParameterValueException("Cannot perform this 
operation, unsupported on storage pool type " + storagePool.getPoolType()); + } + return snapshotMgr.allocSnapshot(volumeId, Snapshot.MANUAL_POLICY_ID, snapshotName, null); } @@ -2820,7 +2823,13 @@ public String extractVolume(ExtractVolumeCmd cmd) { } if (volume.getPoolId() == null) { throw new InvalidParameterValueException("The volume doesn't belong to a storage pool so can't extract it"); + } else { + StoragePoolVO poolVO = _storagePoolDao.findById(volume.getPoolId()); + if (poolVO != null && poolVO.getPoolType() == Storage.StoragePoolType.PowerFlex) { + throw new InvalidParameterValueException("Cannot extract volume, this operation is unsupported for volumes on storage pool type " + poolVO.getPoolType()); + } } + // Extract activity only for detached volumes or for volumes whose // instance is stopped if (volume.getInstanceId() != null && ApiDBUtils.findVMInstanceById(volume.getInstanceId()).getState() != State.Stopped) { From 5cce9623e7f352f018a351eef1c744ac753709ef Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Tue, 12 Jan 2021 18:35:13 +0530 Subject: [PATCH 03/12] Added the PowerFlex/ScaleIO client connection pool to manage the ScaleIO gateway clients, which uses a single gateway client per PowerFlex/ScaleIO storage pool and renews it when the session token expires. - The token is valid for 8 hours from the time it was created, unless there has been no activity for 10 minutes. Reference: https://cpsdocs.dellemc.com/bundle/PF_REST_API_RG/page/GUID-92430F19-9F44-42B6-B898-87D5307AE59B.html Other fixes included: - Fail the VM deployment when the host specified in the deployVirtualMachine cmd is not in the right state (i.e. either Resource State is not Enabled or Status is not Up) - Use the physical file size of the template to check the free space availability on the host, while downloading the direct download templates. 
- Perform basic tests (for connectivity and file system) on router before updating the health check config data => Validate the basic tests (connectivity and file system check) on router => Cleanup the health check results when router is destroyed --- .../VirtualNetworkApplianceService.java | 3 +- .../GetRouterHealthCheckResultsCmd.java | 2 +- .../GetRouterMonitorResultsCommand.java | 8 +- .../VirtualRoutingResource.java | 44 ++-- .../com/cloud/storage/StorageManager.java | 2 +- .../vmsnapshot/ScaleIOVMSnapshotStrategy.java | 11 +- .../kvm/storage/KVMStorageProcessor.java | 11 +- .../ScaleIOGatewayClientConnectionPool.java | 90 ++++++++ .../client/ScaleIOGatewayClientImpl.java | 39 +++- .../driver/ScaleIOPrimaryDataStoreDriver.java | 10 +- .../ScaleIOPrimaryDataStoreLifeCycle.java | 24 +-- .../provider/ScaleIOHostListener.java | 15 +- .../ScaleIOPrimaryDataStoreLifeCycleTest.java | 19 +- .../com/cloud/api/query/QueryManagerImpl.java | 11 +- .../network/router/NetworkHelperImpl.java | 4 + .../VirtualNetworkApplianceManagerImpl.java | 192 +++++++++++------- .../java/com/cloud/vm/UserVmManagerImpl.java | 16 +- ...MockVpcVirtualNetworkApplianceManager.java | 5 +- 18 files changed, 345 insertions(+), 161 deletions(-) create mode 100644 plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java diff --git a/api/src/main/java/com/cloud/network/VirtualNetworkApplianceService.java b/api/src/main/java/com/cloud/network/VirtualNetworkApplianceService.java index 98fb8be7c7a9..8504efda5093 100644 --- a/api/src/main/java/com/cloud/network/VirtualNetworkApplianceService.java +++ b/api/src/main/java/com/cloud/network/VirtualNetworkApplianceService.java @@ -26,6 +26,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.router.VirtualRouter; import com.cloud.user.Account; +import com.cloud.utils.Pair; public interface VirtualNetworkApplianceService { /** @@ -73,5 +74,5 @@ public 
interface VirtualNetworkApplianceService { * @param routerId id of the router * @return */ - boolean performRouterHealthChecks(long routerId); + Pair performRouterHealthChecks(long routerId); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/GetRouterHealthCheckResultsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/GetRouterHealthCheckResultsCmd.java index 5efc6de9e948..dc1020b4a3b5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/router/GetRouterHealthCheckResultsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/router/GetRouterHealthCheckResultsCmd.java @@ -111,7 +111,7 @@ public void execute() throws ResourceUnavailableException, InvalidParameterValue setResponseObject(routerResponse); } catch (CloudRuntimeException ex){ ex.printStackTrace(); - throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to execute command due to exception: " + ex.getLocalizedMessage()); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to get health check results due to: " + ex.getLocalizedMessage()); } } } diff --git a/core/src/main/java/com/cloud/agent/api/routing/GetRouterMonitorResultsCommand.java b/core/src/main/java/com/cloud/agent/api/routing/GetRouterMonitorResultsCommand.java index 779a0f45a57f..e32dda369053 100644 --- a/core/src/main/java/com/cloud/agent/api/routing/GetRouterMonitorResultsCommand.java +++ b/core/src/main/java/com/cloud/agent/api/routing/GetRouterMonitorResultsCommand.java @@ -19,12 +19,14 @@ public class GetRouterMonitorResultsCommand extends NetworkElementCommand { private boolean performFreshChecks; + private boolean validateBasicTestsOnly; protected GetRouterMonitorResultsCommand() { } - public GetRouterMonitorResultsCommand(boolean performFreshChecks) { + public GetRouterMonitorResultsCommand(boolean performFreshChecks, boolean validateBasicTestsOnly) { this.performFreshChecks = performFreshChecks; + 
this.validateBasicTestsOnly = validateBasicTestsOnly; } @Override @@ -35,4 +37,8 @@ public boolean isQuery() { public boolean shouldPerformFreshChecks() { return performFreshChecks; } + + public boolean shouldValidateBasicTestsOnly() { + return validateBasicTestsOnly; + } } \ No newline at end of file diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java index 30293a1f84ab..9a55d3bc2fa8 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java @@ -66,6 +66,7 @@ import com.cloud.agent.resource.virtualnetwork.facade.AbstractConfigItemFacade; import com.cloud.utils.ExecutionResult; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; /** @@ -311,20 +312,14 @@ private GetRouterMonitorResultsAnswer parseLinesForHealthChecks(GetRouterMonitor private GetRouterMonitorResultsAnswer execute(GetRouterMonitorResultsCommand cmd) { String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); - ExecutionResult fsReadOnlyResult = _vrDeployer.executeInVR(routerIp, VRScripts.ROUTER_FILESYSTEM_WRITABLE_CHECK, null); - if (!fsReadOnlyResult.isSuccess()) { - s_logger.warn("Result of " + cmd + " failed with details: " + fsReadOnlyResult.getDetails()); - if (StringUtils.isNotBlank(fsReadOnlyResult.getDetails())) { - final String readOnlyFileSystemError = "Read-only file system"; - if (fsReadOnlyResult.getDetails().contains(readOnlyFileSystemError)) { - return new GetRouterMonitorResultsAnswer(cmd, false, null, readOnlyFileSystemError); - } else { - return new GetRouterMonitorResultsAnswer(cmd, false, null, fsReadOnlyResult.getDetails()); - } - } else { - s_logger.warn("Result of " + cmd + " received empty details."); - return new 
GetRouterMonitorResultsAnswer(cmd, false, null, "No results available."); - } + Pair fileSystemTestResult = checkRouterFileSystem(routerIp); + if (!fileSystemTestResult.first()) { + return new GetRouterMonitorResultsAnswer(cmd, false, null, fileSystemTestResult.second()); + } + + if (cmd.shouldValidateBasicTestsOnly()) { + // Basic tests (connectivity and file system checks) are already validated + return new GetRouterMonitorResultsAnswer(cmd, true, null, "success"); } String args = cmd.shouldPerformFreshChecks() ? "true" : "false"; @@ -344,6 +339,27 @@ private GetRouterMonitorResultsAnswer execute(GetRouterMonitorResultsCommand cmd return parseLinesForHealthChecks(cmd, result.getDetails()); } + private Pair checkRouterFileSystem(String routerIp) { + ExecutionResult fileSystemWritableTestResult = _vrDeployer.executeInVR(routerIp, VRScripts.ROUTER_FILESYSTEM_WRITABLE_CHECK, null); + if (fileSystemWritableTestResult.isSuccess()) { + s_logger.debug("Router connectivity and file system writable check passed"); + return new Pair(true, "success"); + } + + String resultDetails = fileSystemWritableTestResult.getDetails(); + s_logger.warn("File system writable check failed with details: " + resultDetails); + if (StringUtils.isNotBlank(resultDetails)) { + final String readOnlyFileSystemError = "Read-only file system"; + if (resultDetails.contains(readOnlyFileSystemError)) { + resultDetails = "Read-only file system"; + } + } else { + resultDetails = "No results available"; + } + + return new Pair(false, resultDetails); + } + private GetRouterAlertsAnswer execute(GetRouterAlertsCommand cmd) { String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 39e8aec633a4..3532a45afa42 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ 
b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -122,7 +122,7 @@ public interface StorageManager extends StorageService { "storage.pool.client.timeout", "Storage", "60", - "Timeout (in secs) for the storage pool client timeout (for managed pools). Currently only supported for PowerFlex.", + "Timeout (in secs) for the storage pool client connection timeout (for managed pools). Currently only supported for PowerFlex.", true, ConfigKey.Scope.StoragePool, null); diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java index 0708f3df3415..396096c14afc 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.api.SnapshotGroup; import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; @@ -46,14 +47,12 @@ import com.cloud.server.ManagementServerImpl; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.Storage; -import com.cloud.storage.StorageManager; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.uservm.UserVm; import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.ManagerBase; -import com.cloud.utils.crypt.DBEncryptionUtil; 
import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; @@ -481,12 +480,6 @@ private void publishUsageEvent(String type, VMSnapshot vmSnapshot, UserVm userVm } private ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception { - final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.valueIn(storagePoolId); - final String url = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_ENDPOINT).getValue(); - final String encryptedUsername = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_USERNAME).getValue(); - final String username = DBEncryptionUtil.decrypt(encryptedUsername); - final String encryptedPassword = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_PASSWORD).getValue(); - final String password = DBEncryptionUtil.decrypt(encryptedPassword); - return ScaleIOGatewayClient.getClient(url, username, password, false, clientTimeout); + return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePoolId, storagePoolDetailsDao); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index f8ce8117dea6..7024eab68f4c 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -117,6 +117,7 @@ import com.cloud.storage.template.TemplateLocation; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; +import com.cloud.utils.UriUtils; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; import com.cloud.utils.storage.S3.S3Utils; @@ -1771,8 +1772,14 @@ public Answer 
handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) return new DirectDownloadAnswer(false, msg, true); } - s_logger.debug("Checking for free space on the host for downloading the template"); - if (!isEnoughSpaceForDownloadTemplateOnTemporaryLocation(cmd.getTemplateSize())) { + Long templateSize = null; + if (!org.apache.commons.lang.StringUtils.isBlank(cmd.getUrl())) { + String url = cmd.getUrl(); + templateSize = UriUtils.getRemoteSize(url); + } + + s_logger.debug("Checking for free space on the host for downloading the template with physical size: " + templateSize + " and virtual size: " + cmd.getTemplateSize()); + if (!isEnoughSpaceForDownloadTemplateOnTemporaryLocation(templateSize)) { String msg = "Not enough space on the defined temporary location to download the template " + cmd.getTemplateId(); s_logger.error(msg); return new DirectDownloadAnswer(false, msg, true); diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java new file mode 100644 index 000000000000..2daf8e4635ce --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientConnectionPool.java @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.datastore.client; + +import java.net.URISyntaxException; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.util.concurrent.ConcurrentHashMap; + +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.log4j.Logger; + +import com.cloud.storage.StorageManager; +import com.cloud.utils.crypt.DBEncryptionUtil; +import com.google.common.base.Preconditions; + +public class ScaleIOGatewayClientConnectionPool { + private static final Logger LOGGER = Logger.getLogger(ScaleIOGatewayClientConnectionPool.class); + + private ConcurrentHashMap gatewayClients; + + private static final ScaleIOGatewayClientConnectionPool instance; + + static { + instance = new ScaleIOGatewayClientConnectionPool(); + } + + public static ScaleIOGatewayClientConnectionPool getInstance() { + return instance; + } + + private ScaleIOGatewayClientConnectionPool() { + gatewayClients = new ConcurrentHashMap(); + } + + public ScaleIOGatewayClient getClient(Long storagePoolId, StoragePoolDetailsDao storagePoolDetailsDao) + throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException { + Preconditions.checkArgument(storagePoolId != null && storagePoolId > 0, "Invalid storage pool id"); + + ScaleIOGatewayClient client = null; + synchronized (gatewayClients) { + client = gatewayClients.get(storagePoolId); + if (client == null) { + final String url = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_ENDPOINT).getValue(); 
+ final String encryptedUsername = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_USERNAME).getValue(); + final String username = DBEncryptionUtil.decrypt(encryptedUsername); + final String encryptedPassword = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_PASSWORD).getValue(); + final String password = DBEncryptionUtil.decrypt(encryptedPassword); + final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.valueIn(storagePoolId); + + client = new ScaleIOGatewayClientImpl(url, username, password, false, clientTimeout); + gatewayClients.put(storagePoolId, client); + LOGGER.debug("Added gateway client for the storage pool: " + storagePoolId); + } + } + + return client; + } + + public boolean removeClient(Long storagePoolId) { + Preconditions.checkArgument(storagePoolId != null && storagePoolId > 0, "Invalid storage pool id"); + + ScaleIOGatewayClient client = null; + synchronized (gatewayClients) { + client = gatewayClients.remove(storagePoolId); + } + + if (client != null) { + LOGGER.debug("Removed gateway client for the storage pool: " + storagePoolId); + return true; + } + + return false; + } +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java index 5cc37699aa3e..915c49e93568 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java @@ -88,6 +88,15 @@ public class ScaleIOGatewayClientImpl implements ScaleIOGatewayClient { private String password; private String sessionKey = null; + // The session token is valid for 8 hours from the time it was created, unless there has been no 
activity for 10 minutes + // Reference: https://cpsdocs.dellemc.com/bundle/PF_REST_API_RG/page/GUID-92430F19-9F44-42B6-B898-87D5307AE59B.html + private static final long MAX_VALID_SESSION_TIME_IN_MILLISECS = 8 * 60 * 60 * 1000; // 8 hrs + private static final long MAX_IDLE_TIME_IN_MILLISECS = 10 * 60 * 1000; // 10 mins + private static final long BUFFER_TIME_IN_MILLISECS = 30 * 1000; // keep 30 secs buffer before the expiration (to avoid any last-minute operations) + + private long createTime = 0; + private long lastUsedTime = 0; + public ScaleIOGatewayClientImpl(final String url, final String username, final String password, final boolean validateCertificate, final int timeout) throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException { @@ -119,14 +128,14 @@ public ScaleIOGatewayClientImpl(final String url, final String username, final S this.username = username; this.password = password; - authenticate(username, password); + authenticate(); } ///////////////////////////////////////////////////////////// //////////////// Private Helper Methods ///////////////////// ///////////////////////////////////////////////////////////// - private void authenticate(final String username, final String password) { + private void authenticate() { final HttpGet request = new HttpGet(apiURI.toString() + "/login"); request.setHeader(HttpHeaders.AUTHORIZATION, "Basic " + Base64.getEncoder().encodeToString((username + ":" + password).getBytes())); try { @@ -143,6 +152,24 @@ private void authenticate(final String username, final String password) { } catch (final IOException e) { throw new CloudRuntimeException("Failed to authenticate PowerFlex API Gateway due to:" + e.getMessage()); } + long now = System.currentTimeMillis(); + createTime = lastUsedTime = now; + } + + private synchronized void renewClientSessionOnExpiry() { + if (isSessionExpired()) { + LOG.debug("Session expired, renewing"); + authenticate(); + } + } + + private boolean isSessionExpired() { + long 
now = System.currentTimeMillis() + BUFFER_TIME_IN_MILLISECS; + if ((now - createTime) > MAX_VALID_SESSION_TIME_IN_MILLISECS || + (now - lastUsedTime) > MAX_IDLE_TIME_IN_MILLISECS) { + return true; + } + return false; } private void checkAuthFailure(final HttpResponse response) { @@ -178,9 +205,13 @@ private void checkResponseTimeOut(final Exception e) { } private HttpResponse get(final String path) throws IOException { + renewClientSessionOnExpiry(); final HttpGet request = new HttpGet(apiURI.toString() + path); request.setHeader(HttpHeaders.AUTHORIZATION, "Basic " + Base64.getEncoder().encodeToString((this.username + ":" + this.sessionKey).getBytes())); final HttpResponse response = httpClient.execute(request); + synchronized (this) { + lastUsedTime = System.currentTimeMillis(); + } String responseStatus = (response != null) ? (response.getStatusLine().getStatusCode() + " " + response.getStatusLine().getReasonPhrase()) : "nil"; LOG.debug("GET request path: " + path + ", response: " + responseStatus); checkAuthFailure(response); @@ -188,6 +219,7 @@ private HttpResponse get(final String path) throws IOException { } private HttpResponse post(final String path, final Object obj) throws IOException { + renewClientSessionOnExpiry(); final HttpPost request = new HttpPost(apiURI.toString() + path); request.setHeader(HttpHeaders.AUTHORIZATION, "Basic " + Base64.getEncoder().encodeToString((this.username + ":" + this.sessionKey).getBytes())); request.setHeader("Content-type", "application/json"); @@ -202,6 +234,9 @@ private HttpResponse post(final String path, final Object obj) throws IOExceptio } } final HttpResponse response = httpClient.execute(request); + synchronized (this) { + lastUsedTime = System.currentTimeMillis(); + } String responseStatus = (response != null) ? 
(response.getStatusLine().getStatusCode() + " " + response.getStatusLine().getReasonPhrase()) : "nil"; LOG.debug("POST request path: " + path + ", response: " + responseStatus); checkAuthFailure(response); diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java index f31d62b4f8f4..5d7b38b93493 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java @@ -44,6 +44,7 @@ import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; import org.apache.cloudstack.storage.datastore.api.VolumeStatistics; import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; @@ -77,7 +78,6 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDetailsDao; import com.cloud.utils.Pair; -import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachineManager; import com.google.common.base.Preconditions; @@ -110,13 +110,7 @@ public ScaleIOPrimaryDataStoreDriver() { } private ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception { - final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.valueIn(storagePoolId); - final String url = storagePoolDetailsDao.findDetail(storagePoolId, 
ScaleIOGatewayClient.GATEWAY_API_ENDPOINT).getValue(); - final String encryptedUsername = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_USERNAME).getValue(); - final String username = DBEncryptionUtil.decrypt(encryptedUsername); - final String encryptedPassword = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_PASSWORD).getValue(); - final String password = DBEncryptionUtil.decrypt(encryptedPassword); - return ScaleIOGatewayClient.getClient(url, username, password, false, clientTimeout); + return ScaleIOGatewayClientConnectionPool.getInstance().getClient(storagePoolId, storagePoolDetailsDao); } @Override diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java index a8a3b3dd6a5e..5c9ddea47526 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java @@ -31,6 +31,8 @@ import javax.inject.Inject; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; import org.apache.commons.collections.CollectionUtils; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; @@ -80,6 +82,8 @@ public class ScaleIOPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCyc @Inject private PrimaryDataStoreDao primaryDataStoreDao; @Inject + private StoragePoolDetailsDao storagePoolDetailsDao; + @Inject private StoragePoolHostDao storagePoolHostDao; @Inject private PrimaryDataStoreHelper 
dataStoreHelper; @@ -255,14 +259,7 @@ public boolean attachCluster(DataStore dataStore, ClusterScope scope) { List connectedSdcIps = null; try { - Map dataStoreDetails = primaryDataStoreDao.getDetails(dataStore.getId()); - final String url = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT); - final String encryptedUsername = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_USERNAME); - final String username = DBEncryptionUtil.decrypt(encryptedUsername); - final String encryptedPassword = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_PASSWORD); - final String password = DBEncryptionUtil.decrypt(encryptedPassword); - final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.value(); - ScaleIOGatewayClient client = ScaleIOGatewayClient.getClient(url, username, password, false, clientTimeout); + ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(dataStore.getId(), storagePoolDetailsDao); connectedSdcIps = client.listConnectedSdcIps(); } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { LOGGER.error("Failed to create storage pool", e); @@ -319,14 +316,7 @@ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.Hyper List connectedSdcIps = null; try { - Map dataStoreDetails = primaryDataStoreDao.getDetails(dataStore.getId()); - String url = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT); - String encryptedUsername = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_USERNAME); - final String username = DBEncryptionUtil.decrypt(encryptedUsername); - String encryptedPassword = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_PASSWORD); - final String password = DBEncryptionUtil.decrypt(encryptedPassword); - final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.value(); - ScaleIOGatewayClient client = ScaleIOGatewayClient.getClient(url, username, password, false, clientTimeout); + ScaleIOGatewayClient 
client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(dataStore.getId(), storagePoolDetailsDao); connectedSdcIps = client.listConnectedSdcIps(); } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { LOGGER.error("Failed to create storage pool", e); @@ -415,6 +405,8 @@ public boolean deleteDataStore(DataStore dataStore) { } } + ScaleIOGatewayClientConnectionPool.getInstance().removeClient(dataStore.getId()); + return dataStoreHelper.deletePrimaryDataStore(dataStore); } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java index e27f8bd2608a..f6722314a5cb 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java @@ -21,14 +21,15 @@ import java.net.URISyntaxException; import java.security.KeyManagementException; import java.security.NoSuchAlgorithmException; -import java.util.Map; import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; @@ -39,11 +40,9 @@ import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.storage.DataStoreRole; -import com.cloud.storage.StorageManager; import 
com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.dao.StoragePoolHostDao; -import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; public class ScaleIOHostListener implements HypervisorHostListener { @@ -55,6 +54,7 @@ public class ScaleIOHostListener implements HypervisorHostListener { @Inject private HostDao _hostDao; @Inject private StoragePoolHostDao _storagePoolHostDao; @Inject private PrimaryDataStoreDao _primaryDataStoreDao; + @Inject private StoragePoolDetailsDao _storagePoolDetailsDao; @Override public boolean hostAdded(long hostId) { @@ -90,14 +90,7 @@ public boolean hostConnect(long hostId, long poolId) { private boolean isHostSdcConnected(String hostIpAddress, long poolId) { try { - Map dataStoreDetails = _primaryDataStoreDao.getDetails(poolId); - final String url = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT); - final String encryptedUsername = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_USERNAME); - final String username = DBEncryptionUtil.decrypt(encryptedUsername); - final String encryptedPassword = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_PASSWORD); - final String password = DBEncryptionUtil.decrypt(encryptedPassword); - final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.valueIn(poolId); - ScaleIOGatewayClient client = ScaleIOGatewayClient.getClient(url, username, password, false, clientTimeout); + ScaleIOGatewayClient client = ScaleIOGatewayClientConnectionPool.getInstance().getClient(poolId, _storagePoolDetailsDao); return client.isSdcConnected(hostIpAddress); } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { s_logger.error("Failed to check host sdc connection", e); diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java 
b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java index c62371f4c246..eed82ff0ed2c 100644 --- a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java @@ -31,9 +31,7 @@ import static org.mockito.MockitoAnnotations.initMocks; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.UUID; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -44,8 +42,10 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientConnectionPool; import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientImpl; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.provider.ScaleIOHostListener; import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; @@ -80,7 +80,6 @@ import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.template.TemplateManager; -import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.exception.CloudRuntimeException; @PrepareForTest(ScaleIOGatewayClient.class) @@ -90,6 +89,8 @@ public class ScaleIOPrimaryDataStoreLifeCycleTest { @Mock private PrimaryDataStoreDao primaryDataStoreDao; @Mock + private StoragePoolDetailsDao storagePoolDetailsDao; + @Mock 
private PrimaryDataStoreHelper dataStoreHelper; @Mock private ResourceManager resourceManager; @@ -135,19 +136,9 @@ public void testAttachZone() throws Exception { final DataStore dataStore = mock(DataStore.class); when(dataStore.getId()).thenReturn(1L); - Map mockDataStoreDetails = new HashMap<>(); - mockDataStoreDetails.put(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT, "https://192.168.1.19/api"); - String encryptedUsername = DBEncryptionUtil.encrypt("root"); - mockDataStoreDetails.put(ScaleIOGatewayClient.GATEWAY_API_USERNAME, encryptedUsername); - String encryptedPassword = DBEncryptionUtil.encrypt("Password@123"); - mockDataStoreDetails.put(ScaleIOGatewayClient.GATEWAY_API_PASSWORD, encryptedPassword); - when(primaryDataStoreDao.getDetails(1L)).thenReturn(mockDataStoreDetails); - PowerMockito.mockStatic(ScaleIOGatewayClient.class); ScaleIOGatewayClientImpl client = mock(ScaleIOGatewayClientImpl.class); - String username = DBEncryptionUtil.decrypt(encryptedUsername); - String password = DBEncryptionUtil.decrypt(encryptedPassword); - when(ScaleIOGatewayClient.getClient("https://192.168.1.19/api", username, password, false, 60)).thenReturn(client); + when(ScaleIOGatewayClientConnectionPool.getInstance().getClient(1L, storagePoolDetailsDao)).thenReturn(client); List connectedSdcIps = new ArrayList<>(); connectedSdcIps.add("192.168.1.1"); diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index cf01b8db3409..0f75086d256b 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -4115,13 +4115,18 @@ public List listRouterHealthChecks(GetRouterHea throw new CloudRuntimeException("Router health checks are not enabled for router " + routerId); } - if (cmd.shouldPerformFreshChecks() && !routerService.performRouterHealthChecks(routerId)) { - throw new CloudRuntimeException("Unable to perform fresh 
checks on router."); + if (cmd.shouldPerformFreshChecks()) { + Pair healthChecksresult = routerService.performRouterHealthChecks(routerId); + if (healthChecksresult == null) { + throw new CloudRuntimeException("Failed to initiate fresh checks on router."); + } else if (!healthChecksresult.first()) { + throw new CloudRuntimeException("Unable to perform fresh checks on router - " + healthChecksresult.second()); + } } List result = new ArrayList<>(routerHealthCheckResultDao.getHealthCheckResults(routerId)); if (result == null || result.size() == 0) { - throw new CloudRuntimeException("Database had no entries for health checks for router. This could happen for " + + throw new CloudRuntimeException("No health check results found for the router. This could happen for " + "a newly created router. Please wait for periodic results to populate or manually call for checks to execute."); } diff --git a/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java b/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java index 11f03c5eab31..cc947bd05031 100644 --- a/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java +++ b/server/src/main/java/com/cloud/network/router/NetworkHelperImpl.java @@ -74,6 +74,7 @@ import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.NetworkDetailVO; import com.cloud.network.dao.NetworkDetailsDao; +import com.cloud.network.dao.RouterHealthCheckResultDao; import com.cloud.network.dao.UserIpv6AddressDao; import com.cloud.network.lb.LoadBalancingRule; import com.cloud.network.router.VirtualRouter.RedundantState; @@ -161,6 +162,8 @@ public class NetworkHelperImpl implements NetworkHelper { VpcVirtualNetworkApplianceManager _vpcRouterMgr; @Inject NetworkDetailsDao networkDetailsDao; + @Inject + RouterHealthCheckResultDao _routerHealthCheckResultDao; protected final Map> hypervisorsMap = new HashMap<>(); @@ -259,6 +262,7 @@ public VirtualRouter destroyRouter(final long routerId, final Account caller, fi 
_accountMgr.checkAccess(caller, null, true, router); _itMgr.expunge(router.getUuid()); + _routerHealthCheckResultDao.expungeHealthChecks(router.getId()); _routerDao.remove(router.getId()); return router; } diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 9e0a2bfd190b..8e38fb9280b5 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -1271,68 +1271,68 @@ protected void runInContext() { ex.printStackTrace(); } } + } - private List getFailingChecks(DomainRouterVO router, GetRouterMonitorResultsAnswer answer) { + private List getFailingChecks(DomainRouterVO router, GetRouterMonitorResultsAnswer answer) { - if (answer == null) { - s_logger.warn("Unable to fetch monitor results for router " + router); - resetRouterHealthChecksAndConnectivity(router.getId(), false, false, "Communication failed"); - return Arrays.asList(CONNECTIVITY_TEST); - } else if (!answer.getResult()) { - s_logger.warn("Failed to fetch monitor results from router " + router + " with details: " + answer.getDetails()); - if (StringUtils.isNotBlank(answer.getDetails()) && answer.getDetails().equalsIgnoreCase(READONLY_FILESYSTEM_ERROR)) { - resetRouterHealthChecksAndConnectivity(router.getId(), true, false, "Failed to write: " + answer.getDetails()); - return Arrays.asList(FILESYSTEM_WRITABLE_TEST); - } else { - resetRouterHealthChecksAndConnectivity(router.getId(), false, false, "Failed to fetch results with details: " + answer.getDetails()); - return Arrays.asList(CONNECTIVITY_TEST); - } + if (answer == null) { + s_logger.warn("Unable to fetch monitor results for router " + router); + resetRouterHealthChecksAndConnectivity(router.getId(), false, false, "Communication failed"); + return Arrays.asList(CONNECTIVITY_TEST); + } 
else if (!answer.getResult()) { + s_logger.warn("Failed to fetch monitor results from router " + router + " with details: " + answer.getDetails()); + if (StringUtils.isNotBlank(answer.getDetails()) && answer.getDetails().equalsIgnoreCase(READONLY_FILESYSTEM_ERROR)) { + resetRouterHealthChecksAndConnectivity(router.getId(), true, false, "Failed to write: " + answer.getDetails()); + return Arrays.asList(FILESYSTEM_WRITABLE_TEST); } else { - resetRouterHealthChecksAndConnectivity(router.getId(), true, true, "Successfully fetched data"); - updateDbHealthChecksFromRouterResponse(router.getId(), answer.getMonitoringResults()); - return answer.getFailingChecks(); + resetRouterHealthChecksAndConnectivity(router.getId(), false, false, "Failed to fetch results with details: " + answer.getDetails()); + return Arrays.asList(CONNECTIVITY_TEST); } + } else { + resetRouterHealthChecksAndConnectivity(router.getId(), true, true, "Successfully fetched data"); + updateDbHealthChecksFromRouterResponse(router.getId(), answer.getMonitoringResults()); + return answer.getFailingChecks(); } + } - private void handleFailingChecks(DomainRouterVO router, List failingChecks) { - if (failingChecks == null || failingChecks.size() == 0) { - return; - } + private void handleFailingChecks(DomainRouterVO router, List failingChecks) { + if (failingChecks == null || failingChecks.size() == 0) { + return; + } - String alertMessage = "Health checks failed: " + failingChecks.size() + " failing checks on router " + router.getUuid(); - _alertMgr.sendAlert(AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), - alertMessage, alertMessage); - s_logger.warn(alertMessage + ". 
Checking failed health checks to see if router needs recreate"); + String alertMessage = "Health checks failed: " + failingChecks.size() + " failing checks on router " + router.getUuid(); + _alertMgr.sendAlert(AlertType.ALERT_TYPE_DOMAIN_ROUTER, router.getDataCenterId(), router.getPodIdToDeployIn(), + alertMessage, alertMessage); + s_logger.warn(alertMessage + ". Checking failed health checks to see if router needs recreate"); - String checkFailsToRecreateVr = RouterHealthChecksFailuresToRecreateVr.valueIn(router.getDataCenterId()); - StringBuilder failingChecksEvent = new StringBuilder(); - boolean recreateRouter = false; - for (int i = 0; i < failingChecks.size(); i++) { - String failedCheck = failingChecks.get(i); - if (i == 0) { - failingChecksEvent.append("Router ") - .append(router.getUuid()) - .append(" has failing checks: "); - } + String checkFailsToRecreateVr = RouterHealthChecksFailuresToRecreateVr.valueIn(router.getDataCenterId()); + StringBuilder failingChecksEvent = new StringBuilder(); + boolean recreateRouter = false; + for (int i = 0; i < failingChecks.size(); i++) { + String failedCheck = failingChecks.get(i); + if (i == 0) { + failingChecksEvent.append("Router ") + .append(router.getUuid()) + .append(" has failing checks: "); + } - failingChecksEvent.append(failedCheck); - if (i < failingChecks.size() - 1) { - failingChecksEvent.append(", "); - } + failingChecksEvent.append(failedCheck); + if (i < failingChecks.size() - 1) { + failingChecksEvent.append(", "); + } - if (StringUtils.isNotBlank(checkFailsToRecreateVr) && checkFailsToRecreateVr.contains(failedCheck)) { - recreateRouter = true; - } + if (StringUtils.isNotBlank(checkFailsToRecreateVr) && checkFailsToRecreateVr.contains(failedCheck)) { + recreateRouter = true; } + } - ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, - Domain.ROOT_DOMAIN, EventTypes.EVENT_ROUTER_HEALTH_CHECKS, failingChecksEvent.toString()); + ActionEventUtils.onActionEvent(User.UID_SYSTEM, 
Account.ACCOUNT_ID_SYSTEM, + Domain.ROOT_DOMAIN, EventTypes.EVENT_ROUTER_HEALTH_CHECKS, failingChecksEvent.toString()); - if (recreateRouter) { - s_logger.warn("Health Check Alert: Found failing checks in " + - RouterHealthChecksFailuresToRecreateVrCK + ", attempting recreating router."); - recreateRouter(router.getId()); - } + if (recreateRouter) { + s_logger.warn("Health Check Alert: Found failing checks in " + + RouterHealthChecksFailuresToRecreateVrCK + ", attempting recreating router."); + recreateRouter(router.getId()); } } @@ -1554,7 +1554,7 @@ private GetRouterMonitorResultsAnswer fetchAndUpdateRouterHealthChecks(DomainRou String controlIP = getRouterControlIP(router); if (StringUtils.isNotBlank(controlIP) && !controlIP.equals("0.0.0.0")) { - final GetRouterMonitorResultsCommand command = new GetRouterMonitorResultsCommand(performFreshChecks); + final GetRouterMonitorResultsCommand command = new GetRouterMonitorResultsCommand(performFreshChecks, false); command.setAccessDetail(NetworkElementCommand.ROUTER_IP, controlIP); command.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName()); try { @@ -1579,8 +1579,40 @@ private GetRouterMonitorResultsAnswer fetchAndUpdateRouterHealthChecks(DomainRou return null; } + private GetRouterMonitorResultsAnswer performBasicTestsOnRouter(DomainRouterVO router) { + if (!RouterHealthChecksEnabled.value()) { + return null; + } + + String controlIP = getRouterControlIP(router); + if (StringUtils.isNotBlank(controlIP) && !controlIP.equals("0.0.0.0")) { + final GetRouterMonitorResultsCommand command = new GetRouterMonitorResultsCommand(false, true); + command.setAccessDetail(NetworkElementCommand.ROUTER_IP, controlIP); + command.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName()); + try { + final Answer answer = _agentMgr.easySend(router.getHostId(), command); + + if (answer == null) { + s_logger.warn("Unable to fetch basic router test results data from router " + 
router.getHostName()); + return null; + } + if (answer instanceof GetRouterMonitorResultsAnswer) { + return (GetRouterMonitorResultsAnswer) answer; + } else { + s_logger.warn("Unable to fetch basic router test results from router " + router.getHostName() + " Received answer " + answer.getDetails()); + return new GetRouterMonitorResultsAnswer(command, false, null, answer.getDetails()); + } + } catch (final Exception e) { + s_logger.warn("Error while performing basic tests on router: " + router.getInstanceName(), e); + return null; + } + } + + return null; + } + @Override - public boolean performRouterHealthChecks(long routerId) { + public Pair performRouterHealthChecks(long routerId) { DomainRouterVO router = _routerDao.findById(routerId); if (router == null) { @@ -1593,35 +1625,45 @@ public boolean performRouterHealthChecks(long routerId) { s_logger.info("Running health check results for router " + router.getUuid()); - final GetRouterMonitorResultsAnswer answer; + GetRouterMonitorResultsAnswer answer = null; + String resultDetails = ""; boolean success = true; - // Step 1: Update health check data on router and perform and retrieve health checks on router - if (!updateRouterHealthChecksConfig(router)) { - s_logger.warn("Unable to update health check config for fresh run successfully for router: " + router + ", so trying to fetch last result."); - success = false; - answer = fetchAndUpdateRouterHealthChecks(router, false); - } else { - s_logger.info("Successfully updated health check config for fresh run successfully for router: " + router); - answer = fetchAndUpdateRouterHealthChecks(router, true); - } - // Step 2: Update health checks values in database. We do this irrespective of new health check config. 
+ // Step 1: Perform basic tests to check the connectivity and file system on router + answer = performBasicTestsOnRouter(router); if (answer == null) { + s_logger.debug("No results received for the basic tests on router: " + router); + resultDetails = "Basic tests results unavailable"; success = false; - resetRouterHealthChecksAndConnectivity(routerId, false, false, "Communication failed"); } else if (!answer.getResult()) { + s_logger.debug("Basic tests failed on router: " + router); + resultDetails = "Basic tests failed - " + answer.getMonitoringResults(); success = false; - if (StringUtils.isNotBlank(answer.getDetails()) && answer.getDetails().equalsIgnoreCase(READONLY_FILESYSTEM_ERROR)) { - resetRouterHealthChecksAndConnectivity(routerId, true, false, "Failed to write: " + answer.getDetails()); + } else { + // Step 2: Update health check data on router and perform and retrieve health checks on router + if (!updateRouterHealthChecksConfig(router)) { + s_logger.warn("Unable to update health check config for fresh run successfully for router: " + router + ", so trying to fetch last result."); + success = false; + answer = fetchAndUpdateRouterHealthChecks(router, false); } else { - resetRouterHealthChecksAndConnectivity(routerId, false, false, "Failed to fetch results with details: " + answer.getDetails()); + s_logger.info("Successfully updated health check config for fresh run successfully for router: " + router); + answer = fetchAndUpdateRouterHealthChecks(router, true); + } + + if (answer == null) { + resultDetails = "Failed to fetch and update health checks"; + success = false; + } else if (!answer.getResult()) { + resultDetails = "Get health checks failed - " + answer.getMonitoringResults(); + success = false; } - } else { - resetRouterHealthChecksAndConnectivity(routerId, true, true, "Successfully fetched data"); - updateDbHealthChecksFromRouterResponse(routerId, answer.getMonitoringResults()); } - return success; + // Step 3: Update health checks values in 
database. We do this irrespective of new health check config. + List failingChecks = getFailingChecks(router, answer); + handleFailingChecks(router, failingChecks); + + return new Pair(success, resultDetails); } protected class UpdateRouterHealthChecksConfigTask extends ManagedContextRunnable { @@ -1635,7 +1677,13 @@ protected void runInContext() { s_logger.debug("Found " + routers.size() + " running routers. "); for (final DomainRouterVO router : routers) { - updateRouterHealthChecksConfig(router); + GetRouterMonitorResultsAnswer answer = performBasicTestsOnRouter(router); + if (answer != null && answer.getResult()) { + updateRouterHealthChecksConfig(router); + } else { + String resultDetails = (answer == null) ? "" : ", " + answer.getMonitoringResults(); + s_logger.debug("Couldn't update health checks config on router: " + router + " as basic tests didn't succeed" + resultDetails); + } } } catch (final Exception ex) { s_logger.error("Fail to complete the UpdateRouterHealthChecksConfigTask! 
", ex); @@ -1672,7 +1720,6 @@ private boolean updateRouterHealthChecksConfig(DomainRouterVO router) { return false; } - SetMonitorServiceCommand command = createMonitorServiceCommand(router, null,true, true); String controlIP = getRouterControlIP(router); if (StringUtils.isBlank(controlIP) || controlIP.equals("0.0.0.0")) { s_logger.debug("Skipping update data on router " + router.getUuid() + " because controlIp is not correct."); @@ -1682,6 +1729,7 @@ private boolean updateRouterHealthChecksConfig(DomainRouterVO router) { s_logger.info("Updating data for router health checks for router " + router.getUuid()); Answer origAnswer = null; try { + SetMonitorServiceCommand command = createMonitorServiceCommand(router, null, true, true); origAnswer = _agentMgr.easySend(router.getHostId(), command); } catch (final Exception e) { s_logger.error("Error while sending update data for health check to router: " + router.getInstanceName(), e); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 98ecd6d5a3a1..3e00f3da9d86 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -4989,6 +4989,8 @@ private Host getDestinationHost(Long hostId, boolean isRootAdmin) { destinationHost = _hostDao.findById(hostId); if (destinationHost == null) { throw new InvalidParameterValueException("Unable to find the host to deploy the VM, host id=" + hostId); + } else if (destinationHost.getResourceState() != ResourceState.Enabled || destinationHost.getStatus() != Status.Up ) { + throw new InvalidParameterValueException("Unable to deploy the VM as the host: " + destinationHost.getName() + " is not in the right state"); } } return destinationHost; @@ -5256,8 +5258,6 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE throw new InvalidParameterValueException("Unable to find service offering: " + 
serviceOfferingId); } - Long templateId = cmd.getTemplateId(); - if (!serviceOffering.isDynamic()) { for(String detail: cmd.getDetails().keySet()) { if(detail.equalsIgnoreCase(VmDetailConstants.CPU_NUMBER) || detail.equalsIgnoreCase(VmDetailConstants.CPU_SPEED) || detail.equalsIgnoreCase(VmDetailConstants.MEMORY)) { @@ -5266,6 +5266,8 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE } } + Long templateId = cmd.getTemplateId(); + VirtualMachineTemplate template = _entityMgr.findById(VirtualMachineTemplate.class, templateId); // Make sure a valid template ID was specified if (template == null) { @@ -5301,6 +5303,14 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE networkIds = new ArrayList<>(userVmNetworkMap.values()); } + Account caller = CallContext.current().getCallingAccount(); + Long callerId = caller.getId(); + + boolean isRootAdmin = _accountService.isRootAdmin(callerId); + + Long hostId = cmd.getHostId(); + getDestinationHost(hostId, isRootAdmin); + String ipAddress = cmd.getIpAddress(); String ip6Address = cmd.getIp6Address(); String macAddress = cmd.getMacAddress(); @@ -5351,8 +5361,6 @@ public UserVm createVirtualMachine(DeployVMCmd cmd) throws InsufficientCapacityE } // Add extraConfig to user_vm_details table - Account caller = CallContext.current().getCallingAccount(); - Long callerId = caller.getId(); String extraConfig = cmd.getExtraConfig(); if (StringUtils.isNotBlank(extraConfig)) { if (EnableAdditionalVmConfig.valueIn(callerId)) { diff --git a/server/src/test/java/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java b/server/src/test/java/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java index 45bf4c1763b5..abb1863a1a36 100644 --- a/server/src/test/java/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java +++ b/server/src/test/java/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java @@ -39,6 +39,7 @@ import com.cloud.network.vpc.PrivateGateway; import 
com.cloud.user.Account; import com.cloud.user.User; +import com.cloud.utils.Pair; import com.cloud.utils.component.ManagerBase; import com.cloud.vm.DomainRouterVO; import com.cloud.vm.VirtualMachineProfile; @@ -249,8 +250,8 @@ public List upgradeRouterTemplate(final UpgradeRouterTemplateCmd cmd) { } @Override - public boolean performRouterHealthChecks(long routerId) { - return false; + public Pair performRouterHealthChecks(long routerId) { + return null; } @Override From 2de8944bc31f907c85527b5bfbec3616cf1f67cf Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Tue, 12 Jan 2021 19:07:40 +0530 Subject: [PATCH 04/12] Updated PowerFlex/ScaleIO storage plugin version to 4.16.0.0 --- plugins/storage/volume/scaleio/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/storage/volume/scaleio/pom.xml b/plugins/storage/volume/scaleio/pom.xml index 859b2868235a..e95087e7257f 100644 --- a/plugins/storage/volume/scaleio/pom.xml +++ b/plugins/storage/volume/scaleio/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack-plugins - 4.15.0.0-SNAPSHOT + 4.16.0.0-SNAPSHOT ../../../pom.xml From 3a7c3db72fa49fd3b78a65581452dd9062639c85 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Fri, 22 Jan 2021 12:44:14 +0530 Subject: [PATCH 05/12] UI Changes to support storage plugin for PowerFlex/ScaleIO storage pool. 
- PowerFlex pool URL generated from the UI inputs(Gateway, Username, Password, Storage Pool) when adding "PowerFlex" Primary Storage - Updated protocol to "custom" for PowerFlex provider - Allow VM Snapshot for stopped VM on KVM hypervisor and PowerFlex/ScaleIO storage pool and Minor improvements in PowerFlex/ScaleIO storage plugin code --- .../datastore/PrimaryDataStoreImpl.java | 3 +- .../kvm/storage/KVMStorageProcessor.java | 4 +- .../kvm/storage/ScaleIOStorageAdaptor.java | 4 +- .../driver/ScaleIOPrimaryDataStoreDriver.java | 2 +- ui/public/locales/en.json | 4 ++ ui/src/config/section/compute.js | 3 +- ui/src/views/infra/AddPrimaryStorage.vue | 67 ++++++++++++++++++- 7 files changed, 76 insertions(+), 11 deletions(-) diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java index 18a7f3c48903..f557ac35171e 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java @@ -203,8 +203,7 @@ public String getUuid() { @Override public String getName() { - // TODO Auto-generated method stub - return null; + return pdsv.getName(); } @Override diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 7024eab68f4c..db0117658911 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -396,8 +396,8 @@ public Answer cloneVolumeFromBaseTemplate(final CopyCommand cmd) { BaseVol = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), 
primaryStore.getUuid(), templatePath); if (BaseVol == null) { - s_logger.debug("Failed to get the base template volume: " + templatePath); - throw new CloudRuntimeException(""); + s_logger.debug("Failed to get the physical disk for base template volume at path: " + templatePath); + throw new CloudRuntimeException("Failed to get the physical disk for base template volume at path: " + templatePath); } if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java index 419fa0cb2d43..04e8d93b50a5 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java @@ -252,8 +252,8 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt } destDisk.setFormat(QemuImg.PhysicalDiskFormat.RAW); - destDisk.setSize(disk.getVirtualSize()); - destDisk.setVirtualSize(disk.getSize()); + destDisk.setVirtualSize(disk.getVirtualSize()); + destDisk.setSize(disk.getSize()); QemuImg qemu = new QemuImg(timeout); QemuImgFile srcFile = null; diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java index 5d7b38b93493..f04d6a504ddd 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java @@ -652,7 +652,7 @@ private Answer migrateVolume(DataObject 
srcData, DataObject destData) { } if (Strings.isNullOrEmpty(srcPoolSystemId) || Strings.isNullOrEmpty(destPoolSystemId)) { - throw new CloudRuntimeException("Failed to validate PowerFlex pools compatibilty for migration"); + throw new CloudRuntimeException("Failed to validate PowerFlex pools compatibility for migration"); } if (!srcPoolSystemId.equals(destPoolSystemId)) { diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 3241c82309d8..39d213667656 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -1595,6 +1595,10 @@ "label.portable.ip.ranges": "Portable IP Ranges", "label.portableipaddress": "Portable IPs", "label.portforwarding": "Port Forwarding", +"label.powerflex.gateway": "Gateway", +"label.powerflex.gateway.username": "Gateway Username", +"label.powerflex.gateway.password": "Gateway Password", +"label.powerflex.storage.pool": "Storage Pool", "label.powerstate": "Power State", "label.preferred": "Prefered", "label.presetup": "PreSetup", diff --git a/ui/src/config/section/compute.js b/ui/src/config/section/compute.js index e55e51684fe5..6acebf6e8cf3 100644 --- a/ui/src/config/section/compute.js +++ b/ui/src/config/section/compute.js @@ -156,7 +156,8 @@ export default { args: ['virtualmachineid', 'name', 'description', 'snapshotmemory', 'quiescevm'], show: (record) => { return ((['Running'].includes(record.state) && record.hypervisor !== 'LXC') || - (['Stopped'].includes(record.state) && record.hypervisor !== 'KVM' && record.hypervisor !== 'LXC')) + (['Stopped'].includes(record.state) && ((record.hypervisor !== 'KVM' && record.hypervisor !== 'LXC') || + (record.hypervisor === 'KVM' && record.pooltype === 'PowerFlex')))) }, mapping: { virtualmachineid: { diff --git a/ui/src/views/infra/AddPrimaryStorage.vue b/ui/src/views/infra/AddPrimaryStorage.vue index 8229abedddb5..c5ee8393aab9 100644 --- a/ui/src/views/infra/AddPrimaryStorage.vue +++ b/ui/src/views/infra/AddPrimaryStorage.vue @@ -202,13 +202,13 @@ + 
@change="updateProviderAndProtocol"> {{ provider }} -
+
{{ $t('label.ismanaged') }} @@ -248,6 +248,44 @@
+
+ + + {{ $t('label.powerflex.gateway') }} + + + + + + + + + {{ $t('label.powerflex.gateway.username') }} + + + + + + + + + {{ $t('label.powerflex.gateway.password') }} + + + + + + + + + {{ $t('label.powerflex.storage.pool') }} + + + + + + +
@@ -557,6 +595,23 @@ export default { } return url }, + powerflexURL (gateway, username, password, pool) { + var url = 'powerflex://' + encodeURIComponent(username) + ':' + encodeURIComponent(password) + '@' + + gateway + '/' + encodeURIComponent(pool) + return url + }, + updateProviderAndProtocol (value) { + if (value === 'PowerFlex') { + this.protocols = ['custom'] + this.protocolSelected = 'custom' + this.form.setFieldsValue({ + protocol: 'custom' + }) + } else { + this.fetchHypervisor(null) + } + this.providerSelected = value + }, closeModal () { this.$parent.$parent.close() }, @@ -649,7 +704,7 @@ export default { url = this.iscsiURL(server, iqn, lun) } params.url = url - if (values.provider !== 'DefaultPrimary') { + if (values.provider !== 'DefaultPrimary' && values.provider !== 'PowerFlex') { if (values.managed) { params.managed = true } else { @@ -665,6 +720,12 @@ export default { params.url = values.url } } + + if (values.provider === 'PowerFlex') { + params.url = this.powerflexURL(values.powerflexGateway, values.powerflexGatewayUsername, + values.powerflexGatewayPassword, values.powerflexStoragePool) + } + if (this.selectedTags.length > 0) { params.tags = this.selectedTags.join() } From 71252ea387914d3d917ba72f30fb0ad25e0d38f1 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Mon, 25 Jan 2021 17:45:19 +0530 Subject: [PATCH 06/12] Added support for PowerFlex/ScaleIO volume migration across different PowerFlex storage instances. - findStoragePoolsForMigration API returns PowerFlex pool(s) of different instance as suitable pool(s), for volume(s) on PowerFlex storage pool. - Volume(s) with snapshots are not allowed to migrate to different PowerFlex instance. - Volume(s) of running VM are not allowed to migrate to other PowerFlex storage pools. - Volume migration from PowerFlex pool to Non-PowerFlex pool, and vice versa are not supported. 
--- .../StorageSubsystemCommandHandlerBase.java | 11 +- .../com/cloud/storage/StorageManager.java | 2 + .../orchestration/VolumeOrchestrator.java | 3 + .../cloud/storage/dao/StoragePoolHostDao.java | 2 + .../storage/dao/StoragePoolHostDaoImpl.java | 30 ++ .../storage/volume/VolumeServiceImpl.java | 214 +++++++++- .../kvm/storage/KVMStorageProcessor.java | 47 ++- .../driver/ScaleIOPrimaryDataStoreDriver.java | 78 +++- .../com/cloud/storage/StorageManagerImpl.java | 71 ++-- .../cloud/storage/VolumeApiServiceImpl.java | 6 + .../storage/snapshot/SnapshotManager.java | 2 + .../storage/snapshot/SnapshotManagerImpl.java | 9 + test/integration/plugins/scaleio/README.md | 15 +- .../plugins/scaleio/test_scaleio_volumes.py | 393 +++++++++++++++++- 14 files changed, 809 insertions(+), 74 deletions(-) diff --git a/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java b/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java index 910eb3d87905..6c5b55a3af0e 100644 --- a/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java +++ b/core/src/main/java/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java @@ -99,10 +99,13 @@ protected Answer execute(CopyCommand cmd) { //copy volume from image cache to primary return processor.copyVolumeFromImageCacheToPrimary(cmd); } else if (srcData.getObjectType() == DataObjectType.VOLUME && srcData.getDataStore().getRole() == DataStoreRole.Primary) { - if (destData.getObjectType() == DataObjectType.VOLUME && srcData instanceof VolumeObjectTO && ((VolumeObjectTO)srcData).isDirectDownload()) { - return processor.copyVolumeFromPrimaryToPrimary(cmd); - } else if (destData.getObjectType() == DataObjectType.VOLUME) { - return processor.copyVolumeFromPrimaryToSecondary(cmd); + if (destData.getObjectType() == DataObjectType.VOLUME) { + if ((srcData instanceof VolumeObjectTO && ((VolumeObjectTO)srcData).isDirectDownload()) || + 
destData.getDataStore().getRole() == DataStoreRole.Primary) { + return processor.copyVolumeFromPrimaryToPrimary(cmd); + } else { + return processor.copyVolumeFromPrimaryToSecondary(cmd); + } } else if (destData.getObjectType() == DataObjectType.TEMPLATE) { return processor.createTemplateFromVolume(cmd); } diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 3532a45afa42..b20db8d32633 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -192,6 +192,8 @@ public interface StorageManager extends StorageService { StoragePoolVO findLocalStorageOnHost(long hostId); + Host findUpAndEnabledHostWithAccessToStoragePools(List poolIds); + List findStoragePoolsConnectedToHost(long hostId); boolean canHostAccessStoragePool(Host host, StoragePool pool); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 849787b5742a..e6260b851ecc 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -1180,6 +1180,9 @@ public Volume migrateVolume(Volume volume, StoragePool destPool) throws StorageU VolumeApiResult result = future.get(); if (result.isFailed()) { s_logger.error("Migrate volume failed:" + result.getResult()); + if (result.getResult() != null && result.getResult().contains("[UNSUPPORTED]")) { + throw new CloudRuntimeException("Migrate volume failed: " + result.getResult()); + } throw new StorageUnavailableException("Migrate volume failed: " + result.getResult(), destPool.getId()); } else { // update the volumeId for 
snapshots on secondary diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java index 8dd10a7c29a4..b099a6d6bdbb 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDao.java @@ -32,6 +32,8 @@ public interface StoragePoolHostDao extends GenericDao List listByHostStatus(long poolId, Status hostStatus); + List findHostsConnectedToPools(List poolIds); + List> getDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly); public void deletePrimaryRecordsForHost(long hostId); diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java index 2b7b0f7cbe43..349baf05eb84 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/StoragePoolHostDaoImpl.java @@ -21,6 +21,7 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.List; +import java.util.stream.Collectors; import org.apache.log4j.Logger; @@ -44,6 +45,8 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase listByHostStatus(long poolId, Status hostStatus) return result; } + @Override + public List findHostsConnectedToPools(List poolIds) { + List hosts = new ArrayList(); + if (poolIds == null || poolIds.isEmpty()) { + return hosts; + } + + String poolIdsInStr = poolIds.stream().map(poolId -> String.valueOf(poolId)).collect(Collectors.joining(",", "(", ")")); + String sql = HOSTS_FOR_POOLS_SEARCH.replace("(?)", poolIdsInStr); + + TransactionLegacy txn = TransactionLegacy.currentTxn(); + try(PreparedStatement pstmt = txn.prepareStatement(sql);) { + try(ResultSet rs = pstmt.executeQuery();) { + while (rs.next()) { + long hostId = rs.getLong(1); // host_id column + hosts.add(hostId); + 
} + } catch (SQLException e) { + s_logger.warn("findHostsConnectedToPools:Exception: ", e); + } + } catch (Exception e) { + s_logger.warn("findHostsConnectedToPools:Exception: ", e); + } + + return hosts; + } + @Override public List> getDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly) { ArrayList> l = new ArrayList>(); diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 35df85fbbc84..a3498cd80b61 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -66,6 +66,8 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; @@ -128,6 +130,7 @@ import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine; +import com.google.common.base.Strings; import static com.cloud.storage.resource.StorageProcessor.REQUEST_TEMPLATE_RELOAD; @@ -169,6 +172,8 @@ public class VolumeServiceImpl implements VolumeService { @Inject private PrimaryDataStoreDao storagePoolDao; @Inject + private StoragePoolDetailsDao _storagePoolDetailsDao; + @Inject private HostDetailsDao hostDetailsDao; @Inject private ManagementService mgr; @@ -182,6 +187,8 @@ public class VolumeServiceImpl implements VolumeService { 
private TemplateDataFactory tmplFactory; @Inject private VolumeOrchestrationService _volumeMgr; + @Inject + private StorageManager _storageMgr; private final static String SNAPSHOT_ID = "SNAPSHOT_ID"; @@ -1741,8 +1748,8 @@ protected Void copyVolumeFromPrimaryToImageCallback(AsyncCallbackDispatcher copyVolume(VolumeInfo srcVolume, DataStore destStore) { + DataStore srcStore = srcVolume.getDataStore(); if (s_logger.isDebugEnabled()) { - DataStore srcStore = srcVolume.getDataStore(); String srcRole = (srcStore != null && srcStore.getRole() != null ? srcVolume.getDataStore().getRole().toString() : ""); String msg = String.format("copying %s(id=%d, role=%s) to %s (id=%d, role=%s)" @@ -1763,6 +1770,11 @@ public AsyncCallFuture copyVolume(VolumeInfo srcVolume, DataSto return copyVolumeFromPrimaryToImage(srcVolume, destStore); } + if (srcStore.getRole() == DataStoreRole.Primary && destStore.getRole() == DataStoreRole.Primary && ((PrimaryDataStore) destStore).isManaged() && + requiresNewManagedVolumeInDestStore((PrimaryDataStore) srcStore, (PrimaryDataStore) destStore)) { + return copyManagedVolume(srcVolume, destStore); + } + // OfflineVmwareMigration: aren't we missing secondary to secondary in this logic? 
AsyncCallFuture future = new AsyncCallFuture(); @@ -1809,6 +1821,10 @@ protected Void copyVolumeCallBack(AsyncCallbackDispatcher copyManagedVolume(VolumeInfo srcVolume, DataStore destStore) { + AsyncCallFuture future = new AsyncCallFuture(); + VolumeApiResult res = new VolumeApiResult(srcVolume); + try { + if (!snapshotMgr.canOperateOnVolume(srcVolume)) { + s_logger.debug("There are snapshots creating for this volume, can not move this volume"); + res.setResult("There are snapshots creating for this volume, can not move this volume"); + future.complete(res); + return future; + } + + if (snapshotMgr.backedUpSnapshotsExistsForVolume(srcVolume)) { + s_logger.debug("There are backed up snapshots for this volume, can not move."); + res.setResult("[UNSUPPORTED] There are backed up snapshots for this volume, can not move. Please try again after removing them."); + future.complete(res); + return future; + } + + List poolIds = new ArrayList(); + poolIds.add(srcVolume.getPoolId()); + poolIds.add(destStore.getId()); + + Host hostWithPoolsAccess = _storageMgr.findUpAndEnabledHostWithAccessToStoragePools(poolIds); + if (hostWithPoolsAccess == null) { + s_logger.debug("No host(s) available with pool access, can not move this volume"); + res.setResult("No host(s) available with pool access, can not move this volume"); + future.complete(res); + return future; + } + + VolumeVO destVol = duplicateVolumeOnAnotherStorage(srcVolume, (StoragePool)destStore); + VolumeInfo destVolume = volFactory.getVolume(destVol.getId(), destStore); + + // Create a volume on managed storage. + AsyncCallFuture createVolumeFuture = createVolumeAsync(destVolume, destStore); + VolumeApiResult createVolumeResult = createVolumeFuture.get(); + if (createVolumeResult.isFailed()) { + throw new CloudRuntimeException("Creation of a dest volume failed: " + createVolumeResult.getResult()); + } + + // Refresh the volume info from the DB. 
+ destVolume = volFactory.getVolume(destVolume.getId(), destStore); + + destVolume.processEvent(Event.CreateRequested); + srcVolume.processEvent(Event.MigrationRequested); + + CopyVolumeContext context = new CopyVolumeContext(null, future, srcVolume, destVolume, destStore); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().copyManagedVolumeCallBack(null, null)).setContext(context); + + PrimaryDataStore srcPrimaryDataStore = (PrimaryDataStore) srcVolume.getDataStore(); + if (srcPrimaryDataStore.isManaged()) { + Map srcPrimaryDataStoreDetails = new HashMap(); + srcPrimaryDataStoreDetails.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); + srcPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_HOST, srcPrimaryDataStore.getHostAddress()); + srcPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(srcPrimaryDataStore.getPort())); + srcPrimaryDataStoreDetails.put(PrimaryDataStore.MANAGED_STORE_TARGET, srcVolume.get_iScsiName()); + srcPrimaryDataStoreDetails.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, srcVolume.getName()); + srcPrimaryDataStoreDetails.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(srcVolume.getSize())); + srcPrimaryDataStoreDetails.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(srcPrimaryDataStore.getId()))); + srcPrimaryDataStore.setDetails(srcPrimaryDataStoreDetails); + grantAccess(srcVolume, hostWithPoolsAccess, srcVolume.getDataStore()); + } + + PrimaryDataStore destPrimaryDataStore = (PrimaryDataStore) destStore; + Map destPrimaryDataStoreDetails = new HashMap(); + destPrimaryDataStoreDetails.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); + destPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress()); + destPrimaryDataStoreDetails.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort())); + 
destPrimaryDataStoreDetails.put(PrimaryDataStore.MANAGED_STORE_TARGET, destVolume.get_iScsiName()); + destPrimaryDataStoreDetails.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, destVolume.getName()); + destPrimaryDataStoreDetails.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(destVolume.getSize())); + destPrimaryDataStoreDetails.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(destPrimaryDataStore.getId()))); + destPrimaryDataStore.setDetails(destPrimaryDataStoreDetails); + + grantAccess(destVolume, hostWithPoolsAccess, destStore); + + try { + motionSrv.copyAsync(srcVolume, destVolume, hostWithPoolsAccess, caller); + } finally { + if (srcPrimaryDataStore.isManaged()) { + revokeAccess(srcVolume, hostWithPoolsAccess, srcVolume.getDataStore()); + } + revokeAccess(destVolume, hostWithPoolsAccess, destStore); + } + } catch (Exception e) { + s_logger.error("Copy to managed volume failed due to: " + e); + if(s_logger.isDebugEnabled()) { + s_logger.debug("Copy to managed volume failed.", e); + } + res.setResult(e.toString()); + future.complete(res); + } + + return future; + } + + protected Void copyManagedVolumeCallBack(AsyncCallbackDispatcher callback, CopyVolumeContext context) { + VolumeInfo srcVolume = context.srcVolume; + VolumeInfo destVolume = context.destVolume; + CopyCommandResult result = callback.getResult(); + AsyncCallFuture future = context.future; + VolumeApiResult res = new VolumeApiResult(destVolume); + try { + if (result.isFailed()) { + res.setResult(result.getResult()); + destVolume.processEvent(Event.MigrationCopyFailed); + srcVolume.processEvent(Event.OperationFailed); + try { + destroyVolume(destVolume.getId()); + destVolume = volFactory.getVolume(destVolume.getId()); + AsyncCallFuture destVolumeDestroyFuture = expungeVolumeAsync(destVolume); + destVolumeDestroyFuture.get(); + // If dest managed volume destroy fails, wait and retry. 
+ if (destVolumeDestroyFuture.get().isFailed()) { + Thread.sleep(5 * 1000); + destVolumeDestroyFuture = expungeVolumeAsync(destVolume); + destVolumeDestroyFuture.get(); + } + future.complete(res); + } catch (Exception e) { + s_logger.debug("failed to clean up managed volume on storage", e); + } + } else { + srcVolume.processEvent(Event.OperationSuccessed); + destVolume.processEvent(Event.MigrationCopySucceeded, result.getAnswer()); + volDao.updateUuid(srcVolume.getId(), destVolume.getId()); + try { + destroyVolume(srcVolume.getId()); + srcVolume = volFactory.getVolume(srcVolume.getId()); + AsyncCallFuture srcVolumeDestroyFuture = expungeVolumeAsync(srcVolume); + // If src volume destroy fails, wait and retry. + if (srcVolumeDestroyFuture.get().isFailed()) { + Thread.sleep(5 * 1000); + srcVolumeDestroyFuture = expungeVolumeAsync(srcVolume); + srcVolumeDestroyFuture.get(); + } + future.complete(res); + } catch (Exception e) { + s_logger.debug("failed to clean up volume on storage", e); + } + } + } catch (Exception e) { + s_logger.debug("Failed to process copy managed volume callback", e); + res.setResult(e.toString()); + future.complete(res); + } + + return null; + } + + private boolean requiresNewManagedVolumeInDestStore(PrimaryDataStore srcDataStore, PrimaryDataStore destDataStore) { + if (srcDataStore == null || destDataStore == null) { + s_logger.warn("Unable to check for new volume, either src or dest pool is null"); + return false; + } + + if (srcDataStore.getPoolType() == StoragePoolType.PowerFlex && destDataStore.getPoolType() == StoragePoolType.PowerFlex) { + if (srcDataStore.getId() == destDataStore.getId()) { + return false; + } + + final String STORAGE_POOL_SYSTEM_ID = "powerflex.storagepool.system.id"; + String srcPoolSystemId = null; + StoragePoolDetailVO srcPoolSystemIdDetail = _storagePoolDetailsDao.findDetail(srcDataStore.getId(), STORAGE_POOL_SYSTEM_ID); + if (srcPoolSystemIdDetail != null) { + srcPoolSystemId = srcPoolSystemIdDetail.getValue(); + } 
+ + String destPoolSystemId = null; + StoragePoolDetailVO destPoolSystemIdDetail = _storagePoolDetailsDao.findDetail(destDataStore.getId(), STORAGE_POOL_SYSTEM_ID); + if (destPoolSystemIdDetail != null) { + destPoolSystemId = destPoolSystemIdDetail.getValue(); + } + + if (Strings.isNullOrEmpty(srcPoolSystemId) || Strings.isNullOrEmpty(destPoolSystemId)) { + s_logger.warn("PowerFlex src pool: " + srcDataStore.getId() + " or dest pool: " + destDataStore.getId() + + " storage instance details are not available"); + return false; + } + + if (!srcPoolSystemId.equals(destPoolSystemId)) { + s_logger.debug("PowerFlex src pool: " + srcDataStore.getId() + " and dest pool: " + destDataStore.getId() + + " belongs to different storage instances, create new managed volume"); + return true; + } + } + + // New volume not required for all other cases (address any cases required in future) + return false; + } + private class MigrateVolumeContext extends AsyncRpcContext { final VolumeInfo srcVolume; final VolumeInfo destVolume; diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index db0117658911..d1d0f0c262f9 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -1831,23 +1831,56 @@ public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { final ImageFormat destFormat = destVol.getFormat(); final DataStoreTO srcStore = srcData.getDataStore(); final DataStoreTO destStore = destData.getDataStore(); - final PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)srcStore; - final PrimaryDataStoreTO primaryStoreDest = (PrimaryDataStoreTO)destStore; + final PrimaryDataStoreTO srcPrimaryStore = (PrimaryDataStoreTO)srcStore; + final PrimaryDataStoreTO destPrimaryStore = 
(PrimaryDataStoreTO)destStore; final String srcVolumePath = srcData.getPath(); final String destVolumePath = destData.getPath(); KVMStoragePool destPool = null; try { - final String volumeName = UUID.randomUUID().toString(); + s_logger.debug("Copying src volume (id: " + srcVol.getId() + ", format: " + srcFormat + ", path: " + srcVolumePath + ", primary storage: [id: " + srcPrimaryStore.getId() + ", type: " + srcPrimaryStore.getPoolType() + "]) to dest volume (id: " + + destVol.getId() + ", format: " + destFormat + ", path: " + destVolumePath + ", primary storage: [id: " + destPrimaryStore.getId() + ", type: " + destPrimaryStore.getPoolType() + "])."); + + if (srcPrimaryStore.isManaged()) { + if (!storagePoolMgr.connectPhysicalDisk(srcPrimaryStore.getPoolType(), srcPrimaryStore.getUuid(), srcVolumePath, srcPrimaryStore.getDetails())) { + s_logger.warn("Failed to connect src volume at path: " + srcVolumePath + ", in storage pool id: " + srcPrimaryStore.getUuid()); + } + } + + final KVMPhysicalDisk volume = storagePoolMgr.getPhysicalDisk(srcPrimaryStore.getPoolType(), srcPrimaryStore.getUuid(), srcVolumePath); + if (volume == null) { + s_logger.debug("Failed to get physical disk for volume: " + srcVolumePath); + throw new CloudRuntimeException("Failed to get physical disk for volume at path: " + srcVolumePath); + } - final String destVolumeName = volumeName + "." 
+ destFormat.getFileExtension(); - final KVMPhysicalDisk volume = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), srcVolumePath); volume.setFormat(PhysicalDiskFormat.valueOf(srcFormat.toString())); - destPool = storagePoolMgr.getStoragePool(primaryStoreDest.getPoolType(), primaryStoreDest.getUuid()); + String destVolumeName = null; + if (destPrimaryStore.isManaged()) { + if (!storagePoolMgr.connectPhysicalDisk(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid(), destVolumePath, destPrimaryStore.getDetails())) { + s_logger.warn("Failed to connect dest volume at path: " + destVolumePath + ", in storage pool id: " + destPrimaryStore.getUuid()); + } + String managedStoreTarget = destPrimaryStore.getDetails() != null ? destPrimaryStore.getDetails().get("managedStoreTarget") : null; + destVolumeName = managedStoreTarget != null ? managedStoreTarget : destVolumePath; + } else { + final String volumeName = UUID.randomUUID().toString(); + destVolumeName = volumeName + "." + destFormat.getFileExtension(); + } + + destPool = storagePoolMgr.getStoragePool(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid()); storagePoolMgr.copyPhysicalDisk(volume, destVolumeName, destPool, cmd.getWaitInMillSeconds()); + + if (srcPrimaryStore.isManaged()) { + storagePoolMgr.disconnectPhysicalDisk(srcPrimaryStore.getPoolType(), srcPrimaryStore.getUuid(), srcVolumePath); + } + + if (destPrimaryStore.isManaged()) { + storagePoolMgr.disconnectPhysicalDisk(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid(), destVolumePath); + } + final VolumeObjectTO newVol = new VolumeObjectTO(); - newVol.setPath(destVolumePath + File.separator + destVolumeName); + String path = destPrimaryStore.isManaged() ? 
destVolumeName : destVolumePath + File.separator + destVolumeName; + newVol.setPath(path); newVol.setFormat(destFormat); return new CopyCmdAnswer(newVol); } catch (final CloudRuntimeException e) { diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java index f04d6a504ddd..cc357f08af96 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java @@ -36,6 +36,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.RemoteHostEndPoint; import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.command.CopyCommand; @@ -61,6 +62,7 @@ import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; import com.cloud.alert.AlertManager; +import com.cloud.configuration.Config; import com.cloud.host.Host; import com.cloud.server.ManagementServerImpl; import com.cloud.storage.DataStoreRole; @@ -77,6 +79,7 @@ import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachineManager; @@ -104,6 +107,8 @@ public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { protected SnapshotDao snapshotDao; @Inject 
private AlertManager alertMgr; + @Inject + private ConfigurationDao configDao; public ScaleIOPrimaryDataStoreDriver() { @@ -588,7 +593,12 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As errMsg = answer.getDetails(); } } else if (srcData.getType() == DataObjectType.VOLUME) { - answer = migrateVolume(srcData, destData); + if (isSameScaleIOStorageInstance(srcStore, destStore)) { + answer = migrateVolume(srcData, destData); + } else { + answer = copyVolume(srcData, destData, destHost); + } + if (answer == null) { errMsg = "No answer for migrate/copy PowerFlex volume"; } else if (!answer.getResult()) { @@ -631,6 +641,27 @@ private Answer copyTemplateToVolume(DataObject srcData, DataObject destData, Hos return answer; } + private Answer copyVolume(DataObject srcData, DataObject destData, Host destHost) { + // Copy PowerFlex/ScaleIO volume + LOGGER.debug("Initiating copy from PowerFlex volume on host " + (destHost != null ? destHost.getId() : "")); + String value = configDao.getValue(Config.CopyVolumeWait.key()); + int copyVolumeWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); + + CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), copyVolumeWait, VirtualMachineManager.ExecuteInSequence.value()); + + Answer answer = null; + EndPoint ep = destHost != null ?
RemoteHostEndPoint.getHypervisorHostEndPoint(destHost) : selector.select(srcData.getDataStore()); + if (ep == null) { + String errorMsg = "No remote endpoint to send command, check if host or ssvm is down?"; + LOGGER.error(errorMsg); + answer = new Answer(cmd, false, errorMsg); + } else { + answer = ep.sendMessage(cmd); + } + + return answer; + } + private Answer migrateVolume(DataObject srcData, DataObject destData) { // Volume migration within same PowerFlex/ScaleIO cluster (with same System ID) DataStore srcStore = srcData.getDataStore(); @@ -638,26 +669,7 @@ private Answer migrateVolume(DataObject srcData, DataObject destData) { Answer answer = null; try { long srcPoolId = srcStore.getId(); - String srcPoolSystemId = null; - StoragePoolDetailVO srcPoolSystemIdDetail = storagePoolDetailsDao.findDetail(srcPoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID); - if (srcPoolSystemIdDetail != null) { - srcPoolSystemId = srcPoolSystemIdDetail.getValue(); - } - long destPoolId = destStore.getId(); - String destPoolSystemId = null; - StoragePoolDetailVO destPoolSystemIdDetail = storagePoolDetailsDao.findDetail(destPoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID); - if (destPoolSystemIdDetail != null) { - destPoolSystemId = destPoolSystemIdDetail.getValue(); - } - - if (Strings.isNullOrEmpty(srcPoolSystemId) || Strings.isNullOrEmpty(destPoolSystemId)) { - throw new CloudRuntimeException("Failed to validate PowerFlex pools compatibility for migration"); - } - - if (!srcPoolSystemId.equals(destPoolSystemId)) { - throw new CloudRuntimeException("Volume migration across different PowerFlex clusters is not supported"); - } final ScaleIOGatewayClient client = getScaleIOClient(srcPoolId); final String srcVolumeId = ((VolumeInfo) srcData).getPath(); @@ -722,6 +734,32 @@ private Answer migrateVolume(DataObject srcData, DataObject destData) { return answer; } + private boolean isSameScaleIOStorageInstance(DataStore srcStore, DataStore destStore) { + long srcPoolId = 
srcStore.getId(); + String srcPoolSystemId = null; + StoragePoolDetailVO srcPoolSystemIdDetail = storagePoolDetailsDao.findDetail(srcPoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID); + if (srcPoolSystemIdDetail != null) { + srcPoolSystemId = srcPoolSystemIdDetail.getValue(); + } + + long destPoolId = destStore.getId(); + String destPoolSystemId = null; + StoragePoolDetailVO destPoolSystemIdDetail = storagePoolDetailsDao.findDetail(destPoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID); + if (destPoolSystemIdDetail != null) { + destPoolSystemId = destPoolSystemIdDetail.getValue(); + } + + if (Strings.isNullOrEmpty(srcPoolSystemId) || Strings.isNullOrEmpty(destPoolSystemId)) { + throw new CloudRuntimeException("Failed to validate PowerFlex pools compatibility for migration as storage instance details are not available"); + } + + if (srcPoolSystemId.equals(destPoolSystemId)) { + return true; + } + + return false; + } + @Override public boolean canCopy(DataObject srcData, DataObject destData) { DataStore srcStore = destData.getDataStore(); diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 2a3a45c7d154..a7cc7724abc0 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -96,7 +96,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; @@ -210,7 +209,6 @@ import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine.State; import 
com.cloud.vm.dao.VMInstanceDao; -import com.google.common.base.Strings; import static com.cloud.utils.NumbersUtil.toHumanReadableSize; @@ -1846,6 +1844,38 @@ public StoragePoolVO findLocalStorageOnHost(long hostId) { } } + @Override + public Host findUpAndEnabledHostWithAccessToStoragePools(List poolIds) { + List hostIds = _storagePoolHostDao.findHostsConnectedToPools(poolIds); + if (hostIds.isEmpty()) { + return null; + } + + for (Long hostId : hostIds) { + Host host = _hostDao.findById(hostId); + if (canHostAccessStoragePools(host, poolIds)) { + return host; + } + } + + return null; + } + + private boolean canHostAccessStoragePools(Host host, List poolIds) { + if (poolIds == null || poolIds.isEmpty()) { + return false; + } + + for (Long poolId : poolIds) { + StoragePool pool = _storagePoolDao.findById(poolId); + if (!canHostAccessStoragePool(host, pool)) { + return false; + } + } + + return true; + } + @Override @DB public List findStoragePoolsConnectedToHost(long hostId) { @@ -2278,48 +2308,25 @@ public boolean storagePoolCompatibleWithVolumePool(StoragePool pool, Volume volu return false; } - if (!pool.isManaged()) { - return true; - } - if (volume.getPoolId() == null) { - // Volume is not allocated to any pool. Not possible to check compatibility with other pool + // Volume is not allocated to any pool. Not possible to check compatibility with other pool, let it try return true; } StoragePool volumePool = _storagePoolDao.findById(volume.getPoolId()); if (volumePool == null) { - // Volume pool doesn't exist. Not possible to check compatibility with other pool + // Volume pool doesn't exist. 
Not possible to check compatibility with other pool, let it try return true; } - if (volume.getState() == Volume.State.Ready && volumePool.getPoolType() == Storage.StoragePoolType.PowerFlex) { - if (pool.getPoolType() != Storage.StoragePoolType.PowerFlex) { - return false; - } - - final String STORAGE_POOL_SYSTEM_ID = "powerflex.storagepool.system.id"; - String srcPoolSystemId = null; - StoragePoolDetailVO srcPoolSystemIdDetail = _storagePoolDetailsDao.findDetail(volume.getPoolId(), STORAGE_POOL_SYSTEM_ID); - if (srcPoolSystemIdDetail != null) { - srcPoolSystemId = srcPoolSystemIdDetail.getValue(); - } - - String destPoolSystemId = null; - StoragePoolDetailVO destPoolSystemIdDetail = _storagePoolDetailsDao.findDetail(pool.getId(), STORAGE_POOL_SYSTEM_ID); - if (destPoolSystemIdDetail != null) { - destPoolSystemId = destPoolSystemIdDetail.getValue(); - } - - if (Strings.isNullOrEmpty(srcPoolSystemId) || Strings.isNullOrEmpty(destPoolSystemId)) { - s_logger.debug("Unable to check PowerFlex pool: " + pool.getId() + " compatibilty for the volume: " + volume.getId()); + if (volume.getState() == Volume.State.Ready) { + if (volumePool.getPoolType() == Storage.StoragePoolType.PowerFlex && pool.getPoolType() != Storage.StoragePoolType.PowerFlex) { return false; - } - - if (!srcPoolSystemId.equals(destPoolSystemId)) { - s_logger.debug("PowerFlex pool: " + pool.getId() + " is not compatible for the volume: " + volume.getId()); + } else if (volumePool.getPoolType() != Storage.StoragePoolType.PowerFlex && pool.getPoolType() == Storage.StoragePoolType.PowerFlex) { return false; } + } else { + return false; } return true; diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 7d7c849bb28e..9ef9d2acc441 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -2271,6 +2271,12 @@ public Volume 
migrateVolume(MigrateVolumeCmd cmd) { if (_serviceOfferingDetailsDao.findDetail(vm.getServiceOfferingId(), GPU.Keys.pciDevice.toString()) != null) { throw new InvalidParameterValueException("Live Migration of GPU enabled VM is not supported"); } + + StoragePoolVO storagePoolVO = _storagePoolDao.findById(vol.getPoolId()); + if (storagePoolVO.getPoolType() == Storage.StoragePoolType.PowerFlex) { + throw new InvalidParameterValueException("Migrate volume of a running VM is unsupported on storage pool type " + storagePoolVO.getPoolType()); + } + // Check if the underlying hypervisor supports storage motion. Long hostId = vm.getHostId(); if (hostId != null) { diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java index c900b2d14ba1..b22d3b4ca3c6 100644 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java @@ -77,6 +77,8 @@ public interface SnapshotManager extends Configurable { boolean canOperateOnVolume(Volume volume); + boolean backedUpSnapshotsExistsForVolume(Volume volume); + void cleanupSnapshotsByVolume(Long volumeId); Answer sendToPool(Volume vol, Command cmd); diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 88e87ec819ce..630adde57dc9 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -1390,6 +1390,15 @@ public boolean canOperateOnVolume(Volume volume) { return true; } + @Override + public boolean backedUpSnapshotsExistsForVolume(Volume volume) { + List snapshots = _snapshotDao.listByStatus(volume.getId(), Snapshot.State.BackedUp); + if (snapshots.size() > 0) { + return true; + } + return false; + } + @Override public void 
cleanupSnapshotsByVolume(Long volumeId) { List infos = snapshotFactory.getSnapshots(volumeId, DataStoreRole.Primary); diff --git a/test/integration/plugins/scaleio/README.md b/test/integration/plugins/scaleio/README.md index 4a980d86cf71..0e48fc15b464 100644 --- a/test/integration/plugins/scaleio/README.md +++ b/test/integration/plugins/scaleio/README.md @@ -4,16 +4,25 @@ This directory contains the basic VM, Volume life cycle tests for PowerFlex/Scal # Running tests =============== -To run these tests, first update the below test data of the CloudStack environment +To run the basic volume tests, first update the below test data of the CloudStack environment ```` TestData.zoneId: TestData.clusterId: TestData.domainId: TestData.url: +TestData.primaryStorage "url": ```` - -and PowerFlex/ScaleIO storage pool url at TestData.primaryStorage in the below format + +and to enable and run volume migration tests, update the below test data + +```` +TestData.migrationTests: True +TestData.primaryStorageSameInstance "url": +TestData.primaryStorageDistinctInstance "url": +```` + +PowerFlex/ScaleIO storage pool url format: ```` powerflex://:@/ diff --git a/test/integration/plugins/scaleio/test_scaleio_volumes.py b/test/integration/plugins/scaleio/test_scaleio_volumes.py index 28d591ae70e9..c67f838297bf 100644 --- a/test/integration/plugins/scaleio/test_scaleio_volumes.py +++ b/test/integration/plugins/scaleio/test_scaleio_volumes.py @@ -31,17 +31,23 @@ list_volumes # utils - utility classes for common cleanup, external library wrappers, etc. 
-from marvin.lib.utils import cleanup_resources +from marvin.lib.utils import cleanup_resources, validateList +from marvin.codes import PASS +from nose.plugins.attrib import attr # Prerequisites: # Only one zone # Only one pod # Only one cluster # -# One ScaleIO storage pool -# Only KVM hypervisor is supported for ScaleIO storage pool +# One PowerFlex/ScaleIO storage pool for basic tests +# Only KVM hypervisor is supported for PowerFlex/ScaleIO storage pool # KVM host(s) with ScaleIO Data Client (SDC) installed and connected to Metadata Manager (MDM) # +# For volume migration tests, additional storage pool(s) are required +# One PowerFlex/ScaleIO storage pool on the same ScaleIO storage cluster/instance +# One PowerFlex/ScaleIO storage pool on different ScaleIO storage cluster/instance +# class TestData(): # constants @@ -50,6 +56,8 @@ class TestData(): computeOffering = "computeoffering" diskName = "diskname" diskOffering = "diskoffering" + diskOfferingSameInstance = "diskOfferingSameInstance" + diskOfferingDistinctInstance = "diskOfferingDistinctInstance" domainId = "domainId" hypervisor = "hypervisor" kvm = "kvm" @@ -59,10 +67,14 @@ class TestData(): password = "password" port = "port" primaryStorage = "primarystorage" + primaryStorageSameInstance = "primaryStorageSameInstance" + primaryStorageDistinctInstance = "primaryStorageDistinctInstance" provider = "provider" scope = "scope" powerFlex = "powerflex" storageTag = "pflex" + storageTagSameInstance = "pflexsame" + storageTagDistinctInstance = "pflexdiff" tags = "tags" templateCacheNameKvm = "centos55-x86-64" testAccount = "testaccount" @@ -71,10 +83,15 @@ class TestData(): username = "username" virtualMachine = "virtualmachine" virtualMachine2 = "virtualmachine2" + virtualMachine3 = "virtualmachine3" + virtualMachine4 = "virtualmachine4" volume_1 = "volume_1" volume_2 = "volume_2" + volume_3 = "volume_3" + volume_4 = "volume_4" kvm = "kvm" zoneId = "zoneId" + migrationTests = "migrationTests" # hypervisor type 
to test hypervisor_type = kvm @@ -109,19 +126,27 @@ def __init__(self): TestData.primaryStorage: { "name": "PowerFlexPool-%d" % random.randint(0, 100), TestData.scope: "ZONE", - "url": "powerflex://admin:P%40ssword123@10.10.2.130/cspool", + "url": "powerflex://admin:P%40ssword123@10.10.4.141/cspool01", TestData.provider: "PowerFlex", - TestData.tags: TestData.storageTag, + TestData.tags: TestData.storageTag + "," + TestData.storageTagSameInstance + "," + TestData.storageTagDistinctInstance, TestData.hypervisor: "KVM" }, TestData.virtualMachine: { "name": "TestVM1", - "displayname": "Test VM1" + "displayname": "Test VM 1" }, TestData.virtualMachine2: { "name": "TestVM2", "displayname": "Test VM 2" }, + TestData.virtualMachine3: { + "name": "TestVM3", + "displayname": "Test VM 3" + }, + TestData.virtualMachine4: { + "name": "TestVM4", + "displayname": "Test VM 4" + }, TestData.computeOffering: { "name": "PowerFlex_Compute", "displaytext": "PowerFlex_Compute", @@ -144,10 +169,50 @@ def __init__(self): TestData.volume_2: { TestData.diskName: "test-volume-2", }, + TestData.volume_3: { + TestData.diskName: "test-volume-3", + }, + TestData.volume_4: { + TestData.diskName: "test-volume-4", + }, TestData.zoneId: 1, TestData.clusterId: 1, TestData.domainId: 1, - TestData.url: "10.10.3.226" + TestData.url: "10.10.3.226", + # for volume migration tests + TestData.migrationTests: True, + # PowerFlex/ScaleIO storage pool on the same ScaleIO storage instance + TestData.primaryStorageSameInstance: { + "name": "PowerFlexPool-%d" % random.randint(0, 100), + TestData.scope: "ZONE", + "url": "powerflex://admin:P%40ssword123@10.10.4.141/cspool02", + TestData.provider: "PowerFlex", + TestData.tags: TestData.storageTag + "," + TestData.storageTagSameInstance, + TestData.hypervisor: "KVM" + }, + # PowerFlex/ScaleIO storage pool on different ScaleIO storage instance + TestData.primaryStorageDistinctInstance: { + "name": "PowerFlexPool-%d" % random.randint(0, 100), + TestData.scope: "ZONE", 
+ "url": "powerflex://admin:P%40ssword123@10.10.4.194/cloudstackpool", + TestData.provider: "PowerFlex", + TestData.tags: TestData.storageTag + "," + TestData.storageTagDistinctInstance, + TestData.hypervisor: "KVM" + }, + TestData.diskOfferingSameInstance: { + "name": "PowerFlex_Disk_Same_Inst", + "displaytext": "PowerFlex_Disk_Same_Inst", + "disksize": 8, + TestData.tags: TestData.storageTagSameInstance, + "storagetype": "shared" + }, + TestData.diskOfferingDistinctInstance: { + "name": "PowerFlex_Disk_Diff_Inst", + "displaytext": "PowerFlex_Disk_Diff_Inst", + "disksize": 8, + TestData.tags: TestData.storageTagDistinctInstance, + "storagetype": "shared" + }, } @@ -211,6 +276,40 @@ def setUpClass(cls): cls.testdata[TestData.diskOffering] ) + if TestData.migrationTests: + primarystorage_sameinst = cls.testdata[TestData.primaryStorageSameInstance] + cls.primary_storage_same_inst = StoragePool.create( + cls.apiClient, + primarystorage_sameinst, + scope=primarystorage_sameinst[TestData.scope], + zoneid=cls.zone.id, + provider=primarystorage_sameinst[TestData.provider], + tags=primarystorage_sameinst[TestData.tags], + hypervisor=primarystorage_sameinst[TestData.hypervisor] + ) + + primarystorage_distinctinst = cls.testdata[TestData.primaryStorageDistinctInstance] + cls.primary_storage_distinct_inst = StoragePool.create( + cls.apiClient, + primarystorage_distinctinst, + scope=primarystorage_distinctinst[TestData.scope], + zoneid=cls.zone.id, + provider=primarystorage_distinctinst[TestData.provider], + tags=primarystorage_distinctinst[TestData.tags], + hypervisor=primarystorage_distinctinst[TestData.hypervisor] + ) + + cls.disk_offering_same_inst = DiskOffering.create( + cls.apiClient, + cls.testdata[TestData.diskOfferingSameInstance] + ) + + cls.disk_offering_distinct_inst = DiskOffering.create( + cls.apiClient, + cls.testdata[TestData.diskOfferingDistinctInstance] + ) + + # Create VM and volume for tests cls.virtual_machine = VirtualMachine.create( cls.apiClient, @@ 
-247,10 +346,18 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): try: + if TestData.migrationTests: + cls._cleanup.append(cls.disk_offering_same_inst) + cls._cleanup.append(cls.disk_offering_distinct_inst) + cleanup_resources(cls.apiClient, cls._cleanup) cls.primary_storage.delete(cls.apiClient) + if TestData.migrationTests: + cls.primary_storage_same_inst.delete(cls.apiClient) + cls.primary_storage_distinct_inst.delete(cls.apiClient) + except Exception as e: logging.debug("Exception in tearDownClass(cls): %s" % e) @@ -264,6 +371,7 @@ def tearDown(self): cleanup_resources(self.apiClient, self.cleanup) + @attr(tags=['basic'], required_hardware=False) def test_01_create_vm_with_volume(self): '''Create VM with attached volume and expunge VM''' @@ -338,6 +446,7 @@ def test_01_create_vm_with_volume(self): "Check if VM was actually expunged" ) + @attr(tags=['basic'], required_hardware=False) def test_02_attach_new_volume_to_stopped_vm(self): '''Attach a volume to a stopped virtual machine, then start VM''' @@ -381,6 +490,7 @@ def test_02_attach_new_volume_to_stopped_vm(self): "The volume should not be attached to a VM." 
) + @attr(tags=['basic'], required_hardware=False) def test_03_attach_detach_attach_volume_to_vm(self): '''Attach, detach, and attach volume to a running VM''' @@ -461,6 +571,7 @@ def test_03_attach_detach_attach_volume_to_vm(self): TestScaleIOVolumes._vm_not_in_running_state_err_msg ) + @attr(tags=['basic'], required_hardware=False) def test_04_detach_vol_stopped_vm_start(self): '''Detach volume from a stopped VM, then start.''' @@ -532,6 +643,7 @@ def test_04_detach_vol_stopped_vm_start(self): TestScaleIOVolumes._vm_not_in_running_state_err_msg ) + @attr(tags=['basic'], required_hardware=False) def test_05_attach_volume_to_stopped_vm(self): '''Attach a volume to a stopped virtual machine, then start VM''' @@ -578,6 +690,7 @@ def test_05_attach_volume_to_stopped_vm(self): TestScaleIOVolumes._vm_not_in_running_state_err_msg ) + @attr(tags=['basic'], required_hardware=False) def test_06_attached_volume_reboot_vm(self): '''Attach volume to running VM, then reboot.''' @@ -621,6 +734,7 @@ def test_06_attached_volume_reboot_vm(self): TestScaleIOVolumes._vm_not_in_running_state_err_msg ) + @attr(tags=['basic'], required_hardware=False) def test_07_detach_volume_reboot_vm(self): '''Detach volume from a running VM, then reboot.''' @@ -690,6 +804,7 @@ def test_07_detach_volume_reboot_vm(self): TestScaleIOVolumes._vm_not_in_running_state_err_msg ) + @attr(tags=['basic'], required_hardware=False) def test_08_delete_volume_was_attached(self): '''Delete volume that was attached to a VM and is detached now''' @@ -765,6 +880,270 @@ def test_08_delete_volume_was_attached(self): "Check volume was deleted" ) + @attr(tags=['advanced', 'migration'], required_hardware=False) + def test_09_migrate_volume_to_same_instance_pool(self): + '''Migrate volume to the same instance pool''' + + if not TestData.migrationTests: + self.skipTest("Volume migration tests not enabled, skipping test") + + ####################################### + # STEP 1: Create VM and Start VM # + 
####################################### + + test_virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine3], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=False + ) + + TestScaleIOVolumes._start_vm(test_virtual_machine) + + ####################################### + # STEP 2: Create vol and attach to VM # + ####################################### + + new_volume = Volume.create( + self.apiClient, + self.testdata[TestData.volume_3], + account=self.account.name, + domainid=self.domain.id, + zoneid=self.zone.id, + diskofferingid=self.disk_offering_same_inst.id + ) + + volume_to_delete_later = new_volume + + new_volume = test_virtual_machine.attach_volume( + self.apiClient, + new_volume + ) + + vm = self._get_vm(test_virtual_machine.id) + + self.assertEqual( + new_volume.virtualmachineid, + vm.id, + "Check if attached to virtual machine" + ) + + self.assertEqual( + vm.state.lower(), + 'running', + str(vm.state) + ) + + ####################################### + # STEP 3: Stop VM and Migrate volume # + ####################################### + + test_virtual_machine.stop(self.apiClient) + + vm = self._get_vm(test_virtual_machine.id) + + self.assertEqual( + vm.state.lower(), + 'stopped', + str(vm.state) + ) + + pools = StoragePool.listForMigration( + self.apiClient, + id=new_volume.id + ) + + if not pools: + self.skipTest("No suitable storage pools found for volume migration, skipping test") + + self.assertEqual( + validateList(pools)[0], + PASS, + "Invalid pool response from findStoragePoolsForMigration API" + ) + + pool = pools[0] + self.debug("Migrating Volume-ID: %s to Same Instance Pool: %s" % (new_volume.id, pool.id)) + + try: + Volume.migrate( + self.apiClient, + volumeid=new_volume.id, + storageid=pool.id + ) + except Exception as e: + self.fail("Volume migration failed with error %s" % e) + + 
####################################### + # STEP 4: Detach and delete volume # + ####################################### + + new_volume = test_virtual_machine.detach_volume( + self.apiClient, + new_volume + ) + + self.assertEqual( + new_volume.virtualmachineid, + None, + "Check if attached to virtual machine" + ) + + volume_to_delete_later.delete(self.apiClient) + + list_volumes_response = list_volumes( + self.apiClient, + id=new_volume.id + ) + + self.assertEqual( + list_volumes_response, + None, + "Check volume was deleted" + ) + + ####################################### + # STEP 4: Delete VM # + ####################################### + + test_virtual_machine.delete(self.apiClient, True) + + @attr(tags=['advanced', 'migration'], required_hardware=False) + def test_10_migrate_volume_to_distinct_instance_pool(self): + '''Migrate volume to distinct instance pool''' + + if not TestData.migrationTests: + self.skipTest("Volume migration tests not enabled, skipping test") + + ####################################### + # STEP 1: Create VM and Start VM # + ####################################### + + test_virtual_machine = VirtualMachine.create( + self.apiClient, + self.testdata[TestData.virtualMachine4], + accountid=self.account.name, + zoneid=self.zone.id, + serviceofferingid=self.compute_offering.id, + templateid=self.template.id, + domainid=self.domain.id, + startvm=False + ) + + TestScaleIOVolumes._start_vm(test_virtual_machine) + + ####################################### + # STEP 2: Create vol and attach to VM # + ####################################### + + new_volume = Volume.create( + self.apiClient, + self.testdata[TestData.volume_4], + account=self.account.name, + domainid=self.domain.id, + zoneid=self.zone.id, + diskofferingid=self.disk_offering_distinct_inst.id + ) + + volume_to_delete_later = new_volume + + new_volume = test_virtual_machine.attach_volume( + self.apiClient, + new_volume + ) + + vm = self._get_vm(test_virtual_machine.id) + + self.assertEqual( + 
new_volume.virtualmachineid, + vm.id, + "Check if attached to virtual machine" + ) + + self.assertEqual( + vm.state.lower(), + 'running', + str(vm.state) + ) + + ####################################### + # STEP 3: Stop VM and Migrate volume # + ####################################### + + test_virtual_machine.stop(self.apiClient) + + vm = self._get_vm(test_virtual_machine.id) + + self.assertEqual( + vm.state.lower(), + 'stopped', + str(vm.state) + ) + + pools = StoragePool.listForMigration( + self.apiClient, + id=new_volume.id + ) + + if not pools: + self.skipTest("No suitable storage pools found for volume migration, skipping test") + + self.assertEqual( + validateList(pools)[0], + PASS, + "Invalid pool response from findStoragePoolsForMigration API" + ) + + pool = pools[0] + self.debug("Migrating Volume-ID: %s to Distinct Instance Pool: %s" % (new_volume.id, pool.id)) + + try: + Volume.migrate( + self.apiClient, + volumeid=new_volume.id, + storageid=pool.id + ) + except Exception as e: + self.fail("Volume migration failed with error %s" % e) + + ####################################### + # STEP 4: Detach and delete volume # + ####################################### + + new_volume = test_virtual_machine.detach_volume( + self.apiClient, + new_volume + ) + + self.assertEqual( + new_volume.virtualmachineid, + None, + "Check if attached to virtual machine" + ) + + volume_to_delete_later.delete(self.apiClient) + + list_volumes_response = list_volumes( + self.apiClient, + id=new_volume.id + ) + + self.assertEqual( + list_volumes_response, + None, + "Check volume was deleted" + ) + + ####################################### + # STEP 4: Delete VM # + ####################################### + + test_virtual_machine.delete(self.apiClient, True) + def _create_vm_using_template_and_destroy_vm(self, template): vm_name = "VM-%d" % random.randint(0, 100) From 602a708662f2ab50aa633cbd122df5098d491eba Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Thu, 4 Feb 2021 15:10:11 
+0530 Subject: [PATCH 07/12] Fixed change service offering smoke tests in test_service_offerings.py, test_vm_snapshots.py --- test/integration/smoke/test_service_offerings.py | 12 +++++++++--- test/integration/smoke/test_vm_snapshots.py | 3 +-- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/test/integration/smoke/test_service_offerings.py b/test/integration/smoke/test_service_offerings.py index 0ee055ad0f41..8a7682ea462d 100644 --- a/test/integration/smoke/test_service_offerings.py +++ b/test/integration/smoke/test_service_offerings.py @@ -502,22 +502,28 @@ def test_04_change_offering_small(self): self.skipTest("Skipping this test for {} due to bug CS-38153".format(self.hypervisor)) try: self.medium_virtual_machine.stop(self.apiclient) + timeout = self.services["timeout"] + while True: time.sleep(self.services["sleep"]) + # Ensure that VM is in stopped state list_vm_response = list_virtual_machines( - self.apiclient, - id=self.medium_virtual_machine.id - ) + self.apiclient, + id=self.medium_virtual_machine.id + ) + if isinstance(list_vm_response, list): vm = list_vm_response[0] if vm.state == 'Stopped': self.debug("VM state: %s" % vm.state) break + if timeout == 0: raise Exception( "Failed to stop VM (ID: %s) in change service offering" % vm.id) + timeout = timeout - 1 except Exception as e: self.fail("Failed to stop VM: %s" % e) diff --git a/test/integration/smoke/test_vm_snapshots.py b/test/integration/smoke/test_vm_snapshots.py index 047ed200001b..b8590bc77a01 100644 --- a/test/integration/smoke/test_vm_snapshots.py +++ b/test/integration/smoke/test_vm_snapshots.py @@ -457,6 +457,7 @@ def test_change_service_offering_for_vm_with_snapshots(self): self.debug("Stopping VM - ID: %s" % virtual_machine.id) try: virtual_machine.stop(self.apiclient) + timeout = self.services["timeout"] while True: @@ -469,7 +470,6 @@ def test_change_service_offering_for_vm_with_snapshots(self): ) if isinstance(list_vm_response, list): - vm = list_vm_response[0] if 
vm.state == 'Stopped': self.debug("VM state: %s" % vm.state) @@ -480,7 +480,6 @@ def test_change_service_offering_for_vm_with_snapshots(self): "Failed to stop VM (ID: %s) in change service offering" % vm.id) timeout = timeout - 1 - except Exception as e: self.fail("Failed to stop VM: %s" % e) From 9e92647210268c21ec2f1769816da28505f718f4 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Fri, 5 Feb 2021 17:53:35 +0530 Subject: [PATCH 08/12] Added the PowerFlex/ScaleIO volume/snapshot name to the paths of respective CloudStack resources (Templates, Volumes, Snapshots and VM Snapshots) --- .../vmsnapshot/ScaleIOVMSnapshotStrategy.java | 10 +- .../kvm/storage/ScaleIOStorageAdaptor.java | 16 ++-- .../kvm/storage/ScaleIOStoragePoolTest.java | 17 ++-- .../driver/ScaleIOPrimaryDataStoreDriver.java | 93 ++++++++++--------- .../storage/datastore/util/ScaleIOUtil.java | 21 +++++ 5 files changed, 97 insertions(+), 60 deletions(-) diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java index 396096c14afc..a124a4adf2b1 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java @@ -141,7 +141,7 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { for (VolumeObjectTO volume : volumeTOs) { String volumeSnapshotName = String.format("%s-%s-%s-%s-%s", ScaleIOUtil.VMSNAPSHOT_PREFIX, vmSnapshotVO.getId(), volume.getId(), storagePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); - srcVolumeDestSnapshotMap.put(volume.getPath(), volumeSnapshotName); + srcVolumeDestSnapshotMap.put(ScaleIOUtil.getVolumePath(volume.getPath()), volumeSnapshotName); virtual_size += volume.getSize(); 
VolumeVO volumeVO = volumeDao.findById(volume.getId()); @@ -173,7 +173,9 @@ public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { vmSnapshotDetails.add(new VMSnapshotDetailsVO(vmSnapshot.getId(), "SnapshotGroupId", snapshotGroupId, false)); for (int index = 0; index < volumeIds.size(); index++) { - vmSnapshotDetails.add(new VMSnapshotDetailsVO(vmSnapshot.getId(), "Vol_" + volumeTOs.get(index).getId() + "_Snapshot", volumeIds.get(index), false)); + String volumeSnapshotName = srcVolumeDestSnapshotMap.get(ScaleIOUtil.getVolumePath(volumeTOs.get(index).getPath())); + String pathWithScaleIOVolumeName = ScaleIOUtil.updatedPathWithVolumeName(volumeIds.get(index), volumeSnapshotName); + vmSnapshotDetails.add(new VMSnapshotDetailsVO(vmSnapshot.getId(), "Vol_" + volumeTOs.get(index).getId() + "_Snapshot", pathWithScaleIOVolumeName, false)); } vmSnapshotDetailsDao.saveDetails(vmSnapshotDetails); @@ -265,8 +267,8 @@ public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { Map srcSnapshotDestVolumeMap = new HashMap<>(); for (VolumeObjectTO volume : volumeTOs) { VMSnapshotDetailsVO vmSnapshotDetail = vmSnapshotDetailsDao.findDetail(vmSnapshotVO.getId(), "Vol_" + volume.getId() + "_Snapshot"); - String srcSnapshotVolumeId = vmSnapshotDetail.getValue(); - String destVolumeId = volume.getPath(); + String srcSnapshotVolumeId = ScaleIOUtil.getVolumePath(vmSnapshotDetail.getValue()); + String destVolumeId = ScaleIOUtil.getVolumePath(volume.getPath()); srcSnapshotDestVolumeMap.put(srcSnapshotVolumeId, destVolumeId); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java index 04e8d93b50a5..62eb54404683 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java @@ -67,12 
+67,14 @@ public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { } @Override - public KVMPhysicalDisk getPhysicalDisk(String volumeId, KVMStoragePool pool) { - if (Strings.isNullOrEmpty(volumeId) || pool == null) { - LOGGER.error("Unable to get physical disk, unspecified volumeid or pool"); + public KVMPhysicalDisk getPhysicalDisk(String volumePath, KVMStoragePool pool) { + if (Strings.isNullOrEmpty(volumePath) || pool == null) { + LOGGER.error("Unable to get physical disk, volume path or pool not specified"); return null; } + String volumeId = ScaleIOUtil.getVolumePath(volumePath); + try { String diskFilePath = null; String systemId = ScaleIOUtil.getSystemIdForVolume(volumeId); @@ -98,7 +100,7 @@ public KVMPhysicalDisk getPhysicalDisk(String volumeId, KVMStoragePool pool) { } } - KVMPhysicalDisk disk = new KVMPhysicalDisk(diskFilePath, volumeId, pool); + KVMPhysicalDisk disk = new KVMPhysicalDisk(diskFilePath, volumePath, pool); disk.setFormat(QemuImg.PhysicalDiskFormat.RAW); long diskSize = getPhysicalDiskSize(diskFilePath); @@ -107,8 +109,8 @@ public KVMPhysicalDisk getPhysicalDisk(String volumeId, KVMStoragePool pool) { return disk; } catch (Exception e) { - LOGGER.error("Failed to get the physical disk: " + volumeId + " on the storage pool: " + pool.getUuid() + " due to " + e.getMessage()); - throw new CloudRuntimeException("Failed to get the physical disk: " + volumeId + " on the storage pool: " + pool.getUuid()); + LOGGER.error("Failed to get the physical disk: " + volumePath + " on the storage pool: " + pool.getUuid() + " due to " + e.getMessage()); + throw new CloudRuntimeException("Failed to get the physical disk: " + volumePath + " on the storage pool: " + pool.getUuid()); } } @@ -136,6 +138,8 @@ public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map capacityBytes ? 
capacityBytes : usedBytes); storagePoolDao.update(storagePoolId, storagePool); - return volume.getPath(); + return volumePath; } catch (Exception e) { String errMsg = "Unable to create PowerFlex Volume due to " + e.getMessage(); LOGGER.warn(errMsg); @@ -465,7 +466,8 @@ private String createTemplateVolume(TemplateInfo templateInfo, long storagePool } VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(storagePoolId, templateInfo.getId(), null); - templatePoolRef.setInstallPath(scaleIOVolume.getId()); + String templatePath = ScaleIOUtil.updatedPathWithVolumeName(scaleIOVolume.getId(), scaleIOVolumeName); + templatePoolRef.setInstallPath(templatePath); templatePoolRef.setLocalDownloadPath(scaleIOVolume.getId()); templatePoolRef.setTemplateSize(scaleIOVolume.getSizeInKb() * 1024); vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef); @@ -476,7 +478,7 @@ private String createTemplateVolume(TemplateInfo templateInfo, long storagePool storagePool.setUsedBytes(usedBytes > capacityBytes ? 
capacityBytes : usedBytes); storagePoolDao.update(storagePoolId, storagePool); - return scaleIOVolume.getId(); + return templatePath; } catch (Exception e) { String errMsg = "Unable to create PowerFlex template volume due to " + e.getMessage(); LOGGER.warn(errMsg); @@ -486,15 +488,15 @@ private String createTemplateVolume(TemplateInfo templateInfo, long storagePool @Override public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { - String scaleIOVolId = null; + String scaleIOVolumePath = null; String errMsg = null; try { if (dataObject.getType() == DataObjectType.VOLUME) { LOGGER.debug("createAsync - creating volume"); - scaleIOVolId = createVolume((VolumeInfo) dataObject, dataStore.getId()); + scaleIOVolumePath = createVolume((VolumeInfo) dataObject, dataStore.getId()); } else if (dataObject.getType() == DataObjectType.TEMPLATE) { LOGGER.debug("createAsync - creating template"); - scaleIOVolId = createTemplateVolume((TemplateInfo)dataObject, dataStore.getId()); + scaleIOVolumePath = createTemplateVolume((TemplateInfo)dataObject, dataStore.getId()); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; LOGGER.error(errMsg); @@ -508,7 +510,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject, AsyncComplet } if (callback != null) { - CreateCmdResult result = new CreateCmdResult(scaleIOVolId, new Answer(null, errMsg == null, errMsg)); + CreateCmdResult result = new CreateCmdResult(scaleIOVolumePath, new Answer(null, errMsg == null, errMsg)); result.setResult(errMsg); callback.complete(result); } @@ -524,23 +526,26 @@ public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncComplet Preconditions.checkArgument(storagePool != null && storagePool.getHostAddress() != null, "storagePool and host address should not be null"); String errMsg = null; - String scaleIOVolumeId = null; + String scaleIOVolumePath = null; try { boolean deleteResult = false; 
if (dataObject.getType() == DataObjectType.VOLUME) { LOGGER.debug("deleteAsync - deleting volume"); - scaleIOVolumeId = ((VolumeInfo) dataObject).getPath(); + scaleIOVolumePath = ((VolumeInfo) dataObject).getPath(); } else if (dataObject.getType() == DataObjectType.SNAPSHOT) { LOGGER.debug("deleteAsync - deleting snapshot"); - scaleIOVolumeId = ((SnapshotInfo) dataObject).getPath(); + scaleIOVolumePath = ((SnapshotInfo) dataObject).getPath(); } else if (dataObject.getType() == DataObjectType.TEMPLATE) { LOGGER.debug("deleteAsync - deleting template"); - scaleIOVolumeId = ((TemplateInfo) dataObject).getInstallPath(); + scaleIOVolumePath = ((TemplateInfo) dataObject).getInstallPath(); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to deleteAsync"; + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); } try { + String scaleIOVolumeId = ScaleIOUtil.getVolumePath(scaleIOVolumePath); final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); deleteResult = client.deleteVolume(scaleIOVolumeId); if (!deleteResult) { @@ -552,7 +557,7 @@ public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncComplet storagePool.setUsedBytes(usedBytes < 0 ? 
0 : usedBytes); storagePoolDao.update(storagePoolId, storagePool); } catch (Exception e) { - errMsg = "Unable to delete PowerFlex volume: " + scaleIOVolumeId + " due to " + e.getMessage(); + errMsg = "Unable to delete PowerFlex volume: " + scaleIOVolumePath + " due to " + e.getMessage(); LOGGER.warn(errMsg); throw new CloudRuntimeException(errMsg, e); } @@ -672,16 +677,21 @@ private Answer migrateVolume(DataObject srcData, DataObject destData) { long destPoolId = destStore.getId(); final ScaleIOGatewayClient client = getScaleIOClient(srcPoolId); - final String srcVolumeId = ((VolumeInfo) srcData).getPath(); + final String srcVolumeId = ScaleIOUtil.getVolumePath(((VolumeInfo) srcData).getPath()); final StoragePoolVO destStoragePool = storagePoolDao.findById(destPoolId); final String destStoragePoolId = destStoragePool.getPath(); int migrationTimeout = StorageManager.KvmStorageOfflineMigrationWait.value(); boolean migrateStatus = client.migrateVolume(srcVolumeId, destStoragePoolId, migrationTimeout); if (migrateStatus) { + String newVolumeName = String.format("%s-%s-%s-%s", ScaleIOUtil.VOLUME_PREFIX, destData.getId(), + destStoragePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); + client.renameVolume(srcVolumeId, newVolumeName); + if (srcData.getId() != destData.getId()) { VolumeVO destVolume = volumeDao.findById(destData.getId()); - destVolume.set_iScsiName(srcVolumeId); - destVolume.setPath(srcVolumeId); + String newVolumePath = ScaleIOUtil.updatedPathWithVolumeName(srcVolumeId, newVolumeName); + destVolume.set_iScsiName(newVolumePath); + destVolume.setPath(newVolumePath); volumeDao.update(destData.getId(), destVolume); VolumeVO srcVolume = volumeDao.findById(srcData.getId()); @@ -698,10 +708,6 @@ private Answer migrateVolume(DataObject srcData, DataObject destData) { volumeDao.update(srcData.getId(), volume); } - String newVolumeName = String.format("%s-%s-%s-%s", ScaleIOUtil.VOLUME_PREFIX, destData.getId(), - 
destStoragePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); - client.renameVolume(srcVolumeId, newVolumeName); - List snapshots = snapshotDao.listByVolumeId(srcData.getId()); if (CollectionUtils.isNotEmpty(snapshots)) { for (SnapshotVO snapshot : snapshots) { @@ -710,13 +716,14 @@ private Answer migrateVolume(DataObject srcData, DataObject destData) { continue; } - snapshotStore.setDataStoreId(destPoolId); - snapshotDataStoreDao.update(snapshotStore.getId(), snapshotStore); - - String snapshotVolumeId = snapshotStore.getInstallPath(); + String snapshotVolumeId = ScaleIOUtil.getVolumePath(snapshotStore.getInstallPath()); String newSnapshotName = String.format("%s-%s-%s-%s", ScaleIOUtil.SNAPSHOT_PREFIX, snapshot.getId(), destStoragePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); client.renameVolume(snapshotVolumeId, newSnapshotName); + + snapshotStore.setDataStoreId(destPoolId); + snapshotStore.setInstallPath(ScaleIOUtil.updatedPathWithVolumeName(snapshotVolumeId, newSnapshotName)); + snapshotDataStoreDao.update(snapshotStore.getId(), snapshotStore); } } @@ -782,7 +789,7 @@ private void resizeVolume(VolumeInfo volumeInfo) { Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null"); try { - String scaleIOVolumeId = volumeInfo.getPath(); + String scaleIOVolumeId = ScaleIOUtil.getVolumePath(volumeInfo.getPath()); Long storagePoolId = volumeInfo.getPoolId(); ResizeVolumePayload payload = (ResizeVolumePayload)volumeInfo.getpayload(); @@ -823,11 +830,11 @@ private void resizeVolume(VolumeInfo volumeInfo) { @Override public void resize(DataObject dataObject, AsyncCompletionCallback callback) { - String scaleIOVolumeId = null; + String scaleIOVolumePath = null; String errMsg = null; try { if (dataObject.getType() == DataObjectType.VOLUME) { - scaleIOVolumeId = ((VolumeInfo) dataObject).getPath(); + scaleIOVolumePath = ((VolumeInfo) dataObject).getPath(); 
resizeVolume((VolumeInfo) dataObject); } else { errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to resize"; @@ -841,7 +848,7 @@ public void resize(DataObject dataObject, AsyncCompletionCallback getVolumeStats(StoragePool storagePool, String volumeId) { + public Pair getVolumeStats(StoragePool storagePool, String volumePath) { Preconditions.checkArgument(storagePool != null, "storagePool cannot be null"); - Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "volumeId cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumePath), "volumePath cannot be null"); try { final ScaleIOGatewayClient client = getScaleIOClient(storagePool.getId()); - VolumeStatistics volumeStatistics = client.getVolumeStatistics(volumeId); + VolumeStatistics volumeStatistics = client.getVolumeStatistics(ScaleIOUtil.getVolumePath(volumePath)); if (volumeStatistics != null) { Long provisionedSizeInBytes = volumeStatistics.getNetProvisionedAddressesInBytes(); Long allocatedSizeInBytes = volumeStatistics.getAllocatedSizeInBytes(); return new Pair(provisionedSizeInBytes, allocatedSizeInBytes); } } catch (Exception e) { - String errMsg = "Unable to get stats for the volume: " + volumeId + " in the pool: " + storagePool.getId() + " due to " + e.getMessage(); + String errMsg = "Unable to get stats for the volume: " + volumePath + " in the pool: " + storagePool.getId() + " due to " + e.getMessage(); LOGGER.warn(errMsg); throw new CloudRuntimeException(errMsg, e); } diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java index d28d72c51ca3..0180f17cdd7a 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java +++ 
b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java @@ -20,6 +20,7 @@ import org.apache.log4j.Logger; import com.cloud.utils.script.Script; +import com.google.common.base.Strings; public class ScaleIOUtil { private static final Logger LOGGER = Logger.getLogger(ScaleIOUtil.class); @@ -95,4 +96,24 @@ public static final String getSystemIdForVolume(String volumeId) { return result; } + + public static final String getVolumePath(String volumePathWithName) { + if (Strings.isNullOrEmpty(volumePathWithName)) { + return volumePathWithName; + } + + if (volumePathWithName.contains(":")) { + return volumePathWithName.substring(0, volumePathWithName.indexOf(':')); + } + + return volumePathWithName; + } + + public static final String updatedPathWithVolumeName(String volumePath, String volumeName) { + if (Strings.isNullOrEmpty(volumePath) || Strings.isNullOrEmpty(volumeName)) { + return volumePath; + } + + return String.format("%s:%s", volumePath, volumeName); + } } From 1c16a1f97e499def2c50ba63aaaad3b867d8fbc2 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Mon, 8 Feb 2021 11:56:43 +0530 Subject: [PATCH 09/12] =?UTF-8?q?Added=20new=20response=20parameter=20?= =?UTF-8?q?=E2=80=9CsupportsStorageSnapshot=E2=80=9D=20(true/false)=20to?= =?UTF-8?q?=20volume=20response,=20and=20Updated=20UI=20to=20hide=20the=20?= =?UTF-8?q?async=20backup=20option=20while=20taking=20snapshot=20for=20vol?= =?UTF-8?q?ume(s)=20with=20storage=20snapshot=20support.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../org/apache/cloudstack/api/ApiConstants.java | 1 + .../cloudstack/api/response/VolumeResponse.java | 14 +++++++++++++- .../java/com/cloud/api/query/QueryManagerImpl.java | 3 +++ ui/src/views/compute/CreateSnapshotWizard.vue | 9 +++++++-- ui/src/views/storage/TakeSnapshot.vue | 4 +++- 5 files changed, 27 insertions(+), 4 deletions(-) diff --git 
a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 6d1cda92fe72..8b9df63605ef 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -339,6 +339,7 @@ public class ApiConstants { public static final String SNAPSHOT_POLICY_ID = "snapshotpolicyid"; public static final String SNAPSHOT_TYPE = "snapshottype"; public static final String SNAPSHOT_QUIESCEVM = "quiescevm"; + public static final String SUPPORTS_STORAGE_SNAPSHOT = "supportsstoragesnapshot"; public static final String SOURCE_ZONE_ID = "sourcezoneid"; public static final String START_DATE = "startdate"; public static final String START_ID = "startid"; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/VolumeResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/VolumeResponse.java index 1cdd69673cfb..e9254ef164d4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/VolumeResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/VolumeResponse.java @@ -248,8 +248,12 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co @Param(description = "need quiesce vm or not when taking snapshot", since = "4.3") private boolean needQuiescevm; + @SerializedName(ApiConstants.SUPPORTS_STORAGE_SNAPSHOT) + @Param(description = "true if storage snapshot is supported for the volume, false otherwise", since = "4.16") + private boolean supportsStorageSnapshot; + @SerializedName(ApiConstants.PHYSICAL_SIZE) - @Param(description = "the bytes alloaated") + @Param(description = "the bytes allocated") private Long physicalsize; @SerializedName(ApiConstants.VIRTUAL_SIZE) @@ -538,6 +542,14 @@ public boolean isNeedQuiescevm() { return this.needQuiescevm; } + public void setSupportsStorageSnapshot(boolean supportsStorageSnapshot) { + this.supportsStorageSnapshot = 
supportsStorageSnapshot; + } + + public boolean getSupportsStorageSnapshot() { + return this.supportsStorageSnapshot; + } + public String getIsoId() { return isoId; } diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 0f75086d256b..6cad8d1985b9 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -1901,6 +1901,9 @@ public ListResponse searchForVolumes(ListVolumesCmd cmd) { if (caps != null) { boolean quiescevm = Boolean.parseBoolean(caps.get(DataStoreCapabilities.VOLUME_SNAPSHOT_QUIESCEVM.toString())); vr.setNeedQuiescevm(quiescevm); + + boolean supportsStorageSnapshot = Boolean.parseBoolean(caps.get(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString())); + vr.setSupportsStorageSnapshot(supportsStorageSnapshot); } } response.setResponses(volumeResponses, result.second()); diff --git a/ui/src/views/compute/CreateSnapshotWizard.vue b/ui/src/views/compute/CreateSnapshotWizard.vue index bf9d9ddd25b5..eb58cee8ebf2 100644 --- a/ui/src/views/compute/CreateSnapshotWizard.vue +++ b/ui/src/views/compute/CreateSnapshotWizard.vue @@ -64,7 +64,7 @@ - + {{ $t('label.asyncbackup') }} @@ -97,6 +97,7 @@ export default { return { loading: false, isQuiesceVm: false, + supportsStorageSnapshot: false, listVolumes: [] } }, @@ -131,7 +132,10 @@ export default { const params = {} params.volumeid = values.volumeid params.name = values.name - params.asyncbackup = values.asyncbackup + params.asyncbackup = false + if (values.asyncbackup) { + params.asyncbackup = values.asyncbackup + } params.quiescevm = values.quiescevm const title = this.$t('label.action.vmstoragesnapshot.create') @@ -176,6 +180,7 @@ export default { const volumeFilter = this.listVolumes.filter(volume => volume.id === volumeId) if (volumeFilter && volumeFilter.length > 0) { this.isQuiesceVm = volumeFilter[0].quiescevm + 
this.supportsStorageSnapshot = volumeFilter[0].supportsstoragesnapshot } }, closeAction () { diff --git a/ui/src/views/storage/TakeSnapshot.vue b/ui/src/views/storage/TakeSnapshot.vue index 8aec2363531a..782df436ed1f 100644 --- a/ui/src/views/storage/TakeSnapshot.vue +++ b/ui/src/views/storage/TakeSnapshot.vue @@ -34,7 +34,7 @@ :placeholder="apiParams.name.description" /> - + @@ -113,6 +113,7 @@ export default { return { actionLoading: false, quiescevm: false, + supportsStorageSnapshot: false, inputValue: '', inputKey: '', inputVisible: '', @@ -130,6 +131,7 @@ export default { }, mounted () { this.quiescevm = this.resource.quiescevm + this.supportsStorageSnapshot = this.resource.supportsstoragesnapshot }, methods: { handleSubmit (e) { From ea6f7f1f4affa5419fbb57a2e084656e83fa74bc Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Fri, 12 Feb 2021 18:14:54 +0530 Subject: [PATCH 10/12] Fix to remove the duplicate zone wide pools listed while finding storage pools for migration --- .../cloud/server/ManagementServerImpl.java | 24 +++++++------------ 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index aae32925ca9e..7c0cba346e98 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -1508,7 +1508,7 @@ public Pair, List> listStorag } StoragePool srcVolumePool = _poolDao.findById(volume.getPoolId()); - allPools = getAllStoragePoolCompatileWithVolumeSourceStoragePool(srcVolumePool); + allPools = getAllStoragePoolsCompatibleWithVolumeSourceStoragePool(srcVolumePool); if (vm != null) { suitablePools = findAllSuitableStoragePoolsForVm(volume, vm, srcVolumePool); } else { @@ -1546,23 +1546,17 @@ private void abstractDataStoreClustersList(List storagePools, List< *
  • We also all storage available filtering by data center, pod and cluster as the current storage pool used by the given volume.
  • * */ - private List getAllStoragePoolCompatileWithVolumeSourceStoragePool(StoragePool srcVolumePool) { + private List getAllStoragePoolsCompatibleWithVolumeSourceStoragePool(StoragePool srcVolumePool) { List storagePools = new ArrayList<>(); - List clusterAndLocalStoragePools = _poolDao.listBy(srcVolumePool.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null); - if (CollectionUtils.isNotEmpty(clusterAndLocalStoragePools)) { - clusterAndLocalStoragePools.remove(srcVolumePool); - storagePools.addAll(clusterAndLocalStoragePools); - } - if (srcVolumePool.getClusterId() == null) { - // Return the pools as the above storage pools list would also contain zone wide pools when srcVolumePool is a zone wide pool - return storagePools; + // Storage pool with Zone Scope holds valid DataCenter Id only, Pod Id and Cluster Id are null + // Storage pool with Cluster/Host Scope holds valid DataCenter Id, Pod Id and Cluster Id + // Below methods call returns all the compatible pools with scope : ZONE, CLUSTER, HOST (as they are listed with Scope: null here) + List compatibleStoragePools = _poolDao.listBy(srcVolumePool.getDataCenterId(), srcVolumePool.getPodId(), srcVolumePool.getClusterId(), null); + if (CollectionUtils.isNotEmpty(compatibleStoragePools)) { + compatibleStoragePools.remove(srcVolumePool); + storagePools.addAll(compatibleStoragePools); } - List zoneWideStoragePools = _poolDao.findZoneWideStoragePoolsByTags(srcVolumePool.getDataCenterId(), null); - if (CollectionUtils.isNotEmpty(zoneWideStoragePools)) { - zoneWideStoragePools.remove(srcVolumePool); - storagePools.addAll(zoneWideStoragePools); - } return storagePools; } From dc91003e987b3c9a89db99792f54758608ac44c3 Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Mon, 15 Feb 2021 15:57:47 +0530 Subject: [PATCH 11/12] Updated PowerFlex/ScaleIO volume migration checks and rollback migration on failure --- .../storage/volume/VolumeServiceImpl.java | 66 ++++--- 
.../client/ScaleIOGatewayClientImpl.java | 169 ++++++++++++++++-- 2 files changed, 193 insertions(+), 42 deletions(-) diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index a3498cd80b61..68940d48b362 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -1638,7 +1638,6 @@ public CopyVolumeContext(AsyncCompletionCallback callback, AsyncCallFuture copyVolumeFromImageToPrimary(VolumeInfo srcVolume, DataStore destStore) { @@ -1821,10 +1820,10 @@ protected Void copyVolumeCallBack(AsyncCallbackDispatcher extends AsyncRpcContext { + final VolumeInfo srcVolume; + final VolumeInfo destVolume; + final Host host; + final AsyncCallFuture future; + + public CopyManagedVolumeContext(AsyncCompletionCallback callback, AsyncCallFuture future, VolumeInfo srcVolume, VolumeInfo destVolume, Host host) { + super(callback); + this.srcVolume = srcVolume; + this.destVolume = destVolume; + this.host = host; + this.future = future; + } + } + private AsyncCallFuture copyManagedVolume(VolumeInfo srcVolume, DataStore destStore) { AsyncCallFuture future = new AsyncCallFuture(); VolumeApiResult res = new VolumeApiResult(srcVolume); @@ -1911,14 +1925,7 @@ private AsyncCallFuture copyManagedVolume(VolumeInfo srcVolume, // Refresh the volume info from the DB. 
destVolume = volFactory.getVolume(destVolume.getId(), destStore); - destVolume.processEvent(Event.CreateRequested); - srcVolume.processEvent(Event.MigrationRequested); - - CopyVolumeContext context = new CopyVolumeContext(null, future, srcVolume, destVolume, destStore); - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); - caller.setCallback(caller.getTarget().copyManagedVolumeCallBack(null, null)).setContext(context); - - PrimaryDataStore srcPrimaryDataStore = (PrimaryDataStore) srcVolume.getDataStore(); + PrimaryDataStore srcPrimaryDataStore = (PrimaryDataStore) srcVolume.getDataStore(); if (srcPrimaryDataStore.isManaged()) { Map srcPrimaryDataStoreDetails = new HashMap(); srcPrimaryDataStoreDetails.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); @@ -1945,14 +1952,14 @@ private AsyncCallFuture copyManagedVolume(VolumeInfo srcVolume, grantAccess(destVolume, hostWithPoolsAccess, destStore); - try { - motionSrv.copyAsync(srcVolume, destVolume, hostWithPoolsAccess, caller); - } finally { - if (srcPrimaryDataStore.isManaged()) { - revokeAccess(srcVolume, hostWithPoolsAccess, srcVolume.getDataStore()); - } - revokeAccess(destVolume, hostWithPoolsAccess, destStore); - } + destVolume.processEvent(Event.CreateRequested); + srcVolume.processEvent(Event.MigrationRequested); + + CopyManagedVolumeContext context = new CopyManagedVolumeContext(null, future, srcVolume, destVolume, hostWithPoolsAccess); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().copyManagedVolumeCallBack(null, null)).setContext(context); + + motionSrv.copyAsync(srcVolume, destVolume, hostWithPoolsAccess, caller); } catch (Exception e) { s_logger.error("Copy to managed volume failed due to: " + e); if(s_logger.isDebugEnabled()) { @@ -1965,13 +1972,20 @@ private AsyncCallFuture copyManagedVolume(VolumeInfo srcVolume, return future; } - protected Void copyManagedVolumeCallBack(AsyncCallbackDispatcher callback, 
CopyVolumeContext context) { + protected Void copyManagedVolumeCallBack(AsyncCallbackDispatcher callback, CopyManagedVolumeContext context) { VolumeInfo srcVolume = context.srcVolume; VolumeInfo destVolume = context.destVolume; + Host host = context.host; CopyCommandResult result = callback.getResult(); AsyncCallFuture future = context.future; VolumeApiResult res = new VolumeApiResult(destVolume); + try { + if (srcVolume.getDataStore() != null && ((PrimaryDataStore) srcVolume.getDataStore()).isManaged()) { + revokeAccess(srcVolume, host, srcVolume.getDataStore()); + } + revokeAccess(destVolume, host, destVolume.getDataStore()); + if (result.isFailed()) { res.setResult(result.getResult()); destVolume.processEvent(Event.MigrationCopyFailed); diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java index 915c49e93568..e39f59ee1e93 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java @@ -788,11 +788,62 @@ public boolean migrateVolume(final String srcVolumeId, final String destPoolId, HttpResponse response = null; try { + Volume volume = getVolume(srcVolumeId); + if (volume == null || Strings.isNullOrEmpty(volume.getVtreeId())) { + LOG.warn("Couldn't find the volume(-tree), can not migrate the volume " + srcVolumeId); + return false; + } + + String srcPoolId = volume.getStoragePoolId(); + LOG.debug("Migrating the volume: " + srcVolumeId + " on the src pool: " + srcPoolId + " to the dest pool: " + destPoolId + + " in the same PowerFlex cluster"); + response = post( "/instances/Volume::" + srcVolumeId + "/action/migrateVTree", String.format("{\"destSPId\":\"%s\"}", 
destPoolId)); checkResponseOK(response); - return waitForVolumeMigrationToComplete(srcVolumeId, timeoutInSecs); + + LOG.debug("Wait until the migration is complete for the volume: " + srcVolumeId); + long migrationStartTime = System.currentTimeMillis(); + boolean status = waitForVolumeMigrationToComplete(volume.getVtreeId(), timeoutInSecs); + + // Check volume storage pool and migration status + // volume, v-tree, snapshot ids remain the same after the migration + volume = getVolume(srcVolumeId); + if (volume == null || volume.getStoragePoolId() == null) { + LOG.warn("Couldn't get the volume: " + srcVolumeId + " details after migration"); + return status; + } else { + String volumeOnPoolId = volume.getStoragePoolId(); + // confirm whether the volume is on the dest storage pool or not + if (status && destPoolId.equalsIgnoreCase(volumeOnPoolId)) { + LOG.debug("Migration success for the volume: " + srcVolumeId); + return true; + } else { + try { + // Check and pause any migration activity on the volume + status = false; + VTreeMigrationInfo.MigrationStatus migrationStatus = getVolumeTreeMigrationStatus(volume.getVtreeId()); + if (migrationStatus != null && migrationStatus != VTreeMigrationInfo.MigrationStatus.NotInMigration) { + long timeElapsedInSecs = (System.currentTimeMillis() - migrationStartTime) / 1000; + int timeRemainingInSecs = (int) (timeoutInSecs - timeElapsedInSecs); + if (timeRemainingInSecs > (timeoutInSecs / 2)) { + // Try to pause gracefully (continue the migration) if at least half of the time is remaining + pauseVolumeMigration(srcVolumeId, false); + status = waitForVolumeMigrationToComplete(volume.getVtreeId(), timeRemainingInSecs); + } + } + + if (!status) { + rollbackVolumeMigration(srcVolumeId); + } + + return status; + } catch (Exception ex) { + LOG.warn("Exception on pause/rollback migration of the volume: " + srcVolumeId + " - " + ex.getLocalizedMessage()); + } + } + } } catch (final IOException e) { LOG.error("Failed to migrate PowerFlex volume 
due to:", e); checkResponseTimeOut(e); @@ -801,39 +852,44 @@ public boolean migrateVolume(final String srcVolumeId, final String destPoolId, EntityUtils.consumeQuietly(response.getEntity()); } } + LOG.debug("Migration failed for the volume: " + srcVolumeId); return false; } - private boolean waitForVolumeMigrationToComplete(final String volumeId, int waitTimeInSec) { - LOG.debug("Waiting for the migration to complete for the volume " + volumeId); - Volume volume = getVolume(volumeId); - if (volume == null || Strings.isNullOrEmpty(volume.getVtreeId())) { - LOG.warn("Failed to get volume details, unable to check the migration status for the volume " + volumeId); + private boolean waitForVolumeMigrationToComplete(final String volumeTreeId, int waitTimeInSec) { + LOG.debug("Waiting for the migration to complete for the volume-tree " + volumeTreeId); + if (Strings.isNullOrEmpty(volumeTreeId)) { + LOG.warn("Invalid volume-tree id, unable to check the migration status of the volume-tree " + volumeTreeId); return false; } - String volumeTreeId = volume.getVtreeId(); while (waitTimeInSec > 0) { - VTreeMigrationInfo.MigrationStatus migrationStatus = getVolumeTreeMigrationStatus(volumeTreeId); - if (migrationStatus != null && migrationStatus == VTreeMigrationInfo.MigrationStatus.NotInMigration) { - LOG.debug("Migration completed for the volume " + volumeId); - return true; - } - - waitTimeInSec--; - try { Thread.sleep(1000); // Try every sec and return after migration is complete + + VTreeMigrationInfo.MigrationStatus migrationStatus = getVolumeTreeMigrationStatus(volumeTreeId); + if (migrationStatus != null && migrationStatus == VTreeMigrationInfo.MigrationStatus.NotInMigration) { + LOG.debug("Migration completed for the volume-tree " + volumeTreeId); + return true; + } } catch (Exception ex) { + LOG.warn("Exception while checking for migration status of the volume-tree: " + volumeTreeId + " - " + ex.getLocalizedMessage()); // don't do anything + } finally { + 
waitTimeInSec--; } } - LOG.debug("Unable to complete the migration for the volume " + volumeId); + LOG.debug("Unable to complete the migration for the volume-tree " + volumeTreeId); return false; } private VTreeMigrationInfo.MigrationStatus getVolumeTreeMigrationStatus(final String volumeTreeId) { + if (Strings.isNullOrEmpty(volumeTreeId)) { + LOG.warn("Invalid volume-tree id, unable to get the migration status of the volume-tree " + volumeTreeId); + return null; + } + HttpResponse response = null; try { response = get("/instances/VTree::" + volumeTreeId); @@ -854,6 +910,87 @@ private VTreeMigrationInfo.MigrationStatus getVolumeTreeMigrationStatus(final St return null; } + private boolean rollbackVolumeMigration(final String srcVolumeId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(srcVolumeId), "src volume id cannot be null"); + + HttpResponse response = null; + try { + Volume volume = getVolume(srcVolumeId); + VTreeMigrationInfo.MigrationStatus migrationStatus = getVolumeTreeMigrationStatus(volume.getVtreeId()); + if (migrationStatus != null && migrationStatus == VTreeMigrationInfo.MigrationStatus.NotInMigration) { + LOG.debug("Volume: " + srcVolumeId + " is not migrating, no need to rollback"); + return true; + } + + pauseVolumeMigration(srcVolumeId, true); // Pause forcefully + // Wait few secs for volume migration to change to Paused state + boolean paused = false; + int retryCount = 5; + while (retryCount > 0) { + try { + Thread.sleep(1000); // Try every sec + migrationStatus = getVolumeTreeMigrationStatus(volume.getVtreeId()); // Get updated migration status + if (migrationStatus != null && migrationStatus == VTreeMigrationInfo.MigrationStatus.Paused) { + LOG.debug("Migration for the volume: " + srcVolumeId + " paused"); + paused = true; + break; + } + } catch (Exception ex) { + LOG.warn("Exception while checking for migration pause status of the volume: " + srcVolumeId + " - " + ex.getLocalizedMessage()); + // don't do anything + } finally { + 
retryCount--; + } + } + + if (paused) { + // Rollback migration to the src pool (should be quick) + response = post( + "/instances/Volume::" + srcVolumeId + "/action/migrateVTree", + String.format("{\"destSPId\":\"%s\"}", volume.getStoragePoolId())); + checkResponseOK(response); + return true; + } else { + LOG.warn("Migration for the volume: " + srcVolumeId + " didn't pause, couldn't rollback"); + } + } catch (final IOException e) { + LOG.error("Failed to rollback volume migration due to: ", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + + private boolean pauseVolumeMigration(final String volumeId, final boolean forced) { + if (Strings.isNullOrEmpty(volumeId)) { + LOG.warn("Invalid Volume Id, Unable to pause migration of the volume " + volumeId); + return false; + } + + HttpResponse response = null; + try { + // When paused gracefully, all data currently being moved is allowed to complete the migration. + // When paused forcefully, migration of unfinished data is aborted and data is left at the source, if possible. + // Pausing forcefully carries a potential risk to data. + response = post( + "/instances/Volume::" + volumeId + "/action/pauseVTreeMigration", + String.format("{\"pauseType\":\"%s\"}", forced ? 
"Forcefully" : "Gracefully")); + checkResponseOK(response); + return true; + } catch (final IOException e) { + LOG.error("Failed to pause migration of the volume due to: ", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + /////////////////////////////////////////////////////// //////////////// StoragePool APIs ///////////////////// /////////////////////////////////////////////////////// From 010dfeef1bfd6f26e58724d998833f7657b29e6e Mon Sep 17 00:00:00 2001 From: Suresh Kumar Anaparti Date: Thu, 18 Feb 2021 18:39:25 +0530 Subject: [PATCH 12/12] Fixed the PowerFlex/ScaleIO volume name inconsistency issue in the volume path after migration, due to rename failure --- .../client/ScaleIOGatewayClientImpl.java | 47 +++++++++++-------- .../driver/ScaleIOPrimaryDataStoreDriver.java | 26 +++++++--- 2 files changed, 47 insertions(+), 26 deletions(-) diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java index e39f59ee1e93..5e8568dede1f 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java @@ -376,7 +376,7 @@ public boolean renameVolume(final String volumeId, final String newName) { checkResponseOK(response); return true; } catch (final IOException e) { - LOG.error("Failed to rename PowerFlex volume due to:", e); + LOG.error("Failed to rename PowerFlex volume due to: ", e); checkResponseTimeOut(e); } finally { if (response != null) { @@ -786,7 +786,6 @@ public boolean migrateVolume(final String srcVolumeId, final String destPoolId, 
Preconditions.checkArgument(!Strings.isNullOrEmpty(destPoolId), "dest pool id cannot be null"); Preconditions.checkArgument(timeoutInSecs > 0, "timeout must be greater than 0"); - HttpResponse response = null; try { Volume volume = getVolume(srcVolumeId); if (volume == null || Strings.isNullOrEmpty(volume.getVtreeId())) { @@ -798,10 +797,21 @@ public boolean migrateVolume(final String srcVolumeId, final String destPoolId, LOG.debug("Migrating the volume: " + srcVolumeId + " on the src pool: " + srcPoolId + " to the dest pool: " + destPoolId + " in the same PowerFlex cluster"); - response = post( - "/instances/Volume::" + srcVolumeId + "/action/migrateVTree", - String.format("{\"destSPId\":\"%s\"}", destPoolId)); - checkResponseOK(response); + HttpResponse response = null; + try { + response = post( + "/instances/Volume::" + srcVolumeId + "/action/migrateVTree", + String.format("{\"destSPId\":\"%s\"}", destPoolId)); + checkResponseOK(response); + } catch (final IOException e) { + LOG.error("Unable to migrate PowerFlex volume due to: ", e); + checkResponseTimeOut(e); + throw e; + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } LOG.debug("Wait until the migration is complete for the volume: " + srcVolumeId); long migrationStartTime = System.currentTimeMillis(); @@ -844,28 +854,27 @@ public boolean migrateVolume(final String srcVolumeId, final String destPoolId, } } } - } catch (final IOException e) { - LOG.error("Failed to migrate PowerFlex volume due to:", e); - checkResponseTimeOut(e); - } finally { - if (response != null) { - EntityUtils.consumeQuietly(response.getEntity()); - } + } catch (final Exception e) { + LOG.error("Failed to migrate PowerFlex volume due to: " + e.getMessage(), e); + throw new CloudRuntimeException("Failed to migrate PowerFlex volume due to: " + e.getMessage()); } + LOG.debug("Migration failed for the volume: " + srcVolumeId); return false; } - private boolean 
waitForVolumeMigrationToComplete(final String volumeTreeId, int waitTimeInSec) { + private boolean waitForVolumeMigrationToComplete(final String volumeTreeId, int waitTimeoutInSecs) { LOG.debug("Waiting for the migration to complete for the volume-tree " + volumeTreeId); if (Strings.isNullOrEmpty(volumeTreeId)) { LOG.warn("Invalid volume-tree id, unable to check the migration status of the volume-tree " + volumeTreeId); return false; } - while (waitTimeInSec > 0) { + int delayTimeInSecs = 3; + while (waitTimeoutInSecs > 0) { try { - Thread.sleep(1000); // Try every sec and return after migration is complete + // Wait and try after few secs (reduce no. of client API calls to check the migration status) and return after migration is complete + Thread.sleep(delayTimeInSecs * 1000); VTreeMigrationInfo.MigrationStatus migrationStatus = getVolumeTreeMigrationStatus(volumeTreeId); if (migrationStatus != null && migrationStatus == VTreeMigrationInfo.MigrationStatus.NotInMigration) { @@ -876,7 +885,7 @@ private boolean waitForVolumeMigrationToComplete(final String volumeTreeId, int LOG.warn("Exception while checking for migration status of the volume-tree: " + volumeTreeId + " - " + ex.getLocalizedMessage()); // don't do anything } finally { - waitTimeInSec--; + waitTimeoutInSecs = waitTimeoutInSecs - delayTimeInSecs; } } @@ -925,10 +934,10 @@ private boolean rollbackVolumeMigration(final String srcVolumeId) { pauseVolumeMigration(srcVolumeId, true); // Pause forcefully // Wait few secs for volume migration to change to Paused state boolean paused = false; - int retryCount = 5; + int retryCount = 3; while (retryCount > 0) { try { - Thread.sleep(1000); // Try every sec + Thread.sleep(3000); // Try after few secs migrationStatus = getVolumeTreeMigrationStatus(volume.getVtreeId()); // Get updated migration status if (migrationStatus != null && migrationStatus == VTreeMigrationInfo.MigrationStatus.Paused) { LOG.debug("Migration for the volume: " + srcVolumeId + " paused"); diff 
--git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java index f716bac4ee98..f840bae01683 100644 --- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java @@ -677,7 +677,8 @@ private Answer migrateVolume(DataObject srcData, DataObject destData) { long destPoolId = destStore.getId(); final ScaleIOGatewayClient client = getScaleIOClient(srcPoolId); - final String srcVolumeId = ScaleIOUtil.getVolumePath(((VolumeInfo) srcData).getPath()); + final String srcVolumePath = ((VolumeInfo) srcData).getPath(); + final String srcVolumeId = ScaleIOUtil.getVolumePath(srcVolumePath); final StoragePoolVO destStoragePool = storagePoolDao.findById(destPoolId); final String destStoragePoolId = destStoragePool.getPath(); int migrationTimeout = StorageManager.KvmStorageOfflineMigrationWait.value(); @@ -685,13 +686,20 @@ private Answer migrateVolume(DataObject srcData, DataObject destData) { if (migrateStatus) { String newVolumeName = String.format("%s-%s-%s-%s", ScaleIOUtil.VOLUME_PREFIX, destData.getId(), destStoragePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); - client.renameVolume(srcVolumeId, newVolumeName); + boolean renamed = client.renameVolume(srcVolumeId, newVolumeName); if (srcData.getId() != destData.getId()) { VolumeVO destVolume = volumeDao.findById(destData.getId()); - String newVolumePath = ScaleIOUtil.updatedPathWithVolumeName(srcVolumeId, newVolumeName); - destVolume.set_iScsiName(newVolumePath); - destVolume.setPath(newVolumePath); + // Volume Id in the PowerFlex/ScaleIO pool remains the same after the migration + 
// Update PowerFlex volume name only after it is renamed, to maintain the consistency + if (renamed) { + String newVolumePath = ScaleIOUtil.updatedPathWithVolumeName(srcVolumeId, newVolumeName); + destVolume.set_iScsiName(newVolumePath); + destVolume.setPath(newVolumePath); + } else { + destVolume.set_iScsiName(srcVolumePath); + destVolume.setPath(srcVolumePath); + } volumeDao.update(destData.getId(), destVolume); VolumeVO srcVolume = volumeDao.findById(srcData.getId()); @@ -719,10 +727,14 @@ private Answer migrateVolume(DataObject srcData, DataObject destData) { String snapshotVolumeId = ScaleIOUtil.getVolumePath(snapshotStore.getInstallPath()); String newSnapshotName = String.format("%s-%s-%s-%s", ScaleIOUtil.SNAPSHOT_PREFIX, snapshot.getId(), destStoragePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); - client.renameVolume(snapshotVolumeId, newSnapshotName); + renamed = client.renameVolume(snapshotVolumeId, newSnapshotName); snapshotStore.setDataStoreId(destPoolId); - snapshotStore.setInstallPath(ScaleIOUtil.updatedPathWithVolumeName(snapshotVolumeId, newSnapshotName)); + // Snapshot Id in the PowerFlex/ScaleIO pool remains the same after the migration + // Update PowerFlex snapshot name only after it is renamed, to maintain the consistency + if (renamed) { + snapshotStore.setInstallPath(ScaleIOUtil.updatedPathWithVolumeName(snapshotVolumeId, newSnapshotName)); + } snapshotDataStoreDao.update(snapshotStore.getId(), snapshotStore); } }