diff --git a/agent/conf/agent.properties b/agent/conf/agent.properties index 325e12da5e2b..06d8f3f2a1ef 100644 --- a/agent/conf/agent.properties +++ b/agent/conf/agent.properties @@ -143,6 +143,9 @@ hypervisor.type=kvm # This parameter specifies a directory on the host local storage for temporary storing direct download templates #direct.download.temporary.download.location=/var/lib/libvirt/images +# This parameter specifies a directory on the host local storage for creating and hosting the config drives +#host.cache.location=/var/cache/cloud + # set the rolling maintenance hook scripts directory #rolling.maintenance.hooks.dir=/etc/cloudstack/agent/hooks.d diff --git a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java index dceacf0e65bc..473c92645bc6 100644 --- a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java @@ -22,6 +22,7 @@ import com.cloud.agent.api.LogLevel; import com.cloud.agent.api.storage.OVFPropertyTO; +import com.cloud.network.element.NetworkElement; import com.cloud.template.VirtualMachineTemplate.BootloaderType; import com.cloud.utils.Pair; import com.cloud.vm.VirtualMachine; @@ -76,6 +77,7 @@ public class VirtualMachineTO { String configDriveLabel = null; String configDriveIsoRootFolder = null; String configDriveIsoFile = null; + NetworkElement.Location configDriveLocation = NetworkElement.Location.SECONDARY; Double cpuQuotaPercentage = null; @@ -353,6 +355,18 @@ public void setConfigDriveIsoFile(String configDriveIsoFile) { this.configDriveIsoFile = configDriveIsoFile; } + public boolean isConfigDriveOnHostCache() { + return (this.configDriveLocation == NetworkElement.Location.HOST); + } + + public NetworkElement.Location getConfigDriveLocation() { + return configDriveLocation; + } + + public void setConfigDriveLocation(NetworkElement.Location configDriveLocation) { + 
this.configDriveLocation = configDriveLocation; + } + public Map getGuestOsDetails() { return guestOsDetails; } diff --git a/api/src/main/java/com/cloud/exception/StorageAccessException.java b/api/src/main/java/com/cloud/exception/StorageAccessException.java new file mode 100644 index 000000000000..eefbcf5518a3 --- /dev/null +++ b/api/src/main/java/com/cloud/exception/StorageAccessException.java @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.exception; + +import com.cloud.utils.SerialVersionUID; + +/** + * Thrown when a storage pool is not accessible on a host, for example while + * preparing or attaching a volume. 
+ * + */ +public class StorageAccessException extends RuntimeException { + private static final long serialVersionUID = SerialVersionUID.StorageAccessException; + + public StorageAccessException(String message) { + super(message); + } +} diff --git a/api/src/main/java/com/cloud/network/element/NetworkElement.java b/api/src/main/java/com/cloud/network/element/NetworkElement.java index 951732f727cd..fa67575edd35 100644 --- a/api/src/main/java/com/cloud/network/element/NetworkElement.java +++ b/api/src/main/java/com/cloud/network/element/NetworkElement.java @@ -39,6 +39,10 @@ */ public interface NetworkElement extends Adapter { + enum Location { + SECONDARY, PRIMARY, HOST + } + Map> getCapabilities(); /** diff --git a/api/src/main/java/com/cloud/storage/Storage.java b/api/src/main/java/com/cloud/storage/Storage.java index 82bc5f6d4e5a..b9cbaf10c9d6 100644 --- a/api/src/main/java/com/cloud/storage/Storage.java +++ b/api/src/main/java/com/cloud/storage/Storage.java @@ -135,6 +135,7 @@ public static enum StoragePoolType { OCFS2(true, false), SMB(true, false), Gluster(true, false), + PowerFlex(true, true), // Dell EMC PowerFlex/ScaleIO (formerly VxFlexOS) ManagedNFS(true, false); private final boolean shared; diff --git a/api/src/main/java/com/cloud/storage/Volume.java b/api/src/main/java/com/cloud/storage/Volume.java index 5fd78efb307e..daabec501720 100644 --- a/api/src/main/java/com/cloud/storage/Volume.java +++ b/api/src/main/java/com/cloud/storage/Volume.java @@ -29,6 +29,11 @@ import com.cloud.utils.fsm.StateObject; public interface Volume extends ControlledEntity, Identity, InternalIdentity, BasedOn, StateObject, Displayable { + + // Managed storage volume parameters (specified in the compute/disk offering for PowerFlex) + String BANDWIDTH_LIMIT_IN_MBPS = "bandwidthLimitInMbps"; + String IOPS_LIMIT = "iopsLimit"; + enum Type { UNKNOWN, ROOT, SWAP, DATADISK, ISO }; diff --git a/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java 
b/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java index c17a716666d4..f87939a13f4b 100644 --- a/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java +++ b/api/src/main/java/com/cloud/vm/VirtualMachineProfile.java @@ -20,7 +20,9 @@ import java.util.Map; import com.cloud.agent.api.to.DiskTO; +import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.network.element.NetworkElement; import com.cloud.offering.ServiceOffering; import com.cloud.template.VirtualMachineTemplate; import com.cloud.template.VirtualMachineTemplate.BootloaderType; @@ -54,6 +56,10 @@ public interface VirtualMachineProfile { void setConfigDriveIsoFile(String isoFile); + NetworkElement.Location getConfigDriveLocation(); + + void setConfigDriveLocation(NetworkElement.Location location); + public static class Param { public static final Param VmPassword = new Param("VmPassword"); @@ -100,6 +106,10 @@ public boolean equals(Object obj) { } } + Long getHostId(); + + void setHost(Host host); + String getHostName(); String getInstanceName(); diff --git a/api/src/main/java/com/cloud/vm/VmDetailConstants.java b/api/src/main/java/com/cloud/vm/VmDetailConstants.java index 3812aa211445..8589287ad6f8 100644 --- a/api/src/main/java/com/cloud/vm/VmDetailConstants.java +++ b/api/src/main/java/com/cloud/vm/VmDetailConstants.java @@ -56,6 +56,8 @@ public interface VmDetailConstants { String PASSWORD = "password"; String ENCRYPTED_PASSWORD = "Encrypted.Password"; + String CONFIG_DRIVE_LOCATION = "configDriveLocation"; + // VM import with nic, disk and custom params for custom compute offering String NIC = "nic"; String NETWORK = "network"; diff --git a/api/src/main/java/org/apache/cloudstack/alert/AlertService.java b/api/src/main/java/org/apache/cloudstack/alert/AlertService.java index 26c3f3cf3ab4..c2cd1b22332e 100644 --- a/api/src/main/java/org/apache/cloudstack/alert/AlertService.java +++ b/api/src/main/java/org/apache/cloudstack/alert/AlertService.java @@ 
-16,12 +16,12 @@ // under the License. package org.apache.cloudstack.alert; -import com.cloud.capacity.Capacity; -import com.cloud.exception.InvalidParameterValueException; - import java.util.HashSet; import java.util.Set; +import com.cloud.capacity.Capacity; +import com.cloud.exception.InvalidParameterValueException; + public interface AlertService { public static class AlertType { private static Set defaultAlertTypes = new HashSet(); @@ -69,6 +69,7 @@ private AlertType(short type, String name, boolean isDefault) { public static final AlertType ALERT_TYPE_OOBM_AUTH_ERROR = new AlertType((short)29, "ALERT.OOBM.AUTHERROR", true); public static final AlertType ALERT_TYPE_HA_ACTION = new AlertType((short)30, "ALERT.HA.ACTION", true); public static final AlertType ALERT_TYPE_CA_CERT = new AlertType((short)31, "ALERT.CA.CERT", true); + public static final AlertType ALERT_TYPE_VM_SNAPSHOT = new AlertType((short)32, "ALERT.VM.SNAPSHOT", true); public short getType() { return type; diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 88f083b50e6c..81cc756fbc55 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -825,6 +825,8 @@ public class ApiConstants { public static final String BOOT_MODE = "bootmode"; public static final String BOOT_INTO_SETUP = "bootintosetup"; + public static final String POOL_TYPE ="pooltype"; + public enum BootType { UEFI, BIOS; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java index f0ca5fb851a1..09617b079f85 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateDiskOfferingCmd.java @@ 
-16,8 +16,11 @@ // under the License. package org.apache.cloudstack.api.command.admin.offering; +import java.util.Collection; +import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; +import java.util.Map; import java.util.Set; import org.apache.cloudstack.api.APICommand; @@ -30,6 +33,7 @@ import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; import org.apache.log4j.Logger; import com.cloud.offering.DiskOffering; @@ -151,6 +155,9 @@ public class CreateDiskOfferingCmd extends BaseCmd { since = "4.14") private String cacheMode; + @Parameter(name = ApiConstants.DETAILS, type = CommandType.MAP, description = "details to specify disk offering parameters") + private Map details; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -273,6 +280,20 @@ public String getCacheMode() { return cacheMode; } + public Map getDetails() { + Map detailsMap = new HashMap<>(); + if (MapUtils.isNotEmpty(details)) { + Collection props = details.values(); + for (Object prop : props) { + HashMap detail = (HashMap) prop; + for (Map.Entry entry: detail.entrySet()) { + detailsMap.put(entry.getKey(),entry.getValue()); + } + } + } + return detailsMap; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java index 5015f7c51b87..d01c456bcd1b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/CreateServiceOfferingCmd.java @@ -314,7 +314,15 @@ public Map getDetails() { Collection props = details.values(); for (Object prop : props) { HashMap detail = (HashMap) prop; - detailsMap.put(detail.get("key"), detail.get("value")); + // Compatibility with key and value pairs input from API cmd for details map parameter + if (!Strings.isNullOrEmpty(detail.get("key")) && !Strings.isNullOrEmpty(detail.get("value"))) { + detailsMap.put(detail.get("key"), detail.get("value")); + continue; + } + + for (Map.Entry entry: detail.entrySet()) { + detailsMap.put(entry.getKey(),entry.getValue()); + } } } return detailsMap; diff --git a/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java index 44eaba7f313f..587c9d2249c6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/UserVmResponse.java @@ -310,6 +310,10 @@ public class UserVmResponse extends BaseResponseWithTagInformation implements Co @Param(description = "Guest vm Boot Type") private String bootType; + @SerializedName(ApiConstants.POOL_TYPE) + @Param(description = "the pool type of the virtual machine") + private String poolType; + public UserVmResponse() { securityGroupList = new LinkedHashSet(); nics = new LinkedHashSet(); @@ -901,4 +905,8 @@ public String getOsDisplayName() { public String getBootMode() { return bootMode; } public void setBootMode(String bootMode) { this.bootMode = bootMode; } + + public String getPoolType() { return poolType; } + + public void setPoolType(String poolType) { this.poolType = poolType; } } diff --git a/api/src/test/java/com/cloud/storage/StorageTest.java b/api/src/test/java/com/cloud/storage/StorageTest.java index 332a8060d08d..6925c307e92a 100644 --- a/api/src/test/java/com/cloud/storage/StorageTest.java +++ 
b/api/src/test/java/com/cloud/storage/StorageTest.java @@ -16,11 +16,12 @@ // under the License. package com.cloud.storage; -import com.cloud.storage.Storage.StoragePoolType; import org.junit.Assert; import org.junit.Before; import org.junit.Test; +import com.cloud.storage.Storage.StoragePoolType; + public class StorageTest { @Before public void setUp() { @@ -37,6 +38,7 @@ public void isSharedStoragePool() { Assert.assertFalse(StoragePoolType.LVM.isShared()); Assert.assertTrue(StoragePoolType.CLVM.isShared()); Assert.assertTrue(StoragePoolType.RBD.isShared()); + Assert.assertTrue(StoragePoolType.PowerFlex.isShared()); Assert.assertTrue(StoragePoolType.SharedMountPoint.isShared()); Assert.assertTrue(StoragePoolType.VMFS.isShared()); Assert.assertTrue(StoragePoolType.PreSetup.isShared()); @@ -58,6 +60,7 @@ public void supportsOverprovisioningStoragePool() { Assert.assertFalse(StoragePoolType.LVM.supportsOverProvisioning()); Assert.assertFalse(StoragePoolType.CLVM.supportsOverProvisioning()); Assert.assertTrue(StoragePoolType.RBD.supportsOverProvisioning()); + Assert.assertTrue(StoragePoolType.PowerFlex.supportsOverProvisioning()); Assert.assertFalse(StoragePoolType.SharedMountPoint.supportsOverProvisioning()); Assert.assertTrue(StoragePoolType.VMFS.supportsOverProvisioning()); Assert.assertTrue(StoragePoolType.PreSetup.supportsOverProvisioning()); diff --git a/client/pom.xml b/client/pom.xml index 7b9808ba6fc4..33bd6e2f3649 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -87,6 +87,11 @@ cloud-plugin-storage-volume-datera ${project.version} + + org.apache.cloudstack + cloud-plugin-storage-volume-scaleio + ${project.version} + org.apache.cloudstack cloud-server diff --git a/core/src/main/java/com/cloud/agent/api/HandleConfigDriveIsoAnswer.java b/core/src/main/java/com/cloud/agent/api/HandleConfigDriveIsoAnswer.java new file mode 100644 index 000000000000..769f886cc046 --- /dev/null +++ b/core/src/main/java/com/cloud/agent/api/HandleConfigDriveIsoAnswer.java @@ -0,0 
+1,55 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package com.cloud.agent.api; + +import com.cloud.network.element.NetworkElement; +import com.cloud.utils.exception.ExceptionUtil; + +public class HandleConfigDriveIsoAnswer extends Answer { + + @LogLevel(LogLevel.Log4jLevel.Off) + private NetworkElement.Location location = NetworkElement.Location.SECONDARY; + + public HandleConfigDriveIsoAnswer(final HandleConfigDriveIsoCommand cmd) { + super(cmd); + } + + public HandleConfigDriveIsoAnswer(final HandleConfigDriveIsoCommand cmd, final NetworkElement.Location location) { + super(cmd); + this.location = location; + } + + public HandleConfigDriveIsoAnswer(final HandleConfigDriveIsoCommand cmd, final NetworkElement.Location location, final String details) { + super(cmd, true, details); + this.location = location; + } + + public HandleConfigDriveIsoAnswer(final HandleConfigDriveIsoCommand cmd, final String details) { + super(cmd, false, details); + } + + public HandleConfigDriveIsoAnswer(final HandleConfigDriveIsoCommand cmd, final Exception e) { + this(cmd, ExceptionUtil.toString(e)); + } + + public NetworkElement.Location getConfigDriveLocation() { + return location; + } +} diff --git 
a/core/src/main/java/com/cloud/agent/api/HandleConfigDriveIsoCommand.java b/core/src/main/java/com/cloud/agent/api/HandleConfigDriveIsoCommand.java index 3d8d8f7e10e3..062274f264ef 100644 --- a/core/src/main/java/com/cloud/agent/api/HandleConfigDriveIsoCommand.java +++ b/core/src/main/java/com/cloud/agent/api/HandleConfigDriveIsoCommand.java @@ -25,16 +25,19 @@ public class HandleConfigDriveIsoCommand extends Command { @LogLevel(LogLevel.Log4jLevel.Off) private String isoData; - private String isoFile; private boolean create = false; private DataStoreTO destStore; + private boolean useHostCacheOnUnsupportedPool = false; + private boolean preferHostCache = false; - public HandleConfigDriveIsoCommand(String isoFile, String isoData, DataStoreTO destStore, boolean create) { + public HandleConfigDriveIsoCommand(String isoFile, String isoData, DataStoreTO destStore, boolean useHostCacheOnUnsupportedPool, boolean preferHostCache, boolean create) { this.isoFile = isoFile; this.isoData = isoData; this.destStore = destStore; this.create = create; + this.useHostCacheOnUnsupportedPool = useHostCacheOnUnsupportedPool; + this.preferHostCache = preferHostCache; } @Override @@ -57,4 +60,12 @@ public DataStoreTO getDestStore() { public String getIsoFile() { return isoFile; } + + public boolean isHostCachePreferred() { + return preferHostCache; + } + + public boolean getUseHostCacheOnUnsupportedPool() { + return useHostCacheOnUnsupportedPool; + } } diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java index f8cf6d451b84..834a11c8d6eb 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VRScripts.java @@ -75,4 +75,6 @@ public class VRScripts { public static final String DIAGNOSTICS = "diagnostics.py"; public static final String RETRIEVE_DIAGNOSTICS = "get_diagnostics_files.py"; 
public static final String VR_FILE_CLEANUP = "cleanup.sh"; + + public static final String ROUTER_FILESYSTEM_WRITABLE_CHECK = "filesystem_writable_check.py"; } \ No newline at end of file diff --git a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java index 839f34ac658d..8b32842d40d2 100644 --- a/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java +++ b/core/src/main/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java @@ -44,6 +44,7 @@ import org.apache.cloudstack.diagnostics.PrepareFilesAnswer; import org.apache.cloudstack.diagnostics.PrepareFilesCommand; import org.apache.cloudstack.utils.security.KeyStoreUtils; +import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import org.joda.time.Duration; @@ -311,6 +312,22 @@ private GetRouterMonitorResultsAnswer parseLinesForHealthChecks(GetRouterMonitor private GetRouterMonitorResultsAnswer execute(GetRouterMonitorResultsCommand cmd) { String routerIp = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); + ExecutionResult fsReadOnlyResult = _vrDeployer.executeInVR(routerIp, VRScripts.ROUTER_FILESYSTEM_WRITABLE_CHECK, null); + if (!fsReadOnlyResult.isSuccess()) { + s_logger.warn("Result of " + cmd + " failed with details: " + fsReadOnlyResult.getDetails()); + if (StringUtils.isNotBlank(fsReadOnlyResult.getDetails())) { + final String readOnlyFileSystemError = "Read-only file system"; + if (fsReadOnlyResult.getDetails().contains(readOnlyFileSystemError)) { + return new GetRouterMonitorResultsAnswer(cmd, false, null, readOnlyFileSystemError); + } else { + return new GetRouterMonitorResultsAnswer(cmd, false, null, fsReadOnlyResult.getDetails()); + } + } else { + s_logger.warn("Result of " + cmd + " received empty details."); + return new GetRouterMonitorResultsAnswer(cmd, false, null, "No results available."); + } + } + 
String args = cmd.shouldPerformFreshChecks() ? "true" : "false"; s_logger.info("Fetching health check result for " + routerIp + " and executing fresh checks: " + args); ExecutionResult result = _vrDeployer.executeInVR(routerIp, VRScripts.ROUTER_MONITOR_RESULTS, args); diff --git a/core/src/main/java/org/apache/cloudstack/agent/directdownload/CheckUrlCommand.java b/core/src/main/java/org/apache/cloudstack/agent/directdownload/CheckUrlCommand.java index ed499974f5a8..e8618d54209f 100644 --- a/core/src/main/java/org/apache/cloudstack/agent/directdownload/CheckUrlCommand.java +++ b/core/src/main/java/org/apache/cloudstack/agent/directdownload/CheckUrlCommand.java @@ -23,14 +23,20 @@ public class CheckUrlCommand extends Command { + private String format; private String url; + public String getFormat() { + return format; + } + public String getUrl() { return url; } - public CheckUrlCommand(final String url) { + public CheckUrlCommand(final String format,final String url) { super(); + this.format = format; this.url = url; } diff --git a/core/src/main/java/org/apache/cloudstack/agent/directdownload/DirectDownloadCommand.java b/core/src/main/java/org/apache/cloudstack/agent/directdownload/DirectDownloadCommand.java index aafcb5370a57..7e1ff0b34c40 100644 --- a/core/src/main/java/org/apache/cloudstack/agent/directdownload/DirectDownloadCommand.java +++ b/core/src/main/java/org/apache/cloudstack/agent/directdownload/DirectDownloadCommand.java @@ -23,6 +23,9 @@ import org.apache.cloudstack.storage.command.StorageSubSystemCommand; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.TemplateObjectTO; + +import com.cloud.storage.Storage; public abstract class DirectDownloadCommand extends StorageSubSystemCommand { @@ -32,6 +35,7 @@ public enum DownloadProtocol { private String url; private Long templateId; + private TemplateObjectTO destData; private PrimaryDataStoreTO destPool; private String checksum; private Map headers; @@ 
-39,11 +43,12 @@ public enum DownloadProtocol { private Integer soTimeout; private Integer connectionRequestTimeout; private Long templateSize; - private boolean iso; + private Storage.ImageFormat format; protected DirectDownloadCommand (final String url, final Long templateId, final PrimaryDataStoreTO destPool, final String checksum, final Map headers, final Integer connectTimeout, final Integer soTimeout, final Integer connectionRequestTimeout) { this.url = url; this.templateId = templateId; + this.destData = destData; this.destPool = destPool; this.checksum = checksum; this.headers = headers; @@ -60,6 +65,14 @@ public Long getTemplateId() { return templateId; } + public TemplateObjectTO getDestData() { + return destData; + } + + public void setDestData(TemplateObjectTO destData) { + this.destData = destData; + } + public PrimaryDataStoreTO getDestPool() { return destPool; } @@ -104,12 +117,12 @@ public void setTemplateSize(Long templateSize) { this.templateSize = templateSize; } - public boolean isIso() { - return iso; + public Storage.ImageFormat getFormat() { + return format; } - public void setIso(boolean iso) { - this.iso = iso; + public void setFormat(Storage.ImageFormat format) { + this.format = format; } @Override @@ -120,4 +133,8 @@ public void setExecuteInSequence(boolean inSeq) { public boolean executeInSequence() { return false; } + + public int getWaitInMillSeconds() { + return getWait() * 1000; + } } diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java index 1572efe621af..24b648a25d7f 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java @@ -19,12 +19,13 @@ package org.apache.cloudstack.storage.to; +import java.util.Map; + +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; + import 
com.cloud.agent.api.to.DataStoreTO; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Storage.StoragePoolType; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; - -import java.util.Map; public class PrimaryDataStoreTO implements DataStoreTO { public static final String MANAGED = PrimaryDataStore.MANAGED; diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java index e47d13ed6693..88f9ab9ac6bd 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java @@ -19,7 +19,6 @@ package org.apache.cloudstack.storage.to; -import com.cloud.storage.MigrationOptions; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import com.cloud.agent.api.to.DataObjectType; @@ -27,6 +26,7 @@ import com.cloud.agent.api.to.DataTO; import com.cloud.hypervisor.Hypervisor; import com.cloud.offering.DiskOffering.DiskCacheMode; +import com.cloud.storage.MigrationOptions; import com.cloud.storage.Storage; import com.cloud.storage.Volume; @@ -43,6 +43,7 @@ public class VolumeObjectTO implements DataTO { private String chainInfo; private Storage.ImageFormat format; private Storage.ProvisioningType provisioningType; + private Long poolId; private long id; private Long deviceId; @@ -85,6 +86,7 @@ public VolumeObjectTO(VolumeInfo volume) { setId(volume.getId()); format = volume.getFormat(); provisioningType = volume.getProvisioningType(); + poolId = volume.getPoolId(); bytesReadRate = volume.getBytesReadRate(); bytesReadRateMax = volume.getBytesReadRateMax(); bytesReadRateMaxLength = volume.getBytesReadRateMaxLength(); @@ -221,6 +223,14 @@ public void setProvisioningType(Storage.ProvisioningType provisioningType){ this.provisioningType = provisioningType; } + public Long getPoolId(){ + return poolId; + } + + public void setPoolId(Long 
poolId){ + this.poolId = poolId; + } + @Override public String toString() { return new StringBuilder("volumeTO[uuid=").append(uuid).append("|path=").append(path).append("|datastore=").append(dataStore).append("]").toString(); diff --git a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java index 49b13e1e698b..cf8ebc4668cb 100644 --- a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java +++ b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java @@ -59,7 +59,13 @@ public interface VirtualMachineManager extends Manager { "The default label name for the config drive", false); ConfigKey VmConfigDriveOnPrimaryPool = new ConfigKey<>("Advanced", Boolean.class, "vm.configdrive.primarypool.enabled", "false", - "If config drive need to be created and hosted on primary storage pool. Currently only supported for KVM.", true); + "If config drive need to be created and hosted on primary storage pool. Currently only supported for KVM.", true, ConfigKey.Scope.Zone); + + ConfigKey VmConfigDriveUseHostCacheOnUnsupportedPool = new ConfigKey<>("Advanced", Boolean.class, "vm.configdrive.use.host.cache.on.unsupported.pool", "true", + "If true, config drive is created on the host cache storage when vm.configdrive.primarypool.enabled is true and the primary pool type doesn't support config drive.", true, ConfigKey.Scope.Zone); + + ConfigKey VmConfigDriveForceHostCacheUse = new ConfigKey<>("Advanced", Boolean.class, "vm.configdrive.force.host.cache.use", "false", + "If true, config drive is forced to create on the host cache storage. 
Currently only supported for KVM.", true, ConfigKey.Scope.Zone); ConfigKey ResoureCountRunningVMsonly = new ConfigKey("Advanced", Boolean.class, "resource.count.running.vms.only", "false", "Count the resources of only running VMs in resource limitation.", true); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java index 9458de763538..eccddbf0e5ad 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java @@ -32,6 +32,7 @@ import com.cloud.deploy.DeployDestination; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientStorageCapacityException; +import com.cloud.exception.StorageAccessException; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -101,6 +102,8 @@ DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Lon void release(VirtualMachineProfile profile); + void release(long vmId, long hostId); + void cleanupVolumes(long vmId) throws ConcurrentOperationException; void revokeAccess(DataObject dataObject, Host host, DataStore dataStore); @@ -113,7 +116,7 @@ DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Lon void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest); - void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException; + void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException, 
StorageAccessException; boolean canVmRestartOnAnotherServer(long vmId); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java index 3d73721c74d7..b197afad863a 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreDriver.java @@ -25,6 +25,7 @@ import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; +import com.cloud.host.Host; public interface DataStoreDriver { Map getCapabilities(); @@ -37,7 +38,9 @@ public interface DataStoreDriver { void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback callback); - void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback); + void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback callback); + + void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback); boolean canCopy(DataObject srcData, DataObject destData); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java index 6021a4391783..622dda31f987 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java @@ -23,6 +23,7 @@ import com.cloud.host.Host; import com.cloud.storage.StoragePool; +import com.cloud.utils.Pair; public interface PrimaryDataStoreDriver extends DataStoreDriver { enum QualityOfServiceState { MIGRATION, NO_MIGRATION } @@ -72,4 +73,34 @@ enum QualityOfServiceState { MIGRATION, NO_MIGRATION } 
void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback callback); void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState); + + /** + * intended for managed storage + * returns true if the storage can provide the stats (capacity and used bytes) + */ + boolean canProvideStorageStats(); + + /** + * intended for managed storage + * returns the total capacity and used size in bytes + */ + Pair getStorageStats(StoragePool storagePool); + + /** + * intended for managed storage + * returns true if the storage can provide the volume stats (physical and virtual size) + */ + boolean canProvideVolumeStats(); + + /** + * intended for managed storage + * returns the volume's physical and virtual size in bytes + */ + Pair getVolumeStats(StoragePool storagePool, String volumeId); + + /** + * intended for managed storage + * returns true if the host can access the storage pool + */ + boolean canHostAccessStoragePool(Host host, StoragePool pool); } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateDataFactory.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateDataFactory.java index b213625efadd..d6f358292c13 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateDataFactory.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateDataFactory.java @@ -23,6 +23,8 @@ import com.cloud.storage.DataStoreRole; public interface TemplateDataFactory { + TemplateInfo getTemplate(long templateId); + TemplateInfo getTemplate(long templateId, DataStore store); TemplateInfo getReadyTemplateOnImageStore(long templateId, Long zoneId); @@ -39,5 +41,7 @@ public interface TemplateDataFactory { TemplateInfo getReadyBypassedTemplateOnPrimaryStore(long templateId, Long poolId, Long hostId); + TemplateInfo 
getReadyBypassedTemplateOnManagedStorage(long templateId, TemplateInfo templateOnPrimary, Long poolId, Long hostId); + boolean isTemplateMarkedForDirectDownload(long templateId); } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java index 0f7cc6f9de56..2ef3868d1d14 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/TemplateInfo.java @@ -27,4 +27,6 @@ public interface TemplateInfo extends DataObject, VirtualMachineTemplate { String getInstallPath(); boolean isDirectDownload(); + + boolean canBeDeletedFromDataStore(); } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java index e8b533db0fd6..d194bbbc1f9c 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java @@ -25,6 +25,7 @@ import org.apache.cloudstack.storage.command.CommandResult; import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.exception.StorageAccessException; import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.offering.DiskOffering; @@ -62,13 +63,17 @@ public VolumeInfo getVolume() { */ AsyncCallFuture expungeVolumeAsync(VolumeInfo volume); + void ensureVolumeIsExpungeReady(long volumeId); + boolean cloneVolume(long volumeId, long baseVolId); AsyncCallFuture createVolumeFromSnapshot(VolumeInfo volume, DataStore store, SnapshotInfo snapshot); VolumeEntity getVolumeEntity(long volumeId); - AsyncCallFuture createManagedStorageVolumeFromTemplateAsync(VolumeInfo 
volumeInfo, long destDataStoreId, TemplateInfo srcTemplateInfo, long destHostId); + TemplateInfo createManagedStorageTemplate(long srcTemplateId, long destDataStoreId, long destHostId) throws StorageAccessException; + + AsyncCallFuture createManagedStorageVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId, TemplateInfo srcTemplateInfo, long destHostId) throws StorageAccessException; AsyncCallFuture createVolumeFromTemplateAsync(VolumeInfo volume, long dataStoreId, TemplateInfo template); diff --git a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java index db7a27ff41c0..ade2eeb3f84a 100755 --- a/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java +++ b/engine/components-api/src/main/java/com/cloud/resource/ResourceManager.java @@ -21,6 +21,9 @@ import java.util.List; import java.util.Map; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; + import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; import com.cloud.agent.api.VgpuTypesInfo; @@ -38,8 +41,6 @@ import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.resource.ResourceState.Event; import com.cloud.utils.fsm.NoTransitionException; -import org.apache.cloudstack.framework.config.ConfigKey; -import org.apache.cloudstack.framework.config.Configurable; /** * ResourceManager manages how physical resources are organized within the @@ -204,7 +205,7 @@ public interface ResourceManager extends ResourceService, Configurable { */ HashMap> getGPUStatistics(HostVO host); - HostVO findOneRandomRunningHostByHypervisor(HypervisorType type); + HostVO findOneRandomRunningHostByHypervisor(HypervisorType type, Long dcId); boolean cancelMaintenance(final long hostId); } diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java 
b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 0f52206dd785..2a7ced614e24 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -109,6 +109,24 @@ public interface StorageManager extends StorageService { ConfigKey.Scope.Cluster, null); + ConfigKey STORAGE_POOL_DISK_WAIT = new ConfigKey<>(Integer.class, + "storage.pool.disk.wait", + "Storage", + "60", + "Timeout (in secs) for the storage pool disk (of managed pool) to become available in the host. Currently only supported for PowerFlex.", + true, + ConfigKey.Scope.StoragePool, + null); + + ConfigKey STORAGE_POOL_CLIENT_TIMEOUT = new ConfigKey<>(Integer.class, + "storage.pool.client.timeout", + "Storage", + "60", + "Timeout (in secs) for the storage pool client timeout (for managed pools). Currently only supported for PowerFlex.", + true, + ConfigKey.Scope.StoragePool, + null); + ConfigKey PRIMARY_STORAGE_DOWNLOAD_WAIT = new ConfigKey("Storage", Integer.class, "primary.storage.download.wait", "10800", "In second, timeout for download template to primary storage", false); @@ -144,6 +162,8 @@ public interface StorageManager extends StorageService { Pair sendToPool(StoragePool pool, long[] hostIdsToTryFirst, List hostIdsToAvoid, Command cmd) throws StorageUnavailableException; + public Answer getVolumeStats(StoragePool pool, Command cmd); + /** * Checks if a host has running VMs that are using its local storage pool. 
* @return true if local storage is active on the host @@ -172,6 +192,12 @@ public interface StorageManager extends StorageService { StoragePoolVO findLocalStorageOnHost(long hostId); + List findStoragePoolsConnectedToHost(long hostId); + + boolean canHostAccessStoragePool(Host host, StoragePool pool); + + Host getHost(long hostId); + Host updateSecondaryStorage(long secStorageId, String newUrl); void removeStoragePoolFromCluster(long hostId, String iScsiName, StoragePool storagePool); @@ -216,6 +242,8 @@ public interface StorageManager extends StorageService { void connectHostToSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; + void disconnectHostFromSharedPool(long hostId, long poolId) throws StorageUnavailableException, StorageConflictException; + void createCapacityEntry(long poolId); DataStore createLocalStorage(Host host, StoragePoolInfo poolInfo) throws ConnectionException; diff --git a/engine/components-api/src/main/java/com/cloud/vm/VirtualMachineProfileImpl.java b/engine/components-api/src/main/java/com/cloud/vm/VirtualMachineProfileImpl.java index 4d03396c1cbb..efe4e2e570c9 100644 --- a/engine/components-api/src/main/java/com/cloud/vm/VirtualMachineProfileImpl.java +++ b/engine/components-api/src/main/java/com/cloud/vm/VirtualMachineProfileImpl.java @@ -22,7 +22,9 @@ import java.util.Map; import com.cloud.agent.api.to.DiskTO; +import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.network.element.NetworkElement; import com.cloud.offering.ServiceOffering; import com.cloud.service.ServiceOfferingVO; import com.cloud.template.VirtualMachineTemplate; @@ -49,6 +51,8 @@ public class VirtualMachineProfileImpl implements VirtualMachineProfile { Float cpuOvercommitRatio = 1.0f; Float memoryOvercommitRatio = 1.0f; + Host _host = null; + VirtualMachine.Type _type; List vmData = null; @@ -57,6 +61,7 @@ public class VirtualMachineProfileImpl implements VirtualMachineProfile 
{ String configDriveIsoBaseLocation = "/tmp/"; String configDriveIsoRootFolder = null; String configDriveIsoFile = null; + NetworkElement.Location configDriveLocation = NetworkElement.Location.SECONDARY; public VirtualMachineProfileImpl(VirtualMachine vm, VirtualMachineTemplate template, ServiceOffering offering, Account owner, Map params) { _vm = vm; @@ -219,6 +224,19 @@ public Object getParameter(Param name) { return _params.get(name); } + @Override + public Long getHostId() { + if (_host != null) { + return _host.getId(); + } + return _vm.getHostId(); + } + + @Override + public void setHost(Host host) { + this._host = host; + } + @Override public String getHostName() { return _vm.getHostName(); @@ -311,4 +329,14 @@ public String getConfigDriveIsoFile() { public void setConfigDriveIsoFile(String isoFile) { this.configDriveIsoFile = isoFile; } + + @Override + public NetworkElement.Location getConfigDriveLocation() { + return configDriveLocation; + } + + @Override + public void setConfigDriveLocation(NetworkElement.Location location) { + this.configDriveLocation = location; + } } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 14b3078cb4c4..dba786ef18be 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -153,6 +153,7 @@ import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.OperationTimedoutException; import com.cloud.exception.ResourceUnavailableException; +import com.cloud.exception.StorageAccessException; import com.cloud.exception.StorageUnavailableException; import com.cloud.ha.HighAvailabilityManager; import com.cloud.ha.HighAvailabilityManager.WorkType; @@ -733,12 +734,11 @@ public void start(final String vmUuid, final Map> getVolumesToDisconnect(VirtualMachine vm) { 
info.put(DiskTO.STORAGE_HOST, storagePool.getHostAddress()); info.put(DiskTO.STORAGE_PORT, String.valueOf(storagePool.getPort())); info.put(DiskTO.IQN, volume.get_iScsiName()); + info.put(DiskTO.PROTOCOL_TYPE, (volume.getPoolType() != null) ? volume.getPoolType().toString() : null); volumesToDisconnect.add(info); } @@ -1741,20 +1745,34 @@ protected boolean cleanup(final VirtualMachineGuru guru, final VirtualMachinePro } } } finally { - try { - _networkMgr.release(profile, cleanUpEvenIfUnableToStop); - s_logger.debug("Successfully released network resources for the vm " + vm); - } catch (final Exception e) { - s_logger.warn("Unable to release some network resources.", e); - } - - volumeMgr.release(profile); - s_logger.debug(String.format("Successfully cleaned up resources for the VM %s in %s state", vm, state)); + releaseVmResources(profile, cleanUpEvenIfUnableToStop); } return true; } + protected void releaseVmResources(final VirtualMachineProfile profile, final boolean forced) { + final VirtualMachine vm = profile.getVirtualMachine(); + final State state = vm.getState(); + try { + _networkMgr.release(profile, forced); + s_logger.debug(String.format("Successfully released network resources for the VM %s in %s state", vm, state)); + } catch (final Exception e) { + s_logger.warn(String.format("Unable to release some network resources for the VM %s in %s state", vm, state), e); + } + + try { + if (vm.getHypervisorType() != HypervisorType.BareMetal) { + volumeMgr.release(profile); + s_logger.debug(String.format("Successfully released storage resources for the VM %s in %s state", vm, state)); + } + } catch (final Exception e) { + s_logger.warn(String.format("Unable to release storage resources for the VM %s in %s state", vm, state), e); + } + + s_logger.debug(String.format("Successfully cleaned up resources for the VM %s in %s state", vm, state)); + } + @Override public void advanceStop(final String vmUuid, final boolean cleanUpEvenIfUnableToStop) throws 
AgentUnavailableException, OperationTimedoutException, ConcurrentOperationException { @@ -1964,21 +1982,7 @@ private void advanceStop(final VMInstanceVO vm, final boolean cleanUpEvenIfUnabl s_logger.debug(vm + " is stopped on the host. Proceeding to release resource held."); } - try { - _networkMgr.release(profile, cleanUpEvenIfUnableToStop); - s_logger.debug("Successfully released network resources for the vm " + vm); - } catch (final Exception e) { - s_logger.warn("Unable to release some network resources.", e); - } - - try { - if (vm.getHypervisorType() != HypervisorType.BareMetal) { - volumeMgr.release(profile); - s_logger.debug("Successfully released storage resources for the vm " + vm); - } - } catch (final Exception e) { - s_logger.warn("Unable to release storage resources.", e); - } + releaseVmResources(profile, cleanUpEvenIfUnableToStop); try { if (work != null) { @@ -2498,11 +2502,14 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy } final VirtualMachineProfile vmSrc = new VirtualMachineProfileImpl(vm); + vmSrc.setHost(fromHost); for (final NicProfile nic : _networkMgr.getNicProfiles(vm)) { vmSrc.addNic(nic); } final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm, null, _offeringDao.findById(vm.getId(), vm.getServiceOfferingId()), null, null); + profile.setHost(dest.getHost()); + _networkMgr.prepareNicForMigration(profile, dest); volumeMgr.prepareForMigration(profile, dest); profile.setConfigDriveLabel(VmConfigDriveLabel.value()); @@ -2530,6 +2537,7 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy } finally { if (pfma == null) { _networkMgr.rollbackNicForMigration(vmSrc, profile); + volumeMgr.release(vm.getId(), dstHostId); work.setStep(Step.Done); _workDao.update(work.getId(), work); } @@ -2539,15 +2547,21 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy try { if (vm == null || vm.getHostId() == null || vm.getHostId() != srcHostId || 
!changeState(vm, Event.MigrationRequested, dstHostId, work, Step.Migrating)) { _networkMgr.rollbackNicForMigration(vmSrc, profile); + if (vm != null) { + volumeMgr.release(vm.getId(), dstHostId); + } + s_logger.info("Migration cancelled because state has changed: " + vm); throw new ConcurrentOperationException("Migration cancelled because state has changed: " + vm); } } catch (final NoTransitionException e1) { _networkMgr.rollbackNicForMigration(vmSrc, profile); + volumeMgr.release(vm.getId(), dstHostId); s_logger.info("Migration cancelled because " + e1.getMessage()); throw new ConcurrentOperationException("Migration cancelled because " + e1.getMessage()); } catch (final CloudRuntimeException e2) { _networkMgr.rollbackNicForMigration(vmSrc, profile); + volumeMgr.release(vm.getId(), dstHostId); s_logger.info("Migration cancelled because " + e2.getMessage()); work.setStep(Step.Done); _workDao.update(work.getId(), work); @@ -2615,6 +2629,7 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy if (!migrated) { s_logger.info("Migration was unsuccessful. 
Cleaning up: " + vm); _networkMgr.rollbackNicForMigration(vmSrc, profile); + volumeMgr.release(vm.getId(), dstHostId); _alertMgr.sendAlert(alertType, fromHost.getDataCenterId(), fromHost.getPodId(), "Unable to migrate vm " + vm.getInstanceName() + " from host " + fromHost.getName() + " in zone " + dest.getDataCenter().getName() + " and pod " + @@ -2632,6 +2647,7 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy } } else { _networkMgr.commitNicForMigration(vmSrc, profile); + volumeMgr.release(vm.getId(), srcHostId); _networkMgr.setHypervisorHostname(profile, dest, true); } @@ -2900,8 +2916,16 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo final Cluster cluster = _clusterDao.findById(destHost.getClusterId()); final DeployDestination destination = new DeployDestination(dc, pod, cluster, destHost); + final VirtualMachineProfile vmSrc = new VirtualMachineProfileImpl(vm); + vmSrc.setHost(srcHost); + for (final NicProfile nic : _networkMgr.getNicProfiles(vm)) { + vmSrc.addNic(nic); + } + + final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm, null, _offeringDao.findById(vm.getId(), vm.getServiceOfferingId()), null, null); + profile.setHost(destHost); + // Create a map of which volume should go in which storage pool. - final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); final Map volumeToPoolMap = createMappingVolumeAndStoragePool(profile, destHost, volumeToPool); // If none of the volumes have to be migrated, fail the call. Administrator needs to make a call for migrating @@ -2929,7 +2953,6 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo work.setResourceId(destHostId); work = _workDao.persist(work); - // Put the vm in migrating state. 
vm.setLastHostId(srcHostId); vm.setPodIdToDeployIn(destHost.getPodId()); @@ -3000,6 +3023,9 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo } finally { if (!migrated) { s_logger.info("Migration was unsuccessful. Cleaning up: " + vm); + _networkMgr.rollbackNicForMigration(vmSrc, profile); + volumeMgr.release(vm.getId(), destHostId); + _alertMgr.sendAlert(alertType, srcHost.getDataCenterId(), srcHost.getPodId(), "Unable to migrate vm " + vm.getInstanceName() + " from host " + srcHost.getName() + " in zone " + dc.getName() + " and pod " + dc.getName(), "Migrate Command failed. Please check logs."); @@ -3014,6 +3040,8 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo } _networkMgr.setHypervisorHostname(profile, destination, false); } else { + _networkMgr.commitNicForMigration(vmSrc, profile); + volumeMgr.release(vm.getId(), srcHostId); _networkMgr.setHypervisorHostname(profile, destination, true); } @@ -3288,7 +3316,7 @@ private void orchestrateReboot(final String vmUuid, final Map[] getConfigKeys() { - return new ConfigKey[] {ClusterDeltaSyncInterval, StartRetry, VmDestroyForcestop, VmOpCancelInterval, VmOpCleanupInterval, VmOpCleanupWait, - VmOpLockStateRetry, - VmOpWaitInterval, ExecuteInSequence, VmJobCheckInterval, VmJobTimeout, VmJobStateReportInterval, VmConfigDriveLabel, VmConfigDriveOnPrimaryPool, HaVmRestartHostUp, - ResoureCountRunningVMsonly, AllowExposeHypervisorHostname, AllowExposeHypervisorHostnameAccountLevel, - VmServiceOfferingMaxCPUCores, VmServiceOfferingMaxRAMSize }; + return new ConfigKey[] { ClusterDeltaSyncInterval, StartRetry, VmDestroyForcestop, VmOpCancelInterval, VmOpCleanupInterval, VmOpCleanupWait, + VmOpLockStateRetry, VmOpWaitInterval, ExecuteInSequence, VmJobCheckInterval, VmJobTimeout, VmJobStateReportInterval, + VmConfigDriveLabel, VmConfigDriveOnPrimaryPool, VmConfigDriveForceHostCacheUse, VmConfigDriveUseHostCacheOnUnsupportedPool, + HaVmRestartHostUp, 
ResoureCountRunningVMsonly, AllowExposeHypervisorHostname, AllowExposeHypervisorHostnameAccountLevel, + VmServiceOfferingMaxCPUCores, VmServiceOfferingMaxRAMSize }; } public List getStoragePoolAllocators() { @@ -4650,12 +4678,12 @@ private void handlePowerOffReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { String.format("VM %s is at %s and we received a %s report while there is no pending jobs on it" , vm.getInstanceName(), vm.getState(), vm.getPowerState())); } - if(vm.isHaEnabled() && vm.getState() == State.Running + if (vm.isHaEnabled() && vm.getState() == State.Running && HaVmRestartHostUp.value() && vm.getHypervisorType() != HypervisorType.VMware && vm.getHypervisorType() != HypervisorType.Hyperv) { s_logger.info("Detected out-of-band stop of a HA enabled VM " + vm.getInstanceName() + ", will schedule restart"); - if(!_haMgr.hasPendingHaWork(vm.getId())) { + if (!_haMgr.hasPendingHaWork(vm.getId())) { _haMgr.scheduleRestart(vm, true); } else { s_logger.info("VM " + vm.getInstanceName() + " already has an pending HA task working on it"); @@ -4664,13 +4692,20 @@ private void handlePowerOffReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { } // not when report is missing - if(PowerState.PowerOff.equals(vm.getPowerState())) { + if (PowerState.PowerOff.equals(vm.getPowerState())) { final VirtualMachineGuru vmGuru = getVmGuru(vm); final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); if (!sendStop(vmGuru, profile, true, true)) { // In case StopCommand fails, don't proceed further return; + } else { + // Release resources on StopCommand success + releaseVmResources(profile, true); } + } else if (PowerState.PowerReportMissing.equals(vm.getPowerState())) { + final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); + // VM will be sync-ed to Stopped state, release the resources + releaseVmResources(profile, true); } try { @@ -5440,10 +5475,9 @@ private Pair orchestrateStart(final VmWorkStart work) th 
s_logger.trace(String.format("orchestrating VM start for '%s' %s set to %s", vm.getInstanceName(), VirtualMachineProfile.Param.BootIntoSetup, enterSetup)); } - try{ + try { orchestrateStart(vm.getUuid(), work.getParams(), work.getPlan(), _dpMgr.getDeploymentPlannerByName(work.getDeploymentPlanner())); - } - catch (CloudRuntimeException e){ + } catch (CloudRuntimeException e) { e.printStackTrace(); s_logger.info("Caught CloudRuntimeException, returning job failed " + e); CloudRuntimeException ex = new CloudRuntimeException("Unable to start VM instance"); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 3e68d3a4ab05..b07596812f6c 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -61,6 +61,8 @@ import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; +import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO; +import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; @@ -87,6 +89,7 @@ import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientStorageCapacityException; import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.StorageAccessException; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.Host; import com.cloud.host.HostVO; @@ -106,6 +109,7 @@ import com.cloud.storage.Volume; 
import com.cloud.storage.Volume.Type; import com.cloud.storage.VolumeApiService; +import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.VolumeDao; @@ -168,6 +172,8 @@ public enum UserVmCloneType { @Inject protected ResourceLimitService _resourceLimitMgr; @Inject + DiskOfferingDetailsDao _diskOfferingDetailDao; + @Inject VolumeDetailsDao _volDetailDao; @Inject DataStoreManager dataStoreMgr; @@ -695,6 +701,19 @@ public DiskProfile allocateRawVolume(Type type, String name, DiskOffering offeri vol.setFormat(getSupportedImageFormatForCluster(vm.getHypervisorType())); vol = _volsDao.persist(vol); + List volumeDetailsVO = new ArrayList(); + DiskOfferingDetailVO bandwidthLimitDetail = _diskOfferingDetailDao.findDetail(offering.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS); + if (bandwidthLimitDetail != null) { + volumeDetailsVO.add(new VolumeDetailVO(vol.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS, bandwidthLimitDetail.getValue(), false)); + } + DiskOfferingDetailVO iopsLimitDetail = _diskOfferingDetailDao.findDetail(offering.getId(), Volume.IOPS_LIMIT); + if (iopsLimitDetail != null) { + volumeDetailsVO.add(new VolumeDetailVO(vol.getId(), Volume.IOPS_LIMIT, iopsLimitDetail.getValue(), false)); + } + if (!volumeDetailsVO.isEmpty()) { + _volDetailDao.saveDetails(volumeDetailsVO); + } + // Save usage event and update resource count for user vm volumes if (vm.getType() == VirtualMachine.Type.User) { UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VOLUME_CREATE, vol.getAccountId(), vol.getDataCenterId(), vol.getId(), vol.getName(), offering.getId(), null, size, @@ -748,6 +767,19 @@ public DiskProfile allocateTemplatedVolume(Type type, String name, DiskOffering vol = _volsDao.persist(vol); + List volumeDetailsVO = new ArrayList(); + DiskOfferingDetailVO bandwidthLimitDetail = _diskOfferingDetailDao.findDetail(offering.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS); + if (bandwidthLimitDetail != 
null) { + volumeDetailsVO.add(new VolumeDetailVO(vol.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS, bandwidthLimitDetail.getValue(), false)); + } + DiskOfferingDetailVO iopsLimitDetail = _diskOfferingDetailDao.findDetail(offering.getId(), Volume.IOPS_LIMIT); + if (iopsLimitDetail != null) { + volumeDetailsVO.add(new VolumeDetailVO(vol.getId(), Volume.IOPS_LIMIT, iopsLimitDetail.getValue(), false)); + } + if (!volumeDetailsVO.isEmpty()) { + _volDetailDao.saveDetails(volumeDetailsVO); + } + // Create event and update resource count for volumes if vm is a user vm if (vm.getType() == VirtualMachine.Type.User) { @@ -880,8 +912,39 @@ public VolumeVO doInTransaction(TransactionStatus status) { } @Override - public void release(VirtualMachineProfile profile) { - // add code here + public void release(VirtualMachineProfile vmProfile) { + Long hostId = vmProfile.getVirtualMachine().getHostId(); + if (hostId != null) { + revokeAccess(vmProfile.getId(), hostId); + } + } + + @Override + public void release(long vmId, long hostId) { + List volumesForVm = _volsDao.findUsableVolumesForInstance(vmId); + if (volumesForVm == null || volumesForVm.isEmpty()) { + return; + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Releasing " + volumesForVm.size() + " volumes for VM: " + vmId + " from host: " + hostId); + } + + for (VolumeVO volumeForVm : volumesForVm) { + VolumeInfo volumeInfo = volFactory.getVolume(volumeForVm.getId()); + + // pool id can be null for the VM's volumes in Allocated state + if (volumeForVm.getPoolId() != null) { + DataStore dataStore = dataStoreMgr.getDataStore(volumeForVm.getPoolId(), DataStoreRole.Primary); + PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore; + HostVO host = _hostDao.findById(hostId); + + // This might impact other managed storages, grant access for PowerFlex storage pool only + if (primaryDataStore.isManaged() && primaryDataStore.getPoolType() == Storage.StoragePoolType.PowerFlex) { + volService.revokeAccess(volumeInfo, host, 
dataStore); + } + } + } } @Override @@ -1116,6 +1179,12 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest disk.setDetails(getDetails(volumeInfo, dataStore)); + PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore; + // This might impact other managed storages, grant access for PowerFlex storage pool only + if (primaryDataStore.isManaged() && primaryDataStore.getPoolType() == Storage.StoragePoolType.PowerFlex) { + volService.grantAccess(volFactory.getVolume(vol.getId()), dest.getHost(), dataStore); + } + vm.addDisk(disk); } @@ -1143,6 +1212,7 @@ private Map getDetails(VolumeInfo volumeInfo, DataStore dataStor VolumeVO volume = _volumeDao.findById(volumeInfo.getId()); details.put(DiskTO.PROTOCOL_TYPE, (volume.getPoolType() != null) ? volume.getPoolType().toString() : null); + details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(storagePool.getId()))); ChapInfo chapInfo = volService.getChapInfo(volumeInfo, dataStore); @@ -1253,7 +1323,7 @@ private List getTasks(List vols, Map return tasks; } - private Pair recreateVolume(VolumeVO vol, VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException { + private Pair recreateVolume(VolumeVO vol, VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, StorageAccessException { VolumeVO newVol; boolean recreate = RecreatableSystemVmEnabled.value(); DataStore destPool = null; @@ -1297,19 +1367,28 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro future = volService.createVolumeAsync(volume, destPool); } else { TemplateInfo templ = tmplFactory.getReadyTemplateOnImageStore(templateId, dest.getDataCenter().getId()); + PrimaryDataStore primaryDataStore = (PrimaryDataStore)destPool; if (templ == null) { if (tmplFactory.isTemplateMarkedForDirectDownload(templateId)) { // Template is marked for direct download bypassing Secondary Storage - templ = 
tmplFactory.getReadyBypassedTemplateOnPrimaryStore(templateId, destPool.getId(), dest.getHost().getId()); + if (!primaryDataStore.isManaged()) { + templ = tmplFactory.getReadyBypassedTemplateOnPrimaryStore(templateId, destPool.getId(), dest.getHost().getId()); + } else { + s_logger.debug("Direct download template: " + templateId + " on host: " + dest.getHost().getId() + " and copy to the managed storage pool: " + destPool.getId()); + templ = volService.createManagedStorageTemplate(templateId, destPool.getId(), dest.getHost().getId()); + } + + if (templ == null) { + s_logger.debug("Failed to spool direct download template: " + templateId + " for data center " + dest.getDataCenter().getId()); + throw new CloudRuntimeException("Failed to spool direct download template: " + templateId + " for data center " + dest.getDataCenter().getId()); + } } else { s_logger.debug("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId()); throw new CloudRuntimeException("can't find ready template: " + templateId + " for data center " + dest.getDataCenter().getId()); } } - PrimaryDataStore primaryDataStore = (PrimaryDataStore)destPool; - if (primaryDataStore.isManaged()) { DiskOffering diskOffering = _entityMgr.findById(DiskOffering.class, volume.getDiskOfferingId()); HypervisorType hyperType = vm.getVirtualMachine().getHypervisorType(); @@ -1343,11 +1422,17 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro long hostId = vm.getVirtualMachine().getHostId(); Host host = _hostDao.findById(hostId); - volService.grantAccess(volFactory.getVolume(newVol.getId()), host, destPool); + try { + volService.grantAccess(volFactory.getVolume(newVol.getId()), host, destPool); + } catch (Exception e) { + throw new StorageAccessException("Unable to grant access to volume: " + newVol.getId() + " on host: " + host.getId()); + } } newVol = _volsDao.findById(newVol.getId()); break; //break out of template-redeploy retry loop + } catch 
(StorageAccessException e) { + throw e; } catch (InterruptedException | ExecutionException e) { s_logger.error("Unable to create " + newVol, e); throw new StorageUnavailableException("Unable to create " + newVol + ":" + e.toString(), destPool.getId()); @@ -1358,7 +1443,7 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro } @Override - public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException { + public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws StorageUnavailableException, InsufficientStorageCapacityException, ConcurrentOperationException, StorageAccessException { if (dest == null) { if (s_logger.isDebugEnabled()) { @@ -1401,7 +1486,20 @@ public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws Sto volService.revokeAccess(volFactory.getVolume(vol.getId()), lastHost, storagePool); } - volService.grantAccess(volFactory.getVolume(vol.getId()), host, (DataStore)pool); + try { + volService.grantAccess(volFactory.getVolume(vol.getId()), host, (DataStore)pool); + } catch (Exception e) { + throw new StorageAccessException("Unable to grant access to volume: " + vol.getId() + " on host: " + host.getId()); + } + } else { + // This might impact other managed storages, grant access for PowerFlex storage pool only + if (pool.getPoolType() == Storage.StoragePoolType.PowerFlex) { + try { + volService.grantAccess(volFactory.getVolume(vol.getId()), host, (DataStore)pool); + } catch (Exception e) { + throw new StorageAccessException("Unable to grant access to volume: " + vol.getId() + " on host: " + host.getId()); + } + } } } } else if (task.type == VolumeTaskType.MIGRATE) { diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java index 
ac6c8555da96..71c1dce8904e 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java @@ -61,10 +61,10 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As } if (srcData.getDataStore().getDriver().canCopy(srcData, destData)) { - srcData.getDataStore().getDriver().copyAsync(srcData, destData, callback); + srcData.getDataStore().getDriver().copyAsync(srcData, destData, destHost, callback); return; } else if (destData.getDataStore().getDriver().canCopy(srcData, destData)) { - destData.getDataStore().getDriver().copyAsync(srcData, destData, callback); + destData.getDataStore().getDriver().copyAsync(srcData, destData, destHost, callback); return; } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java index e6b5c85b924b..f4a64e1966dd 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java @@ -53,6 +53,7 @@ import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.utils.exception.CloudRuntimeException; @@ -195,6 +196,10 @@ protected boolean shouldMigrateVolume(StoragePoolVO sourceStoragePool, Host dest @Override protected void copyTemplateToTargetFilesystemStorageIfNeeded(VolumeInfo srcVolumeInfo, StoragePool srcStoragePool, DataStore destDataStore, 
StoragePool destStoragePool, Host destHost) { + if (srcVolumeInfo.getVolumeType() != Volume.Type.ROOT || srcVolumeInfo.getTemplateId() == null) { + return; + } + VMTemplateStoragePoolVO sourceVolumeTemplateStoragePoolVO = vmTemplatePoolDao.findByPoolTemplate(destStoragePool.getId(), srcVolumeInfo.getTemplateId()); if (sourceVolumeTemplateStoragePoolVO == null && destStoragePool.getPoolType() == StoragePoolType.Filesystem) { DataStore sourceTemplateDataStore = dataStoreManagerImpl.getRandomImageStore(srcVolumeInfo.getDataCenterId()); diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 4d3ec184ac16..2bf30dbe3472 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -735,6 +735,7 @@ private void handleVolumeMigrationForXenServer(VolumeInfo srcVolumeInfo, VolumeI details.put(DiskTO.MANAGED, Boolean.TRUE.toString()); details.put(DiskTO.IQN, destVolumeInfo.get_iScsiName()); details.put(DiskTO.STORAGE_HOST, destPool.getHostAddress()); + details.put(DiskTO.PROTOCOL_TYPE, (destPool.getPoolType() != null) ? 
destPool.getPoolType().toString() : null); command.setDestDetails(details); @@ -1786,6 +1787,11 @@ public void copyAsync(Map volumeDataStoreMap, VirtualMach StoragePoolVO destStoragePool = _storagePoolDao.findById(destDataStore.getId()); StoragePoolVO sourceStoragePool = _storagePoolDao.findById(srcVolumeInfo.getPoolId()); + // do not initiate migration for the same PowerFlex/ScaleIO pool + if (sourceStoragePool.getId() == destStoragePool.getId() && sourceStoragePool.getPoolType() == Storage.StoragePoolType.PowerFlex) { + continue; + } + if (!shouldMigrateVolume(sourceStoragePool, destHost, destStoragePool)) { continue; } @@ -1894,13 +1900,11 @@ public void copyAsync(Map volumeDataStoreMap, VirtualMach throw new CloudRuntimeException(errMsg); } - } - catch (Exception ex) { + } catch (Exception ex) { errMsg = "Copy operation failed in 'StorageSystemDataMotionStrategy.copyAsync': " + ex.getMessage(); - + LOGGER.error(errMsg, ex); throw new CloudRuntimeException(errMsg); - } - finally { + } finally { CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(errMsg); CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); @@ -2197,10 +2201,6 @@ protected void verifyLiveMigrationForKVM(Map volumeDataSt throw new CloudRuntimeException("Volume with ID " + volumeInfo.getId() + " is not associated with a storage pool."); } - if (srcStoragePoolVO.isManaged()) { - throw new CloudRuntimeException("Migrating a volume online with KVM from managed storage is not currently supported."); - } - DataStore dataStore = entry.getValue(); StoragePoolVO destStoragePoolVO = _storagePoolDao.findById(dataStore.getId()); @@ -2208,6 +2208,10 @@ protected void verifyLiveMigrationForKVM(Map volumeDataSt throw new CloudRuntimeException("Destination storage pool with ID " + dataStore.getId() + " was not located."); } + if (srcStoragePoolVO.isManaged() && srcStoragePoolVO.getId() != destStoragePoolVO.getId()) { + throw new CloudRuntimeException("Migrating a volume online with KVM from managed 
storage is not currently supported."); + } + if (storageTypeConsistency == null) { storageTypeConsistency = destStoragePoolVO.isManaged(); } else if (storageTypeConsistency != destStoragePoolVO.isManaged()) { @@ -2415,6 +2419,8 @@ private Map getVolumeDetails(VolumeInfo volumeInfo) { volumeDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress()); volumeDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort())); volumeDetails.put(DiskTO.IQN, volumeVO.get_iScsiName()); + volumeDetails.put(DiskTO.PROTOCOL_TYPE, (volumeVO.getPoolType() != null) ? volumeVO.getPoolType().toString() : null); + volumeDetails.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(storagePoolVO.getId()))); volumeDetails.put(DiskTO.VOLUME_SIZE, String.valueOf(volumeVO.getSize())); volumeDetails.put(DiskTO.SCSI_NAA_DEVICE_ID, getVolumeProperty(volumeInfo.getId(), DiskTO.SCSI_NAA_DEVICE_ID)); diff --git a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java index 6971444bcad0..fee198299654 100644 --- a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java +++ b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java @@ -69,6 +69,7 @@ import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.Volume; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.utils.exception.CloudRuntimeException; @@ -326,6 +327,7 @@ private void configureAndTestcopyTemplateToTargetStorageIfNeeded(VMTemplateStora VolumeInfo srcVolumeInfo = 
Mockito.mock(VolumeInfo.class); Mockito.when(srcVolumeInfo.getTemplateId()).thenReturn(0l); + Mockito.when(srcVolumeInfo.getVolumeType()).thenReturn(Volume.Type.ROOT); StoragePool srcStoragePool = Mockito.mock(StoragePool.class); @@ -464,6 +466,8 @@ public void testVerifyLiveMigrationMapForKVMNotExistingDest() { @Test(expected = CloudRuntimeException.class) public void testVerifyLiveMigrationMapForKVMMixedManagedUnmagedStorage() { when(pool1.isManaged()).thenReturn(true); + when(pool1.getId()).thenReturn(POOL_1_ID); + when(pool2.getId()).thenReturn(POOL_2_ID); lenient().when(pool2.isManaged()).thenReturn(false); kvmNonManagedStorageDataMotionStrategy.verifyLiveMigrationForKVM(migrationMap, host2); } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java index 043af9a49ac9..62a10cc7d00d 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java @@ -42,6 +42,7 @@ import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.DataStoreRole; import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplatePoolDao; @@ -65,6 +66,16 @@ public class TemplateDataFactoryImpl implements TemplateDataFactory { @Inject PrimaryDataStoreDao primaryDataStoreDao; + @Override + public TemplateInfo getTemplate(long templateId) { + VMTemplateVO templ = imageDataDao.findById(templateId); + if (templ != null) { + TemplateObject tmpl = TemplateObject.getTemplate(templ, null); + return tmpl; + } + return null; + } + @Override public TemplateInfo getTemplate(long templateId, DataStore store) { VMTemplateVO templ 
= imageDataDao.findById(templateId); @@ -225,6 +236,33 @@ public TemplateInfo getReadyBypassedTemplateOnPrimaryStore(long templateId, Long return this.getTemplate(templateId, store); } + @Override + public TemplateInfo getReadyBypassedTemplateOnManagedStorage(long templateId, TemplateInfo templateOnPrimary, Long poolId, Long hostId) { + VMTemplateVO templateVO = imageDataDao.findById(templateId); + if (templateVO == null || !templateVO.isDirectDownload()) { + return null; + } + + if (poolId == null) { + throw new CloudRuntimeException("No storage pool specified to download template: " + templateId); + } + + StoragePoolVO poolVO = primaryDataStoreDao.findById(poolId); + if (poolVO == null || !poolVO.isManaged()) { + return null; + } + + VMTemplateStoragePoolVO spoolRef = templatePoolDao.findByPoolTemplate(poolId, templateId); + if (spoolRef == null) { + throw new CloudRuntimeException("Template not created on managed storage pool: " + poolId + " to copy the download template: " + templateId); + } else if (spoolRef.getDownloadState() == VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED) { + directDownloadManager.downloadTemplate(templateId, poolId, hostId); + } + + DataStore store = storeMgr.getDataStore(poolId, DataStoreRole.Primary); + return this.getTemplate(templateId, store); + } + @Override public boolean isTemplateMarkedForDirectDownload(long templateId) { VMTemplateVO templateVO = imageDataDao.findById(templateId); diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index 00bc7e4208b2..67eada7cd8ec 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -907,7 +907,14 @@ public AsyncCallFuture deleteTemplateAsync(TemplateInfo templ TemplateOpContext 
context = new TemplateOpContext(null, to, future); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().deleteTemplateCallback(null, null)).setContext(context); - to.getDataStore().getDriver().deleteAsync(to.getDataStore(), to, caller); + + if (to.canBeDeletedFromDataStore()) { + to.getDataStore().getDriver().deleteAsync(to.getDataStore(), to, caller); + } else { + CommandResult result = new CommandResult(); + caller.complete(result); + } + return future; } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java index 86030f226f63..2a5239dd37bf 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -365,6 +365,35 @@ public boolean isDirectDownload() { return this.imageVO.isDirectDownload(); } + @Override + public boolean canBeDeletedFromDataStore() { + Status downloadStatus = Status.UNKNOWN; + int downloadPercent = -1; + if (getDataStore().getRole() == DataStoreRole.Primary) { + VMTemplateStoragePoolVO templatePoolRef = templatePoolDao.findByPoolTemplate(getDataStore().getId(), getId()); + if (templatePoolRef != null) { + downloadStatus = templatePoolRef.getDownloadState(); + downloadPercent = templatePoolRef.getDownloadPercent(); + } + } else if (dataStore.getRole() == DataStoreRole.Image || dataStore.getRole() == DataStoreRole.ImageCache) { + TemplateDataStoreVO templateStoreRef = templateStoreDao.findByStoreTemplate(dataStore.getId(), getId()); + if (templateStoreRef != null) { + downloadStatus = templateStoreRef.getDownloadState(); + downloadPercent = templateStoreRef.getDownloadPercent(); + templateStoreRef.getState(); + } + } + + // Marking downloaded templates for deletion, but might skip any 
deletion handled for failed templates. + // Only templates not downloaded and in error state (with no install path) cannot be deleted from the datastore, so doesn't impact last behavior for templates with other states + if (downloadStatus == null || downloadStatus == Status.NOT_DOWNLOADED || (downloadStatus == Status.DOWNLOAD_ERROR && downloadPercent == 0)) { + s_logger.debug("Template: " + getId() + " cannot be deleted from the store: " + getDataStore().getId()); + return false; + } + + return true; + } + public void setInstallPath(String installPath) { this.installPath = installPath; } diff --git a/engine/storage/snapshot/pom.xml b/engine/storage/snapshot/pom.xml index f7aadcc62c95..ab8b00d49470 100644 --- a/engine/storage/snapshot/pom.xml +++ b/engine/storage/snapshot/pom.xml @@ -50,6 +50,12 @@ cloud-engine-storage-volume ${project.version} + + org.apache.cloudstack + cloud-plugin-storage-volume-scaleio + ${project.version} + compile + diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/ScaleIOSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/ScaleIOSnapshotStrategy.java new file mode 100644 index 000000000000..dfe475004f78 --- /dev/null +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/ScaleIOSnapshotStrategy.java @@ -0,0 +1,93 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.snapshot; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; + +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.Snapshot; +import com.cloud.storage.Storage; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; + +public class ScaleIOSnapshotStrategy extends StorageSystemSnapshotStrategy { + @Inject + private SnapshotDataStoreDao snapshotStoreDao; + @Inject + private PrimaryDataStoreDao primaryDataStoreDao; + @Inject + private VolumeDao volumeDao; + + private static final Logger LOG = Logger.getLogger(ScaleIOSnapshotStrategy.class); + + @Override + public StrategyPriority canHandle(Snapshot snapshot, SnapshotOperation op) { + long volumeId = snapshot.getVolumeId(); + VolumeVO volumeVO = volumeDao.findByIdIncludingRemoved(volumeId); + boolean baseVolumeExists = volumeVO.getRemoved() == null; + if (!baseVolumeExists) { + return StrategyPriority.CANT_HANDLE; + } + + if (!isSnapshotStoredOnScaleIOStoragePool(snapshot)) { + return StrategyPriority.CANT_HANDLE; + } 
+ + if (SnapshotOperation.REVERT.equals(op)) { + return StrategyPriority.HIGHEST; + } + + if (SnapshotOperation.DELETE.equals(op)) { + return StrategyPriority.HIGHEST; + } + + return StrategyPriority.CANT_HANDLE; + } + + @Override + public boolean revertSnapshot(SnapshotInfo snapshotInfo) { + VolumeInfo volumeInfo = snapshotInfo.getBaseVolume(); + Storage.ImageFormat imageFormat = volumeInfo.getFormat(); + if (!Storage.ImageFormat.RAW.equals(imageFormat)) { + LOG.error(String.format("Does not support revert snapshot of the image format [%s] on PowerFlex. Can only rollback snapshots of format RAW", imageFormat)); + return false; + } + + executeRevertSnapshot(snapshotInfo, volumeInfo); + + return true; + } + + protected boolean isSnapshotStoredOnScaleIOStoragePool(Snapshot snapshot) { + SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Primary); + if (snapshotStore == null) { + return false; + } + StoragePoolVO storagePoolVO = primaryDataStoreDao.findById(snapshotStore.getDataStoreId()); + return storagePoolVO != null && storagePoolVO.getPoolType() == Storage.StoragePoolType.PowerFlex; + } +} diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java index 33d43d708b08..6401f8a8e1c9 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/StorageSystemSnapshotStrategy.java @@ -16,6 +16,37 @@ // under the License. 
package org.apache.cloudstack.storage.snapshot; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Random; +import java.util.UUID; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer; +import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.ModifyTargetsCommand; @@ -38,18 +69,18 @@ import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Volume; +import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.VolumeVO; import 
com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.SnapshotDetailsDao; import com.cloud.storage.dao.SnapshotDetailsVO; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDetailsDao; -import com.cloud.storage.VolumeDetailVO; import com.cloud.utils.db.DB; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; -import com.cloud.vm.VirtualMachine; import com.cloud.vm.VMInstanceVO; +import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.VMInstanceDao; import com.cloud.vm.snapshot.VMSnapshot; import com.cloud.vm.snapshot.VMSnapshotService; @@ -57,37 +88,6 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDao; import com.google.common.base.Preconditions; -import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult; -import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; -import org.apache.cloudstack.storage.command.SnapshotAndCopyAnswer; -import org.apache.cloudstack.storage.command.SnapshotAndCopyCommand; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; -import 
org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - -import javax.inject.Inject; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Random; -import java.util.UUID; - @Component public class StorageSystemSnapshotStrategy extends SnapshotStrategyBase { private static final Logger s_logger = Logger.getLogger(StorageSystemSnapshotStrategy.class); @@ -241,15 +241,16 @@ private boolean cleanupSnapshotOnPrimaryStore(long snapshotId) { } private boolean isAcceptableRevertFormat(VolumeVO volumeVO) { - return ImageFormat.VHD.equals(volumeVO.getFormat()) || ImageFormat.OVA.equals(volumeVO.getFormat()) || ImageFormat.QCOW2.equals(volumeVO.getFormat()); + return ImageFormat.VHD.equals(volumeVO.getFormat()) || ImageFormat.OVA.equals(volumeVO.getFormat()) + || ImageFormat.QCOW2.equals(volumeVO.getFormat()) || ImageFormat.RAW.equals(volumeVO.getFormat()); } private void verifyFormat(VolumeInfo volumeInfo) { ImageFormat imageFormat = volumeInfo.getFormat(); - if (imageFormat != ImageFormat.VHD && imageFormat != ImageFormat.OVA && imageFormat != ImageFormat.QCOW2) { + if (imageFormat != ImageFormat.VHD && imageFormat != ImageFormat.OVA && imageFormat != ImageFormat.QCOW2 && imageFormat != ImageFormat.RAW) { throw new CloudRuntimeException("Only the following image types are currently supported: " + - ImageFormat.VHD.toString() + ", " + ImageFormat.OVA.toString() + ", and " + ImageFormat.QCOW2); + ImageFormat.VHD.toString() + ", " + ImageFormat.OVA.toString() + ", " + ImageFormat.QCOW2 + ", and " + ImageFormat.RAW); } } @@ -456,7 +457,7 @@ public SnapshotInfo takeSnapshot(SnapshotInfo snapshotInfo) { computeClusterSupportsVolumeClone = clusterDao.getSupportsResigning(hostVO.getClusterId()); } - else if (volumeInfo.getFormat() == ImageFormat.OVA || 
volumeInfo.getFormat() == ImageFormat.QCOW2) { + else if (volumeInfo.getFormat() == ImageFormat.OVA || volumeInfo.getFormat() == ImageFormat.QCOW2 || volumeInfo.getFormat() == ImageFormat.RAW) { computeClusterSupportsVolumeClone = true; } else { @@ -760,6 +761,7 @@ private Map getSourceDetails(VolumeInfo volumeInfo) { sourceDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress()); sourceDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort())); sourceDetails.put(DiskTO.IQN, volumeVO.get_iScsiName()); + sourceDetails.put(DiskTO.PROTOCOL_TYPE, (storagePoolVO.getPoolType() != null) ? storagePoolVO.getPoolType().toString() : null); ChapInfo chapInfo = volService.getChapInfo(volumeInfo, volumeInfo.getDataStore()); @@ -778,6 +780,7 @@ private Map getDestDetails(StoragePoolVO storagePoolVO, Snapshot destDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress()); destDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort())); + destDetails.put(DiskTO.PROTOCOL_TYPE, (storagePoolVO.getPoolType() != null) ? storagePoolVO.getPoolType().toString() : null); long snapshotId = snapshotInfo.getId(); diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java new file mode 100644 index 000000000000..89a779a1bdfc --- /dev/null +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/ScaleIOVMSnapshotStrategy.java @@ -0,0 +1,489 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.vmsnapshot; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotStrategy; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.datastore.api.SnapshotGroup; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.log4j.Logger; + +import com.cloud.agent.api.VMSnapshotTO; +import com.cloud.alert.AlertManager; +import com.cloud.event.EventTypes; +import com.cloud.event.UsageEventUtils; +import com.cloud.event.UsageEventVO; +import com.cloud.server.ManagementServerImpl; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.uservm.UserVm; +import com.cloud.utils.NumbersUtil; +import 
com.cloud.utils.component.ManagerBase; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import com.cloud.utils.db.TransactionStatus; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.snapshot.VMSnapshot; +import com.cloud.vm.snapshot.VMSnapshotDetailsVO; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao; + +public class ScaleIOVMSnapshotStrategy extends ManagerBase implements VMSnapshotStrategy { + private static final Logger LOGGER = Logger.getLogger(ScaleIOVMSnapshotStrategy.class); + @Inject + VMSnapshotHelper vmSnapshotHelper; + @Inject + UserVmDao userVmDao; + @Inject + VMSnapshotDao vmSnapshotDao; + @Inject + protected VMSnapshotDetailsDao vmSnapshotDetailsDao; + int _wait; + @Inject + ConfigurationDao configurationDao; + @Inject + VolumeDao volumeDao; + @Inject + DiskOfferingDao diskOfferingDao; + @Inject + PrimaryDataStoreDao storagePoolDao; + @Inject + StoragePoolDetailsDao storagePoolDetailsDao; + @Inject + AlertManager alertManager; + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + String value = configurationDao.getValue("vmsnapshot.create.wait"); + _wait = NumbersUtil.parseInt(value, 1800); + return true; + } + + @Override + public StrategyPriority canHandle(VMSnapshot vmSnapshot) { + List volumeTOs = vmSnapshotHelper.getVolumeTOList(vmSnapshot.getVmId()); + if (volumeTOs == null) { + throw new CloudRuntimeException("Failed to get the volumes for the vm snapshot: " + vmSnapshot.getUuid()); + } + + if (volumeTOs != null && !volumeTOs.isEmpty()) { + for (VolumeObjectTO volumeTO: volumeTOs) { + Long poolId = volumeTO.getPoolId(); + Storage.StoragePoolType poolType = 
vmSnapshotHelper.getStoragePoolType(poolId); + if (poolType != Storage.StoragePoolType.PowerFlex) { + return StrategyPriority.CANT_HANDLE; + } + } + } + + return StrategyPriority.HIGHEST; + } + + @Override + public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { + UserVm userVm = userVmDao.findById(vmSnapshot.getVmId()); + VMSnapshotVO vmSnapshotVO = (VMSnapshotVO)vmSnapshot; + + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.CreateRequested); + } catch (NoTransitionException e) { + throw new CloudRuntimeException(e.getMessage()); + } + + boolean result = false; + try { + Map srcVolumeDestSnapshotMap = new HashMap<>(); + List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); + + final Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + long prev_chain_size = 0; + long virtual_size=0; + for (VolumeObjectTO volume : volumeTOs) { + String volumeSnapshotName = String.format("%s-%s-%s-%s", ScaleIOUtil.VMSNAPSHOT_PREFIX, vmSnapshotVO.getId(), + storagePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); + srcVolumeDestSnapshotMap.put(volume.getPath(), volumeSnapshotName); + + virtual_size += volume.getSize(); + VolumeVO volumeVO = volumeDao.findById(volume.getId()); + prev_chain_size += volumeVO.getVmSnapshotChainSize() == null ? 
0 : volumeVO.getVmSnapshotChainSize(); + } + + VMSnapshotTO current = null; + VMSnapshotVO currentSnapshot = vmSnapshotDao.findCurrentSnapshotByVmId(userVm.getId()); + if (currentSnapshot != null) { + current = vmSnapshotHelper.getSnapshotWithParents(currentSnapshot); + } + + if (current == null) + vmSnapshotVO.setParent(null); + else + vmSnapshotVO.setParent(current.getId()); + + try { + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + SnapshotGroup snapshotGroup = client.takeSnapshot(srcVolumeDestSnapshotMap); + if (snapshotGroup == null) { + throw new CloudRuntimeException("Failed to take VM snapshot on PowerFlex storage pool"); + } + + String snapshotGroupId = snapshotGroup.getSnapshotGroupId(); + List volumeIds = snapshotGroup.getVolumeIds(); + if (volumeIds != null && !volumeIds.isEmpty()) { + List vmSnapshotDetails = new ArrayList(); + vmSnapshotDetails.add(new VMSnapshotDetailsVO(vmSnapshot.getId(), "SnapshotGroupId", snapshotGroupId, false)); + + for (int index = 0; index < volumeIds.size(); index++) { + vmSnapshotDetails.add(new VMSnapshotDetailsVO(vmSnapshot.getId(), "Vol_" + volumeTOs.get(index).getId() + "_Snapshot", volumeIds.get(index), false)); + } + + vmSnapshotDetailsDao.saveDetails(vmSnapshotDetails); + } + + finalizeCreate(vmSnapshotVO, volumeTOs); + result = true; + LOGGER.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName()); + + long new_chain_size=0; + for (VolumeObjectTO volumeTo : volumeTOs) { + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_CREATE, vmSnapshot, userVm, volumeTo); + new_chain_size += volumeTo.getSize(); + } + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_ON_PRIMARY, vmSnapshot, userVm, new_chain_size - prev_chain_size, virtual_size); + return vmSnapshot; + } catch (Exception e) { + String errMsg = "Unable to take vm snapshot due to: " + e.getMessage(); + LOGGER.warn(errMsg, e); + throw new CloudRuntimeException(errMsg); + } + } finally { + if (!result) 
{ + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); + + String subject = "Take snapshot failed for VM: " + userVm.getDisplayName(); + String message = "Snapshot operation failed for VM: " + userVm.getDisplayName() + ", Please check and delete if any stale volumes created with VM snapshot id: " + vmSnapshot.getVmId(); + alertManager.sendAlert(AlertManager.AlertType.ALERT_TYPE_VM_SNAPSHOT, userVm.getDataCenterId(), userVm.getPodIdToDeployIn(), subject, message); + } catch (NoTransitionException e1) { + LOGGER.error("Cannot set vm snapshot state due to: " + e1.getMessage()); + } + } + } + } + + @DB + protected void finalizeCreate(VMSnapshotVO vmSnapshot, List volumeTOs) { + try { + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws NoTransitionException { + // update chain size for the volumes in the VM snapshot + for (VolumeObjectTO volume : volumeTOs) { + VolumeVO volumeVO = volumeDao.findById(volume.getId()); + if (volumeVO != null) { + long vmSnapshotChainSize = volumeVO.getVmSnapshotChainSize() == null ? 
0 : volumeVO.getVmSnapshotChainSize(); + vmSnapshotChainSize += volumeVO.getSize(); + volumeVO.setVmSnapshotChainSize(vmSnapshotChainSize); + volumeDao.persist(volumeVO); + } + } + + vmSnapshot.setCurrent(true); + + // change current snapshot + if (vmSnapshot.getParent() != null) { + VMSnapshotVO previousCurrent = vmSnapshotDao.findById(vmSnapshot.getParent()); + previousCurrent.setCurrent(false); + vmSnapshotDao.persist(previousCurrent); + } + vmSnapshotDao.persist(vmSnapshot); + + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded); + } + }); + } catch (Exception e) { + String errMsg = "Error while finalize create vm snapshot: " + vmSnapshot.getName() + " due to " + e.getMessage(); + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg); + } + } + + @Override + public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { + VMSnapshotVO vmSnapshotVO = (VMSnapshotVO)vmSnapshot; + UserVmVO userVm = userVmDao.findById(vmSnapshot.getVmId()); + + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.RevertRequested); + } catch (NoTransitionException e) { + throw new CloudRuntimeException(e.getMessage()); + } + + boolean result = false; + try { + List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); + Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); + Map srcSnapshotDestVolumeMap = new HashMap<>(); + for (VolumeObjectTO volume : volumeTOs) { + VMSnapshotDetailsVO vmSnapshotDetail = vmSnapshotDetailsDao.findDetail(vmSnapshotVO.getId(), "Vol_" + volume.getId() + "_Snapshot"); + String srcSnapshotVolumeId = vmSnapshotDetail.getValue(); + String destVolumeId = volume.getPath(); + srcSnapshotDestVolumeMap.put(srcSnapshotVolumeId, destVolumeId); + } + + String systemId = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue(); + if (systemId == null) { + throw new CloudRuntimeException("Failed to get the system id 
for PowerFlex storage pool for reverting VM snapshot: " + vmSnapshot.getName()); + } + + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + result = client.revertSnapshot(systemId, srcSnapshotDestVolumeMap); + if (!result) { + throw new CloudRuntimeException("Failed to revert VM snapshot on PowerFlex storage pool"); + } + + finalizeRevert(vmSnapshotVO, volumeTOs); + result = true; + } catch (Exception e) { + String errMsg = "Revert VM: " + userVm.getInstanceName() + " to snapshot: " + vmSnapshotVO.getName() + " failed due to " + e.getMessage(); + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg); + } finally { + if (!result) { + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); + } catch (NoTransitionException e1) { + LOGGER.error("Cannot set vm snapshot state due to: " + e1.getMessage()); + } + } + } + return result; + } + + @DB + protected void finalizeRevert(VMSnapshotVO vmSnapshot, List volumeToList) { + try { + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws NoTransitionException { + // update chain size for the volumes in the VM snapshot + for (VolumeObjectTO volume : volumeToList) { + VolumeVO volumeVO = volumeDao.findById(volume.getId()); + if (volumeVO != null && volumeVO.getVmSnapshotChainSize() != null && volumeVO.getVmSnapshotChainSize() >= volumeVO.getSize()) { + long vmSnapshotChainSize = volumeVO.getVmSnapshotChainSize() - volumeVO.getSize(); + volumeVO.setVmSnapshotChainSize(vmSnapshotChainSize); + volumeDao.persist(volumeVO); + } + } + + // update current snapshot, current snapshot is the one reverted to + VMSnapshotVO previousCurrent = vmSnapshotDao.findCurrentSnapshotByVmId(vmSnapshot.getVmId()); + if (previousCurrent != null) { + previousCurrent.setCurrent(false); + vmSnapshotDao.persist(previousCurrent); + } + vmSnapshot.setCurrent(true); + 
vmSnapshotDao.persist(vmSnapshot); + + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded); + } + }); + } catch (Exception e) { + String errMsg = "Error while finalize revert vm snapshot: " + vmSnapshot.getName() + " due to " + e.getMessage(); + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg); + } + } + + @Override + public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { + UserVmVO userVm = userVmDao.findById(vmSnapshot.getVmId()); + VMSnapshotVO vmSnapshotVO = (VMSnapshotVO)vmSnapshot; + + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.ExpungeRequested); + } catch (NoTransitionException e) { + LOGGER.debug("Failed to change vm snapshot state with event ExpungeRequested"); + throw new CloudRuntimeException("Failed to change vm snapshot state with event ExpungeRequested: " + e.getMessage()); + } + + try { + List volumeTOs = vmSnapshotHelper.getVolumeTOList(vmSnapshot.getVmId()); + Long storagePoolId = vmSnapshotHelper.getStoragePoolForVM(userVm.getId()); + String systemId = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID).getValue(); + if (systemId == null) { + throw new CloudRuntimeException("Failed to get the system id for PowerFlex storage pool for deleting VM snapshot: " + vmSnapshot.getName()); + } + + VMSnapshotDetailsVO vmSnapshotDetailsVO = vmSnapshotDetailsDao.findDetail(vmSnapshot.getId(), "SnapshotGroupId"); + if (vmSnapshotDetailsVO == null) { + throw new CloudRuntimeException("Failed to get snapshot group id for the VM snapshot: " + vmSnapshot.getName()); + } + + String snapshotGroupId = vmSnapshotDetailsVO.getValue(); + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + int volumesDeleted = client.deleteSnapshotGroup(systemId, snapshotGroupId); + if (volumesDeleted <= 0) { + throw new CloudRuntimeException("Failed to delete VM snapshot: " + vmSnapshot.getName()); + } else if (volumesDeleted != 
volumeTOs.size()) { + LOGGER.warn("Unable to delete all volumes of the VM snapshot: " + vmSnapshot.getName()); + } + + finalizeDelete(vmSnapshotVO, volumeTOs); + long full_chain_size=0; + for (VolumeObjectTO volumeTo : volumeTOs) { + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_DELETE, vmSnapshot, userVm, volumeTo); + full_chain_size += volumeTo.getSize(); + } + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_OFF_PRIMARY, vmSnapshot, userVm, full_chain_size, 0L); + return true; + } catch (Exception e) { + String errMsg = "Unable to delete vm snapshot: " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " due to " + e.getMessage(); + LOGGER.warn(errMsg, e); + throw new CloudRuntimeException(errMsg); + } + } + + @DB + protected void finalizeDelete(VMSnapshotVO vmSnapshot, List volumeTOs) { + try { + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws NoTransitionException { + // update chain size for the volumes in the VM snapshot + for (VolumeObjectTO volume : volumeTOs) { + VolumeVO volumeVO = volumeDao.findById(volume.getId()); + if (volumeVO != null && volumeVO.getVmSnapshotChainSize() != null && volumeVO.getVmSnapshotChainSize() >= volumeVO.getSize()) { + long vmSnapshotChainSize = volumeVO.getVmSnapshotChainSize() - volumeVO.getSize(); + volumeVO.setVmSnapshotChainSize(vmSnapshotChainSize); + volumeDao.persist(volumeVO); + } + } + + // update children's parent snapshots + List children = vmSnapshotDao.listByParent(vmSnapshot.getId()); + for (VMSnapshotVO child : children) { + child.setParent(vmSnapshot.getParent()); + vmSnapshotDao.persist(child); + } + + // update current snapshot + VMSnapshotVO current = vmSnapshotDao.findCurrentSnapshotByVmId(vmSnapshot.getVmId()); + if (current != null && current.getId() == vmSnapshot.getId() && vmSnapshot.getParent() != null) { + VMSnapshotVO parent = vmSnapshotDao.findById(vmSnapshot.getParent()); + 
parent.setCurrent(true); + vmSnapshotDao.persist(parent); + } + vmSnapshot.setCurrent(false); + vmSnapshotDao.persist(vmSnapshot); + + vmSnapshotDao.remove(vmSnapshot.getId()); + } + }); + } catch (Exception e) { + String errMsg = "Error while finalize delete vm snapshot: " + vmSnapshot.getName() + " due to " + e.getMessage(); + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg); + } + } + + @Override + public boolean deleteVMSnapshotFromDB(VMSnapshot vmSnapshot, boolean unmanage) { + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.ExpungeRequested); + } catch (NoTransitionException e) { + LOGGER.debug("Failed to change vm snapshot state with event ExpungeRequested"); + throw new CloudRuntimeException("Failed to change vm snapshot state with event ExpungeRequested: " + e.getMessage()); + } + UserVm userVm = userVmDao.findById(vmSnapshot.getVmId()); + List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); + long full_chain_size = 0; + for (VolumeObjectTO volumeTo: volumeTOs) { + volumeTo.setSize(0); + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_DELETE, vmSnapshot, userVm, volumeTo); + full_chain_size += volumeTo.getSize(); + } + if (unmanage) { + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_OFF_PRIMARY, vmSnapshot, userVm, full_chain_size, 0L); + } + return vmSnapshotDao.remove(vmSnapshot.getId()); + } + + private void publishUsageEvent(String type, VMSnapshot vmSnapshot, UserVm userVm, VolumeObjectTO volumeTo) { + VolumeVO volume = volumeDao.findById(volumeTo.getId()); + Long diskOfferingId = volume.getDiskOfferingId(); + Long offeringId = null; + if (diskOfferingId != null) { + DiskOfferingVO offering = diskOfferingDao.findById(diskOfferingId); + if (offering != null && (offering.getType() == DiskOfferingVO.Type.Disk)) { + offeringId = offering.getId(); + } + } + Map details = new HashMap<>(); + if (vmSnapshot != null) { + details.put(UsageEventVO.DynamicParameters.vmSnapshotId.name(), 
String.valueOf(vmSnapshot.getId())); + } + UsageEventUtils.publishUsageEvent(type, vmSnapshot.getAccountId(), userVm.getDataCenterId(), userVm.getId(), vmSnapshot.getName(), offeringId, volume.getId(), // save volume's id into templateId field + volumeTo.getSize(), VMSnapshot.class.getName(), vmSnapshot.getUuid(), details); + } + + private void publishUsageEvent(String type, VMSnapshot vmSnapshot, UserVm userVm, Long vmSnapSize, Long virtualSize) { + try { + Map details = new HashMap<>(); + if (vmSnapshot != null) { + details.put(UsageEventVO.DynamicParameters.vmSnapshotId.name(), String.valueOf(vmSnapshot.getId())); + } + UsageEventUtils.publishUsageEvent(type, vmSnapshot.getAccountId(), userVm.getDataCenterId(), userVm.getId(), vmSnapshot.getName(), 0L, 0L, vmSnapSize, virtualSize, + VMSnapshot.class.getName(), vmSnapshot.getUuid(), details); + } catch (Exception e) { + LOGGER.error("Failed to publish usage event " + type, e); + } + } + + private ScaleIOGatewayClient getScaleIOClient(final Long storagePoolId) throws Exception { + final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.valueIn(storagePoolId); + final String url = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_ENDPOINT).getValue(); + final String username = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_USERNAME).getValue(); + final String password = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_PASSWORD).getValue(); + return ScaleIOGatewayClient.getClient(url, username, password, false, clientTimeout); + } +} diff --git a/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-snapshot-storage-context.xml b/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-snapshot-storage-context.xml index 2bfb3c368a56..2084ce26f69a 100644 --- 
a/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-snapshot-storage-context.xml +++ b/engine/storage/snapshot/src/main/resources/META-INF/cloudstack/storage/spring-engine-storage-snapshot-storage-context.xml @@ -36,7 +36,13 @@ + + + + diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java index cadbad3341a9..01842441e269 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/helper/VMSnapshotHelperImpl.java @@ -37,6 +37,7 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; +import com.cloud.storage.Storage; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.fsm.NoTransitionException; @@ -148,4 +149,33 @@ public VMSnapshotTO getSnapshotWithParents(VMSnapshotVO snapshot) { return result; } + @Override + public Long getStoragePoolForVM(Long vmId) { + List rootVolumes = volumeDao.findReadyRootVolumesByInstance(vmId); + if (rootVolumes == null || rootVolumes.isEmpty()) { + throw new InvalidParameterValueException("Failed to find root volume for the user vm:" + vmId); + } + + VolumeVO rootVolume = rootVolumes.get(0); + StoragePoolVO rootVolumePool = primaryDataStoreDao.findById(rootVolume.getPoolId()); + if (rootVolumePool == null) { + throw new InvalidParameterValueException("Failed to find root volume storage pool for the user vm:" + vmId); + } + + if (rootVolumePool.isInMaintenance()) { + throw new InvalidParameterValueException("Storage pool for the user vm:" + vmId + " is in maintenance"); + } + + return rootVolumePool.getId(); + } + + @Override + public Storage.StoragePoolType getStoragePoolType(Long poolId) { + StoragePoolVO storagePool = primaryDataStoreDao.findById(poolId); + 
if (storagePool == null) { + throw new InvalidParameterValueException("storage pool is not found"); + } + + return storagePool.getPoolType(); + } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index 965c33228887..1a26697ef25f 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -69,6 +69,7 @@ import com.cloud.configuration.Config; import com.cloud.exception.AgentUnavailableException; import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.Host; import com.cloud.host.dao.HostDao; import com.cloud.secstorage.CommandExecLogDao; import com.cloud.secstorage.CommandExecLogVO; @@ -388,6 +389,11 @@ public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCa } } + @Override + public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { + copyAsync(srcData, destData, callback); + } + private Answer sendToLeastBusyEndpoint(List eps, CopyCommand cmd) { Answer answer = null; EndPoint endPoint = null; diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java index 2e7e13b0846e..35153a109961 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java @@ -23,6 +23,7 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO; import com.cloud.agent.api.VMSnapshotTO; +import com.cloud.storage.Storage; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.vm.snapshot.VMSnapshot; import 
com.cloud.vm.snapshot.VMSnapshotVO; @@ -35,4 +36,8 @@ public interface VMSnapshotHelper { List getVolumeTOList(Long vmId); VMSnapshotTO getSnapshotWithParents(VMSnapshotVO snapshot); + + Long getStoragePoolForVM(Long vmId); + + Storage.StoragePoolType getStoragePoolType(Long poolId); } diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 77413ad6c2b6..9f50d78b3d27 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -45,6 +45,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; @@ -86,6 +87,7 @@ import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.StorageAccessException; import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; @@ -99,13 +101,16 @@ import com.cloud.storage.DataStoreRole; import com.cloud.storage.RegisterVolumePayload; import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateStorageResourceAssoc; import 
com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.Volume; import com.cloud.storage.Volume.State; +import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.dao.VolumeDao; @@ -166,6 +171,8 @@ public class VolumeServiceImpl implements VolumeService { private ClusterDao clusterDao; @Inject private VolumeDetailsDao _volumeDetailsDao; + @Inject + private TemplateDataFactory tmplFactory; private final static String SNAPSHOT_ID = "SNAPSHOT_ID"; @@ -371,6 +378,14 @@ public AsyncCallFuture expungeVolumeAsync(VolumeInfo volume) { return future; } + public void ensureVolumeIsExpungeReady(long volumeId) { + VolumeVO volume = volDao.findById(volumeId); + if (volume != null && volume.getPodId() != null) { + volume.setPodId(null); + volDao.update(volumeId, volume); + } + } + private boolean volumeExistsOnPrimary(VolumeVO vol) { Long poolId = vol.getPoolId(); @@ -780,6 +795,33 @@ protected Void createVolumeFromBaseImageCallBack(AsyncCallbackDispatcher callback, CreateVolumeFromBaseImageContext context) { + CopyCommandResult result = callback.getResult(); + DataObject vo = context.vo; + DataObject tmplOnPrimary = context.templateOnStore; + VolumeApiResult volResult = new VolumeApiResult((VolumeObject)vo); + + if (result.isSuccess()) { + VolumeVO volume = volDao.findById(vo.getId()); + CopyCmdAnswer answer = (CopyCmdAnswer)result.getAnswer(); + VolumeObjectTO volumeObjectTo = (VolumeObjectTO)answer.getNewData(); + volume.setPath(volumeObjectTo.getPath()); + if (volumeObjectTo.getFormat() != null) { + volume.setFormat(volumeObjectTo.getFormat()); + } + + volDao.update(volume.getId(), volume); + } else { + vo.processEvent(Event.DestroyRequested); + volResult.setResult(result.getResult()); + } + + AsyncCallFuture future = context.getFuture(); + future.complete(volResult); + return null; + } + /** * Creates a template volume on managed storage, which will be used 
for creating ROOT volumes by cloning. * @@ -861,7 +903,7 @@ private TemplateInfo createManagedTemplateVolume(TemplateInfo srcTemplateInfo, P * @param destHost The host that we will use for the copy */ private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, TemplateInfo templateOnPrimary, VMTemplateStoragePoolVO templatePoolRef, PrimaryDataStore destPrimaryDataStore, - Host destHost) { + Host destHost) throws StorageAccessException { AsyncCallFuture copyTemplateFuture = new AsyncCallFuture<>(); int storagePoolMaxWaitSeconds = NumbersUtil.parseInt(configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); long templatePoolRefId = templatePoolRef.getId(); @@ -899,6 +941,7 @@ private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, T details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, srcTemplateInfo.getUniqueName()); details.put(PrimaryDataStore.REMOVE_AFTER_COPY, Boolean.TRUE.toString()); details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(templateOnPrimary.getSize())); + details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(destPrimaryDataStore.getId()))); ChapInfo chapInfo = getChapInfo(templateOnPrimary, destPrimaryDataStore); @@ -909,11 +952,15 @@ private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, T details.put(PrimaryDataStore.CHAP_TARGET_SECRET, chapInfo.getTargetSecret()); } - templateOnPrimary.processEvent(Event.CopyingRequested); - destPrimaryDataStore.setDetails(details); - grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); + try { + grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); + } catch (Exception e) { + throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId()); + } + + templateOnPrimary.processEvent(Event.CopyingRequested); VolumeApiResult result; @@ -941,6 +988,8 @@ private void 
copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, T // something weird happens to the volume (XenServer creates an SR, but the VDI copy can fail). // For now, I just retry the copy. } + } catch (StorageAccessException e) { + throw e; } catch (Throwable e) { s_logger.debug("Failed to create a template on primary storage", e); @@ -1017,6 +1066,94 @@ private void createManagedVolumeCloneTemplateAsync(VolumeInfo volumeInfo, Templa } } + private void createManagedVolumeCopyManagedTemplateAsync(VolumeInfo volumeInfo, PrimaryDataStore destPrimaryDataStore, TemplateInfo srcTemplateOnPrimary, Host destHost, AsyncCallFuture future) throws StorageAccessException { + VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), srcTemplateOnPrimary.getId()); + + if (templatePoolRef == null) { + throw new CloudRuntimeException("Failed to find template " + srcTemplateOnPrimary.getUniqueName() + " in storage pool " + srcTemplateOnPrimary.getId()); + } + + if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { + throw new CloudRuntimeException("Template " + srcTemplateOnPrimary.getUniqueName() + " has not been downloaded to primary storage."); + } + + String volumeDetailKey = "POOL_TEMPLATE_ID_COPY_ON_HOST_" + destHost.getId(); + + try { + try { + grantAccess(srcTemplateOnPrimary, destHost, destPrimaryDataStore); + } catch (Exception e) { + throw new StorageAccessException("Unable to grant access to src template: " + srcTemplateOnPrimary.getId() + " on host: " + destHost.getId()); + } + + _volumeDetailsDao.addDetail(volumeInfo.getId(), volumeDetailKey, String.valueOf(templatePoolRef.getId()), false); + + // Create a volume on managed storage. 
+ AsyncCallFuture createVolumeFuture = createVolumeAsync(volumeInfo, destPrimaryDataStore); + VolumeApiResult createVolumeResult = createVolumeFuture.get(); + + if (createVolumeResult.isFailed()) { + throw new CloudRuntimeException("Creation of a volume failed: " + createVolumeResult.getResult()); + } + + // Refresh the volume info from the DB. + volumeInfo = volFactory.getVolume(volumeInfo.getId(), destPrimaryDataStore); + + CreateVolumeFromBaseImageContext context = new CreateVolumeFromBaseImageContext<>(null, volumeInfo, destPrimaryDataStore, srcTemplateOnPrimary, future, null); + + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + + caller.setCallback(caller.getTarget().createVolumeFromBaseManagedImageCallBack(null, null)); + caller.setContext(context); + + Map details = new HashMap(); + + details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); + details.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress()); + details.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort())); + details.put(PrimaryDataStore.MANAGED_STORE_TARGET, volumeInfo.get_iScsiName()); + details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, volumeInfo.getName()); + details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(volumeInfo.getSize())); + details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(destPrimaryDataStore.getId()))); + destPrimaryDataStore.setDetails(details); + + grantAccess(volumeInfo, destHost, destPrimaryDataStore); + + try { + motionSrv.copyAsync(srcTemplateOnPrimary, volumeInfo, destHost, caller); + } finally { + revokeAccess(volumeInfo, destHost, destPrimaryDataStore); + } + } catch (StorageAccessException e) { + throw e; + } catch (Throwable e) { + s_logger.debug("Failed to copy managed template on primary storage", e); + String errMsg = e.toString(); + volumeInfo.processEvent(Event.DestroyRequested); + + try { + 
AsyncCallFuture expungeVolumeFuture = expungeVolumeAsync(volumeInfo); + VolumeApiResult expungeVolumeResult = expungeVolumeFuture.get(); + if (expungeVolumeResult.isFailed()) { + errMsg += " : Failed to expunge a volume that was created"; + } + } catch (Exception ex) { + errMsg += " : " + ex.getMessage(); + } + + VolumeApiResult result = new VolumeApiResult(volumeInfo); + result.setResult(errMsg); + future.complete(result); + } finally { + _volumeDetailsDao.removeDetail(volumeInfo.getId(), volumeDetailKey); + + List volumeDetails = _volumeDetailsDao.findDetails(volumeDetailKey, String.valueOf(templatePoolRef.getId()), false); + if (volumeDetails == null || volumeDetails.isEmpty()) { + revokeAccess(srcTemplateOnPrimary, destHost, destPrimaryDataStore); + } + } + } + private void createManagedVolumeCopyTemplateAsync(VolumeInfo volumeInfo, PrimaryDataStore primaryDataStore, TemplateInfo srcTemplateInfo, Host destHost, AsyncCallFuture future) { try { // Create a volume on managed storage. @@ -1047,6 +1184,7 @@ private void createManagedVolumeCopyTemplateAsync(VolumeInfo volumeInfo, Primary details.put(PrimaryDataStore.MANAGED_STORE_TARGET, volumeInfo.get_iScsiName()); details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, volumeInfo.getName()); details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(volumeInfo.getSize())); + details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(primaryDataStore.getId()))); ChapInfo chapInfo = getChapInfo(volumeInfo, primaryDataStore); @@ -1092,7 +1230,92 @@ private void createManagedVolumeCopyTemplateAsync(VolumeInfo volumeInfo, Primary } @Override - public AsyncCallFuture createManagedStorageVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId, TemplateInfo srcTemplateInfo, long destHostId) { + public TemplateInfo createManagedStorageTemplate(long srcTemplateId, long destDataStoreId, long destHostId) throws StorageAccessException { + Host 
destHost = _hostDao.findById(destHostId); + if (destHost == null) { + throw new CloudRuntimeException("Destination host should not be null."); + } + + PrimaryDataStore destPrimaryDataStore = dataStoreMgr.getPrimaryDataStore(destDataStoreId); + + // Check if template exists on the storage pool. If not, download and copy to managed storage pool + VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destDataStoreId, srcTemplateId); + if (templatePoolRef != null && templatePoolRef.getDownloadState() == Status.DOWNLOADED) { + return tmplFactory.getTemplate(srcTemplateId, destPrimaryDataStore); + } + + TemplateInfo srcTemplateInfo = tmplFactory.getTemplate(srcTemplateId); + + if (srcTemplateInfo == null) { + throw new CloudRuntimeException("Failed to get info of template: " + srcTemplateId); + } + + if (Storage.ImageFormat.ISO.equals(srcTemplateInfo.getFormat())) { + throw new CloudRuntimeException("Unsupported format: " + Storage.ImageFormat.ISO.toString() + " for managed storage template"); + } + + TemplateInfo templateOnPrimary = null; + try { + templateOnPrimary = createManagedTemplateVolume(srcTemplateInfo, destPrimaryDataStore); + if (templateOnPrimary == null) { + throw new CloudRuntimeException("Failed to create template " + srcTemplateInfo.getUniqueName() + " on primary storage: " + destDataStoreId); + } + + templatePoolRef = _tmpltPoolDao.findByPoolTemplate(destPrimaryDataStore.getId(), templateOnPrimary.getId()); + if (templatePoolRef == null) { + throw new CloudRuntimeException("Failed to find template " + srcTemplateInfo.getUniqueName() + " in storage pool " + destPrimaryDataStore.getId()); + } + + if (templatePoolRef.getDownloadState() == Status.NOT_DOWNLOADED) { + // Populate details which will be later read by the storage subsystem. 
+ Map details = new HashMap<>(); + + details.put(PrimaryDataStore.MANAGED, Boolean.TRUE.toString()); + details.put(PrimaryDataStore.STORAGE_HOST, destPrimaryDataStore.getHostAddress()); + details.put(PrimaryDataStore.STORAGE_PORT, String.valueOf(destPrimaryDataStore.getPort())); + details.put(PrimaryDataStore.MANAGED_STORE_TARGET, templateOnPrimary.getInstallPath()); + details.put(PrimaryDataStore.MANAGED_STORE_TARGET_ROOT_VOLUME, srcTemplateInfo.getUniqueName()); + details.put(PrimaryDataStore.REMOVE_AFTER_COPY, Boolean.TRUE.toString()); + details.put(PrimaryDataStore.VOLUME_SIZE, String.valueOf(templateOnPrimary.getSize())); + details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(destPrimaryDataStore.getId()))); + destPrimaryDataStore.setDetails(details); + + try { + grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); + } catch (Exception e) { + throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId()); + } + + templateOnPrimary.processEvent(Event.CopyingRequested); + + try { + //Download and copy template to the managed volume + TemplateInfo templateOnPrimaryNow = tmplFactory.getReadyBypassedTemplateOnManagedStorage(srcTemplateId, templateOnPrimary, destDataStoreId, destHostId); + if (templateOnPrimaryNow == null) { + s_logger.debug("Failed to prepare ready bypassed template: " + srcTemplateId + " on primary storage: " + templateOnPrimary.getId()); + throw new CloudRuntimeException("Failed to prepare ready bypassed template: " + srcTemplateId + " on primary storage: " + templateOnPrimary.getId()); + } + templateOnPrimary.processEvent(Event.OperationSuccessed); + return templateOnPrimaryNow; + } finally { + revokeAccess(templateOnPrimary, destHost, destPrimaryDataStore); + } + } + return null; + } catch (StorageAccessException e) { + throw e; + } catch (Throwable e) { + s_logger.debug("Failed to create 
template on managed primary storage", e); + if (templateOnPrimary != null) { + templateOnPrimary.processEvent(Event.OperationFailed); + } + + throw new CloudRuntimeException(e.getMessage()); + } + } + + @Override + public AsyncCallFuture createManagedStorageVolumeFromTemplateAsync(VolumeInfo volumeInfo, long destDataStoreId, TemplateInfo srcTemplateInfo, long destHostId) throws StorageAccessException { PrimaryDataStore destPrimaryDataStore = dataStoreMgr.getPrimaryDataStore(destDataStoreId); Host destHost = _hostDao.findById(destHostId); @@ -1130,10 +1353,16 @@ public AsyncCallFuture createManagedStorageVolumeFromTemplateAs copyTemplateToManagedTemplateVolume(srcTemplateInfo, templateOnPrimary, templatePoolRef, destPrimaryDataStore, destHost); } - // We have a template on primary storage. Clone it to new volume. - s_logger.debug("Creating a clone from template on primary storage " + destDataStoreId); + if (destPrimaryDataStore.getPoolType() != StoragePoolType.PowerFlex) { + // We have a template on primary storage. Clone it to new volume. + s_logger.debug("Creating a clone from template on primary storage " + destDataStoreId); - createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future); + createManagedVolumeCloneTemplateAsync(volumeInfo, templateOnPrimary, destPrimaryDataStore, future); + } else { + // We have a template on PowerFlex primary storage. Create new volume and copy to it. 
+ s_logger.debug("Copying the template to the volume on primary storage"); + createManagedVolumeCopyManagedTemplateAsync(volumeInfo, destPrimaryDataStore, templateOnPrimary, destHost, future); + } } else { s_logger.debug("Primary storage does not support cloning or no support for UUID resigning on the host side; copying the template normally"); diff --git a/framework/direct-download/src/main/java/org/apache/cloudstack/framework/agent/direct/download/DirectDownloadService.java b/framework/direct-download/src/main/java/org/apache/cloudstack/framework/agent/direct/download/DirectDownloadService.java index ed7bbd76a351..983f935a2fa9 100644 --- a/framework/direct-download/src/main/java/org/apache/cloudstack/framework/agent/direct/download/DirectDownloadService.java +++ b/framework/direct-download/src/main/java/org/apache/cloudstack/framework/agent/direct/download/DirectDownloadService.java @@ -33,4 +33,9 @@ public interface DirectDownloadService { * Upload a stored certificate on database with id 'certificateId' to host with id 'hostId' */ boolean uploadCertificate(long certificateId, long hostId); + + /** + * Sync the stored certificates to host with id 'hostId' + */ + boolean syncCertificatesToHost(long hostId, long zoneId); } diff --git a/plugins/hypervisors/kvm/pom.xml b/plugins/hypervisors/kvm/pom.xml index 046cb6cddeac..9c48e2f8f65c 100644 --- a/plugins/hypervisors/kvm/pom.xml +++ b/plugins/hypervisors/kvm/pom.xml @@ -72,6 +72,12 @@ jna-platform ${cs.jna.version} + + org.apache.cloudstack + cloud-plugin-storage-volume-scaleio + ${project.version} + compile + diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index a481e6808a3f..8a22be8638aa 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -46,9 +46,7 @@ import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; -import com.cloud.hypervisor.kvm.resource.rolling.maintenance.RollingMaintenanceAgentExecutor; -import com.cloud.hypervisor.kvm.resource.rolling.maintenance.RollingMaintenanceExecutor; -import com.cloud.hypervisor.kvm.resource.rolling.maintenance.RollingMaintenanceServiceExecutor; +import org.apache.cloudstack.storage.configdrive.ConfigDrive; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; @@ -89,6 +87,7 @@ import com.cloud.agent.api.PingCommand; import com.cloud.agent.api.PingRoutingCommand; import com.cloud.agent.api.PingRoutingWithNwGroupsCommand; +import com.cloud.agent.api.SecurityGroupRulesCmd; import com.cloud.agent.api.SetupGuestNetworkCommand; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; @@ -111,7 +110,6 @@ import com.cloud.agent.resource.virtualnetwork.VRScripts; import com.cloud.agent.resource.virtualnetwork.VirtualRouterDeployer; import com.cloud.agent.resource.virtualnetwork.VirtualRoutingResource; -import com.cloud.agent.api.SecurityGroupRulesCmd; import com.cloud.dc.Vlan; import com.cloud.exception.InternalErrorException; import com.cloud.host.Host.Type; @@ -144,6 +142,9 @@ import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.WatchDogDef; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.WatchDogDef.WatchDogAction; import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.WatchDogDef.WatchDogModel; +import com.cloud.hypervisor.kvm.resource.rolling.maintenance.RollingMaintenanceAgentExecutor; +import com.cloud.hypervisor.kvm.resource.rolling.maintenance.RollingMaintenanceExecutor; +import 
com.cloud.hypervisor.kvm.resource.rolling.maintenance.RollingMaintenanceServiceExecutor; import com.cloud.hypervisor.kvm.resource.wrapper.LibvirtRequestWrapper; import com.cloud.hypervisor.kvm.resource.wrapper.LibvirtUtilitiesHelper; import com.cloud.hypervisor.kvm.storage.IscsiStorageCleanupMonitor; @@ -237,6 +238,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv public static final String SSHPUBKEYPATH = SSHKEYSPATH + File.separator + "id_rsa.pub.cloud"; public static final String DEFAULTDOMRSSHPORT = "3922"; + public final static String HOST_CACHE_PATH_PARAMETER = "host.cache.location"; + public final static String CONFIG_DIR = "config"; + public static final String BASH_SCRIPT_PATH = "/bin/bash"; private String _mountPoint = "/mnt"; @@ -516,6 +520,14 @@ public String getDirectDownloadTemporaryDownloadPath() { return directDownloadTemporaryDownloadPath; } + public String getConfigPath() { + return getCachePath() + "/" + CONFIG_DIR; + } + + public String getCachePath() { + return cachePath; + } + public String getResizeVolumePath() { return _resizeVolumePath; } @@ -568,6 +580,7 @@ protected enum BridgeType { protected boolean dpdkSupport = false; protected String dpdkOvsPath; protected String directDownloadTemporaryDownloadPath; + protected String cachePath; private String getEndIpFromStartIp(final String startIp, final int numIps) { final String[] tokens = startIp.split("[.]"); @@ -619,6 +632,10 @@ private String getDefaultDirectDownloadTemporaryPath() { return "/var/lib/libvirt/images"; } + private String getDefaultCachePath() { + return "/var/cache/cloud"; + } + protected String getDefaultNetworkScriptsDir() { return "scripts/vm/network/vnet"; } @@ -708,6 +725,11 @@ public boolean configure(final String name, final Map params) th directDownloadTemporaryDownloadPath = getDefaultDirectDownloadTemporaryPath(); } + cachePath = (String) params.get(HOST_CACHE_PATH_PARAMETER); + if 
(org.apache.commons.lang.StringUtils.isBlank(cachePath)) { + cachePath = getDefaultCachePath(); + } + params.put("domr.scripts.dir", domrScriptsDir); _virtRouterResource = new VirtualRoutingResource(this); @@ -2466,11 +2488,21 @@ public void createVifs(final VirtualMachineTO vmSpec, final LibvirtVMDef vm) thr } public String getVolumePath(final Connect conn, final DiskTO volume) throws LibvirtException, URISyntaxException { + return getVolumePath(conn, volume, false); + } + + public String getVolumePath(final Connect conn, final DiskTO volume, boolean diskOnHostCache) throws LibvirtException, URISyntaxException { final DataTO data = volume.getData(); final DataStoreTO store = data.getDataStore(); if (volume.getType() == Volume.Type.ISO && data.getPath() != null && (store instanceof NfsTO || store instanceof PrimaryDataStoreTO && data instanceof TemplateObjectTO && !((TemplateObjectTO) data).isDirectDownload())) { + + if (data.getPath().startsWith(ConfigDrive.CONFIGDRIVEDIR) && diskOnHostCache) { + String configDrivePath = getConfigPath() + "/" + data.getPath(); + return configDrivePath; + } + final String isoPath = store.getUrl().split("\\?")[0] + File.separator + data.getPath(); final int index = isoPath.lastIndexOf("/"); final String path = isoPath.substring(0, index); @@ -2508,7 +2540,11 @@ public int compare(final DiskTO arg0, final DiskTO arg1) { if (volume.getType() == Volume.Type.ISO && data.getPath() != null) { DataStoreTO dataStore = data.getDataStore(); String dataStoreUrl = null; - if (dataStore instanceof NfsTO) { + if (data.getPath().startsWith(ConfigDrive.CONFIGDRIVEDIR) && vmSpec.isConfigDriveOnHostCache() && data instanceof TemplateObjectTO) { + String configDrivePath = getConfigPath() + "/" + data.getPath(); + physicalDisk = new KVMPhysicalDisk(configDrivePath, ((TemplateObjectTO) data).getUuid(), null); + physicalDisk.setFormat(PhysicalDiskFormat.FILE); + } else if (dataStore instanceof NfsTO) { NfsTO nfsStore = (NfsTO)data.getDataStore(); 
dataStoreUrl = nfsStore.getUrl(); physicalDisk = getPhysicalDiskFromNfsStore(dataStoreUrl, data); @@ -2587,6 +2623,8 @@ public int compare(final DiskTO arg0, final DiskTO arg1) { */ disk.defNetworkBasedDisk(physicalDisk.getPath().replace("rbd:", ""), pool.getSourceHost(), pool.getSourcePort(), pool.getAuthUserName(), pool.getUuid(), devId, diskBusType, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW); + } else if (pool.getType() == StoragePoolType.PowerFlex) { + disk.defBlockBasedDisk(physicalDisk.getPath(), devId, diskBusTypeData); } else if (pool.getType() == StoragePoolType.Gluster) { final String mountpoint = pool.getLocalPath(); final String path = physicalDisk.getPath(); @@ -2670,7 +2708,6 @@ public int compare(final DiskTO arg0, final DiskTO arg1) { } } } - } private KVMPhysicalDisk getPhysicalDiskPrimaryStore(PrimaryDataStoreTO primaryDataStoreTO, DataTO data) { @@ -2832,6 +2869,8 @@ public synchronized String attachOrDetachDisk(final Connect conn, if (attachingPool.getType() == StoragePoolType.RBD) { diskdef.defNetworkBasedDisk(attachingDisk.getPath(), attachingPool.getSourceHost(), attachingPool.getSourcePort(), attachingPool.getAuthUserName(), attachingPool.getUuid(), devId, busT, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW); + } else if (attachingPool.getType() == StoragePoolType.PowerFlex) { + diskdef.defBlockBasedDisk(attachingDisk.getPath(), devId, busT); } else if (attachingPool.getType() == StoragePoolType.Gluster) { diskdef.defNetworkBasedDisk(attachingDisk.getPath(), attachingPool.getSourceHost(), attachingPool.getSourcePort(), null, null, devId, busT, DiskProtocol.GLUSTER, DiskDef.DiskFmtType.QCOW2); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java index 56519aed3a41..1bdf2db8c4f4 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java 
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolDef.java @@ -18,7 +18,7 @@ public class LibvirtStoragePoolDef { public enum PoolType { - ISCSI("iscsi"), NETFS("netfs"), LOGICAL("logical"), DIR("dir"), RBD("rbd"), GLUSTERFS("glusterfs"); + ISCSI("iscsi"), NETFS("netfs"), LOGICAL("logical"), DIR("dir"), RBD("rbd"), GLUSTERFS("glusterfs"), POWERFLEX("powerflex"); String _poolType; PoolType(String poolType) { @@ -178,7 +178,7 @@ public String toString() { storagePoolBuilder.append("'/>\n"); storagePoolBuilder.append("</source>\n"); } - if (_poolType != PoolType.RBD) { + if (_poolType != PoolType.RBD && _poolType != PoolType.POWERFLEX) { storagePoolBuilder.append("<target>\n"); storagePoolBuilder.append("<path>" + _targetPath + "</path>\n"); storagePoolBuilder.append("</target>\n"); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java index 239cc3dca47d..ed3220f9a1e0 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtStoragePoolXMLParser.java @@ -54,7 +54,7 @@ public LibvirtStoragePoolDef parseStoragePoolXML(String poolXML) { String host = getAttrValue("host", "name", source); String format = getAttrValue("format", "type", source); - if (type.equalsIgnoreCase("rbd")) { + if (type.equalsIgnoreCase("rbd") || type.equalsIgnoreCase("powerflex")) { int port = Integer.parseInt(getAttrValue("host", "port", source)); String pool = getTagValue("name", source); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java index efc009037b9c..2618f20fae11 100644 ---
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtCheckUrlCommand.java @@ -18,13 +18,15 @@ // package com.cloud.hypervisor.kvm.resource.wrapper; +import org.apache.cloudstack.agent.directdownload.CheckUrlAnswer; +import org.apache.cloudstack.agent.directdownload.CheckUrlCommand; +import org.apache.log4j.Logger; + import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; import com.cloud.utils.UriUtils; -import org.apache.cloudstack.agent.directdownload.CheckUrlAnswer; -import org.apache.cloudstack.agent.directdownload.CheckUrlCommand; -import org.apache.log4j.Logger; +import com.cloud.utils.storage.QCOW2Utils; @ResourceWrapper(handles = CheckUrlCommand.class) public class LibvirtCheckUrlCommand extends CommandWrapper { @@ -39,7 +41,12 @@ public CheckUrlAnswer execute(CheckUrlCommand cmd, LibvirtComputingResource serv Long remoteSize = null; try { UriUtils.checkUrlExistence(url); - remoteSize = UriUtils.getRemoteSize(url); + + if ("qcow2".equalsIgnoreCase(cmd.getFormat())) { + remoteSize = QCOW2Utils.getVirtualSize(url); + } else { + remoteSize = UriUtils.getRemoteSize(url); + } } catch (IllegalArgumentException e) { s_logger.warn(e.getMessage()); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumeStatsCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumeStatsCommandWrapper.java index 00bdfcd49d73..a2f50ac6555f 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumeStatsCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetVolumeStatsCommandWrapper.java @@ -50,7 +50,12 @@ public Answer 
execute(final GetVolumeStatsCommand cmd, final LibvirtComputingRes StoragePoolType poolType = cmd.getPoolType(); HashMap statEntry = new HashMap(); for (String volumeUuid : cmd.getVolumeUuids()) { - statEntry.put(volumeUuid, getVolumeStat(libvirtComputingResource, conn, volumeUuid, storeUuid, poolType)); + VolumeStatsEntry volumeStatsEntry = getVolumeStat(libvirtComputingResource, conn, volumeUuid, storeUuid, poolType); + if (volumeStatsEntry == null) { + String msg = "Can't get disk stats as pool or disk details unavailable for volume: " + volumeUuid + " on the storage pool: " + storeUuid; + return new GetVolumeStatsAnswer(cmd, msg, null); + } + statEntry.put(volumeUuid, volumeStatsEntry); } return new GetVolumeStatsAnswer(cmd, "", statEntry); } catch (LibvirtException | CloudRuntimeException e) { @@ -58,10 +63,17 @@ public Answer execute(final GetVolumeStatsCommand cmd, final LibvirtComputingRes } } - private VolumeStatsEntry getVolumeStat(final LibvirtComputingResource libvirtComputingResource, final Connect conn, final String volumeUuid, final String storeUuid, final StoragePoolType poolType) throws LibvirtException { KVMStoragePool sourceKVMPool = libvirtComputingResource.getStoragePoolMgr().getStoragePool(poolType, storeUuid); + if (sourceKVMPool == null) { + return null; + } + KVMPhysicalDisk sourceKVMVolume = sourceKVMPool.getPhysicalDisk(volumeUuid); + if (sourceKVMVolume == null) { + return null; + } + return new VolumeStatsEntry(volumeUuid, sourceKVMVolume.getSize(), sourceKVMVolume.getVirtualSize()); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtHandleConfigDriveCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtHandleConfigDriveCommandWrapper.java index 6baae85e2214..6067150df0f4 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtHandleConfigDriveCommandWrapper.java +++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtHandleConfigDriveCommandWrapper.java @@ -24,16 +24,21 @@ import java.nio.file.Paths; import org.apache.cloudstack.storage.configdrive.ConfigDriveBuilder; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.HandleConfigDriveIsoAnswer; import com.cloud.agent.api.HandleConfigDriveIsoCommand; +import com.cloud.agent.api.to.DataStoreTO; import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; import com.cloud.hypervisor.kvm.storage.KVMStoragePool; import com.cloud.hypervisor.kvm.storage.KVMStoragePoolManager; +import com.cloud.network.element.NetworkElement; import com.cloud.resource.CommandWrapper; import com.cloud.resource.ResourceWrapper; import com.cloud.storage.Storage; +import com.cloud.utils.exception.CloudRuntimeException; @ResourceWrapper(handles = HandleConfigDriveIsoCommand.class) public final class LibvirtHandleConfigDriveCommandWrapper extends CommandWrapper { @@ -41,38 +46,103 @@ public final class LibvirtHandleConfigDriveCommandWrapper extends CommandWrapper @Override public Answer execute(final HandleConfigDriveIsoCommand command, final LibvirtComputingResource libvirtComputingResource) { - final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); - final KVMStoragePool pool = storagePoolMgr.getStoragePool(Storage.StoragePoolType.NetworkFilesystem, command.getDestStore().getUuid()); - if (pool == null) { - return new Answer(command, false, "Pool not found, config drive for KVM is only supported for NFS"); - } + String mountPoint = null; + + try { + if (command.isCreate()) { + LOG.debug("Creating config drive: " + command.getIsoFile()); + + NetworkElement.Location location = NetworkElement.Location.PRIMARY; + if (command.isHostCachePreferred()) { + LOG.debug("Using the KVM host for config drive"); + mountPoint = 
libvirtComputingResource.getConfigPath(); + location = NetworkElement.Location.HOST; + } else { + final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); + KVMStoragePool pool = null; + String poolUuid = null; + Storage.StoragePoolType poolType = null; + DataStoreTO dataStoreTO = command.getDestStore(); + if (dataStoreTO != null) { + if (dataStoreTO instanceof PrimaryDataStoreTO) { + PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) dataStoreTO; + poolType = primaryDataStoreTO.getPoolType(); + } else { + poolType = Storage.StoragePoolType.NetworkFilesystem; + } + poolUuid = command.getDestStore().getUuid(); + pool = storagePoolMgr.getStoragePool(poolType, poolUuid); + } + + if (pool == null || poolType == null) { + return new HandleConfigDriveIsoAnswer(command, "Unable to create config drive, Pool " + (poolUuid != null ? poolUuid : "") + " not found"); + } + + if (pool.supportsConfigDriveIso()) { + LOG.debug("Using the pool: " + poolUuid + " for config drive"); + mountPoint = pool.getLocalPath(); + } else if (command.getUseHostCacheOnUnsupportedPool()) { + LOG.debug("Config drive for KVM is not supported for pool type: " + poolType.toString() + ", using the KVM host"); + mountPoint = libvirtComputingResource.getConfigPath(); + location = NetworkElement.Location.HOST; + } else { + LOG.debug("Config drive for KVM is not supported for pool type: " + poolType.toString()); + return new HandleConfigDriveIsoAnswer(command, "Config drive for KVM is not supported for pool type: " + poolType.toString()); + } + } + + Path isoPath = Paths.get(mountPoint, command.getIsoFile()); + File isoFile = new File(mountPoint, command.getIsoFile()); + + if (command.getIsoData() == null) { + return new HandleConfigDriveIsoAnswer(command, "Invalid config drive ISO data received"); + } + if (isoFile.exists()) { + LOG.debug("An old config drive iso already exists"); + } - final String mountPoint = pool.getLocalPath(); - final Path isoPath = 
Paths.get(mountPoint, command.getIsoFile()); - final File isoFile = new File(mountPoint, command.getIsoFile()); - if (command.isCreate()) { - LOG.debug("Creating config drive: " + command.getIsoFile()); - if (command.getIsoData() == null) { - return new Answer(command, false, "Invalid config drive ISO data received"); - } - if (isoFile.exists()) { - LOG.debug("An old config drive iso already exists"); - } - try { Files.createDirectories(isoPath.getParent()); ConfigDriveBuilder.base64StringToFile(command.getIsoData(), mountPoint, command.getIsoFile()); - } catch (IOException e) { - return new Answer(command, false, "Failed due to exception: " + e.getMessage()); - } - } else { - try { - Files.deleteIfExists(isoPath); - } catch (IOException e) { - LOG.warn("Failed to delete config drive: " + isoPath.toAbsolutePath().toString()); - return new Answer(command, false, "Failed due to exception: " + e.getMessage()); + + return new HandleConfigDriveIsoAnswer(command, location); + } else { + LOG.debug("Deleting config drive: " + command.getIsoFile()); + Path configDrivePath = null; + + if (command.isHostCachePreferred()) { + // Check and delete config drive in host storage if exists + mountPoint = libvirtComputingResource.getConfigPath(); + configDrivePath = Paths.get(mountPoint, command.getIsoFile()); + Files.deleteIfExists(configDrivePath); + } else { + final KVMStoragePoolManager storagePoolMgr = libvirtComputingResource.getStoragePoolMgr(); + KVMStoragePool pool = null; + DataStoreTO dataStoreTO = command.getDestStore(); + if (dataStoreTO != null) { + if (dataStoreTO instanceof PrimaryDataStoreTO) { + PrimaryDataStoreTO primaryDataStoreTO = (PrimaryDataStoreTO) dataStoreTO; + Storage.StoragePoolType poolType = primaryDataStoreTO.getPoolType(); + pool = storagePoolMgr.getStoragePool(poolType, command.getDestStore().getUuid()); + } else { + pool = storagePoolMgr.getStoragePool(Storage.StoragePoolType.NetworkFilesystem, command.getDestStore().getUuid()); + } + } + + if (pool 
!= null && pool.supportsConfigDriveIso()) { + mountPoint = pool.getLocalPath(); + configDrivePath = Paths.get(mountPoint, command.getIsoFile()); + Files.deleteIfExists(configDrivePath); + } + } + + return new HandleConfigDriveIsoAnswer(command); } + } catch (final IOException e) { + LOG.debug("Failed to handle config drive due to " + e.getMessage(), e); + return new HandleConfigDriveIsoAnswer(command, "Failed due to exception: " + e.getMessage()); + } catch (final CloudRuntimeException e) { + LOG.debug("Failed to handle config drive due to " + e.getMessage(), e); + return new HandleConfigDriveIsoAnswer(command, "Failed due to exception: " + e.toString()); } - - return new Answer(command); } } \ No newline at end of file diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java index f3f50aa61f06..38cd9958d7c9 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java @@ -19,11 +19,22 @@ package com.cloud.hypervisor.kvm.resource.wrapper; +import java.net.URISyntaxException; +import java.util.HashMap; +import java.util.Map; + +import org.apache.cloudstack.storage.configdrive.ConfigDrive; +import org.apache.commons.collections.MapUtils; +import org.apache.log4j.Logger; +import org.libvirt.Connect; +import org.libvirt.LibvirtException; + import com.cloud.agent.api.Answer; import com.cloud.agent.api.PrepareForMigrationAnswer; import com.cloud.agent.api.PrepareForMigrationCommand; -import com.cloud.agent.api.to.DpdkTO; +import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DiskTO; +import com.cloud.agent.api.to.DpdkTO; import 
com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.exception.InternalErrorException; @@ -36,14 +47,6 @@ import com.cloud.storage.Volume; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; -import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; -import org.libvirt.Connect; -import org.libvirt.LibvirtException; - -import java.net.URISyntaxException; -import java.util.HashMap; -import java.util.Map; @ResourceWrapper(handles = PrepareForMigrationCommand.class) public final class LibvirtPrepareForMigrationCommandWrapper extends CommandWrapper { @@ -86,7 +89,12 @@ public Answer execute(final PrepareForMigrationCommand command, final LibvirtCom final DiskTO[] volumes = vm.getDisks(); for (final DiskTO volume : volumes) { if (volume.getType() == Volume.Type.ISO) { - libvirtComputingResource.getVolumePath(conn, volume); + final DataTO data = volume.getData(); + if (data != null && data.getPath() != null && data.getPath().startsWith(ConfigDrive.CONFIGDRIVEDIR)) { + libvirtComputingResource.getVolumePath(conn, volume, vm.isConfigDriveOnHostCache()); + } else { + libvirtComputingResource.getVolumePath(conn, volume); + } } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java index 0418dbbb0000..7684789c3d2a 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java @@ -330,6 +330,12 @@ public boolean disconnectPhysicalDisk(String volumeUuid, KVMStoragePool pool) { @Override public boolean disconnectPhysicalDisk(Map volumeToDisconnect) { + String poolType = volumeToDisconnect.get(DiskTO.PROTOCOL_TYPE); + // Unsupported pool types + if (poolType != null 
&& poolType.equalsIgnoreCase(StoragePoolType.PowerFlex.toString())) { + return false; + } + String host = volumeToDisconnect.get(DiskTO.STORAGE_HOST); String port = volumeToDisconnect.get(DiskTO.STORAGE_PORT); String path = volumeToDisconnect.get(DiskTO.IQN); @@ -447,7 +453,7 @@ public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, S } @Override - public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, KVMStoragePool destPool, boolean isIso) { + public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { return null; } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStoragePool.java index 865dfab58ff5..8e4af764cd60 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStoragePool.java @@ -19,9 +19,9 @@ import java.util.List; import java.util.Map; -import com.cloud.storage.Storage; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.StoragePoolType; public class IscsiAdmStoragePool implements KVMStoragePool { @@ -165,4 +165,9 @@ public String getSourceDir() { public String getLocalPath() { return _localPath; } + + @Override + public boolean supportsConfigDriveIso() { + return false; + } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java index be7a8b05184a..46d78e5f6b3a 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java +++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java @@ -19,9 +19,9 @@ import java.util.List; import java.util.Map; -import com.cloud.storage.Storage; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.StoragePoolType; public interface KVMStoragePool { @@ -70,4 +70,6 @@ public interface KVMStoragePool { PhysicalDiskFormat getDefaultFormat(); public boolean createFolder(String path); + + public boolean supportsConfigDriveIso(); } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java index 544c47f07e57..e747093ec671 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java @@ -22,15 +22,15 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.UUID; import java.util.Set; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; -import org.apache.log4j.Logger; - import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.apache.log4j.Logger; +import org.reflections.Reflections; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.VirtualMachineTO; @@ -44,8 +44,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine; -import org.reflections.Reflections; - public class KVMStoragePoolManager { private static final Logger s_logger = Logger.getLogger(KVMStoragePoolManager.class); @@ -100,6 +98,7 @@ public KVMStoragePoolManager(StorageLayer storagelayer, KVMHAMonitor monitor) { // add other storage 
adaptors here // this._storageMapper.put("newadaptor", new NewStorageAdaptor(storagelayer)); this._storageMapper.put(StoragePoolType.ManagedNFS.toString(), new ManagedNfsStorageAdaptor(storagelayer)); + this._storageMapper.put(StoragePoolType.PowerFlex.toString(), new ScaleIOStorageAdaptor(storagelayer)); // add any adaptors that wish to register themselves via annotation Reflections reflections = new Reflections("com.cloud.hypervisor.kvm.storage"); @@ -253,7 +252,7 @@ public KVMStoragePool getStoragePool(StoragePoolType type, String uuid, boolean if (info != null) { pool = createStoragePool(info.name, info.host, info.port, info.path, info.userInfo, info.poolType, info.type); } else { - throw new CloudRuntimeException("Could not fetch storage pool " + uuid + " from libvirt"); + throw new CloudRuntimeException("Could not fetch storage pool " + uuid + " from libvirt due to " + e.getMessage()); } } return pool; @@ -286,36 +285,38 @@ public KVMStoragePool getStoragePoolByURI(String uri) { public KVMPhysicalDisk getPhysicalDisk(StoragePoolType type, String poolUuid, String volName) { int cnt = 0; - int retries = 10; + int retries = 100; KVMPhysicalDisk vol = null; //harden get volume, try cnt times to get volume, in case volume is created on other host + //Poll more frequently and return immediately once disk is found String errMsg = ""; while (cnt < retries) { try { KVMStoragePool pool = getStoragePool(type, poolUuid); vol = pool.getPhysicalDisk(volName); if (vol != null) { - break; + return vol; } } catch (Exception e) { - s_logger.debug("Failed to find volume:" + volName + " due to" + e.toString() + ", retry:" + cnt); + s_logger.debug("Failed to find volume:" + volName + " due to " + e.toString() + ", retry:" + cnt); errMsg = e.toString(); } try { - Thread.sleep(30000); + Thread.sleep(3000); } catch (InterruptedException e) { s_logger.debug("[ignored] interupted while trying to get storage pool."); } cnt++; } + KVMStoragePool pool = getStoragePool(type, poolUuid); + 
vol = pool.getPhysicalDisk(volName); if (vol == null) { throw new CloudRuntimeException(errMsg); } else { return vol; } - } public KVMStoragePool createStoragePool(String name, String host, int port, String path, String userInfo, StoragePoolType type) { @@ -377,6 +378,10 @@ public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String n return adaptor.createDiskFromTemplate(template, name, PhysicalDiskFormat.DIR, provisioningType, size, destPool, timeout); + } else if (destPool.getType() == StoragePoolType.PowerFlex) { + return adaptor.createDiskFromTemplate(template, name, + PhysicalDiskFormat.RAW, provisioningType, + size, destPool, timeout); } else { return adaptor.createDiskFromTemplate(template, name, PhysicalDiskFormat.QCOW2, provisioningType, @@ -405,9 +410,9 @@ public KVMPhysicalDisk createDiskWithTemplateBacking(KVMPhysicalDisk template, S return adaptor.createDiskFromTemplateBacking(template, name, format, size, destPool, timeout); } - public KVMPhysicalDisk createPhysicalDiskFromDirectDownloadTemplate(String templateFilePath, KVMStoragePool destPool, boolean isIso) { + public KVMPhysicalDisk createPhysicalDiskFromDirectDownloadTemplate(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { StorageAdaptor adaptor = getStorageAdaptor(destPool.getType()); - return adaptor.createTemplateFromDirectDownloadFile(templateFilePath, destPool, isIso); + return adaptor.createTemplateFromDirectDownloadFile(templateFilePath, destTemplatePath, destPool, format, timeout); } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index b792ff22204d..b8cd548c59ae 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -37,7 +37,6 @@ import javax.naming.ConfigurationException; -import com.cloud.utils.Pair; import org.apache.cloudstack.agent.directdownload.DirectDownloadAnswer; import org.apache.cloudstack.agent.directdownload.DirectDownloadCommand; import org.apache.cloudstack.agent.directdownload.HttpDirectDownloadCommand; @@ -116,6 +115,7 @@ import com.cloud.storage.template.QCOW2Processor; import com.cloud.storage.template.TemplateLocation; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; import com.cloud.utils.storage.S3.S3Utils; @@ -254,11 +254,15 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { String path = details != null ? details.get("managedStoreTarget") : null; - storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details); + if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { + s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + } primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? 
path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); - storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path); + if (!storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path)) { + s_logger.warn("Failed to disconnect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + } } else { primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, UUID.randomUUID().toString(), primaryPool, cmd.getWaitInMillSeconds()); } @@ -272,7 +276,7 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { final TemplateObjectTO newTemplate = new TemplateObjectTO(); newTemplate.setPath(primaryVol.getName()); newTemplate.setSize(primaryVol.getSize()); - if (primaryPool.getType() == StoragePoolType.RBD) { + if (primaryPool.getType() == StoragePoolType.RBD || primaryPool.getType() == StoragePoolType.PowerFlex) { newTemplate.setFormat(ImageFormat.RAW); } else { newTemplate.setFormat(ImageFormat.QCOW2); @@ -380,6 +384,27 @@ public Answer cloneVolumeFromBaseTemplate(final CopyCommand cmd) { if (primaryPool.getType() == StoragePoolType.CLVM) { templatePath = ((NfsTO)imageStore).getUrl() + File.separator + templatePath; vol = templateToPrimaryDownload(templatePath, primaryPool, volume.getUuid(), volume.getSize(), cmd.getWaitInMillSeconds()); + } else if (primaryPool.getType() == StoragePoolType.PowerFlex) { + Map details = primaryStore.getDetails(); + String path = details != null ?
details.get("managedStoreTarget") : null; + + if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath, details)) { + s_logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid()); + } + + BaseVol = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath); + if (BaseVol == null) { + s_logger.debug("Failed to get the base template volume: " + templatePath); + throw new CloudRuntimeException("Failed to get the base template volume: " + templatePath + ", in storage pool id: " + primaryStore.getUuid()); + } + + if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { + s_logger.warn("Failed to connect new volume at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + } + + vol = storagePoolMgr.copyPhysicalDisk(BaseVol, path != null ? path : volume.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); + + storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path); } else { if (templatePath.contains("/mnt")) { //upgrade issue, if the path contains path, need to extract the volume uuid from path @@ -1343,6 +1368,9 @@ public Answer attachVolume(final AttachCommand cmd) { } catch (final InternalErrorException e) { s_logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); return new AttachAnswer(e.toString()); + } catch (final CloudRuntimeException e) { + s_logger.debug("Failed to attach volume: " + vol.getPath() + ", due to ", e); + return new AttachAnswer(e.toString()); + } } @@ -1374,6 +1402,9 @@ public Answer dettachVolume(final DettachCommand cmd) { } catch (final InternalErrorException e) { s_logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); return new DettachAnswer(e.toString()); + } catch (final CloudRuntimeException e) { + s_logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e); + return new DettachAnswer(e.toString()); + } } @@ -1727,6 +1758,7 @@ 
public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) final PrimaryDataStoreTO pool = cmd.getDestPool(); DirectTemplateDownloader downloader; KVMPhysicalDisk template; + KVMStoragePool destPool = null; try { s_logger.debug("Verifying temporary location for downloading the template exists on the host"); @@ -1745,7 +1777,7 @@ public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) return new DirectDownloadAnswer(false, msg, true); } - KVMStoragePool destPool = storagePoolMgr.getStoragePool(pool.getPoolType(), pool.getUuid()); + destPool = storagePoolMgr.getStoragePool(pool.getPoolType(), pool.getUuid()); downloader = getDirectTemplateDownloaderFromCommand(cmd, destPool, temporaryDownloadPath); s_logger.debug("Trying to download template"); Pair result = downloader.downloadTemplate(); @@ -1758,7 +1790,19 @@ public Answer handleDownloadTemplateToPrimaryStorage(DirectDownloadCommand cmd) s_logger.warn("Couldn't validate template checksum"); return new DirectDownloadAnswer(false, "Checksum validation failed", false); } - template = storagePoolMgr.createPhysicalDiskFromDirectDownloadTemplate(tempFilePath, destPool, cmd.isIso()); + + final TemplateObjectTO destTemplate = cmd.getDestData(); + String destTemplatePath = (destTemplate != null) ? 
destTemplate.getPath() : null; + + if (!storagePoolMgr.connectPhysicalDisk(pool.getPoolType(), pool.getUuid(), destTemplatePath, null)) { + s_logger.warn("Unable to connect physical disk at path: " + destTemplatePath + ", in storage pool id: " + pool.getUuid()); + } + + template = storagePoolMgr.createPhysicalDiskFromDirectDownloadTemplate(tempFilePath, destTemplatePath, destPool, cmd.getFormat(), cmd.getWaitInMillSeconds()); + + if (!storagePoolMgr.disconnectPhysicalDisk(pool.getPoolType(), pool.getUuid(), destTemplatePath)) { + s_logger.warn("Unable to disconnect physical disk at path: " + destTemplatePath + ", in storage pool id: " + pool.getUuid()); + } } catch (CloudRuntimeException e) { s_logger.warn("Error downloading template " + cmd.getTemplateId() + " due to: " + e.getMessage()); return new DirectDownloadAnswer(false, "Unable to download template: " + e.getMessage(), true); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java index f9c627b82b45..630b98855149 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java @@ -24,6 +24,10 @@ import java.util.Map; import java.util.UUID; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; import org.apache.commons.codec.binary.Base64; import org.apache.log4j.Logger; import org.libvirt.Connect; @@ -42,12 +46,6 @@ import com.ceph.rbd.RbdImage; import com.ceph.rbd.jna.RbdImageInfo; import com.ceph.rbd.jna.RbdSnapInfo; - -import org.apache.cloudstack.utils.qemu.QemuImg; -import 
org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; -import org.apache.cloudstack.utils.qemu.QemuImgException; -import org.apache.cloudstack.utils.qemu.QemuImgFile; - import com.cloud.exception.InternalErrorException; import com.cloud.hypervisor.kvm.resource.LibvirtConnection; import com.cloud.hypervisor.kvm.resource.LibvirtSecretDef; @@ -160,20 +158,20 @@ private void extractDownloadedTemplate(String downloadedTemplateFile, KVMStorage } @Override - public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, KVMStoragePool destPool, boolean isIso) { + public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { File sourceFile = new File(templateFilePath); if (!sourceFile.exists()) { throw new CloudRuntimeException("Direct download template file " + sourceFile + " does not exist on this host"); } String templateUuid = UUID.randomUUID().toString(); - if (isIso) { + if (Storage.ImageFormat.ISO.equals(format)) { templateUuid += ".iso"; } String destinationFile = destPool.getLocalPath() + File.separator + templateUuid; if (destPool.getType() == StoragePoolType.NetworkFilesystem || destPool.getType() == StoragePoolType.Filesystem || destPool.getType() == StoragePoolType.SharedMountPoint) { - if (!isIso && isTemplateExtractable(templateFilePath)) { + if (!Storage.ImageFormat.ISO.equals(format) && isTemplateExtractable(templateFilePath)) { extractDownloadedTemplate(templateFilePath, destPool, destinationFile); } else { Script.runSimpleBashScript("mv " + templateFilePath + " " + destinationFile); @@ -451,11 +449,13 @@ public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { type = StoragePoolType.CLVM; } else if (spd.getPoolType() == LibvirtStoragePoolDef.PoolType.GLUSTERFS) { type = StoragePoolType.Gluster; + } else if (spd.getPoolType() == LibvirtStoragePoolDef.PoolType.POWERFLEX) { + type = 
StoragePoolType.PowerFlex; } LibvirtStoragePool pool = new LibvirtStoragePool(uuid, storage.getName(), type, this, storage); - if (pool.getType() != StoragePoolType.RBD) + if (pool.getType() != StoragePoolType.RBD && pool.getType() != StoragePoolType.PowerFlex) pool.setLocalPath(spd.getTargetPath()); else pool.setLocalPath(""); @@ -545,7 +545,6 @@ public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) { s_logger.debug("Failed to get physical disk:", e); throw new CloudRuntimeException(e.toString()); } - } @Override @@ -1022,7 +1021,6 @@ public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, } } - return disk; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java index 1b554f7037f7..b2e8decfcb13 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java @@ -45,6 +45,7 @@ public class LibvirtStoragePool implements KVMStoragePool { protected String authSecret; protected String sourceHost; protected int sourcePort; + protected String sourceDir; public LibvirtStoragePool(String uuid, String name, StoragePoolType type, StorageAdaptor adaptor, StoragePool pool) { @@ -56,7 +57,6 @@ public LibvirtStoragePool(String uuid, String name, StoragePoolType type, Storag this.used = 0; this.available = 0; this._pool = pool; - } public void setCapacity(long capacity) { @@ -101,7 +101,7 @@ public String getUuid() { @Override public PhysicalDiskFormat getDefaultFormat() { - if (getStoragePoolType() == StoragePoolType.CLVM || getStoragePoolType() == StoragePoolType.RBD) { + if (getStoragePoolType() == StoragePoolType.CLVM || getStoragePoolType() == StoragePoolType.RBD || getStoragePoolType() == StoragePoolType.PowerFlex) { return 
PhysicalDiskFormat.RAW; } else { return PhysicalDiskFormat.QCOW2; @@ -271,4 +271,12 @@ public boolean delete() { public boolean createFolder(String path) { return this._storageAdaptor.createFolder(this.uuid, path); } + + @Override + public boolean supportsConfigDriveIso() { + if (this.type == StoragePoolType.NetworkFilesystem) { + return true; + } + return false; + } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java index 1ea4f6262263..6db2f82beb48 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ManagedNfsStorageAdaptor.java @@ -35,6 +35,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtStoragePoolDef.PoolType; import com.cloud.hypervisor.kvm.resource.LibvirtStorageVolumeDef; import com.cloud.hypervisor.kvm.resource.LibvirtStorageVolumeXMLParser; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.ProvisioningType; import com.cloud.storage.Storage.StoragePoolType; @@ -319,7 +320,7 @@ public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, S } @Override - public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, KVMStoragePool destPool, boolean isIso) { + public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { return null; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java new file mode 100644 index 000000000000..419fa0cb2d43 --- /dev/null +++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStorageAdaptor.java @@ -0,0 +1,389 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.hypervisor.kvm.storage; + +import java.io.File; +import java.io.FileFilter; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImgException; +import org.apache.cloudstack.utils.qemu.QemuImgFile; +import org.apache.commons.io.filefilter.WildcardFileFilter; +import org.apache.log4j.Logger; + +import com.cloud.storage.Storage; +import com.cloud.storage.StorageLayer; +import com.cloud.storage.StorageManager; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.OutputInterpreter; +import com.cloud.utils.script.Script; +import com.google.common.base.Strings; + +@StorageAdaptorInfo(storagePoolType= Storage.StoragePoolType.PowerFlex) +public class ScaleIOStorageAdaptor implements StorageAdaptor { + private static final Logger LOGGER = Logger.getLogger(ScaleIOStorageAdaptor.class); + private static 
final Map MapStorageUuidToStoragePool = new HashMap<>(); + private static final int DEFAULT_DISK_WAIT_TIME_IN_SECS = 60; + private StorageLayer storageLayer; + + public ScaleIOStorageAdaptor(StorageLayer storagelayer) { + storageLayer = storagelayer; + } + + @Override + public KVMStoragePool getStoragePool(String uuid) { + KVMStoragePool pool = MapStorageUuidToStoragePool.get(uuid); + if (pool == null) { + LOGGER.error("Pool: " + uuid + " not found, probably sdc not connected on agent start"); + throw new CloudRuntimeException("Pool: " + uuid + " not found, reconnect sdc and restart agent if sdc not connected on agent start"); + } + + return pool; + } + + @Override + public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { + return getStoragePool(uuid); + } + + @Override + public KVMPhysicalDisk getPhysicalDisk(String volumeId, KVMStoragePool pool) { + if (Strings.isNullOrEmpty(volumeId) || pool == null) { + LOGGER.error("Unable to get physical disk, unspecified volumeid or pool"); + return null; + } + + try { + String diskFilePath = null; + String systemId = ScaleIOUtil.getSystemIdForVolume(volumeId); + if (!Strings.isNullOrEmpty(systemId) && systemId.length() == ScaleIOUtil.IDENTIFIER_LENGTH) { + // Disk path format: /dev/disk/by-id/emc-vol-- + final String diskFileName = ScaleIOUtil.DISK_NAME_PREFIX + systemId + "-" + volumeId; + diskFilePath = ScaleIOUtil.DISK_PATH + File.separator + diskFileName; + final File diskFile = new File(diskFilePath); + if (!diskFile.exists()) { + LOGGER.debug("Physical disk file: " + diskFilePath + " doesn't exists on the storage pool: " + pool.getUuid()); + return null; + } + } else { + LOGGER.debug("Try with wildcard filter to get the physical disk: " + volumeId + " on the storage pool: " + pool.getUuid()); + final File dir = new File(ScaleIOUtil.DISK_PATH); + final FileFilter fileFilter = new WildcardFileFilter(ScaleIOUtil.DISK_NAME_PREFIX_FILTER + volumeId); + final File[] files = dir.listFiles(fileFilter); + if 
(files != null && files.length == 1) { + diskFilePath = files[0].getAbsolutePath(); + } else { + LOGGER.debug("Unable to find the physical disk: " + volumeId + " on the storage pool: " + pool.getUuid()); + return null; + } + } + + KVMPhysicalDisk disk = new KVMPhysicalDisk(diskFilePath, volumeId, pool); + disk.setFormat(QemuImg.PhysicalDiskFormat.RAW); + + long diskSize = getPhysicalDiskSize(diskFilePath); + disk.setSize(diskSize); + disk.setVirtualSize(diskSize); + + return disk; + } catch (Exception e) { + LOGGER.error("Failed to get the physical disk: " + volumeId + " on the storage pool: " + pool.getUuid() + " due to " + e.getMessage()); + throw new CloudRuntimeException("Failed to get the physical disk: " + volumeId + " on the storage pool: " + pool.getUuid()); + } + } + + @Override + public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, Storage.StoragePoolType type) { + ScaleIOStoragePool storagePool = new ScaleIOStoragePool(uuid, host, port, path, type, this); + MapStorageUuidToStoragePool.put(uuid, storagePool); + return storagePool; + } + + @Override + public boolean deleteStoragePool(String uuid) { + return MapStorageUuidToStoragePool.remove(uuid) != null; + } + + @Override + public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) { + return null; + } + + @Override + public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map details) { + if (Strings.isNullOrEmpty(volumePath) || pool == null) { + LOGGER.error("Unable to connect physical disk due to insufficient data"); + throw new CloudRuntimeException("Unable to connect physical disk due to insufficient data"); + } + + int waitTimeInSec = DEFAULT_DISK_WAIT_TIME_IN_SECS; + if (details != null && details.containsKey(StorageManager.STORAGE_POOL_DISK_WAIT.toString())) { + String waitTime = 
details.get(StorageManager.STORAGE_POOL_DISK_WAIT.toString()); + if (!Strings.isNullOrEmpty(waitTime)) { + waitTimeInSec = Integer.valueOf(waitTime).intValue(); + } + } + return waitForDiskToBecomeAvailable(volumePath, pool, waitTimeInSec); + } + + private boolean waitForDiskToBecomeAvailable(String volumePath, KVMStoragePool pool, int waitTimeInSec) { + LOGGER.debug("Waiting for the volume with id: " + volumePath + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " secs"); + int timeBetweenTries = 1000; // Try more frequently (every sec) and return early if disk is found + KVMPhysicalDisk physicalDisk = null; + + // Rescan before checking for the physical disk + ScaleIOUtil.rescanForNewVolumes(); + + while (waitTimeInSec > 0) { + physicalDisk = getPhysicalDisk(volumePath, pool); + if (physicalDisk != null && physicalDisk.getSize() > 0) { + LOGGER.debug("Found the volume with id: " + volumePath + " of the storage pool: " + pool.getUuid()); + return true; + } + + waitTimeInSec--; + + try { + Thread.sleep(timeBetweenTries); + } catch (Exception ex) { + // don't do anything + } + } + + physicalDisk = getPhysicalDisk(volumePath, pool); + if (physicalDisk != null && physicalDisk.getSize() > 0) { + LOGGER.debug("Found the volume using id: " + volumePath + " of the storage pool: " + pool.getUuid()); + return true; + } + + LOGGER.debug("Unable to find the volume with id: " + volumePath + " of the storage pool: " + pool.getUuid()); + return false; + } + + private long getPhysicalDiskSize(String diskPath) { + if (Strings.isNullOrEmpty(diskPath)) { + return 0; + } + + Script diskCmd = new Script("blockdev", LOGGER); + diskCmd.add("--getsize64", diskPath); + + OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); + String result = diskCmd.execute(parser); + + if (result != null) { + LOGGER.warn("Unable to get the disk size at path: " + diskPath); + return 0; + } else { + LOGGER.info("Able to retrieve the disk 
size at path:" + diskPath); + } + + return Long.parseLong(parser.getLine()); + } + + @Override + public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool) { + return true; + } + + @Override + public boolean disconnectPhysicalDisk(Map volumeToDisconnect) { + return true; + } + + @Override + public boolean disconnectPhysicalDiskByPath(String localPath) { + return true; + } + + @Override + public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format) { + return true; + } + + @Override + public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, KVMStoragePool destPool, int timeout) { + return null; + } + + @Override + public KVMPhysicalDisk createTemplateFromDisk(KVMPhysicalDisk disk, String name, QemuImg.PhysicalDiskFormat format, long size, KVMStoragePool destPool) { + return null; + } + + @Override + public List listPhysicalDisks(String storagePoolUuid, KVMStoragePool pool) { + return null; + } + + @Override + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) { + if (Strings.isNullOrEmpty(name) || disk == null || destPool == null) { + LOGGER.error("Unable to copy physical disk due to insufficient data"); + throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data"); + } + + LOGGER.debug("Copy physical disk with size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat()); + + KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(name); + if (destDisk == null) { + LOGGER.error("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid()); + throw new CloudRuntimeException("Failed to find the disk: " + name + " of the storage pool: " + destPool.getUuid()); + } + + destDisk.setFormat(QemuImg.PhysicalDiskFormat.RAW); + 
destDisk.setSize(disk.getVirtualSize()); + destDisk.setVirtualSize(disk.getSize()); + + QemuImg qemu = new QemuImg(timeout); + QemuImgFile srcFile = null; + QemuImgFile destFile = null; + + try { + srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); + destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); + + LOGGER.debug("Starting copy from source image " + srcFile.getFileName() + " to PowerFlex volume: " + destDisk.getPath()); + qemu.convert(srcFile, destFile); + LOGGER.debug("Succesfully converted source image " + srcFile.getFileName() + " to PowerFlex volume: " + destDisk.getPath()); + } catch (QemuImgException e) { + LOGGER.error("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + e.getMessage()); + destDisk = null; + } + + return destDisk; + } + + @Override + public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool, int timeout) { + return null; + } + + @Override + public boolean refresh(KVMStoragePool pool) { + return true; + } + + @Override + public boolean deleteStoragePool(KVMStoragePool pool) { + return deleteStoragePool(pool.getUuid()); + } + + @Override + public boolean createFolder(String uuid, String path) { + return true; + } + + @Override + public KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, String name, QemuImg.PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout) { + return null; + } + + @Override + public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { + if (Strings.isNullOrEmpty(templateFilePath) || Strings.isNullOrEmpty(destTemplatePath) || destPool == null) { + LOGGER.error("Unable to create template from direct download template file due to insufficient data"); + throw new CloudRuntimeException("Unable to create template 
from direct download template file due to insufficient data"); + } + + LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString()); + + File sourceFile = new File(templateFilePath); + if (!sourceFile.exists()) { + throw new CloudRuntimeException("Direct download template file " + templateFilePath + " does not exist on this host"); + } + + if (destTemplatePath == null || destTemplatePath.isEmpty()) { + LOGGER.error("Failed to create template, target template disk path not provided"); + throw new CloudRuntimeException("Target template disk path not provided"); + } + + if (destPool.getType() != Storage.StoragePoolType.PowerFlex) { + throw new CloudRuntimeException("Unsupported storage pool type: " + destPool.getType().toString()); + } + + if (!Storage.ImageFormat.RAW.equals(format) && !Storage.ImageFormat.QCOW2.equals(format)) { + LOGGER.error("Failed to create template, unsupported template format: " + format.toString()); + throw new CloudRuntimeException("Unsupported template format: " + format.toString()); + } + + String srcTemplateFilePath = templateFilePath; + KVMPhysicalDisk destDisk = null; + QemuImgFile srcFile = null; + QemuImgFile destFile = null; + try { + destDisk = destPool.getPhysicalDisk(destTemplatePath); + if (destDisk == null) { + LOGGER.error("Failed to find the disk: " + destTemplatePath + " of the storage pool: " + destPool.getUuid()); + throw new CloudRuntimeException("Failed to find the disk: " + destTemplatePath + " of the storage pool: " + destPool.getUuid()); + } + + if (isTemplateExtractable(templateFilePath)) { + srcTemplateFilePath = sourceFile.getParent() + "/" + UUID.randomUUID().toString(); + LOGGER.debug("Extract the downloaded template " + templateFilePath + " to " + srcTemplateFilePath); + String extractCommand = getExtractCommandForDownloadedFile(templateFilePath, srcTemplateFilePath); + 
Script.runSimpleBashScript(extractCommand); + Script.runSimpleBashScript("rm -f " + templateFilePath); + } + + QemuImg.PhysicalDiskFormat srcFileFormat = QemuImg.PhysicalDiskFormat.RAW; + if (format == Storage.ImageFormat.RAW) { + srcFileFormat = QemuImg.PhysicalDiskFormat.RAW; + } else if (format == Storage.ImageFormat.QCOW2) { + srcFileFormat = QemuImg.PhysicalDiskFormat.QCOW2; + } + + srcFile = new QemuImgFile(srcTemplateFilePath, srcFileFormat); + destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); + + LOGGER.debug("Starting copy from source downloaded template " + srcFile.getFileName() + " to PowerFlex template volume: " + destDisk.getPath()); + QemuImg qemu = new QemuImg(timeout); + qemu.convert(srcFile, destFile); + LOGGER.debug("Succesfully converted source downloaded template " + srcFile.getFileName() + " to PowerFlex template volume: " + destDisk.getPath()); + } catch (QemuImgException e) { + LOGGER.error("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + e.getMessage()); + destDisk = null; + } finally { + Script.runSimpleBashScript("rm -f " + srcTemplateFilePath); + } + + return destDisk; + } + + private boolean isTemplateExtractable(String templatePath) { + String type = Script.runSimpleBashScript("file " + templatePath + " | awk -F' ' '{print $2}'"); + return type.equalsIgnoreCase("bzip2") || type.equalsIgnoreCase("gzip") || type.equalsIgnoreCase("zip"); + } + + private String getExtractCommandForDownloadedFile(String downloadedTemplateFile, String templateFile) { + if (downloadedTemplateFile.endsWith(".zip")) { + return "unzip -p " + downloadedTemplateFile + " | cat > " + templateFile; + } else if (downloadedTemplateFile.endsWith(".bz2")) { + return "bunzip2 -c " + downloadedTemplateFile + " > " + templateFile; + } else if (downloadedTemplateFile.endsWith(".gz")) { + return "gunzip -c " + downloadedTemplateFile + " > " + templateFile; + } else { + throw new 
CloudRuntimeException("Unable to extract template " + downloadedTemplateFile); + } + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePool.java new file mode 100644 index 000000000000..4ead92d6a0dd --- /dev/null +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePool.java @@ -0,0 +1,181 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.hypervisor.kvm.storage; + +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.utils.qemu.QemuImg; + +import com.cloud.storage.Storage; + +public class ScaleIOStoragePool implements KVMStoragePool { + private String uuid; + private String sourceHost; + private int sourcePort; + private String sourceDir; + private Storage.StoragePoolType storagePoolType; + private StorageAdaptor storageAdaptor; + private long capacity; + private long used; + private long available; + + public ScaleIOStoragePool(String uuid, String host, int port, String path, Storage.StoragePoolType poolType, StorageAdaptor adaptor) { + this.uuid = uuid; + sourceHost = host; + sourcePort = port; + sourceDir = path; + storagePoolType = poolType; + storageAdaptor = adaptor; + capacity = 0; + used = 0; + available = 0; + } + + @Override + public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, QemuImg.PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size) { + return null; + } + + @Override + public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, Storage.ProvisioningType provisioningType, long size) { + return null; + } + + @Override + public boolean connectPhysicalDisk(String volumeUuid, Map details) { + return storageAdaptor.connectPhysicalDisk(volumeUuid, this, details); + } + + @Override + public KVMPhysicalDisk getPhysicalDisk(String volumeId) { + return storageAdaptor.getPhysicalDisk(volumeId, this); + } + + @Override + public boolean disconnectPhysicalDisk(String volumeUuid) { + return storageAdaptor.disconnectPhysicalDisk(volumeUuid, this); + } + + @Override + public boolean deletePhysicalDisk(String volumeUuid, Storage.ImageFormat format) { + return true; + } + + @Override + public List listPhysicalDisks() { + return null; + } + + @Override + public String getUuid() { + return uuid; + } + + public void setCapacity(long capacity) { + this.capacity = capacity; + } + + @Override + public long getCapacity() { + 
return this.capacity; + } + + public void setUsed(long used) { + this.used = used; + } + + @Override + public long getUsed() { + return this.used; + } + + public void setAvailable(long available) { + this.available = available; + } + + @Override + public long getAvailable() { + return this.available; + } + + @Override + public boolean refresh() { + return false; + } + + @Override + public boolean isExternalSnapshot() { + return true; + } + + @Override + public String getLocalPath() { + return null; + } + + @Override + public String getSourceHost() { + return this.sourceHost; + } + + @Override + public String getSourceDir() { + return this.sourceDir; + } + + @Override + public int getSourcePort() { + return this.sourcePort; + } + + @Override + public String getAuthUserName() { + return null; + } + + @Override + public String getAuthSecret() { + return null; + } + + @Override + public Storage.StoragePoolType getType() { + return storagePoolType; + } + + @Override + public boolean delete() { + return false; + } + + @Override + public QemuImg.PhysicalDiskFormat getDefaultFormat() { + return QemuImg.PhysicalDiskFormat.RAW; + } + + @Override + public boolean createFolder(String path) { + return false; + } + + @Override + public boolean supportsConfigDriveIso() { + return false; + } +} diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java index 99f2876915c0..570c2070c75f 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java @@ -86,7 +86,8 @@ KVMPhysicalDisk createDiskFromTemplateBacking(KVMPhysicalDisk template, * Create physical disk on Primary Storage from direct download template on the host (in temporary location) * @param templateFilePath * @param destPool - * @param isIso + * 
@param format + * @param timeout */ - KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, KVMStoragePool destPool, boolean isIso); + KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout); } diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePoolTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePoolTest.java new file mode 100644 index 000000000000..cb9ffaee531f --- /dev/null +++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/ScaleIOStoragePoolTest.java @@ -0,0 +1,152 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.hypervisor.kvm.storage; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +import java.io.File; +import java.io.FileFilter; + +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.cloudstack.utils.qemu.QemuImg; +import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StorageLayer; + +@PrepareForTest(ScaleIOUtil.class) +@RunWith(PowerMockRunner.class) +public class ScaleIOStoragePoolTest { + + ScaleIOStoragePool pool; + + StorageAdaptor adapter; + + @Mock + StorageLayer storageLayer; + + @Before + public void setUp() throws Exception { + final String uuid = "345fc603-2d7e-47d2-b719-a0110b3732e6"; + final StoragePoolType type = StoragePoolType.PowerFlex; + + adapter = spy(new ScaleIOStorageAdaptor(storageLayer)); + pool = new ScaleIOStoragePool(uuid, "192.168.1.19", 443, "a519be2f00000000", type, adapter); + } + + @After + public void tearDown() throws Exception { + } + + @Test + public void testAttributes() { + assertEquals(pool.getCapacity(), 0); + assertEquals(pool.getUsed(), 0); + assertEquals(pool.getAvailable(), 0); + assertEquals(pool.getUuid(), "345fc603-2d7e-47d2-b719-a0110b3732e6"); + assertEquals(pool.getSourceHost(), "192.168.1.19"); + assertEquals(pool.getSourcePort(), 443); + assertEquals(pool.getSourceDir(), "a519be2f00000000"); + assertEquals(pool.getType(), 
StoragePoolType.PowerFlex); + + pool.setCapacity(131072); + pool.setUsed(24576); + pool.setAvailable(106496); + + assertEquals(pool.getCapacity(), 131072); + assertEquals(pool.getUsed(), 24576); + assertEquals(pool.getAvailable(), 106496); + } + + @Test + public void testDefaults() { + assertEquals(pool.getDefaultFormat(), PhysicalDiskFormat.RAW); + assertEquals(pool.getType(), StoragePoolType.PowerFlex); + + assertNull(pool.getAuthUserName()); + assertNull(pool.getAuthSecret()); + + Assert.assertFalse(pool.supportsConfigDriveIso()); + assertTrue(pool.isExternalSnapshot()); + } + + public void testGetPhysicalDiskWithWildcardFileFilter() throws Exception { + final String volumePath = "6c3362b500000001"; + final String systemId = "218ce1797566a00f"; + + File dir = PowerMockito.mock(File.class); + PowerMockito.whenNew(File.class).withAnyArguments().thenReturn(dir); + + // TODO: Mock file in dir + File[] files = new File[1]; + String diskFilePath = ScaleIOUtil.DISK_PATH + File.separator + ScaleIOUtil.DISK_NAME_PREFIX + systemId + "-" + volumePath; + files[0] = new File(diskFilePath); + PowerMockito.when(dir.listFiles(any(FileFilter.class))).thenReturn(files); + + KVMPhysicalDisk disk = adapter.getPhysicalDisk(volumePath, pool); + assertNull(disk); + } + + @Test + public void testGetPhysicalDiskWithSystemId() throws Exception { + final String volumePath = "6c3362b500000001"; + final String systemId = "218ce1797566a00f"; + PowerMockito.mockStatic(ScaleIOUtil.class); + when(ScaleIOUtil.getSystemIdForVolume(volumePath)).thenReturn(systemId); + + // TODO: Mock file exists + File file = PowerMockito.mock(File.class); + PowerMockito.whenNew(File.class).withAnyArguments().thenReturn(file); + PowerMockito.when(file.exists()).thenReturn(true); + + KVMPhysicalDisk disk = adapter.getPhysicalDisk(volumePath, pool); + assertNull(disk); + } + + @Test + public void testConnectPhysicalDisk() { + final String volumePath = "6c3362b500000001"; + final String systemId = "218ce1797566a00f"; 
+ final String diskFilePath = ScaleIOUtil.DISK_PATH + File.separator + ScaleIOUtil.DISK_NAME_PREFIX + systemId + "-" + volumePath; + KVMPhysicalDisk disk = new KVMPhysicalDisk(diskFilePath, volumePath, pool); + disk.setFormat(QemuImg.PhysicalDiskFormat.RAW); + disk.setSize(8192); + disk.setVirtualSize(8192); + + assertEquals(disk.getPath(), "/dev/disk/by-id/emc-vol-218ce1797566a00f-6c3362b500000001"); + + when(adapter.getPhysicalDisk(volumePath, pool)).thenReturn(disk); + + final boolean result = adapter.connectPhysicalDisk(volumePath, pool, null); + assertTrue(result); + } +} \ No newline at end of file diff --git a/plugins/pom.xml b/plugins/pom.xml index 051cd4e480f2..e4cf358475a9 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -121,6 +121,7 @@ storage/volume/nexenta storage/volume/sample storage/volume/solidfire + storage/volume/scaleio storage-allocators/random diff --git a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java index 89e8c4fc1e41..f9e614692338 100644 --- a/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/cloudbyte/src/main/java/org/apache/cloudstack/storage/datastore/driver/ElastistorPrimaryDataStoreDriver.java @@ -48,6 +48,7 @@ import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; +import com.cloud.host.Host; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.ResizeVolumePayload; import com.cloud.storage.Storage.StoragePoolType; @@ -59,6 +60,7 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDetailsDao; import com.cloud.user.AccountManager; +import com.cloud.utils.Pair; import 
com.cloud.utils.exception.CloudRuntimeException; /** @@ -259,7 +261,11 @@ public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncComplet @Override public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback) { throw new UnsupportedOperationException(); + } + @Override + public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { + throw new UnsupportedOperationException(); } @Override @@ -409,4 +415,28 @@ public Map getCapabilities() { return mapCapabilities; } + @Override + public boolean canProvideStorageStats() { + return false; + } + + @Override + public Pair getStorageStats(StoragePool storagePool) { + return null; + } + + @Override + public boolean canProvideVolumeStats() { + return false; + } + + @Override + public Pair getVolumeStats(StoragePool storagePool, String volumeId) { + return null; + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + return true; + } } diff --git a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java index 497960d1c232..ddf782a81587 100644 --- a/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/datera/src/main/java/org/apache/cloudstack/storage/datastore/driver/DateraPrimaryDataStoreDriver.java @@ -17,6 +17,37 @@ package org.apache.cloudstack.storage.datastore.driver; +import java.io.UnsupportedEncodingException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; +import 
org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.command.CreateObjectAnswer; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.util.DateraObject; +import org.apache.cloudstack.storage.datastore.util.DateraUtil; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.log4j.Logger; + import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; @@ -44,40 +75,12 @@ import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.utils.Pair; import com.cloud.utils.StringUtils; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; import com.google.common.base.Preconditions; import com.google.common.primitives.Ints; -import 
org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.storage.command.CommandResult; -import org.apache.cloudstack.storage.command.CreateObjectAnswer; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.datastore.util.DateraObject; -import org.apache.cloudstack.storage.datastore.util.DateraUtil; -import org.apache.cloudstack.storage.to.SnapshotObjectTO; -import org.apache.log4j.Logger; - -import javax.inject.Inject; -import java.io.UnsupportedEncodingException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; import static com.cloud.utils.NumbersUtil.toHumanReadableSize; @@ -1254,6 +1257,12 @@ public void copyAsync(DataObject srcData, DataObject destData, throw new UnsupportedOperationException(); } + @Override + public void copyAsync(DataObject srcData, DataObject destData, + Host destHost, 
AsyncCompletionCallback callback) { + throw new UnsupportedOperationException(); + } + @Override public boolean canCopy(DataObject srcData, DataObject destData) { return false; @@ -1825,6 +1834,30 @@ private long getVolumeSizeIncludingHypervisorSnapshotReserve(long volumeSize, In @Override public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) { + } + + @Override + public boolean canProvideStorageStats() { + return false; + } + + @Override + public Pair getStorageStats(StoragePool storagePool) { + return null; + } + @Override + public boolean canProvideVolumeStats() { + return false; + } + + @Override + public Pair getVolumeStats(StoragePool storagePool, String volumeId) { + return null; + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + return true; } } diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java index 6ce874107b32..3cbcc8541ad7 100644 --- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java @@ -76,6 +76,7 @@ import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.template.TemplateManager; +import com.cloud.utils.Pair; import com.cloud.vm.dao.VMInstanceDao; import static com.cloud.utils.NumbersUtil.toHumanReadableSize; @@ -277,6 +278,11 @@ public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCa } } + @Override + public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { + 
copyAsync(srcData, destData, callback); + } + @Override public boolean canCopy(DataObject srcData, DataObject destData) { //BUG fix for CLOUDSTACK-4618 @@ -389,4 +395,29 @@ public void resize(DataObject data, AsyncCompletionCallback cal @Override public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) {} + + @Override + public boolean canProvideStorageStats() { + return false; + } + + @Override + public Pair getStorageStats(StoragePool storagePool) { + return null; + } + + @Override + public boolean canProvideVolumeStats() { + return false; + } + + @Override + public Pair getVolumeStats(StoragePool storagePool, String volumeId) { + return null; + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + return true; + } } diff --git a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java index d59fce4b68c7..92f8938060be 100644 --- a/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java +++ b/plugins/storage/volume/nexenta/src/main/java/org/apache/cloudstack/storage/datastore/driver/NexentaPrimaryDataStoreDriver.java @@ -53,6 +53,7 @@ import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.dao.AccountDao; +import com.cloud.utils.Pair; public class NexentaPrimaryDataStoreDriver implements PrimaryDataStoreDriver { private static final Logger logger = Logger.getLogger(NexentaPrimaryDataStoreDriver.class); @@ -199,6 +200,10 @@ public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallbac @Override public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback) {} + @Override + public void copyAsync(DataObject 
srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { + } + @Override public boolean canCopy(DataObject srcData, DataObject destData) { return false; @@ -209,4 +214,29 @@ public void resize(DataObject data, AsyncCompletionCallback cal @Override public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) {} + + @Override + public boolean canProvideStorageStats() { + return false; + } + + @Override + public boolean canProvideVolumeStats() { + return false; + } + + @Override + public Pair getVolumeStats(StoragePool storagePool, String volumeId) { + return null; + } + + @Override + public Pair getStorageStats(StoragePool storagePool) { + return null; + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + return true; + } } diff --git a/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java b/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java index fc0186f15381..a41627723437 100644 --- a/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/sample/src/main/java/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java @@ -46,6 +46,7 @@ import com.cloud.host.Host; import com.cloud.storage.StoragePool; import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.Pair; import com.cloud.utils.exception.CloudRuntimeException; public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { @@ -224,6 +225,10 @@ public boolean canCopy(DataObject srcData, DataObject destData) { public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback) { } + @Override + public void copyAsync(DataObject srcData, DataObject 
destData, Host destHost, AsyncCompletionCallback callback) { + } + @Override public void resize(DataObject data, AsyncCompletionCallback callback) { } @@ -236,4 +241,28 @@ public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, Qual public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) { } + @Override + public boolean canProvideStorageStats() { + return false; + } + + @Override + public boolean canProvideVolumeStats() { + return false; + } + + @Override + public Pair getVolumeStats(StoragePool storagePool, String volumeId) { + return null; + } + + @Override + public Pair getStorageStats(StoragePool storagePool) { + return null; + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + return true; + } } diff --git a/plugins/storage/volume/scaleio/pom.xml b/plugins/storage/volume/scaleio/pom.xml new file mode 100644 index 000000000000..859b2868235a --- /dev/null +++ b/plugins/storage/volume/scaleio/pom.xml @@ -0,0 +1,55 @@ + + + 4.0.0 + cloud-plugin-storage-volume-scaleio + Apache CloudStack Plugin - Storage Volume Dell-EMC ScaleIO/PowerFlex Provider + + org.apache.cloudstack + cloudstack-plugins + 4.15.0.0-SNAPSHOT + ../../../pom.xml + + + + org.apache.cloudstack + cloud-engine-storage-volume + ${project.version} + + + + + + maven-surefire-plugin + + true + + + + integration-test + + test + + + + + + + diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/ProtectionDomain.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/ProtectionDomain.java new file mode 100644 index 000000000000..5d260e0fd0c6 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/ProtectionDomain.java @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
/**
 * Marshalling DTO for a PowerFlex/ScaleIO protection domain as returned by
 * the gateway REST API.  Field names appear to mirror the JSON attribute
 * names, so they should not be renamed (NOTE(review): confirm against the
 * response mapper).
 */
public class ProtectionDomain {
    String id;
    String name;
    String protectionDomainState;
    String systemId;

    public String getId() {
        return this.id;
    }

    public void setId(String value) {
        this.id = value;
    }

    public String getName() {
        return this.name;
    }

    public void setName(String value) {
        this.name = value;
    }

    public String getProtectionDomainState() {
        return this.protectionDomainState;
    }

    public void setProtectionDomainState(String value) {
        this.protectionDomainState = value;
    }

    public String getSystemId() {
        return this.systemId;
    }

    public void setSystemId(String value) {
        this.systemId = value;
    }
}
/**
 * Marshalling DTO describing a ScaleIO Data Client (SDC) — a host consuming
 * PowerFlex volumes — as reported by the gateway REST API.  Field names
 * appear to mirror the JSON attribute names, so they should not be renamed
 * (NOTE(review): confirm against the response mapper).
 */
public class Sdc {
    // Identity.
    String id;
    String name;
    String sdcGuid;
    String systemId;

    // Connection / approval state as reported by the MDM.
    String mdmConnectionState;
    Boolean sdcApproved;
    String perfProfile;

    // Network addresses of the SDC host.
    String sdcIp;
    String[] sdcIps;

    // Host software details.
    String osType;
    String kernelVersion;
    String softwareVersionInfo;
    String versionInfo;

    public String getId() {
        return this.id;
    }

    public void setId(String value) {
        this.id = value;
    }

    public String getName() {
        return this.name;
    }

    public void setName(String value) {
        this.name = value;
    }

    public String getMdmConnectionState() {
        return this.mdmConnectionState;
    }

    public void setMdmConnectionState(String value) {
        this.mdmConnectionState = value;
    }

    public Boolean getSdcApproved() {
        return this.sdcApproved;
    }

    public void setSdcApproved(Boolean value) {
        this.sdcApproved = value;
    }

    public String getPerfProfile() {
        return this.perfProfile;
    }

    public void setPerfProfile(String value) {
        this.perfProfile = value;
    }

    public String getSdcGuid() {
        return this.sdcGuid;
    }

    public void setSdcGuid(String value) {
        this.sdcGuid = value;
    }

    public String getSdcIp() {
        return this.sdcIp;
    }

    public void setSdcIp(String value) {
        this.sdcIp = value;
    }

    public String[] getSdcIps() {
        return this.sdcIps;
    }

    public void setSdcIps(String[] value) {
        this.sdcIps = value;
    }

    public String getSystemId() {
        return this.systemId;
    }

    public void setSystemId(String value) {
        this.systemId = value;
    }

    public String getOsType() {
        return this.osType;
    }

    public void setOsType(String value) {
        this.osType = value;
    }

    public String getKernelVersion() {
        return this.kernelVersion;
    }

    public void setKernelVersion(String value) {
        this.kernelVersion = value;
    }

    public String getSoftwareVersionInfo() {
        return this.softwareVersionInfo;
    }

    public void setSoftwareVersionInfo(String value) {
        this.softwareVersionInfo = value;
    }

    public String getVersionInfo() {
        return this.versionInfo;
    }

    public void setVersionInfo(String value) {
        this.versionInfo = value;
    }
}
/**
 * Marshalling DTO for a single SDC-to-volume mapping entry (SDC id and IP)
 * as exchanged with the PowerFlex gateway REST API.
 */
public class SdcMappingInfo {
    String sdcId;
    String sdcIp;

    public String getSdcId() {
        return this.sdcId;
    }

    public void setSdcId(String value) {
        this.sdcId = value;
    }

    public String getSdcIp() {
        return this.sdcIp;
    }

    public void setSdcIp(String value) {
        this.sdcIp = value;
    }
}
/**
 * Request payload describing one snapshot to take through the PowerFlex
 * Gateway "snapshotVolumes" API: the source volume and the name to give
 * the resulting snapshot volume.
 */
public class SnapshotDef {
    String volumeId;             // id of the source volume to snapshot
    String snapshotName;         // desired name for the snapshot volume
    String allowOnExtManagedVol; // gateway flag, serialized as a string

    /** @return the id of the source volume, or {@code null} if unset */
    public String getVolumeId() {
        return volumeId;
    }

    /** @param value id of the volume to snapshot */
    public void setVolumeId(String value) {
        this.volumeId = value;
    }

    /** @return the requested snapshot name, or {@code null} if unset */
    public String getSnapshotName() {
        return snapshotName;
    }

    /** @param value name to assign to the snapshot volume */
    public void setSnapshotName(String value) {
        this.snapshotName = value;
    }

    /** @return the gateway "allow on externally managed volume" flag, as a string */
    public String getAllowOnExtManagedVol() {
        return allowOnExtManagedVol;
    }

    /** @param value flag value, serialized as a string for the gateway */
    public void setAllowOnExtManagedVol(String value) {
        this.allowOnExtManagedVol = value;
    }
}
See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.datastore.api; + +public class SnapshotDefs { + SnapshotDef[] snapshotDefs; + + public SnapshotDef[] getSnapshotDefs() { + return snapshotDefs; + } + + public void setSnapshotDefs(SnapshotDef[] snapshotDefs) { + this.snapshotDefs = snapshotDefs; + } +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/SnapshotGroup.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/SnapshotGroup.java new file mode 100644 index 000000000000..bef2cee8fd4a --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/SnapshotGroup.java @@ -0,0 +1,46 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
import java.util.Arrays;
import java.util.List;

/**
 * Result of a PowerFlex consistency-group snapshot operation: the id of the
 * created snapshot group plus the ids of the snapshot volumes it contains.
 */
public class SnapshotGroup {
    String snapshotGroupId;
    String[] volumeIdList;

    /** @return the gateway-assigned snapshot group id, or {@code null} if unset */
    public String getSnapshotGroupId() {
        return snapshotGroupId;
    }

    /** @param snapshotGroupId the snapshot group id */
    public void setSnapshotGroupId(String snapshotGroupId) {
        this.snapshotGroupId = snapshotGroupId;
    }

    /**
     * Snapshot volume ids as a fixed-size list view.
     *
     * @return the volume ids; an empty list when none were set (the previous
     *         implementation threw a NullPointerException in that case)
     */
    public List<String> getVolumeIds() {
        if (volumeIdList == null) {
            return Arrays.asList(new String[0]);
        }
        return Arrays.asList(volumeIdList);
    }

    /** @return the raw volume id array, or {@code null} if unset */
    public String[] getVolumeIdList() {
        return volumeIdList;
    }

    /** @param volumeIdList ids of the snapshot volumes in this group */
    public void setVolumeIdList(String[] volumeIdList) {
        this.volumeIdList = volumeIdList;
    }
}
+ +package org.apache.cloudstack.storage.datastore.api; + +public class StoragePool { + String id; + String name; + String mediaType; + String protectionDomainId; + String systemId; + StoragePoolStatistics statistics; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getMediaType() { + return mediaType; + } + + public void setMediaType(String mediaType) { + this.mediaType = mediaType; + } + + public String getProtectionDomainId() { + return protectionDomainId; + } + + public void setProtectionDomainId(String protectionDomainId) { + this.protectionDomainId = protectionDomainId; + } + + public String getSystemId() { + return systemId; + } + + public void setSystemId(String systemId) { + this.systemId = systemId; + } + + public StoragePoolStatistics getStatistics() { + return statistics; + } + + public void setStatistics(StoragePoolStatistics statistics) { + this.statistics = statistics; + } +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/StoragePoolStatistics.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/StoragePoolStatistics.java new file mode 100644 index 000000000000..599aa5c3ae9c --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/StoragePoolStatistics.java @@ -0,0 +1,85 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
/**
 * Capacity statistics for a PowerFlex storage pool, as returned by the
 * Gateway REST API. The gateway reports raw figures as strings in KB;
 * missing or blank values are treated as zero.
 */
public class StoragePoolStatistics {
    String maxCapacityInKb;       // total capacity
    String spareCapacityInKb;     // spare capacity, space not used for volumes creation/allocation
    String netCapacityInUseInKb;  // user data capacity in use
    String netUnusedCapacityInKb; // capacity available for volume creation (volume space to write)

    // Parses a KB figure sent by the gateway; null/blank counts as zero.
    // Equivalent to the former guava Strings.isNullOrEmpty() guard.
    private static Long toKb(String raw) {
        return (raw == null || raw.isEmpty()) ? Long.valueOf(0) : Long.valueOf(raw);
    }

    /** @return total pool capacity in KB; zero when the gateway sent nothing */
    public Long getMaxCapacityInKb() {
        return toKb(maxCapacityInKb);
    }

    public void setMaxCapacityInKb(String maxCapacityInKb) {
        this.maxCapacityInKb = maxCapacityInKb;
    }

    /** @return spare capacity in KB; zero when the gateway sent nothing */
    public Long getSpareCapacityInKb() {
        return toKb(spareCapacityInKb);
    }

    public void setSpareCapacityInKb(String spareCapacityInKb) {
        this.spareCapacityInKb = spareCapacityInKb;
    }

    /** @return user data capacity in use, in KB; zero when unset */
    public Long getNetCapacityInUseInKb() {
        return toKb(netCapacityInUseInKb);
    }

    public void setNetCapacityInUseInKb(String netCapacityInUseInKb) {
        this.netCapacityInUseInKb = netCapacityInUseInKb;
    }

    /** @return capacity still available for volume writes, in KB; zero when unset */
    public Long getNetUnusedCapacityInKb() {
        return toKb(netUnusedCapacityInKb);
    }

    /** @return {@link #getNetUnusedCapacityInKb()} converted to bytes */
    public Long getNetUnusedCapacityInBytes() {
        return getNetUnusedCapacityInKb() * 1024;
    }

    public void setNetUnusedCapacityInKb(String netUnusedCapacityInKb) {
        this.netUnusedCapacityInKb = netUnusedCapacityInKb;
    }

    /** @return net usable pool capacity in bytes (mirrored pools store two copies) */
    public Long getNetMaxCapacityInBytes() {
        // total usable capacity = ("maxCapacityInKb" - "spareCapacityInKb") / 2
        final long netMaxCapacityInKb = getMaxCapacityInKb() - getSpareCapacityInKb();
        return (netMaxCapacityInKb / 2) * 1024;
    }

    /** @return net used capacity in bytes: usable capacity minus unused capacity */
    public Long getNetUsedCapacityInBytes() {
        return getNetMaxCapacityInBytes() - getNetUnusedCapacityInBytes();
    }
}
+ +package org.apache.cloudstack.storage.datastore.api; + +import java.util.Arrays; +import java.util.List; + +public class Volume { + public enum VolumeType { + ThickProvisioned, + ThinProvisioned, + Snapshot + } + String id; + String name; + String ancestorVolumeId; + String consistencyGroupId; + Long creationTime; + Long sizeInKb; + String sizeInGB; + String storagePoolId; + VolumeType volumeType; + String volumeSizeInGb; + String vtreeId; + SdcMappingInfo[] mappedSdcInfo; + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getAncestorVolumeId() { + return ancestorVolumeId; + } + + public void setAncestorVolumeId(String ancestorVolumeId) { + this.ancestorVolumeId = ancestorVolumeId; + } + + public String getConsistencyGroupId() { + return consistencyGroupId; + } + + public void setConsistencyGroupId(String consistencyGroupId) { + this.consistencyGroupId = consistencyGroupId; + } + + public Long getCreationTime() { + return creationTime; + } + + public void setCreationTime(Long creationTime) { + this.creationTime = creationTime; + } + + public Long getSizeInKb() { + return sizeInKb; + } + + public void setSizeInKb(Long sizeInKb) { + this.sizeInKb = sizeInKb; + } + + public String getSizeInGB() { + return sizeInGB; + } + + public void setSizeInGB(Integer sizeInGB) { + this.sizeInGB = sizeInGB.toString(); + } + + public void setVolumeSizeInGb(String volumeSizeInGb) { + this.volumeSizeInGb = volumeSizeInGb; + } + + public String getStoragePoolId() { + return storagePoolId; + } + + public void setStoragePoolId(String storagePoolId) { + this.storagePoolId = storagePoolId; + } + + public String getVolumeSizeInGb() { + return volumeSizeInGb; + } + + public void setVolumeSizeInGb(Integer volumeSizeInGb) { + this.volumeSizeInGb = volumeSizeInGb.toString(); + } + + public VolumeType getVolumeType() 
{ + return volumeType; + } + + public void setVolumeType(String volumeType) { + this.volumeType = Enum.valueOf(VolumeType.class, volumeType); + } + + public void setVolumeType(VolumeType volumeType) { + this.volumeType = volumeType; + } + + public String getVtreeId() { + return vtreeId; + } + + public void setVtreeId(String vtreeId) { + this.vtreeId = vtreeId; + } + + public List getMappedSdcList() { + if (mappedSdcInfo != null) { + return Arrays.asList(mappedSdcInfo); + } + return null; + } + + public SdcMappingInfo[] getMappedSdcInfo() { + return mappedSdcInfo; + } + + public void setMappedSdcInfo(SdcMappingInfo[] mappedSdcInfo) { + this.mappedSdcInfo = mappedSdcInfo; + } +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/VolumeStatistics.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/VolumeStatistics.java new file mode 100644 index 000000000000..6f48e1721ad5 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/api/VolumeStatistics.java @@ -0,0 +1,53 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
/**
 * Per-volume capacity statistics from the PowerFlex Gateway REST API.
 * Missing figures are reported as zero rather than {@code null}.
 */
public class VolumeStatistics {
    Long allocatedSizeInKb;           // virtual size
    Long netProvisionedAddressesInKb; // physical size

    /** @return the virtual (allocated) size in KB; zero when unset */
    public Long getAllocatedSizeInKb() {
        return (allocatedSizeInKb == null) ? Long.valueOf(0) : allocatedSizeInKb;
    }

    /** @return the virtual (allocated) size converted to bytes */
    public Long getAllocatedSizeInBytes() {
        return getAllocatedSizeInKb() * 1024;
    }

    /** @param value virtual size in KB */
    public void setAllocatedSizeInKb(Long value) {
        this.allocatedSizeInKb = value;
    }

    /** @return the physical (provisioned) size in KB; zero when unset */
    public Long getNetProvisionedAddressesInKb() {
        return (netProvisionedAddressesInKb == null) ? Long.valueOf(0) : netProvisionedAddressesInKb;
    }

    /** @return the physical (provisioned) size converted to bytes */
    public Long getNetProvisionedAddressesInBytes() {
        return getNetProvisionedAddressesInKb() * 1024;
    }

    /** @param value physical size in KB */
    public void setNetProvisionedAddressesInKb(Long value) {
        this.netProvisionedAddressesInKb = value;
    }
}
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.datastore.client; + +import java.net.URISyntaxException; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.storage.datastore.api.Sdc; +import org.apache.cloudstack.storage.datastore.api.SnapshotGroup; +import org.apache.cloudstack.storage.datastore.api.StoragePool; +import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; +import org.apache.cloudstack.storage.datastore.api.Volume; +import org.apache.cloudstack.storage.datastore.api.VolumeStatistics; + +import com.cloud.storage.Storage; + +public interface ScaleIOGatewayClient { + String GATEWAY_API_ENDPOINT = "powerflex.gw.url"; + String GATEWAY_API_USERNAME = "powerflex.gw.username"; + String GATEWAY_API_PASSWORD = "powerflex.gw.password"; + String STORAGE_POOL_NAME = "powerflex.storagepool.name"; + String STORAGE_POOL_SYSTEM_ID = "powerflex.storagepool.system.id"; + + static ScaleIOGatewayClient getClient(final String url, final String username, final String password, + final boolean validateCertificate, final int timeout) throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException { + return new ScaleIOGatewayClientImpl(url, username, password, validateCertificate, timeout); + } + + // Volume APIs + Volume createVolume(final String name, final String storagePoolId, + final Integer sizeInGb, final Storage.ProvisioningType volumeType); + List listVolumes(); + List listSnapshotVolumes(); + Volume 
getVolume(String volumeId); + Volume getVolumeByName(String name); + Volume resizeVolume(final String volumeId, final Integer sizeInGb); + Volume cloneVolume(final String sourceVolumeId, final String destVolumeName); + boolean deleteVolume(final String volumeId); + + boolean mapVolumeToSdc(final String volumeId, final String sdcId); + boolean mapVolumeToSdcWithLimits(final String volumeId, final String sdcId, final Long iopsLimit, final Long bandwidthLimitInKbps); + boolean unmapVolumeFromSdc(final String volumeId, final String sdcId); + boolean unmapVolumeFromAllSdcs(final String volumeId); + boolean isVolumeMappedToSdc(final String volumeId, final String sdcId); + + // Snapshot APIs + SnapshotGroup takeSnapshot(final Map srcVolumeDestSnapshotMap); + boolean revertSnapshot(final String systemId, final Map srcSnapshotDestVolumeMap); + int deleteSnapshotGroup(final String systemId, final String snapshotGroupId); + Volume takeSnapshot(final String volumeId, final String snapshotVolumeName); + boolean revertSnapshot(final String sourceSnapshotVolumeId, final String destVolumeId); + + // Storage Pool APIs + List listStoragePools(); + StoragePool getStoragePool(String poolId); + StoragePoolStatistics getStoragePoolStatistics(String poolId); + VolumeStatistics getVolumeStatistics(String volumeId); + String getSystemId(String protectionDomainId); + List listVolumesInStoragePool(String poolId); + + // SDC APIs + List listSdcs(); + Sdc getSdc(String sdcId); + Sdc getSdcByIp(String ipAddress); + Sdc getConnectedSdcByIp(String ipAddress); + List listConnectedSdcIps(); + boolean isSdcConnected(String ipAddress); +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java new file mode 100644 index 000000000000..5b93d4f564f6 --- /dev/null +++ 
b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImpl.java @@ -0,0 +1,975 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.storage.datastore.client; + +import java.io.IOException; +import java.net.SocketTimeoutException; +import java.net.URI; +import java.net.URISyntaxException; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.X509TrustManager; + +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.storage.datastore.api.ProtectionDomain; +import org.apache.cloudstack.storage.datastore.api.Sdc; +import org.apache.cloudstack.storage.datastore.api.SdcMappingInfo; +import org.apache.cloudstack.storage.datastore.api.SnapshotDef; +import org.apache.cloudstack.storage.datastore.api.SnapshotDefs; +import 
org.apache.cloudstack.storage.datastore.api.SnapshotGroup; +import org.apache.cloudstack.storage.datastore.api.StoragePool; +import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; +import org.apache.cloudstack.storage.datastore.api.Volume; +import org.apache.cloudstack.storage.datastore.api.VolumeStatistics; +import org.apache.cloudstack.utils.security.SSLUtils; +import org.apache.http.HttpHeaders; +import org.apache.http.HttpResponse; +import org.apache.http.HttpStatus; +import org.apache.http.client.HttpClient; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.conn.ConnectTimeoutException; +import org.apache.http.conn.ssl.NoopHostnameVerifier; +import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.util.EntityUtils; +import org.apache.log4j.Logger; + +import com.cloud.storage.Storage; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.nio.TrustAllManager; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.json.JsonMapper; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; + +public class ScaleIOGatewayClientImpl implements ScaleIOGatewayClient { + private static final Logger LOG = Logger.getLogger(ScaleIOGatewayClientImpl.class); + + private final URI apiURI; + private final HttpClient httpClient; + private static final String SESSION_HEADER = "X-RestSvcSessionId"; + private static final String MDM_CONNECTED_STATE = "Connected"; + + private String host; + private String username; + private String password; + private String 
sessionKey = null; + + public ScaleIOGatewayClientImpl(final String url, final String username, final String password, + final boolean validateCertificate, final int timeout) + throws NoSuchAlgorithmException, KeyManagementException, URISyntaxException { + Preconditions.checkArgument(!Strings.isNullOrEmpty(url), "Gateway client url cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(username) && !Strings.isNullOrEmpty(password), "Gateway client credentials cannot be null"); + + final RequestConfig config = RequestConfig.custom() + .setConnectTimeout(timeout * 1000) + .setConnectionRequestTimeout(timeout * 1000) + .setSocketTimeout(timeout * 1000) + .build(); + + if (!validateCertificate) { + final SSLContext sslcontext = SSLUtils.getSSLContext(); + sslcontext.init(null, new X509TrustManager[]{new TrustAllManager()}, new SecureRandom()); + final SSLConnectionSocketFactory factory = new SSLConnectionSocketFactory(sslcontext, NoopHostnameVerifier.INSTANCE); + this.httpClient = HttpClientBuilder.create() + .setDefaultRequestConfig(config) + .setSSLSocketFactory(factory) + .build(); + } else { + this.httpClient = HttpClientBuilder.create() + .setDefaultRequestConfig(config) + .build(); + } + + this.apiURI = new URI(url); + this.host = apiURI.getHost(); + this.username = username; + this.password = password; + + authenticate(username, password); + } + + ///////////////////////////////////////////////////////////// + //////////////// Private Helper Methods ///////////////////// + ///////////////////////////////////////////////////////////// + + private void authenticate(final String username, final String password) { + final HttpGet request = new HttpGet(apiURI.toString() + "/login"); + request.setHeader(HttpHeaders.AUTHORIZATION, "Basic " + Base64.getEncoder().encodeToString((username + ":" + password).getBytes())); + try { + final HttpResponse response = httpClient.execute(request); + checkAuthFailure(response); + this.sessionKey = 
EntityUtils.toString(response.getEntity()); + if (Strings.isNullOrEmpty(this.sessionKey)) { + throw new CloudRuntimeException("Failed to create a valid PowerFlex Gateway Session to perform API requests"); + } + this.sessionKey = this.sessionKey.replace("\"", ""); + if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { + throw new CloudRuntimeException("PowerFlex Gateway login failed, please check the provided settings"); + } + } catch (final IOException e) { + throw new CloudRuntimeException("Failed to authenticate PowerFlex API Gateway due to:" + e.getMessage()); + } + } + + private void checkAuthFailure(final HttpResponse response) { + if (response != null && response.getStatusLine().getStatusCode() == HttpStatus.SC_UNAUTHORIZED) { + throw new ServerApiException(ApiErrorCode.UNAUTHORIZED, "PowerFlex Gateway API call unauthorized, please check the provided settings"); + } + } + + private void checkResponseOK(final HttpResponse response) { + if (response.getStatusLine().getStatusCode() == HttpStatus.SC_NO_CONTENT) { + LOG.debug("Requested resource does not exist"); + return; + } + if (response.getStatusLine().getStatusCode() == HttpStatus.SC_BAD_REQUEST) { + throw new ServerApiException(ApiErrorCode.MALFORMED_PARAMETER_ERROR, "Bad API request"); + } + if (!(response.getStatusLine().getStatusCode() == HttpStatus.SC_OK || + response.getStatusLine().getStatusCode() == HttpStatus.SC_ACCEPTED)) { + String responseBody = response.toString(); + try { + responseBody = EntityUtils.toString(response.getEntity()); + } catch (IOException ignored) { + } + LOG.debug("HTTP request failed, status code is " + response.getStatusLine().getStatusCode() + ", response is: " + responseBody); + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "API failed due to: " + responseBody); + } + } + + private void checkResponseTimeOut(final Exception e) { + if (e instanceof ConnectTimeoutException || e instanceof SocketTimeoutException) { + throw new 
ServerApiException(ApiErrorCode.RESOURCE_UNAVAILABLE_ERROR, "API operation timed out, please try again."); + } + } + + private HttpResponse get(final String path) throws IOException { + final HttpGet request = new HttpGet(apiURI.toString() + path); + request.setHeader(HttpHeaders.AUTHORIZATION, "Basic " + Base64.getEncoder().encodeToString((this.username + ":" + this.sessionKey).getBytes())); + final HttpResponse response = httpClient.execute(request); + String responseStatus = (response != null) ? (response.getStatusLine().getStatusCode() + " " + response.getStatusLine().getReasonPhrase()) : "nil"; + LOG.debug("GET request path: " + path + ", response: " + responseStatus); + checkAuthFailure(response); + return response; + } + + private HttpResponse post(final String path, final Object obj) throws IOException { + final HttpPost request = new HttpPost(apiURI.toString() + path); + request.setHeader(HttpHeaders.AUTHORIZATION, "Basic " + Base64.getEncoder().encodeToString((this.username + ":" + this.sessionKey).getBytes())); + request.setHeader("Content-type", "application/json"); + if (obj != null) { + if (obj instanceof String) { + request.setEntity(new StringEntity((String) obj)); + } else { + JsonMapper mapper = new JsonMapper(); + mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL); + String json = mapper.writer().writeValueAsString(obj); + request.setEntity(new StringEntity(json)); + } + } + final HttpResponse response = httpClient.execute(request); + String responseStatus = (response != null) ? 
(response.getStatusLine().getStatusCode() + " " + response.getStatusLine().getReasonPhrase()) : "nil"; + LOG.debug("POST request path: " + path + ", response: " + responseStatus); + checkAuthFailure(response); + return response; + } + + ////////////////////////////////////////////////// + //////////////// Volume APIs ///////////////////// + ////////////////////////////////////////////////// + + @Override + public Volume createVolume(final String name, final String storagePoolId, + final Integer sizeInGb, final Storage.ProvisioningType volumeType) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(name), "Volume name cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(storagePoolId), "Storage pool id cannot be null"); + Preconditions.checkArgument(sizeInGb != null && sizeInGb > 0, "Size(GB) must be greater than 0"); + + HttpResponse response = null; + try { + Volume newVolume = new Volume(); + newVolume.setName(name); + newVolume.setStoragePoolId(storagePoolId); + newVolume.setVolumeSizeInGb(sizeInGb); + if (Storage.ProvisioningType.FAT.equals(volumeType)) { + newVolume.setVolumeType(Volume.VolumeType.ThickProvisioned); + } else { + newVolume.setVolumeType(Volume.VolumeType.ThinProvisioned); + } + // The basic allocation granularity is 8GB. The volume size will be rounded up. 
+ response = post("/types/Volume/instances", newVolume); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + Volume newVolumeObject = mapper.readValue(response.getEntity().getContent(), Volume.class); + return getVolume(newVolumeObject.getId()); + } catch (final IOException e) { + LOG.error("Failed to create PowerFlex volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public List listVolumes() { + HttpResponse response = null; + try { + response = get("/types/Volume/instances"); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + Volume[] volumes = mapper.readValue(response.getEntity().getContent(), Volume[].class); + return Arrays.asList(volumes); + } catch (final IOException e) { + LOG.error("Failed to list PowerFlex volumes due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return new ArrayList<>(); + } + + @Override + public List listSnapshotVolumes() { + List volumes = listVolumes(); + List snapshotVolumes = new ArrayList<>(); + if (volumes != null && !volumes.isEmpty()) { + for (Volume volume : volumes) { + if (volume != null && volume.getVolumeType() == Volume.VolumeType.Snapshot) { + snapshotVolumes.add(volume); + } + } + } + + return snapshotVolumes; + } + + @Override + public Volume getVolume(String volumeId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + + HttpResponse response = null; + try { + response = get("/instances/Volume::" + volumeId); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + return 
mapper.readValue(response.getEntity().getContent(), Volume.class); + } catch (final IOException e) { + LOG.error("Failed to get volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public Volume getVolumeByName(String name) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(name), "Volume name cannot be null"); + + HttpResponse response = null; + try { + Volume searchVolume = new Volume(); + searchVolume.setName(name); + response = post("/types/Volume/instances/action/queryIdByKey", searchVolume); + checkResponseOK(response); + String volumeId = EntityUtils.toString(response.getEntity()); + if (!Strings.isNullOrEmpty(volumeId)) { + return getVolume(volumeId.replace("\"", "")); + } + } catch (final IOException e) { + LOG.error("Failed to get volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public Volume resizeVolume(final String volumeId, final Integer sizeInGB) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + Preconditions.checkArgument(sizeInGB != null && (sizeInGB > 0 && sizeInGB % 8 == 0), + "Size(GB) must be greater than 0 and in granularity of 8"); + + HttpResponse response = null; + try { + // Volume capacity can only be increased. sizeInGB must be a positive number in granularity of 8 GB. 
+ response = post( + "/instances/Volume::" + volumeId + "/action/setVolumeSize", + String.format("{\"sizeInGB\":\"%s\"}", sizeInGB.toString())); + checkResponseOK(response); + return getVolume(volumeId); + } catch (final IOException e) { + LOG.error("Failed to resize PowerFlex volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public Volume cloneVolume(final String sourceVolumeId, final String destVolumeName) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(sourceVolumeId), "Source volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(destVolumeName), "Dest volume name cannot be null"); + + Map snapshotMap = new HashMap<>(); + snapshotMap.put(sourceVolumeId, destVolumeName); + takeSnapshot(snapshotMap); + return getVolumeByName(destVolumeName); + } + + @Override + public SnapshotGroup takeSnapshot(final Map srcVolumeDestSnapshotMap) { + Preconditions.checkArgument(srcVolumeDestSnapshotMap != null && !srcVolumeDestSnapshotMap.isEmpty(), "srcVolumeDestSnapshotMap cannot be null"); + + HttpResponse response = null; + try { + final List defs = new ArrayList<>(); + for (final String volumeId : srcVolumeDestSnapshotMap.keySet()) { + final SnapshotDef snapshotDef = new SnapshotDef(); + snapshotDef.setVolumeId(volumeId); + String snapshotName = srcVolumeDestSnapshotMap.get(volumeId); + if (!Strings.isNullOrEmpty(snapshotName)) { + snapshotDef.setSnapshotName(srcVolumeDestSnapshotMap.get(volumeId)); + } + defs.add(snapshotDef); + } + final SnapshotDefs snapshotDefs = new SnapshotDefs(); + snapshotDefs.setSnapshotDefs(defs.toArray(new SnapshotDef[0])); + response = post("/instances/System/action/snapshotVolumes", snapshotDefs); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + return 
mapper.readValue(response.getEntity().getContent(), SnapshotGroup.class); + } catch (final IOException e) { + LOG.error("Failed to take snapshot due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public boolean revertSnapshot(final String systemId, final Map srcSnapshotDestVolumeMap) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(systemId), "System id cannot be null"); + Preconditions.checkArgument(srcSnapshotDestVolumeMap != null && !srcSnapshotDestVolumeMap.isEmpty(), "srcSnapshotDestVolumeMap cannot be null"); + + // Take group snapshot (needs additional storage pool capacity till revert operation) to keep the last state of all volumes ??? + // and delete the group snapshot after revert operation + // If revert snapshot failed for any volume, use the group snapshot, to revert volumes to last state + Map srcVolumeDestSnapshotMap = new HashMap<>(); + List originalVolumeIds = new ArrayList<>(); + for (final String sourceSnapshotVolumeId : srcSnapshotDestVolumeMap.keySet()) { + String destVolumeId = srcSnapshotDestVolumeMap.get(sourceSnapshotVolumeId); + srcVolumeDestSnapshotMap.put(destVolumeId, ""); + originalVolumeIds.add(destVolumeId); + } + SnapshotGroup snapshotGroup = takeSnapshot(srcVolumeDestSnapshotMap); + if (snapshotGroup == null) { + throw new CloudRuntimeException("Failed to snapshot the last vm state"); + } + + boolean revertSnapshotResult = true; + int revertStatusIndex = -1; + + try { + // non-atomic operation, try revert each volume + for (final String sourceSnapshotVolumeId : srcSnapshotDestVolumeMap.keySet()) { + String destVolumeId = srcSnapshotDestVolumeMap.get(sourceSnapshotVolumeId); + boolean revertStatus = revertSnapshot(sourceSnapshotVolumeId, destVolumeId); + if (!revertStatus) { + revertSnapshotResult = false; + LOG.warn("Failed to revert snapshot for volume id: " + sourceSnapshotVolumeId); + throw new 
CloudRuntimeException("Failed to revert snapshot for volume id: " + sourceSnapshotVolumeId); + } else { + revertStatusIndex++; + } + } + } catch (final Exception e) { + LOG.error("Failed to revert vm snapshot due to: " + e.getMessage(), e); + throw new CloudRuntimeException("Failed to revert vm snapshot due to: " + e.getMessage()); + } finally { + if (!revertSnapshotResult) { + //revert to volume with last state and delete the snapshot group, for already reverted volumes + List volumesWithLastState = snapshotGroup.getVolumeIds(); + for (int index = revertStatusIndex; index >= 0; index--) { + // Handling failure for revert again will become recursive ??? + revertSnapshot(volumesWithLastState.get(index), originalVolumeIds.get(index)); + } + } + deleteSnapshotGroup(systemId, snapshotGroup.getSnapshotGroupId()); + } + + return revertSnapshotResult; + } + + @Override + public int deleteSnapshotGroup(final String systemId, final String snapshotGroupId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(systemId), "System id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(snapshotGroupId), "Snapshot group id cannot be null"); + + HttpResponse response = null; + try { + response = post( + "/instances/System::" + systemId + "/action/removeConsistencyGroupSnapshots", + String.format("{\"snapGroupId\":\"%s\"}", snapshotGroupId)); + checkResponseOK(response); + JsonNode node = new ObjectMapper().readTree(response.getEntity().getContent()); + JsonNode noOfVolumesNode = node.get("numberOfVolumes"); + return noOfVolumesNode.asInt(); + } catch (final IOException e) { + LOG.error("Failed to delete PowerFlex snapshot group due to: " + e.getMessage(), e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return -1; + } + + @Override + public Volume takeSnapshot(final String volumeId, final String snapshotVolumeName) { + 
Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(snapshotVolumeName), "Snapshot name cannot be null"); + + HttpResponse response = null; + try { + final SnapshotDef[] snapshotDef = new SnapshotDef[1]; + snapshotDef[0] = new SnapshotDef(); + snapshotDef[0].setVolumeId(volumeId); + snapshotDef[0].setSnapshotName(snapshotVolumeName); + final SnapshotDefs snapshotDefs = new SnapshotDefs(); + snapshotDefs.setSnapshotDefs(snapshotDef); + + response = post("/instances/System/action/snapshotVolumes", snapshotDefs); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + SnapshotGroup snapshotGroup = mapper.readValue(response.getEntity().getContent(), SnapshotGroup.class); + if (snapshotGroup != null) { + List volumeIds = snapshotGroup.getVolumeIds(); + if (volumeIds != null && !volumeIds.isEmpty()) { + return getVolume(volumeIds.get(0)); + } + } + } catch (final IOException e) { + LOG.error("Failed to take snapshot due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public boolean revertSnapshot(final String sourceSnapshotVolumeId, final String destVolumeId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(sourceSnapshotVolumeId), "Source snapshot volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(destVolumeId), "Destination volume id cannot be null"); + + HttpResponse response = null; + try { + Volume sourceSnapshotVolume = getVolume(sourceSnapshotVolumeId); + if (sourceSnapshotVolume == null) { + throw new CloudRuntimeException("Source snapshot volume: " + sourceSnapshotVolumeId + " doesn't exists"); + } + + Volume destVolume = getVolume(destVolumeId); + if (destVolume == null) { + throw new CloudRuntimeException("Destination
volume: " + destVolumeId + " doesn't exists"); + } + + if (!sourceSnapshotVolume.getVtreeId().equals(destVolume.getVtreeId())) { + throw new CloudRuntimeException("Unable to revert, source snapshot volume and destination volume doesn't belong to same volume tree"); + } + + response = post( + "/instances/Volume::" + destVolumeId + "/action/overwriteVolumeContent", + String.format("{\"srcVolumeId\":\"%s\",\"allowOnExtManagedVol\":\"TRUE\"}", sourceSnapshotVolumeId)); + checkResponseOK(response); + return true; + } catch (final IOException e) { + LOG.error("Failed to revert PowerFlex volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + + @Override + public boolean mapVolumeToSdc(final String volumeId, final String sdcId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(sdcId), "Sdc Id cannot be null"); + + HttpResponse response = null; + try { + if (isVolumeMappedToSdc(volumeId, sdcId)) { + return true; + } + + response = post( + "/instances/Volume::" + volumeId + "/action/addMappedSdc", + String.format("{\"sdcId\":\"%s\",\"allowMultipleMappings\":\"TRUE\"}", sdcId)); + checkResponseOK(response); + return true; + } catch (final IOException e) { + LOG.error("Failed to map PowerFlex volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + + @Override + public boolean mapVolumeToSdcWithLimits(final String volumeId, final String sdcId, final Long iopsLimit, final Long bandwidthLimitInKbps) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(sdcId), "Sdc Id cannot be null"); + Preconditions.checkArgument(iopsLimit != null && (iopsLimit == 0 || iopsLimit > 
10), + "IOPS limit must be 0 (unlimited) or greater than 10"); + Preconditions.checkArgument(bandwidthLimitInKbps != null && (bandwidthLimitInKbps == 0 || (bandwidthLimitInKbps > 0 && bandwidthLimitInKbps % 1024 == 0)), + "Bandwidth limit(Kbps) must be 0 (unlimited) or in granularity of 1024"); + + HttpResponse response = null; + try { + if (mapVolumeToSdc(volumeId, sdcId)) { + long iopsLimitVal = 0; + if (iopsLimit != null && iopsLimit.longValue() > 0) { + iopsLimitVal = iopsLimit.longValue(); + } + + long bandwidthLimitInKbpsVal = 0; + if (bandwidthLimitInKbps != null && bandwidthLimitInKbps.longValue() > 0) { + bandwidthLimitInKbpsVal = bandwidthLimitInKbps.longValue(); + } + + response = post( + "/instances/Volume::" + volumeId + "/action/setMappedSdcLimits", + String.format("{\"sdcId\":\"%s\",\"bandwidthLimitInKbps\":\"%d\",\"iopsLimit\":\"%d\"}", sdcId, bandwidthLimitInKbpsVal, iopsLimitVal)); + checkResponseOK(response); + return true; + } + } catch (final IOException e) { + LOG.error("Failed to map PowerFlex volume with limits due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + + @Override + public boolean unmapVolumeFromSdc(final String volumeId, final String sdcId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(sdcId), "Sdc Id cannot be null"); + + HttpResponse response = null; + try { + if (isVolumeMappedToSdc(volumeId, sdcId)) { + response = post( + "/instances/Volume::" + volumeId + "/action/removeMappedSdc", + String.format("{\"sdcId\":\"%s\",\"skipApplianceValidation\":\"TRUE\"}", sdcId)); + checkResponseOK(response); + return true; + } + } catch (final IOException e) { + LOG.error("Failed to unmap PowerFlex volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + 
EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + + @Override + public boolean unmapVolumeFromAllSdcs(final String volumeId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + + HttpResponse response = null; + try { + Volume volume = getVolume(volumeId); + if (volume == null) { + return false; + } + + List mappedSdcList = volume.getMappedSdcList(); + if (mappedSdcList == null || mappedSdcList.isEmpty()) { + return true; + } + + response = post( + "/instances/Volume::" + volumeId + "/action/removeMappedSdc", + "{\"allSdcs\": \"\"}"); + checkResponseOK(response); + return true; + } catch (final IOException e) { + LOG.error("Failed to unmap PowerFlex volume from all SDCs due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + + @Override + public boolean isVolumeMappedToSdc(final String volumeId, final String sdcId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(sdcId), "Sdc Id cannot be null"); + + if (Strings.isNullOrEmpty(volumeId) || Strings.isNullOrEmpty(sdcId)) { + return false; + } + + Volume volume = getVolume(volumeId); + if (volume == null) { + return false; + } + + List mappedSdcList = volume.getMappedSdcList(); + if (mappedSdcList != null && !mappedSdcList.isEmpty()) { + for (SdcMappingInfo mappedSdc : mappedSdcList) { + if (sdcId.equalsIgnoreCase(mappedSdc.getSdcId())) { + return true; + } + } + } + + return false; + } + + @Override + public boolean deleteVolume(final String volumeId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + + HttpResponse response = null; + try { + try { + unmapVolumeFromAllSdcs(volumeId); + } catch (Exception ignored) {} + response = post( + "/instances/Volume::" + volumeId + "/action/removeVolume", + 
"{\"removeMode\":\"ONLY_ME\"}"); + checkResponseOK(response); + return true; + } catch (final IOException e) { + LOG.error("Failed to delete PowerFlex volume due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return false; + } + + /////////////////////////////////////////////////////// + //////////////// StoragePool APIs ///////////////////// + /////////////////////////////////////////////////////// + + @Override + public List listStoragePools() { + HttpResponse response = null; + try { + response = get("/types/StoragePool/instances"); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + StoragePool[] pools = mapper.readValue(response.getEntity().getContent(), StoragePool[].class); + return Arrays.asList(pools); + } catch (final IOException e) { + LOG.error("Failed to list PowerFlex storage pools due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return new ArrayList<>(); + } + + @Override + public StoragePool getStoragePool(String poolId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(poolId), "Storage pool id cannot be null"); + + HttpResponse response = null; + try { + response = get("/instances/StoragePool::" + poolId); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + return mapper.readValue(response.getEntity().getContent(), StoragePool.class); + } catch (final IOException e) { + LOG.error("Failed to get storage pool due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public StoragePoolStatistics getStoragePoolStatistics(String poolId) { + 
Preconditions.checkArgument(!Strings.isNullOrEmpty(poolId), "Storage pool id cannot be null"); + + HttpResponse response = null; + try { + response = get("/instances/StoragePool::" + poolId + "/relationships/Statistics"); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + return mapper.readValue(response.getEntity().getContent(), StoragePoolStatistics.class); + } catch (final IOException e) { + LOG.error("Failed to get storage pool due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public VolumeStatistics getVolumeStatistics(String volumeId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "Volume id cannot be null"); + + HttpResponse response = null; + try { + Volume volume = getVolume(volumeId); + if (volume != null) { + String volumeTreeId = volume.getVtreeId(); + if (!Strings.isNullOrEmpty(volumeTreeId)) { + response = get("/instances/VTree::" + volumeTreeId + "/relationships/Statistics"); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + VolumeStatistics volumeStatistics = mapper.readValue(response.getEntity().getContent(), VolumeStatistics.class); + if (volumeStatistics != null) { + volumeStatistics.setAllocatedSizeInKb(volume.getSizeInKb()); + return volumeStatistics; + } + } + } + } catch (final IOException e) { + LOG.error("Failed to get volume stats due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + + return null; + } + + @Override + public String getSystemId(String protectionDomainId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(protectionDomainId), "Protection domain id cannot be null"); + + HttpResponse response = null; + try { + 
response = get("/instances/ProtectionDomain::" + protectionDomainId); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + ProtectionDomain protectionDomain = mapper.readValue(response.getEntity().getContent(), ProtectionDomain.class); + if (protectionDomain != null) { + return protectionDomain.getSystemId(); + } + } catch (final IOException e) { + LOG.error("Failed to get protection domain details due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public List listVolumesInStoragePool(String poolId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(poolId), "Storage pool id cannot be null"); + + HttpResponse response = null; + try { + response = get("/instances/StoragePool::" + poolId + "/relationships/Volume"); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + Volume[] volumes = mapper.readValue(response.getEntity().getContent(), Volume[].class); + return Arrays.asList(volumes); + } catch (final IOException e) { + LOG.error("Failed to list volumes in storage pool due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return new ArrayList<>(); + } + + /////////////////////////////////////////////// + //////////////// SDC APIs ///////////////////// + /////////////////////////////////////////////// + + @Override + public List listSdcs() { + HttpResponse response = null; + try { + response = get("/types/Sdc/instances"); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + Sdc[] sdcs = mapper.readValue(response.getEntity().getContent(), Sdc[].class); + return Arrays.asList(sdcs); + } 
catch (final IOException e) { + LOG.error("Failed to list SDCs due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return new ArrayList<>(); + } + + @Override + public Sdc getSdc(String sdcId) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(sdcId), "Sdc id cannot be null"); + + HttpResponse response = null; + try { + response = get("/instances/Sdc::" + sdcId); + checkResponseOK(response); + ObjectMapper mapper = new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + return mapper.readValue(response.getEntity().getContent(), Sdc.class); + } catch (final IOException e) { + LOG.error("Failed to get SDC due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public Sdc getSdcByIp(String ipAddress) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(ipAddress), "IP address cannot be null"); + + HttpResponse response = null; + try { + response = post("/types/Sdc/instances/action/queryIdByKey", String.format("{\"ip\":\"%s\"}", ipAddress)); + checkResponseOK(response); + String sdcId = EntityUtils.toString(response.getEntity()); + if (!Strings.isNullOrEmpty(sdcId)) { + return getSdc(sdcId.replace("\"", "")); + } + } catch (final IOException e) { + LOG.error("Failed to get SDC due to:", e); + checkResponseTimeOut(e); + } finally { + if (response != null) { + EntityUtils.consumeQuietly(response.getEntity()); + } + } + return null; + } + + @Override + public Sdc getConnectedSdcByIp(String ipAddress) { + Sdc sdc = getSdcByIp(ipAddress); + if (sdc != null && MDM_CONNECTED_STATE.equalsIgnoreCase(sdc.getMdmConnectionState())) { + return sdc; + } + + return null; + } + + @Override + public List listConnectedSdcIps() { + List sdcIps = new ArrayList<>(); + List sdcs = listSdcs(); + if(sdcs != null) { + for (Sdc sdc : sdcs) { 
+ if (MDM_CONNECTED_STATE.equalsIgnoreCase(sdc.getMdmConnectionState())) { + sdcIps.add(sdc.getSdcIp()); + } + } + } + + return sdcIps; + } + + @Override + public boolean isSdcConnected(String ipAddress) { + Preconditions.checkArgument(!Strings.isNullOrEmpty(ipAddress), "IP address cannot be null"); + + List sdcs = listSdcs(); + if(sdcs != null) { + for (Sdc sdc : sdcs) { + if (ipAddress.equalsIgnoreCase(sdc.getSdcIp()) && MDM_CONNECTED_STATE.equalsIgnoreCase(sdc.getMdmConnectionState())) { + return true; + } + } + } + + return false; + } +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java new file mode 100644 index 000000000000..2d3e33b0d97b --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java @@ -0,0 +1,753 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.driver; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.RemoteHostEndPoint; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.command.CopyCommand; +import org.apache.cloudstack.storage.command.CreateObjectAnswer; +import org.apache.cloudstack.storage.datastore.api.Sdc; +import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; +import org.apache.cloudstack.storage.datastore.api.VolumeStatistics; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import 
org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.log4j.Logger; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.DataObjectType; +import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.agent.api.to.DataTO; +import com.cloud.alert.AlertManager; +import com.cloud.host.Host; +import com.cloud.server.ManagementServerImpl; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.ResizeVolumePayload; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeDetailVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.VirtualMachineManager; +import com.google.common.base.Preconditions; +import com.google.common.base.Strings; + +public class ScaleIOPrimaryDataStoreDriver implements PrimaryDataStoreDriver { + private static final Logger LOGGER = Logger.getLogger(ScaleIOPrimaryDataStoreDriver.class); + + @Inject + EndPointSelector selector; + @Inject + private PrimaryDataStoreDao storagePoolDao; + @Inject + private StoragePoolDetailsDao storagePoolDetailsDao; + @Inject + private VolumeDao volumeDao; + @Inject + private VolumeDetailsDao volumeDetailsDao; + @Inject + private VMTemplatePoolDao vmTemplatePoolDao; + @Inject + private SnapshotDataStoreDao snapshotDataStoreDao; + @Inject + private AlertManager alertMgr; + + public ScaleIOPrimaryDataStoreDriver() { + + } + + private ScaleIOGatewayClient getScaleIOClient(final 
Long storagePoolId) throws Exception { + final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.valueIn(storagePoolId); + final String url = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_ENDPOINT).getValue(); + final String username = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_USERNAME).getValue(); + final String password = storagePoolDetailsDao.findDetail(storagePoolId, ScaleIOGatewayClient.GATEWAY_API_PASSWORD).getValue(); + return ScaleIOGatewayClient.getClient(url, username, password, false, clientTimeout); + } + + @Override + public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) { + try { + if (DataObjectType.VOLUME.equals(dataObject.getType())) { + final VolumeVO volume = volumeDao.findById(dataObject.getId()); + LOGGER.debug("Granting access for PowerFlex volume: " + volume.getPath()); + + Long bandwidthLimitInKbps = Long.valueOf(0); // Unlimited + // Check Bandwidht Limit parameter in volume details + final VolumeDetailVO bandwidthVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS); + if (bandwidthVolumeDetail != null && bandwidthVolumeDetail.getValue() != null) { + bandwidthLimitInKbps = Long.parseLong(bandwidthVolumeDetail.getValue()) * 1024; + } + + Long iopsLimit = Long.valueOf(0); // Unlimited + // Check IOPS Limit parameter in volume details, else try MaxIOPS + final VolumeDetailVO iopsVolumeDetail = volumeDetailsDao.findDetail(volume.getId(), Volume.IOPS_LIMIT); + if (iopsVolumeDetail != null && iopsVolumeDetail.getValue() != null) { + iopsLimit = Long.parseLong(iopsVolumeDetail.getValue()); + } else if (volume.getMaxIops() != null) { + iopsLimit = volume.getMaxIops(); + } + if (iopsLimit > 0 && iopsLimit < ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT) { + iopsLimit = ScaleIOUtil.MINIMUM_ALLOWED_IOPS_LIMIT; + } + + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final Sdc sdc = 
client.getConnectedSdcByIp(host.getPrivateIpAddress()); + if (sdc == null) { + alertHostSdcDisconnection(host); + throw new CloudRuntimeException("Unable to grant access to volume: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); + } + + return client.mapVolumeToSdcWithLimits(volume.getPath(), sdc.getId(), iopsLimit, bandwidthLimitInKbps); + } else if (DataObjectType.TEMPLATE.equals(dataObject.getType())) { + final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId()); + LOGGER.debug("Granting access for PowerFlex template volume: " + templatePoolRef.getInstallPath()); + + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress()); + if (sdc == null) { + alertHostSdcDisconnection(host); + throw new CloudRuntimeException("Unable to grant access to template: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); + } + + return client.mapVolumeToSdc(templatePoolRef.getInstallPath(), sdc.getId()); + } + + return false; + } catch (Exception e) { + throw new CloudRuntimeException(e); + } + } + + @Override + public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { + try { + if (DataObjectType.VOLUME.equals(dataObject.getType())) { + final VolumeVO volume = volumeDao.findById(dataObject.getId()); + LOGGER.debug("Revoking access for PowerFlex volume: " + volume.getPath()); + + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress()); + if (sdc == null) { + throw new CloudRuntimeException("Unable to revoke access for volume: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); + } + + client.unmapVolumeFromSdc(volume.getPath(), sdc.getId()); + } else if 
(DataObjectType.TEMPLATE.equals(dataObject.getType())) { + final VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(dataStore.getId(), dataObject.getId()); + LOGGER.debug("Revoking access for PowerFlex template volume: " + templatePoolRef.getInstallPath()); + + final ScaleIOGatewayClient client = getScaleIOClient(dataStore.getId()); + final Sdc sdc = client.getConnectedSdcByIp(host.getPrivateIpAddress()); + if (sdc == null) { + throw new CloudRuntimeException("Unable to revoke access for template: " + dataObject.getId() + ", no Sdc connected with host ip: " + host.getPrivateIpAddress()); + } + + client.unmapVolumeFromSdc(templatePoolRef.getInstallPath(), sdc.getId()); + } + } catch (Exception e) { + LOGGER.warn("Failed to revoke access due to: " + e.getMessage(), e); + } + } + + @Override + public long getUsedBytes(StoragePool storagePool) { + long usedSpaceBytes = 0; + // Volumes + List volumes = volumeDao.findByPoolIdAndState(storagePool.getId(), Volume.State.Ready); + if (volumes != null) { + for (VolumeVO volume : volumes) { + usedSpaceBytes += volume.getSize(); + + long vmSnapshotChainSize = volume.getVmSnapshotChainSize() == null ? 
0 : volume.getVmSnapshotChainSize(); + usedSpaceBytes += vmSnapshotChainSize; + } + } + + //Snapshots + List snapshots = snapshotDataStoreDao.listByStoreIdAndState(storagePool.getId(), ObjectInDataStoreStateMachine.State.Ready); + if (snapshots != null) { + for (SnapshotDataStoreVO snapshot : snapshots) { + usedSpaceBytes += snapshot.getSize(); + } + } + + // Templates + List templates = vmTemplatePoolDao.listByPoolIdAndState(storagePool.getId(), ObjectInDataStoreStateMachine.State.Ready); + if (templates != null) { + for (VMTemplateStoragePoolVO template : templates) { + usedSpaceBytes += template.getTemplateSize(); + } + } + + LOGGER.debug("Used/Allocated storage space (in bytes): " + String.valueOf(usedSpaceBytes)); + + return usedSpaceBytes; + } + + @Override + public long getUsedIops(StoragePool storagePool) { + return 0; + } + + @Override + public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool pool) { + return ((dataObject != null && dataObject.getSize() != null) ? 
dataObject.getSize() : 0); + } + + @Override + public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool) { + if (templateInfo == null || storagePool == null) { + return 0; + } + + VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(), templateInfo.getId()); + if (templatePoolRef != null) { + // Template exists on this primary storage, do not require additional space + return 0; + } + + return getDataObjectSizeIncludingHypervisorSnapshotReserve(templateInfo, storagePool); + } + + @Override + public Map getCapabilities() { + Map mapCapabilities = new HashMap<>(); + mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); + mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString()); + mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString()); + mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.TRUE.toString()); + return mapCapabilities; + } + + @Override + public ChapInfo getChapInfo(DataObject dataObject) { + return null; + } + + @Override + public DataTO getTO(DataObject data) { + return null; + } + + @Override + public DataStoreTO getStoreTO(DataStore store) { + return null; + } + + @Override + public void takeSnapshot(SnapshotInfo snapshotInfo, AsyncCompletionCallback callback) { + LOGGER.debug("Taking PowerFlex volume snapshot"); + + Preconditions.checkArgument(snapshotInfo != null, "snapshotInfo cannot be null"); + + VolumeInfo volumeInfo = snapshotInfo.getBaseVolume(); + Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null"); + + VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId()); + + long storagePoolId = volumeVO.getPoolId(); + Preconditions.checkArgument(storagePoolId > 0, "storagePoolId should be > 0"); + + StoragePoolVO storagePool = 
storagePoolDao.findById(storagePoolId); + Preconditions.checkArgument(storagePool != null && storagePool.getHostAddress() != null, "storagePool and host address should not be null"); + + CreateCmdResult result; + + try { + SnapshotObjectTO snapshotObjectTo = (SnapshotObjectTO)snapshotInfo.getTO(); + + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final String scaleIOVolumeId = volumeVO.getPath(); + String snapshotName = String.format("%s-%s-%s-%s", ScaleIOUtil.SNAPSHOT_PREFIX, snapshotInfo.getId(), + storagePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); + + org.apache.cloudstack.storage.datastore.api.Volume scaleIOVolume = null; + scaleIOVolume = client.takeSnapshot(scaleIOVolumeId, snapshotName); + + if (scaleIOVolume == null) { + throw new CloudRuntimeException("Failed to take snapshot on PowerFlex cluster"); + } + + snapshotObjectTo.setPath(scaleIOVolume.getId()); + CreateObjectAnswer createObjectAnswer = new CreateObjectAnswer(snapshotObjectTo); + result = new CreateCmdResult(null, createObjectAnswer); + result.setResult(null); + } catch (Exception e) { + String errMsg = "Unable to take PowerFlex volume snapshot for volume: " + volumeInfo.getId() + " due to " + e.getMessage(); + LOGGER.warn(errMsg); + result = new CreateCmdResult(null, new CreateObjectAnswer(e.toString())); + result.setResult(e.toString()); + } + + callback.complete(result); + } + + @Override + public void revertSnapshot(SnapshotInfo snapshot, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback callback) { + LOGGER.debug("Reverting to PowerFlex volume snapshot"); + + Preconditions.checkArgument(snapshot != null, "snapshotInfo cannot be null"); + + VolumeInfo volumeInfo = snapshot.getBaseVolume(); + Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null"); + + VolumeVO volumeVO = volumeDao.findById(volumeInfo.getId()); + + try { + if (volumeVO == null || volumeVO.getRemoved() != null) { + String 
errMsg = "The volume that the snapshot belongs to no longer exists."; + CommandResult commandResult = new CommandResult(); + commandResult.setResult(errMsg); + callback.complete(commandResult); + return; + } + + long storagePoolId = volumeVO.getPoolId(); + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + String snapshotVolumeId = snapshot.getPath(); + final String destVolumeId = volumeVO.getPath(); + client.revertSnapshot(snapshotVolumeId, destVolumeId); + + CommandResult commandResult = new CommandResult(); + callback.complete(commandResult); + } catch (Exception ex) { + LOGGER.debug("Unable to revert to PowerFlex snapshot: " + snapshot.getId(), ex); + throw new CloudRuntimeException(ex.getMessage()); + } + } + + private String createVolume(VolumeInfo volumeInfo, long storagePoolId) { + LOGGER.debug("Creating PowerFlex volume"); + + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + + Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null"); + Preconditions.checkArgument(storagePoolId > 0, "storagePoolId should be > 0"); + Preconditions.checkArgument(storagePool != null && storagePool.getHostAddress() != null, "storagePool and host address should not be null"); + + try { + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final String scaleIOStoragePoolId = storagePool.getPath(); + final Long sizeInBytes = volumeInfo.getSize(); + final long sizeInGb = (long) Math.ceil(sizeInBytes / (1024.0 * 1024.0 * 1024.0)); + final String scaleIOVolumeName = String.format("%s-%s-%s-%s", ScaleIOUtil.VOLUME_PREFIX, volumeInfo.getId(), + storagePool.getUuid().split("-")[0].substring(4), ManagementServerImpl.customCsIdentifier.value()); + + org.apache.cloudstack.storage.datastore.api.Volume scaleIOVolume = null; + scaleIOVolume = client.createVolume(scaleIOVolumeName, scaleIOStoragePoolId, (int) sizeInGb, volumeInfo.getProvisioningType()); + + if (scaleIOVolume == null) { + throw new 
CloudRuntimeException("Failed to create volume on PowerFlex cluster"); + } + + VolumeVO volume = volumeDao.findById(volumeInfo.getId()); + volume.set_iScsiName(scaleIOVolume.getId()); + volume.setPath(scaleIOVolume.getId()); + volume.setFolder(scaleIOVolume.getVtreeId()); + volume.setSize(scaleIOVolume.getSizeInKb() * 1024); + volume.setPoolType(Storage.StoragePoolType.PowerFlex); + volume.setFormat(Storage.ImageFormat.RAW); + volume.setPoolId(storagePoolId); + volumeDao.update(volume.getId(), volume); + + long capacityBytes = storagePool.getCapacityBytes(); + long usedBytes = storagePool.getUsedBytes(); + usedBytes += volume.getSize(); + storagePool.setUsedBytes(usedBytes > capacityBytes ? capacityBytes : usedBytes); + storagePoolDao.update(storagePoolId, storagePool); + + return volume.getPath(); + } catch (Exception e) { + String errMsg = "Unable to create PowerFlex Volume due to " + e.getMessage(); + LOGGER.warn(errMsg); + throw new CloudRuntimeException(errMsg, e); + } + } + + private String createTemplateVolume(TemplateInfo templateInfo, long storagePoolId) { + LOGGER.debug("Creating PowerFlex template volume"); + + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + Preconditions.checkArgument(templateInfo != null, "templateInfo cannot be null"); + Preconditions.checkArgument(storagePoolId > 0, "storagePoolId should be > 0"); + Preconditions.checkArgument(storagePool != null && storagePool.getHostAddress() != null, "storagePool and host address should not be null"); + + try { + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + final String scaleIOStoragePoolId = storagePool.getPath(); + final Long sizeInBytes = templateInfo.getSize(); + final long sizeInGb = (long) Math.ceil(sizeInBytes / (1024.0 * 1024.0 * 1024.0)); + final String scaleIOVolumeName = String.format("%s-%s-%s-%s", ScaleIOUtil.TEMPLATE_PREFIX, templateInfo.getId(), + storagePool.getUuid().split("-")[0].substring(4), 
ManagementServerImpl.customCsIdentifier.value()); + + org.apache.cloudstack.storage.datastore.api.Volume scaleIOVolume = null; + scaleIOVolume = client.createVolume(scaleIOVolumeName, scaleIOStoragePoolId, (int) sizeInGb, Storage.ProvisioningType.THIN); + + if (scaleIOVolume == null) { + throw new CloudRuntimeException("Failed to create template volume on PowerFlex cluster"); + } + + VMTemplateStoragePoolVO templatePoolRef = vmTemplatePoolDao.findByPoolTemplate(storagePoolId, templateInfo.getId()); + templatePoolRef.setInstallPath(scaleIOVolume.getId()); + templatePoolRef.setLocalDownloadPath(scaleIOVolume.getId()); + templatePoolRef.setTemplateSize(scaleIOVolume.getSizeInKb() * 1024); + vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef); + + long capacityBytes = storagePool.getCapacityBytes(); + long usedBytes = storagePool.getUsedBytes(); + usedBytes += templatePoolRef.getTemplateSize(); + storagePool.setUsedBytes(usedBytes > capacityBytes ? capacityBytes : usedBytes); + storagePoolDao.update(storagePoolId, storagePool); + + return scaleIOVolume.getId(); + } catch (Exception e) { + String errMsg = "Unable to create PowerFlex template volume due to " + e.getMessage(); + LOGGER.warn(errMsg); + throw new CloudRuntimeException(errMsg, e); + } + } + + @Override + public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { + String scaleIOVolId = null; + String errMsg = null; + try { + if (dataObject.getType() == DataObjectType.VOLUME) { + LOGGER.debug("createAsync - creating volume"); + scaleIOVolId = createVolume((VolumeInfo) dataObject, dataStore.getId()); + } else if (dataObject.getType() == DataObjectType.TEMPLATE) { + LOGGER.debug("createAsync - creating template"); + scaleIOVolId = createTemplateVolume((TemplateInfo)dataObject, dataStore.getId()); + } else { + errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync"; + LOGGER.error(errMsg); + } + } catch (Exception ex) { + 
errMsg = ex.getMessage(); + LOGGER.error(errMsg); + if (callback == null) { + throw ex; + } + } + + if (callback != null) { + CreateCmdResult result = new CreateCmdResult(scaleIOVolId, new Answer(null, errMsg == null, errMsg)); + result.setResult(errMsg); + callback.complete(result); + } + } + + @Override + public void deleteAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback callback) { + Preconditions.checkArgument(dataObject != null, "dataObject cannot be null"); + + long storagePoolId = dataStore.getId(); + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + Preconditions.checkArgument(storagePoolId > 0, "storagePoolId should be > 0"); + Preconditions.checkArgument(storagePool != null && storagePool.getHostAddress() != null, "storagePool and host address should not be null"); + + String errMsg = null; + String scaleIOVolumeId = null; + try { + boolean deleteResult = false; + if (dataObject.getType() == DataObjectType.VOLUME) { + LOGGER.debug("deleteAsync - deleting volume"); + scaleIOVolumeId = ((VolumeInfo) dataObject).getPath(); + } else if (dataObject.getType() == DataObjectType.SNAPSHOT) { + LOGGER.debug("deleteAsync - deleting snapshot"); + scaleIOVolumeId = ((SnapshotInfo) dataObject).getPath(); + } else if (dataObject.getType() == DataObjectType.TEMPLATE) { + LOGGER.debug("deleteAsync - deleting template"); + scaleIOVolumeId = ((TemplateInfo) dataObject).getInstallPath(); + } else { + errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to deleteAsync"; + } + + try { + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + deleteResult = client.deleteVolume(scaleIOVolumeId); + if (!deleteResult) { + errMsg = "Failed to delete PowerFlex volume with id: " + scaleIOVolumeId; + } + + long usedBytes = storagePool.getUsedBytes(); + usedBytes -= dataObject.getSize(); + storagePool.setUsedBytes(usedBytes < 0 ? 
0 : usedBytes); + storagePoolDao.update(storagePoolId, storagePool); + } catch (Exception e) { + errMsg = "Unable to delete PowerFlex volume: " + scaleIOVolumeId + " due to " + e.getMessage(); + LOGGER.warn(errMsg); + throw new CloudRuntimeException(errMsg, e); + } + } catch (Exception ex) { + errMsg = ex.getMessage(); + LOGGER.error(errMsg); + if (callback == null) { + throw ex; + } + } + + if (callback != null) { + CommandResult result = new CommandResult(); + result.setResult(errMsg); + callback.complete(result); + } + } + + @Override + public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback callback) { + copyAsync(srcData, destData, null, callback); + } + + @Override + public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { + DataStore srcStore = destData.getDataStore(); + DataStore destStore = destData.getDataStore(); + if (srcStore.getRole() == DataStoreRole.Primary && srcData.getType() == DataObjectType.TEMPLATE + && (destStore.getRole() == DataStoreRole.Primary && destData.getType() == DataObjectType.VOLUME)) { + int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); + Answer answer = null; + String errMsg = null; + + try { + LOGGER.debug("Initiating copy from PowerFlex template volume on host " + destHost != null ? destHost.getId() : ""); + CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + + EndPoint ep = destHost != null ? 
RemoteHostEndPoint.getHypervisorHostEndPoint(destHost) : selector.select(srcData.getDataStore()); + if (ep == null) { + errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; + LOGGER.error(errMsg); + answer = new Answer(cmd, false, errMsg); + } else { + answer = ep.sendMessage(cmd); + } + + if (answer != null && !answer.getResult()) { + errMsg = answer.getDetails(); + } + } catch (Exception e) { + LOGGER.debug("Failed to copy due to ", e); + errMsg = e.toString(); + } + + CopyCommandResult result = new CopyCommandResult(null, answer); + result.setResult(errMsg); + callback.complete(result); + } + } + + @Override + public boolean canCopy(DataObject srcData, DataObject destData) { + DataStore srcStore = destData.getDataStore(); + DataStore destStore = destData.getDataStore(); + if (srcStore.getRole() == DataStoreRole.Primary && srcData.getType() == DataObjectType.TEMPLATE + && (destStore.getRole() == DataStoreRole.Primary && destData.getType() == DataObjectType.VOLUME)) { + StoragePoolVO srcPoolVO = storagePoolDao.findById(srcStore.getId()); + StoragePoolVO destPoolVO = storagePoolDao.findById(destStore.getId()); + if (srcPoolVO != null && srcPoolVO.getPoolType() == Storage.StoragePoolType.PowerFlex + && destPoolVO != null && destPoolVO.getPoolType() == Storage.StoragePoolType.PowerFlex) { + return true; + } + } + return false; + } + + private void resizeVolume(VolumeInfo volumeInfo) { + LOGGER.debug("Resizing PowerFlex volume"); + + Preconditions.checkArgument(volumeInfo != null, "volumeInfo cannot be null"); + + try { + String scaleIOVolumeId = volumeInfo.getPath(); + Long storagePoolId = volumeInfo.getPoolId(); + + ResizeVolumePayload payload = (ResizeVolumePayload)volumeInfo.getpayload(); + long newSizeInBytes = payload.newSize != null ? 
payload.newSize : volumeInfo.getSize(); + // Only increase size is allowed and size should be specified in granularity of 8 GB + if (newSizeInBytes <= volumeInfo.getSize()) { + throw new CloudRuntimeException("Only increase size is allowed for volume: " + volumeInfo.getName()); + } + + org.apache.cloudstack.storage.datastore.api.Volume scaleIOVolume = null; + long newSizeInGB = newSizeInBytes / (1024 * 1024 * 1024); + long newSizeIn8gbBoundary = (long) (Math.ceil(newSizeInGB / 8.0) * 8.0); + final ScaleIOGatewayClient client = getScaleIOClient(storagePoolId); + scaleIOVolume = client.resizeVolume(scaleIOVolumeId, (int) newSizeIn8gbBoundary); + if (scaleIOVolume == null) { + throw new CloudRuntimeException("Failed to resize volume: " + volumeInfo.getName()); + } + + VolumeVO volume = volumeDao.findById(volumeInfo.getId()); + long oldVolumeSize = volume.getSize(); + volume.setSize(scaleIOVolume.getSizeInKb() * 1024); + volumeDao.update(volume.getId(), volume); + + StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId); + long capacityBytes = storagePool.getCapacityBytes(); + long usedBytes = storagePool.getUsedBytes(); + + long newVolumeSize = volume.getSize(); + usedBytes += newVolumeSize - oldVolumeSize; + storagePool.setUsedBytes(usedBytes > capacityBytes ? 
capacityBytes : usedBytes); + storagePoolDao.update(storagePoolId, storagePool); + } catch (Exception e) { + String errMsg = "Unable to resize PowerFlex volume: " + volumeInfo.getId() + " due to " + e.getMessage(); + LOGGER.warn(errMsg); + throw new CloudRuntimeException(errMsg, e); + } + } + + @Override + public void resize(DataObject dataObject, AsyncCompletionCallback callback) { + String scaleIOVolumeId = null; + String errMsg = null; + try { + if (dataObject.getType() == DataObjectType.VOLUME) { + scaleIOVolumeId = ((VolumeInfo) dataObject).getPath(); + resizeVolume((VolumeInfo) dataObject); + } else { + errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to resize"; + } + } catch (Exception ex) { + errMsg = ex.getMessage(); + LOGGER.error(errMsg); + if (callback == null) { + throw ex; + } + } + + if (callback != null) { + CreateCmdResult result = new CreateCmdResult(scaleIOVolumeId, new Answer(null, errMsg == null, errMsg)); + result.setResult(errMsg); + callback.complete(result); + } + } + + @Override + public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) { + } + + @Override + public boolean canProvideStorageStats() { + return true; + } + + @Override + public Pair getStorageStats(StoragePool storagePool) { + Preconditions.checkArgument(storagePool != null, "storagePool cannot be null"); + + try { + final ScaleIOGatewayClient client = getScaleIOClient(storagePool.getId()); + StoragePoolStatistics poolStatistics = client.getStoragePoolStatistics(storagePool.getPath()); + if (poolStatistics != null && poolStatistics.getNetMaxCapacityInBytes() != null && poolStatistics.getNetUsedCapacityInBytes() != null) { + Long capacityBytes = poolStatistics.getNetMaxCapacityInBytes(); + Long usedBytes = poolStatistics.getNetUsedCapacityInBytes(); + return new Pair(capacityBytes, usedBytes); + } + } catch (Exception e) { + String errMsg = "Unable to get storage stats for the pool: " + 
storagePool.getId() + " due to " + e.getMessage(); + LOGGER.warn(errMsg); + throw new CloudRuntimeException(errMsg, e); + } + + return null; + } + + @Override + public boolean canProvideVolumeStats() { + return true; + } + + @Override + public Pair getVolumeStats(StoragePool storagePool, String volumeId) { + Preconditions.checkArgument(storagePool != null, "storagePool cannot be null"); + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeId), "volumeId cannot be null"); + + try { + final ScaleIOGatewayClient client = getScaleIOClient(storagePool.getId()); + VolumeStatistics volumeStatistics = client.getVolumeStatistics(volumeId); + if (volumeStatistics != null) { + Long provisionedSizeInBytes = volumeStatistics.getNetProvisionedAddressesInBytes(); + Long allocatedSizeInBytes = volumeStatistics.getAllocatedSizeInBytes(); + return new Pair(provisionedSizeInBytes, allocatedSizeInBytes); + } + } catch (Exception e) { + String errMsg = "Unable to get stats for the volume: " + volumeId + " in the pool: " + storagePool.getId() + " due to " + e.getMessage(); + LOGGER.warn(errMsg); + throw new CloudRuntimeException(errMsg, e); + } + + return null; + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + if (host == null || pool == null) { + return false; + } + + try { + final ScaleIOGatewayClient client = getScaleIOClient(pool.getId()); + return client.isSdcConnected(host.getPrivateIpAddress()); + } catch (Exception e) { + LOGGER.warn("Unable to check the host: " + host.getId() + " access to storage pool: " + pool.getId() + " due to " + e.getMessage(), e); + return false; + } + } + + private void alertHostSdcDisconnection(Host host) { + if (host == null) { + return; + } + + LOGGER.warn("SDC not connected on the host: " + host.getId()); + String msg = "SDC not connected on the host: " + host.getId() + ", reconnect the SDC to MDM"; + alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), 
"SDC disconnected on host: " + host.getUuid(), msg); + } +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java new file mode 100644 index 000000000000..47bf12837119 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycle.java @@ -0,0 +1,420 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.datastore.lifecycle; + +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URLDecoder; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import javax.inject.Inject; + +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.commons.collections.CollectionUtils; +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.apache.log4j.Logger; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.capacity.CapacityManager; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.resource.ResourceManager; +import 
com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolAutomation; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.template.TemplateManager; +import com.cloud.utils.UriUtils; +import com.cloud.utils.exception.CloudRuntimeException; + +public class ScaleIOPrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle { + private static final Logger LOGGER = Logger.getLogger(ScaleIOPrimaryDataStoreLifeCycle.class); + + @Inject + private ClusterDao clusterDao; + @Inject + private PrimaryDataStoreDao primaryDataStoreDao; + @Inject + private StoragePoolHostDao storagePoolHostDao; + @Inject + private PrimaryDataStoreHelper dataStoreHelper; + @Inject + private ResourceManager resourceManager; + @Inject + private StorageManager storageMgr; + @Inject + private StoragePoolAutomation storagePoolAutomation; + @Inject + private CapacityManager capacityMgr; + @Inject + private TemplateManager templateMgr; + @Inject + private AgentManager agentMgr; + + public ScaleIOPrimaryDataStoreLifeCycle() { + } + + private org.apache.cloudstack.storage.datastore.api.StoragePool findStoragePool(String url, String username, String password, String storagePoolName) { + try { + ScaleIOGatewayClient client = ScaleIOGatewayClient.getClient(url, username, password, false, 60); + List storagePools = client.listStoragePools(); + for (org.apache.cloudstack.storage.datastore.api.StoragePool pool : storagePools) { + if (pool.getName().equals(storagePoolName)) { + LOGGER.info("Found PowerFlex storage pool: " + storagePoolName); + final org.apache.cloudstack.storage.datastore.api.StoragePoolStatistics poolStatistics = client.getStoragePoolStatistics(pool.getId()); + pool.setStatistics(poolStatistics); + + String systemId = 
client.getSystemId(pool.getProtectionDomainId()); + pool.setSystemId(systemId); + return pool; + } + } + } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { + LOGGER.error("Failed to add storage pool", e); + throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to validate storage pool"); + } + throw new CloudRuntimeException("Failed to find the provided storage pool name in discovered PowerFlex storage pools"); + } + + @SuppressWarnings("unchecked") + @Override + public DataStore initialize(Map dsInfos) { + String url = (String) dsInfos.get("url"); + Long zoneId = (Long) dsInfos.get("zoneId"); + String dataStoreName = (String) dsInfos.get("name"); + String providerName = (String) dsInfos.get("providerName"); + Long capacityBytes = (Long)dsInfos.get("capacityBytes"); + Long capacityIops = (Long)dsInfos.get("capacityIops"); + String tags = (String)dsInfos.get("tags"); + Map details = (Map) dsInfos.get("details"); + + URI uri = null; + try { + uri = new URI(UriUtils.encodeURIComponent(url)); + if (uri.getScheme() == null || !uri.getScheme().equalsIgnoreCase("powerflex")) { + throw new InvalidParameterValueException("scheme is invalid for url: " + url + ", should be powerflex://username:password@gatewayhost/pool"); + } + } catch (Exception ignored) { + throw new InvalidParameterValueException(url + " is not a valid uri"); + } + + String storagePoolName = null; + try { + storagePoolName = URLDecoder.decode(uri.getPath(), "UTF-8"); + } catch (UnsupportedEncodingException e) { + LOGGER.error("[ignored] we are on a platform not supporting \"UTF-8\"!?!", e); + } + if (storagePoolName == null) { // if decoding fails, use getPath() anyway + storagePoolName = uri.getPath(); + } + storagePoolName = storagePoolName.replaceFirst("/", ""); + + final String storageHost = uri.getHost(); + final int port = uri.getPort(); + String gatewayApiURL = null; + if (port == -1) { + gatewayApiURL = 
String.format("https://%s/api", storageHost); + } else { + gatewayApiURL = String.format("https://%s:%d/api", storageHost, port); + } + + final String userInfo = uri.getUserInfo(); + final String gatewayUsername = userInfo.split(":")[0]; + final String gatewayPassword = userInfo.split(":")[1]; + + List storagePoolVO = primaryDataStoreDao.findPoolsByProvider(ScaleIOUtil.PROVIDER_NAME); + if (CollectionUtils.isNotEmpty(storagePoolVO)) { + for (StoragePoolVO poolVO : storagePoolVO) { + Map poolDetails = primaryDataStoreDao.getDetails(poolVO.getId()); + String poolUrl = poolDetails.get(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT); + String poolName = poolDetails.get(ScaleIOGatewayClient.STORAGE_POOL_NAME); + + if (gatewayApiURL.equals(poolUrl) && storagePoolName.equals(poolName)) { + throw new IllegalArgumentException("PowerFlex storage pool: " + storagePoolName + " already exists, please specify other storage pool."); + } + } + } + + final org.apache.cloudstack.storage.datastore.api.StoragePool scaleIOPool = this.findStoragePool(gatewayApiURL, + gatewayUsername, gatewayPassword, storagePoolName); + + PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters(); + parameters.setZoneId(zoneId); + parameters.setName(dataStoreName); + parameters.setProviderName(providerName); + parameters.setManaged(true); + parameters.setHost(storageHost); + parameters.setPath(scaleIOPool.getId()); + parameters.setUserInfo(userInfo); + parameters.setType(Storage.StoragePoolType.PowerFlex); + parameters.setHypervisorType(Hypervisor.HypervisorType.KVM); + parameters.setUuid(UUID.randomUUID().toString()); + parameters.setTags(tags); + + StoragePoolStatistics poolStatistics = scaleIOPool.getStatistics(); + if (poolStatistics != null) { + if (capacityBytes == null) { + parameters.setCapacityBytes(poolStatistics.getNetMaxCapacityInBytes()); + } + parameters.setUsedBytes(poolStatistics.getNetUsedCapacityInBytes()); + } + + if (capacityBytes != null) { + 
parameters.setCapacityBytes(capacityBytes); + } + + if (capacityIops != null) { + parameters.setCapacityIops(capacityIops); + } + + details.put(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT, gatewayApiURL); + details.put(ScaleIOGatewayClient.GATEWAY_API_USERNAME, gatewayUsername); + details.put(ScaleIOGatewayClient.GATEWAY_API_PASSWORD, gatewayPassword); + details.put(ScaleIOGatewayClient.STORAGE_POOL_NAME, storagePoolName); + details.put(ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID, scaleIOPool.getSystemId()); + parameters.setDetails(details); + + return dataStoreHelper.createPrimaryDataStore(parameters); + } + + @Override + public boolean attachCluster(DataStore dataStore, ClusterScope scope) { + final ClusterVO cluster = clusterDao.findById(scope.getScopeId()); + if (!isSupportedHypervisorType(cluster.getHypervisorType())) { + throw new CloudRuntimeException("Unsupported hypervisor type: " + cluster.getHypervisorType().toString()); + } + + List connectedSdcIps = null; + try { + Map dataStoreDetails = primaryDataStoreDao.getDetails(dataStore.getId()); + String url = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT); + String username = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_USERNAME); + String password = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_PASSWORD); + ScaleIOGatewayClient client = ScaleIOGatewayClient.getClient(url, username, password, false, 60); + connectedSdcIps = client.listConnectedSdcIps(); + } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { + LOGGER.error("Failed to create storage pool", e); + throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to create storage pool"); + } + + if (connectedSdcIps == null || connectedSdcIps.isEmpty()) { + LOGGER.debug("No connected SDCs found for the PowerFlex storage pool"); + throw new CloudRuntimeException("Failed to create storage pool as connected SDCs not found"); + } + + PrimaryDataStoreInfo 
primaryDataStoreInfo = (PrimaryDataStoreInfo) dataStore; + + List hostsInCluster = resourceManager.listAllUpAndEnabledHosts(Host.Type.Routing, primaryDataStoreInfo.getClusterId(), + primaryDataStoreInfo.getPodId(), primaryDataStoreInfo.getDataCenterId()); + if (hostsInCluster.isEmpty()) { + primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); + throw new CloudRuntimeException("No hosts are Up to associate a storage pool with in cluster: " + primaryDataStoreInfo.getClusterId()); + } + + LOGGER.debug("Attaching the pool to each of the hosts in the cluster: " + primaryDataStoreInfo.getClusterId()); + List poolHosts = new ArrayList(); + for (HostVO host : hostsInCluster) { + try { + if (connectedSdcIps.contains(host.getPrivateIpAddress())) { + storageMgr.connectHostToSharedPool(host.getId(), primaryDataStoreInfo.getId()); + poolHosts.add(host); + } + } catch (Exception e) { + LOGGER.warn("Unable to establish a connection between " + host + " and " + primaryDataStoreInfo, e); + } + } + + if (poolHosts.isEmpty()) { + LOGGER.warn("No host can access storage pool '" + primaryDataStoreInfo + "' on cluster '" + primaryDataStoreInfo.getClusterId() + "'."); + primaryDataStoreDao.expunge(primaryDataStoreInfo.getId()); + throw new CloudRuntimeException("Failed to create storage pool in the cluster: " + primaryDataStoreInfo.getClusterId() + " as it is not accessible to hosts"); + } + + dataStoreHelper.attachCluster(dataStore); + return true; + } + + @Override + public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) { + return true; + } + + @Override + public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) { + if (!isSupportedHypervisorType(hypervisorType)) { + throw new CloudRuntimeException("Unsupported hypervisor type: " + hypervisorType.toString()); + } + + List connectedSdcIps = null; + try { + Map dataStoreDetails = primaryDataStoreDao.getDetails(dataStore.getId()); + String url = 
dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT); + String username = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_USERNAME); + String password = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_PASSWORD); + ScaleIOGatewayClient client = ScaleIOGatewayClient.getClient(url, username, password, false, 60); + connectedSdcIps = client.listConnectedSdcIps(); + } catch (NoSuchAlgorithmException | KeyManagementException | URISyntaxException e) { + LOGGER.error("Failed to create storage pool", e); + throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to create storage pool"); + } + + if (connectedSdcIps == null || connectedSdcIps.isEmpty()) { + LOGGER.debug("No connected SDCs found for the PowerFlex storage pool"); + throw new CloudRuntimeException("Failed to create storage pool as connected SDCs not found"); + } + + LOGGER.debug("Attaching the pool to each of the hosts in the zone: " + scope.getScopeId()); + List hosts = resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(hypervisorType, scope.getScopeId()); + List poolHosts = new ArrayList(); + for (HostVO host : hosts) { + try { + if (connectedSdcIps.contains(host.getPrivateIpAddress())) { + storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + poolHosts.add(host); + } + } catch (Exception e) { + LOGGER.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + } + } + if (poolHosts.isEmpty()) { + LOGGER.warn("No host can access storage pool " + dataStore + " in this zone."); + primaryDataStoreDao.expunge(dataStore.getId()); + throw new CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts."); + } + + dataStoreHelper.attachZone(dataStore); + return true; + } + + @Override + public boolean maintain(DataStore store) { + storagePoolAutomation.maintain(store); + dataStoreHelper.maintain(store); + return true; + } + + @Override + public boolean cancelMaintain(DataStore store) { 
+ dataStoreHelper.cancelMaintain(store); + storagePoolAutomation.cancelMaintain(store); + return true; + } + + @Override + public void enableStoragePool(DataStore dataStore) { + dataStoreHelper.enable(dataStore); + } + + @Override + public void disableStoragePool(DataStore dataStore) { + dataStoreHelper.disable(dataStore); + } + + @Override + public boolean deleteDataStore(DataStore dataStore) { + StoragePool storagePool = (StoragePool)dataStore; + StoragePoolVO storagePoolVO = primaryDataStoreDao.findById(storagePool.getId()); + if (storagePoolVO == null) { + return false; + } + + List unusedTemplatesInPool = templateMgr.getUnusedTemplatesInPool(storagePoolVO); + for (VMTemplateStoragePoolVO templatePoolVO : unusedTemplatesInPool) { + if (templatePoolVO.getDownloadState() == VMTemplateStorageResourceAssoc.Status.DOWNLOADED) { + templateMgr.evictTemplateFromStoragePool(templatePoolVO); + } + } + + List poolHostVOs = storagePoolHostDao.listByPoolId(dataStore.getId()); + for (StoragePoolHostVO poolHostVO : poolHostVOs) { + DeleteStoragePoolCommand deleteStoragePoolCommand = new DeleteStoragePoolCommand(storagePool); + final Answer answer = agentMgr.easySend(poolHostVO.getHostId(), deleteStoragePoolCommand); + if (answer != null && answer.getResult()) { + LOGGER.info("Successfully deleted storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId()); + } else { + if (answer != null) { + LOGGER.error("Failed to delete storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId() + " , result: " + answer.getResult()); + } else { + LOGGER.error("Failed to delete storage pool: " + storagePool.getId() + " from host: " + poolHostVO.getHostId()); + } + } + } + + return dataStoreHelper.deletePrimaryDataStore(dataStore); + } + + @Override + public boolean migrateToObjectStore(DataStore store) { + return false; + } + + @Override + public void updateStoragePool(StoragePool storagePool, Map details) { + String capacityBytes = 
details.get(PrimaryDataStoreLifeCycle.CAPACITY_BYTES); + StoragePoolVO storagePoolVO = primaryDataStoreDao.findById(storagePool.getId()); + + try { + if (capacityBytes == null || capacityBytes.isBlank()) { + return; + } + + long usedBytes = capacityMgr.getUsedBytes(storagePoolVO); + if (Long.parseLong(capacityBytes) < usedBytes) { + throw new CloudRuntimeException("Cannot reduce the number of bytes for this storage pool as it would lead to an insufficient number of bytes"); + } + + primaryDataStoreDao.updateCapacityBytes(storagePool.getId(), Long.parseLong(capacityBytes)); + LOGGER.info("Storage pool successfully updated"); + } catch (Throwable e) { + throw new CloudRuntimeException("Failed to update the storage pool" + e); + } + } + + private static boolean isSupportedHypervisorType(Hypervisor.HypervisorType hypervisorType) { + return Hypervisor.HypervisorType.KVM.equals(hypervisorType); + } +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java new file mode 100644 index 000000000000..8096d0941893 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOHostListener.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.datastore.provider; + +import java.net.URISyntaxException; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.util.Map; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.log4j.Logger; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.ModifyStoragePoolAnswer; +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.alert.AlertManager; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.exception.CloudRuntimeException; + +public class ScaleIOHostListener implements HypervisorHostListener { + private static final Logger s_logger = Logger.getLogger(ScaleIOHostListener.class); + + @Inject private AgentManager _agentMgr; + @Inject private AlertManager _alertMgr; + @Inject private DataStoreManager _dataStoreMgr; + @Inject private HostDao _hostDao; + @Inject private StoragePoolHostDao _storagePoolHostDao; + @Inject private 
PrimaryDataStoreDao _primaryDataStoreDao; + + @Override + public boolean hostAdded(long hostId) { + return true; + } + + @Override + public boolean hostConnect(long hostId, long poolId) { + HostVO host = _hostDao.findById(hostId); + if (host == null) { + s_logger.error("Failed to add host by HostListener as host was not found with id : " + hostId); + return false; + } + + if (!isHostSdcConnected(host.getPrivateIpAddress(), poolId)) { + s_logger.warn("SDC not connected on the host: " + hostId); + String msg = "SDC not connected on the host: " + hostId + ", reconnect the SDC to MDM and restart agent"; + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, host.getDataCenterId(), host.getPodId(), "SDC disconnected on host: " + host.getUuid(), msg); + return false; + } + + StoragePoolHostVO storagePoolHost = _storagePoolHostDao.findByPoolHost(poolId, hostId); + if (storagePoolHost == null) { + storagePoolHost = new StoragePoolHostVO(poolId, hostId, ""); + _storagePoolHostDao.persist(storagePoolHost); + } + + StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool); + sendModifyStoragePoolCommand(cmd, storagePool, hostId); + return true; + } + + private boolean isHostSdcConnected(String hostIpAddress, long poolId) { + try { + Map dataStoreDetails = _primaryDataStoreDao.getDetails(poolId); + String url = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT); + String username = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_USERNAME); + String password = dataStoreDetails.get(ScaleIOGatewayClient.GATEWAY_API_PASSWORD); + final int clientTimeout = StorageManager.STORAGE_POOL_CLIENT_TIMEOUT.valueIn(poolId); + ScaleIOGatewayClient client = ScaleIOGatewayClient.getClient(url, username, password, false, clientTimeout); + return client.isSdcConnected(hostIpAddress); + } catch (NoSuchAlgorithmException | KeyManagementException | 
URISyntaxException e) { + s_logger.error("Failed to check host sdc connection", e); + throw new CloudRuntimeException("Failed to establish connection with PowerFlex Gateway to check host sdc connection"); + } + } + + private void sendModifyStoragePoolCommand(ModifyStoragePoolCommand cmd, StoragePool storagePool, long hostId) { + Answer answer = _agentMgr.easySend(hostId, cmd); + + if (answer == null) { + throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getId() + ")"); + } + + if (!answer.getResult()) { + String msg = "Unable to attach storage pool " + storagePool.getId() + " to host " + hostId; + + _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); + + throw new CloudRuntimeException("Unable to establish a connection from agent to storage pool " + storagePool.getId() + " due to " + answer.getDetails() + + " (" + storagePool.getId() + ")"); + } + + assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; Pool = " + storagePool.getId() + " Host = " + hostId; + + s_logger.info("Connection established between storage pool " + storagePool + " and host: " + hostId); + } + + @Override + public boolean hostDisconnected(long hostId, long poolId) { + StoragePoolHostVO storagePoolHost = _storagePoolHostDao.findByPoolHost(poolId, hostId); + if (storagePoolHost != null) { + _storagePoolHostDao.deleteStoragePoolHostDetails(hostId, poolId); + } + + return true; + } + + @Override + public boolean hostAboutToBeRemoved(long hostId) { + return true; + } + + @Override + public boolean hostRemoved(long hostId, long clusterId) { + return true; + } +} diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOPrimaryDatastoreProvider.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOPrimaryDatastoreProvider.java 
new file mode 100644 index 000000000000..0cc82c0d9f1c --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/provider/ScaleIOPrimaryDatastoreProvider.java @@ -0,0 +1,77 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.provider; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; +import org.apache.cloudstack.storage.datastore.driver.ScaleIOPrimaryDataStoreDriver; +import org.apache.cloudstack.storage.datastore.lifecycle.ScaleIOPrimaryDataStoreLifeCycle; +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.log4j.Logger; + +import com.cloud.utils.component.ComponentContext; + +public class ScaleIOPrimaryDatastoreProvider implements PrimaryDataStoreProvider { + private static final Logger LOGGER = Logger.getLogger(ScaleIOPrimaryDatastoreProvider.class); + + private DataStoreLifeCycle lifeCycle; + private PrimaryDataStoreDriver driver; + private HypervisorHostListener listener; + + @Override + public DataStoreLifeCycle getDataStoreLifeCycle() { + return lifeCycle; + } + + @Override + public DataStoreDriver getDataStoreDriver() { + return driver; + } + + @Override + public HypervisorHostListener getHostListener() { + return listener; + } + + @Override + public String getName() { + return ScaleIOUtil.PROVIDER_NAME; + } + + @Override + public boolean configure(Map params) { + lifeCycle = ComponentContext.inject(ScaleIOPrimaryDataStoreLifeCycle.class); + driver = ComponentContext.inject(ScaleIOPrimaryDataStoreDriver.class); + listener = ComponentContext.inject(ScaleIOHostListener.class); + + return true; + } + + @Override + public Set getTypes() { + Set types = new HashSet(); + types.add(DataStoreProviderType.PRIMARY); + return types; + } +} diff --git 
a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java new file mode 100644 index 000000000000..d28d72c51ca3 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/util/ScaleIOUtil.java @@ -0,0 +1,98 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.storage.datastore.util; + +import org.apache.log4j.Logger; + +import com.cloud.utils.script.Script; + +public class ScaleIOUtil { + private static final Logger LOGGER = Logger.getLogger(ScaleIOUtil.class); + + public static final String PROVIDER_NAME = "PowerFlex"; + + // Use prefix for CloudStack resources + public static final String VOLUME_PREFIX = "vol"; + public static final String TEMPLATE_PREFIX = "tmpl"; + public static final String SNAPSHOT_PREFIX = "snap"; + public static final String VMSNAPSHOT_PREFIX = "vmsnap"; + + public static final int IDENTIFIER_LENGTH = 16; + public static final Long MINIMUM_ALLOWED_IOPS_LIMIT = Long.valueOf(10); + + public static final String DISK_PATH = "/dev/disk/by-id"; + public static final String DISK_NAME_PREFIX = "emc-vol-"; + public static final String DISK_NAME_PREFIX_FILTER = DISK_NAME_PREFIX + "*-"; + + private static final String AGENT_PROPERTIES_FILE = "/etc/cloudstack/agent/agent.properties"; + + private static final String DEFAULT_SDC_HOME_PATH = "/opt/emc/scaleio/sdc"; + private static final String SDC_HOME_PARAMETER = "powerflex.sdc.home.dir"; + private static final String SDC_HOME_PATH = getSdcHomePath(); + + private static final String RESCAN_CMD = "drv_cfg --rescan"; + private static final String QUERY_VOLUMES_CMD = "drv_cfg --query_vols"; + // Sample output for cmd: drv_cfg --query_vols: + // Retrieved 2 volume(s) + // VOL-ID 6c33633100000009 MDM-ID 218ce1797566a00f + // VOL-ID 6c3362a30000000a MDM-ID 218ce1797566a00f + + public static String getSdcHomePath() { + String sdcHomePath = DEFAULT_SDC_HOME_PATH; + String sdcHomePropertyCmdFormat = "sed -n '/%s/p' '%s' 2>/dev/null | sed 's/%s=//g' 2>/dev/null"; + String sdcHomeCmd = String.format(sdcHomePropertyCmdFormat, SDC_HOME_PARAMETER, AGENT_PROPERTIES_FILE, SDC_HOME_PARAMETER); + + String result = Script.runSimpleBashScript(sdcHomeCmd); + if (result == null) { + LOGGER.warn("Failed to get sdc home path from agent.properties, 
fallback to default path"); + } else { + sdcHomePath = result; + } + + return sdcHomePath; + } + + public static final void rescanForNewVolumes() { + // Detecting new volumes + String rescanCmd = ScaleIOUtil.SDC_HOME_PATH + "/bin/" + ScaleIOUtil.RESCAN_CMD; + + String result = Script.runSimpleBashScript(rescanCmd); + if (result == null) { + LOGGER.warn("Failed to rescan for new volumes"); + } + } + + public static final String getSystemIdForVolume(String volumeId) { + //query_vols outputs "VOL-ID MDM-ID " for a volume with ID: + String queryDiskCmd = SDC_HOME_PATH + "/bin/" + ScaleIOUtil.QUERY_VOLUMES_CMD; + queryDiskCmd += "|grep " + volumeId + "|awk '{print $4}'"; + + String result = Script.runSimpleBashScript(queryDiskCmd); + if (result == null) { + LOGGER.warn("Query volumes failed to get volume: " + volumeId + " details for system id"); + return null; + } + + if (result.isEmpty()) { + LOGGER.warn("Query volumes doesn't list volume: " + volumeId + ", probably volume is not mapped yet, or sdc not connected"); + return null; + } + + return result; + } +} diff --git a/plugins/storage/volume/scaleio/src/main/resources/META-INF/cloudstack/storage-volume-scaleio/module.properties b/plugins/storage/volume/scaleio/src/main/resources/META-INF/cloudstack/storage-volume-scaleio/module.properties new file mode 100755 index 000000000000..5bf9aa0172e2 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/resources/META-INF/cloudstack/storage-volume-scaleio/module.properties @@ -0,0 +1,21 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +name=storage-volume-scaleio +parent=storage diff --git a/plugins/storage/volume/scaleio/src/main/resources/META-INF/cloudstack/storage-volume-scaleio/spring-storage-volume-scaleio-context.xml b/plugins/storage/volume/scaleio/src/main/resources/META-INF/cloudstack/storage-volume-scaleio/spring-storage-volume-scaleio-context.xml new file mode 100755 index 000000000000..8b86e212e299 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/main/resources/META-INF/cloudstack/storage-volume-scaleio/spring-storage-volume-scaleio-context.xml @@ -0,0 +1,35 @@ + + + + + + diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImplTest.java b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImplTest.java new file mode 100644 index 000000000000..10823102cf8d --- /dev/null +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/client/ScaleIOGatewayClientImplTest.java @@ -0,0 +1,48 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.storage.datastore.client; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.utils.exception.CloudRuntimeException; + +@RunWith(MockitoJUnitRunner.class) +public class ScaleIOGatewayClientImplTest { + + ScaleIOGatewayClientImpl client; + + @Before + public void setUp() throws Exception { + } + + @After + public void tearDown() throws Exception { + } + + @Test(expected = CloudRuntimeException.class) + public void testClient() throws Exception { + client = (ScaleIOGatewayClientImpl) ScaleIOGatewayClient.getClient("https://10.2.3.149/api", + "admin", "P@ssword123", false, 60); + } +} \ No newline at end of file diff --git a/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java new file mode 100644 index 000000000000..efe2c1dc64c3 --- /dev/null +++ b/plugins/storage/volume/scaleio/src/test/java/org/apache/cloudstack/storage/datastore/lifecycle/ScaleIOPrimaryDataStoreLifeCycleTest.java @@ -0,0 +1,254 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +// + +package org.apache.cloudstack.storage.datastore.lifecycle; + +import static com.google.common.truth.Truth.assertThat; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.mockito.MockitoAnnotations.initMocks; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClient; +import org.apache.cloudstack.storage.datastore.client.ScaleIOGatewayClientImpl; +import 
org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.provider.ScaleIOHostListener; +import org.apache.cloudstack.storage.datastore.util.ScaleIOUtil; +import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.Spy; +import org.powermock.api.mockito.PowerMockito; +import org.powermock.core.classloader.annotations.PrepareForTest; +import org.powermock.modules.junit4.PowerMockRunner; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.ModifyStoragePoolAnswer; +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.resource.ResourceManager; +import com.cloud.resource.ResourceState; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StorageManagerImpl; +import com.cloud.storage.StoragePoolAutomation; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.template.TemplateManager; +import com.cloud.utils.exception.CloudRuntimeException; + +@PrepareForTest(ScaleIOGatewayClient.class) +@RunWith(PowerMockRunner.class) +public class ScaleIOPrimaryDataStoreLifeCycleTest { + + @Mock + private PrimaryDataStoreDao primaryDataStoreDao; + @Mock + private PrimaryDataStoreHelper dataStoreHelper; + @Mock + private ResourceManager resourceManager; + @Mock + private StoragePoolAutomation storagePoolAutomation; + @Mock + private HostDao hostDao; + @Mock + private 
StoragePoolHostDao storagePoolHostDao; + @Mock + private DataStoreProviderManager dataStoreProviderMgr; + @Mock + private DataStoreProvider dataStoreProvider; + @Mock + private DataStoreManager dataStoreMgr; + @Mock + private PrimaryDataStore store; + @Mock + private TemplateManager templateMgr; + @Mock + private AgentManager agentMgr; + @Mock + ModifyStoragePoolAnswer answer; + + @Spy + @InjectMocks + private StorageManager storageMgr = new StorageManagerImpl(); + + @Spy + @InjectMocks + private HypervisorHostListener hostListener = new ScaleIOHostListener(); + + @InjectMocks + private ScaleIOPrimaryDataStoreLifeCycle scaleIOPrimaryDataStoreLifeCycleTest; + + @Before + public void setUp() { + initMocks(this); + } + + @Test + public void testAttachZone() throws Exception { + final DataStore dataStore = mock(DataStore.class); + when(dataStore.getId()).thenReturn(1L); + + Map mockDataStoreDetails = new HashMap<>(); + mockDataStoreDetails.put(ScaleIOGatewayClient.GATEWAY_API_ENDPOINT, "https://192.168.1.19/api"); + mockDataStoreDetails.put(ScaleIOGatewayClient.GATEWAY_API_USERNAME, "root"); + mockDataStoreDetails.put(ScaleIOGatewayClient.GATEWAY_API_PASSWORD, "Password@123"); + when(primaryDataStoreDao.getDetails(1L)).thenReturn(mockDataStoreDetails); + + PowerMockito.mockStatic(ScaleIOGatewayClient.class); + ScaleIOGatewayClientImpl client = mock(ScaleIOGatewayClientImpl.class); + when(ScaleIOGatewayClient.getClient("https://192.168.1.19/api", "root", "Password@123", false, 60)).thenReturn(client); + + List connectedSdcIps = new ArrayList<>(); + connectedSdcIps.add("192.168.1.1"); + connectedSdcIps.add("192.168.1.2"); + when(client.listConnectedSdcIps()).thenReturn(connectedSdcIps); + when(client.isSdcConnected(anyString())).thenReturn(true); + + final ZoneScope scope = new ZoneScope(1L); + + List hostList = new ArrayList(); + HostVO host1 = new HostVO(1L, "host01", Host.Type.Routing, "192.168.1.1", "255.255.255.0", null, null, null, null, null, null, null, null, 
null, null, + UUID.randomUUID().toString(), Status.Up, "1.0", null, null, 1L, null, 0, 0, "aa", 0, Storage.StoragePoolType.PowerFlex); + HostVO host2 = new HostVO(2L, "host02", Host.Type.Routing, "192.168.1.2", "255.255.255.0", null, null, null, null, null, null, null, null, null, null, + UUID.randomUUID().toString(), Status.Up, "1.0", null, null, 1L, null, 0, 0, "aa", 0, Storage.StoragePoolType.PowerFlex); + + host1.setResourceState(ResourceState.Enabled); + host2.setResourceState(ResourceState.Enabled); + hostList.add(host1); + hostList.add(host2); + when(resourceManager.listAllUpAndEnabledHostsInOneZoneByHypervisor(Hypervisor.HypervisorType.KVM, 1L)).thenReturn(hostList); + + when(dataStoreMgr.getDataStore(anyLong(), eq(DataStoreRole.Primary))).thenReturn(store); + when(store.getId()).thenReturn(1L); + when(store.getPoolType()).thenReturn(Storage.StoragePoolType.PowerFlex); + when(store.isShared()).thenReturn(true); + when(store.getName()).thenReturn("ScaleIOPool"); + when(store.getStorageProviderName()).thenReturn(ScaleIOUtil.PROVIDER_NAME); + + when(dataStoreProviderMgr.getDataStoreProvider(ScaleIOUtil.PROVIDER_NAME)).thenReturn(dataStoreProvider); + when(dataStoreProvider.getName()).thenReturn(ScaleIOUtil.PROVIDER_NAME); + storageMgr.registerHostListener(ScaleIOUtil.PROVIDER_NAME, hostListener); + + when(agentMgr.easySend(anyLong(), Mockito.any(ModifyStoragePoolCommand.class))).thenReturn(answer); + when(answer.getResult()).thenReturn(true); + + when(storagePoolHostDao.findByPoolHost(anyLong(), anyLong())).thenReturn(null); + + when(hostDao.findById(1L)).thenReturn(host1); + when(hostDao.findById(2L)).thenReturn(host2); + + when(dataStoreHelper.attachZone(Mockito.any(DataStore.class))).thenReturn(null); + + scaleIOPrimaryDataStoreLifeCycleTest.attachZone(dataStore, scope, Hypervisor.HypervisorType.KVM); + verify(storageMgr,times(2)).connectHostToSharedPool(Mockito.any(Long.class), Mockito.any(Long.class)); + 
verify(storagePoolHostDao,times(2)).persist(Mockito.any(StoragePoolHostVO.class)); + } + + @Test(expected = CloudRuntimeException.class) + public void testAttachZone_UnsupportedHypervisor() throws Exception { + final DataStore dataStore = mock(DataStore.class); + final ZoneScope scope = new ZoneScope(1L); + scaleIOPrimaryDataStoreLifeCycleTest.attachZone(dataStore, scope, Hypervisor.HypervisorType.VMware); + } + + @Test + public void testMaintain() { + final DataStore store = mock(DataStore.class); + when(storagePoolAutomation.maintain(any(DataStore.class))).thenReturn(true); + when(dataStoreHelper.maintain(any(DataStore.class))).thenReturn(true); + final boolean result = scaleIOPrimaryDataStoreLifeCycleTest.maintain(store); + assertThat(result).isTrue(); + } + + @Test + public void testCancelMaintain() { + final DataStore store = mock(DataStore.class); + when(dataStoreHelper.cancelMaintain(any(DataStore.class))).thenReturn(true); + when(storagePoolAutomation.cancelMaintain(any(DataStore.class))).thenReturn(true); + final boolean result = scaleIOPrimaryDataStoreLifeCycleTest.cancelMaintain(store); + assertThat(result).isTrue(); + } + + @Test + public void testEnableStoragePool() { + final DataStore dataStore = mock(DataStore.class); + when(dataStoreHelper.enable(any(DataStore.class))).thenReturn(true); + scaleIOPrimaryDataStoreLifeCycleTest.enableStoragePool(dataStore); + } + + @Test + public void testDisableStoragePool() { + final DataStore dataStore = mock(DataStore.class); + when(dataStoreHelper.disable(any(DataStore.class))).thenReturn(true); + scaleIOPrimaryDataStoreLifeCycleTest.disableStoragePool(dataStore); + } + + @Test + public void testDeleteDataStoreWithStoragePoolNull() { + final PrimaryDataStore store = mock(PrimaryDataStore.class); + when(primaryDataStoreDao.findById(anyLong())).thenReturn(null); + when(dataStoreHelper.deletePrimaryDataStore(any(DataStore.class))).thenReturn(true); + final boolean result = 
scaleIOPrimaryDataStoreLifeCycleTest.deleteDataStore(store); + assertThat(result).isFalse(); + } + + @Test + public void testDeleteDataStore() { + final PrimaryDataStore store = mock(PrimaryDataStore.class); + final StoragePoolVO storagePoolVO = mock(StoragePoolVO.class); + when(primaryDataStoreDao.findById(anyLong())).thenReturn(storagePoolVO); + List unusedTemplates = new ArrayList<>(); + when(templateMgr.getUnusedTemplatesInPool(storagePoolVO)).thenReturn(unusedTemplates); + List poolHostVOs = new ArrayList<>(); + when(storagePoolHostDao.listByPoolId(anyLong())).thenReturn(poolHostVOs); + when(dataStoreHelper.deletePrimaryDataStore(any(DataStore.class))).thenReturn(true); + final boolean result = scaleIOPrimaryDataStoreLifeCycleTest.deleteDataStore(store); + assertThat(result).isTrue(); + } +} diff --git a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java index 22e4e952b3ef..51468c2dfdaa 100644 --- a/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/main/java/org/apache/cloudstack/storage/datastore/driver/SolidFirePrimaryDataStoreDriver.java @@ -82,6 +82,7 @@ import com.cloud.user.AccountDetailsDao; import com.cloud.user.AccountVO; import com.cloud.user.dao.AccountDao; +import com.cloud.utils.Pair; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; import com.google.common.base.Preconditions; @@ -830,6 +831,11 @@ public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCa throw new UnsupportedOperationException(); } + @Override + public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) { + throw new 
UnsupportedOperationException(); + } + @Override public boolean canCopy(DataObject srcData, DataObject destData) { return false; @@ -1588,4 +1594,29 @@ private List getNonDestroyedSnapshots(long csVolumeId) { return lstSnapshots2; } + + @Override + public boolean canProvideStorageStats() { + return false; + } + + @Override + public Pair getStorageStats(StoragePool storagePool) { + return null; + } + + @Override + public boolean canProvideVolumeStats() { + return false; + } + + @Override + public Pair getVolumeStats(StoragePool storagePool, String volumeId) { + return null; + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + return true; + } } diff --git a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java index 5a7a8b436e07..4319bed9dbc1 100644 --- a/server/src/main/java/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/main/java/com/cloud/alert/AlertManagerImpl.java @@ -759,7 +759,8 @@ public void sendAlert(AlertType alertType, long dataCenterId, Long podId, Long c (alertType != AlertManager.AlertType.ALERT_TYPE_UPLOAD_FAILED) && (alertType != AlertManager.AlertType.ALERT_TYPE_OOBM_AUTH_ERROR) && (alertType != AlertManager.AlertType.ALERT_TYPE_HA_ACTION) && - (alertType != AlertManager.AlertType.ALERT_TYPE_CA_CERT)) { + (alertType != AlertManager.AlertType.ALERT_TYPE_CA_CERT) && + (alertType != AlertManager.AlertType.ALERT_TYPE_VM_SNAPSHOT)) { alert = _alertDao.getLastAlert(alertType.getType(), dataCenterId, podId, clusterId); } diff --git a/server/src/main/java/com/cloud/api/ApiDBUtils.java b/server/src/main/java/com/cloud/api/ApiDBUtils.java index e6b307757956..ef9006c620ad 100644 --- a/server/src/main/java/com/cloud/api/ApiDBUtils.java +++ b/server/src/main/java/com/cloud/api/ApiDBUtils.java @@ -1208,7 +1208,7 @@ public static HypervisorType getHypervisorTypeFromFormat(long dcId, ImageFormat type = HypervisorType.Hyperv; } } if (format == 
ImageFormat.RAW) { - // Currently, KVM only suppoorts RBD images of type RAW. + // Currently, KVM only supports RBD and PowerFlex images of type RAW. // This results in a weird collision with OVM volumes which // can only be raw, thus making KVM RBD volumes show up as OVM // rather than RBD. This block of code can (hopefuly) by checking to @@ -1220,7 +1220,7 @@ public static HypervisorType getHypervisorTypeFromFormat(long dcId, ImageFormat ListIterator itr = pools.listIterator(); while(itr.hasNext()) { StoragePoolVO pool = itr.next(); - if(pool.getPoolType() == StoragePoolType.RBD || pool.getPoolType() == StoragePoolType.CLVM) { + if(pool.getPoolType() == StoragePoolType.RBD || pool.getPoolType() == StoragePoolType.PowerFlex || pool.getPoolType() == StoragePoolType.CLVM) { // This case will note the presence of non-qcow2 primary stores, suggesting KVM without NFS. Otherwse, // If this check is not passed, the hypervisor type will remain OVM. type = HypervisorType.KVM; diff --git a/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java b/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java index 88a7639ac914..3ebe73554cc3 100644 --- a/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java +++ b/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java @@ -289,17 +289,19 @@ public static List createVolumeResponse(ResponseView view, Volum VolumeStats vs = null; if (vr.getFormat() == ImageFormat.QCOW2) { vs = ApiDBUtils.getVolumeStatistics(vrData.getId()); - } - else if (vr.getFormat() == ImageFormat.VHD){ + } else if (vr.getFormat() == ImageFormat.VHD) { vs = ApiDBUtils.getVolumeStatistics(vrData.getPath()); - } - else if (vr.getFormat() == ImageFormat.OVA){ + } else if (vr.getFormat() == ImageFormat.OVA) { if (vrData.getChainInfo() != null) { vs = ApiDBUtils.getVolumeStatistics(vrData.getChainInfo()); } + } else if (vr.getFormat() == ImageFormat.RAW) { + if (vrData.getPath() != null) { + vs = 
ApiDBUtils.getVolumeStatistics(vrData.getPath()); + } } - if (vs != null){ + if (vs != null) { long vsz = vs.getVirtualSize(); long psz = vs.getPhysicalSize() ; double util = (double)psz/vsz; diff --git a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java index 80c433ad71ca..d16ba9443b7c 100644 --- a/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/UserVmJoinDaoImpl.java @@ -336,6 +336,11 @@ public UserVmResponse newUserVmResponse(ResponseView view, String objectName, Us userVmResponse.setBootType("Bios"); userVmResponse.setBootMode("legacy"); } + + if (userVm.getPoolType() != null) { + userVmResponse.setPoolType(userVm.getPoolType().toString()); + } + // Remove blacklisted settings if user is not admin if (caller.getType() != Account.ACCOUNT_TYPE_ADMIN) { String[] userVmSettingsToHide = QueryService.UserVMBlacklistedDetails.value().split(","); diff --git a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java index ce86add1a08d..27f4ee6ae018 100644 --- a/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/main/java/com/cloud/capacity/CapacityManagerImpl.java @@ -585,9 +585,19 @@ public long getAllocatedPoolCapacity(StoragePoolVO pool, VMTemplateVO templateFo // if the storage pool is managed, the used bytes can be larger than the sum of the sizes of all of the non-destroyed volumes // in this case, call getUsedBytes(StoragePoolVO) if (pool.isManaged()) { - return getUsedBytes(pool); - } - else { + totalAllocatedSize = getUsedBytes(pool); + + if (templateForVmCreation != null) { + VMTemplateStoragePoolVO templatePoolVO = _templatePoolDao.findByPoolTemplate(pool.getId(), templateForVmCreation.getId()); + if (templatePoolVO == null) { + // template is not installed in the pool, consider the template size 
for allocation + long templateForVmCreationSize = templateForVmCreation.getSize() != null ? templateForVmCreation.getSize() : 0; + totalAllocatedSize += templateForVmCreationSize; + } + } + + return totalAllocatedSize; + } else { // Get size for all the non-destroyed volumes. Pair sizes = _volumeDao.getNonDestroyedCountAndTotalByPool(pool.getId()); diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index 412fa7fee7d3..f04aef479b7f 100755 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -207,6 +207,7 @@ import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.Storage.ProvisioningType; import com.cloud.storage.StorageManager; +import com.cloud.storage.Volume; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.VMTemplateZoneDao; import com.cloud.storage.dao.VolumeDao; @@ -2601,6 +2602,10 @@ protected ServiceOfferingVO createServiceOffering(final long userId, final boole continue; } } + if (detailEntry.getKey().equalsIgnoreCase(Volume.BANDWIDTH_LIMIT_IN_MBPS) || detailEntry.getKey().equalsIgnoreCase(Volume.IOPS_LIMIT)) { + // Add in disk offering details + continue; + } detailsVO.add(new ServiceOfferingDetailsVO(offering.getId(), detailEntry.getKey(), detailEntryValue, true)); } } @@ -2620,6 +2625,21 @@ protected ServiceOfferingVO createServiceOffering(final long userId, final boole } _serviceOfferingDetailsDao.saveDetails(detailsVO); } + + if (details != null && !details.isEmpty()) { + List diskDetailsVO = new ArrayList(); + // Support disk offering details for below parameters + if (details.containsKey(Volume.BANDWIDTH_LIMIT_IN_MBPS)) { + diskDetailsVO.add(new DiskOfferingDetailVO(offering.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS, details.get(Volume.BANDWIDTH_LIMIT_IN_MBPS), false)); + } + if 
(details.containsKey(Volume.IOPS_LIMIT)) { + diskDetailsVO.add(new DiskOfferingDetailVO(offering.getId(), Volume.IOPS_LIMIT, details.get(Volume.IOPS_LIMIT), false)); + } + if (!diskDetailsVO.isEmpty()) { + diskOfferingDetailsDao.saveDetails(diskDetailsVO); + } + } + CallContext.current().setEventDetails("Service offering id=" + offering.getId()); return offering; } else { @@ -2823,7 +2843,7 @@ protected DiskOfferingVO createDiskOffering(final Long userId, final List Long bytesWriteRate, Long bytesWriteRateMax, Long bytesWriteRateMaxLength, Long iopsReadRate, Long iopsReadRateMax, Long iopsReadRateMaxLength, Long iopsWriteRate, Long iopsWriteRateMax, Long iopsWriteRateMaxLength, - final Integer hypervisorSnapshotReserve, String cacheMode) { + final Integer hypervisorSnapshotReserve, String cacheMode, final Map details) { long diskSize = 0;// special case for custom disk offerings if (numGibibytes != null && numGibibytes <= 0) { throw new InvalidParameterValueException("Please specify a disk size of at least 1 Gb."); @@ -2951,6 +2971,15 @@ protected DiskOfferingVO createDiskOffering(final Long userId, final List detailsVO.add(new DiskOfferingDetailVO(offering.getId(), ApiConstants.ZONE_ID, String.valueOf(zoneId), false)); } } + if (details != null && !details.isEmpty()) { + // Support disk offering details for below parameters + if (details.containsKey(Volume.BANDWIDTH_LIMIT_IN_MBPS)) { + detailsVO.add(new DiskOfferingDetailVO(offering.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS, details.get(Volume.BANDWIDTH_LIMIT_IN_MBPS), false)); + } + if (details.containsKey(Volume.IOPS_LIMIT)) { + detailsVO.add(new DiskOfferingDetailVO(offering.getId(), Volume.IOPS_LIMIT, details.get(Volume.IOPS_LIMIT), false)); + } + } if (!detailsVO.isEmpty()) { diskOfferingDetailsDao.saveDetails(detailsVO); } @@ -2972,6 +3001,7 @@ public DiskOffering createDiskOffering(final CreateDiskOfferingCmd cmd) { final String tags = cmd.getTags(); final List domainIds = cmd.getDomainIds(); final List 
zoneIds = cmd.getZoneIds(); + final Map details = cmd.getDetails(); // check if valid domain if (CollectionUtils.isNotEmpty(domainIds)) { @@ -3041,7 +3071,7 @@ public DiskOffering createDiskOffering(final CreateDiskOfferingCmd cmd) { localStorageRequired, isDisplayOfferingEnabled, isCustomizedIops, minIops, maxIops, bytesReadRate, bytesReadRateMax, bytesReadRateMaxLength, bytesWriteRate, bytesWriteRateMax, bytesWriteRateMaxLength, iopsReadRate, iopsReadRateMax, iopsReadRateMaxLength, iopsWriteRate, iopsWriteRateMax, iopsWriteRateMaxLength, - hypervisorSnapshotReserve, cacheMode); + hypervisorSnapshotReserve, cacheMode, details); } /** diff --git a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java index 2149a5681d09..c49a4052f7cb 100644 --- a/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java +++ b/server/src/main/java/com/cloud/deploy/DeploymentPlanningManagerImpl.java @@ -30,21 +30,14 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.utils.StringUtils; -import com.cloud.utils.db.Filter; -import com.cloud.utils.fsm.StateMachine2; - -import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.collections.MapUtils; -import org.apache.log4j.Logger; +import org.apache.cloudstack.affinity.AffinityGroupDomainMapVO; import org.apache.cloudstack.affinity.AffinityGroupProcessor; import org.apache.cloudstack.affinity.AffinityGroupService; import org.apache.cloudstack.affinity.AffinityGroupVMMapVO; import org.apache.cloudstack.affinity.AffinityGroupVO; -import org.apache.cloudstack.affinity.AffinityGroupDomainMapVO; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; -import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; import org.apache.cloudstack.affinity.dao.AffinityGroupDomainMapDao; +import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; import 
org.apache.cloudstack.engine.cloud.entity.api.db.VMReservationVO; import org.apache.cloudstack.engine.cloud.entity.api.db.dao.VMReservationDao; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -57,6 +50,9 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.utils.identity.ManagementServerNode; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; +import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -121,15 +117,18 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; +import com.cloud.utils.StringUtils; import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; import com.cloud.utils.db.TransactionCallback; import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.StateListener; +import com.cloud.utils.fsm.StateMachine2; import com.cloud.vm.DiskProfile; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; @@ -1343,7 +1342,7 @@ protected boolean hostCanAccessSPool(Host host, StoragePool pool) { boolean hostCanAccessSPool = false; StoragePoolHostVO hostPoolLinkage = _poolHostDao.findByPoolHost(pool.getId(), host.getId()); - if (hostPoolLinkage != null) { + if (hostPoolLinkage != null && _storageMgr.canHostAccessStoragePool(host, pool)) { hostCanAccessSPool = true; } diff --git a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java index 2ae35fc08ad7..a8456bcd8cb6 100644 --- a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java +++ 
b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java @@ -248,6 +248,7 @@ protected VirtualMachineTO toVirtualMachineTO(VirtualMachineProfile vmProfile) { to.setConfigDriveLabel(vmProfile.getConfigDriveLabel()); to.setConfigDriveIsoRootFolder(vmProfile.getConfigDriveIsoRootFolder()); to.setConfigDriveIsoFile(vmProfile.getConfigDriveIsoFile()); + to.setConfigDriveLocation(vmProfile.getConfigDriveLocation()); to.setState(vm.getState()); return to; diff --git a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java index 904a488c1245..9bcc9738573e 100644 --- a/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java +++ b/server/src/main/java/com/cloud/hypervisor/kvm/discoverer/LibvirtServerDiscoverer.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.agent.lb.IndirectAgentLB; import org.apache.cloudstack.ca.CAManager; import org.apache.cloudstack.ca.SetupCertificateCommand; +import org.apache.cloudstack.direct.download.DirectDownloadManager; import org.apache.cloudstack.framework.ca.Certificate; import org.apache.cloudstack.utils.security.KeyStoreUtils; import org.apache.log4j.Logger; @@ -53,6 +54,7 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.PhysicalNetworkSetupInfo; @@ -78,7 +80,11 @@ public abstract class LibvirtServerDiscoverer extends DiscovererBase implements @Inject private CAManager caManager; @Inject + DirectDownloadManager directDownloadManager; + @Inject private IndirectAgentLB indirectAgentLB; + @Inject + private HostDao hostDao; @Override public abstract Hypervisor.HypervisorType getHypervisorType(); @@ -103,6 +109,10 @@ public AgentControlAnswer processControlCommand(long agentId, 
AgentControlComman @Override public void processHostAdded(long hostId) { + HostVO host = hostDao.findById(hostId); + if (host != null) { + directDownloadManager.syncCertificatesToHost(hostId, host.getDataCenterId()); + } } @Override @@ -402,6 +412,7 @@ public boolean configure(String name, Map params) throws Configu _kvmGuestNic = _kvmPrivateNic; } + agentMgr.registerForHostEvents(this, true, false, false); _resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); return true; } diff --git a/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java b/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java index 4daeda6a530c..ebb6dc27c8fd 100644 --- a/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java +++ b/server/src/main/java/com/cloud/network/element/ConfigDriveNetworkElement.java @@ -36,7 +36,7 @@ import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; -import com.cloud.agent.api.Answer; +import com.cloud.agent.api.HandleConfigDriveIsoAnswer; import com.cloud.agent.api.HandleConfigDriveIsoCommand; import com.cloud.agent.api.to.DiskTO; import com.cloud.configuration.ConfigurationManager; @@ -338,7 +338,16 @@ public boolean prepareMigration(NicProfile nic, Network network, VirtualMachineP if (_networkModel.getUserDataUpdateProvider(network).getProvider().equals(Provider.ConfigDrive)) { LOG.trace(String.format("[prepareMigration] for vm: %s", vm.getInstanceName())); try { - addPasswordAndUserdata(network, nic, vm, dest, context); + if (isConfigDriveIsoOnHostCache(vm.getId())) { + vm.setConfigDriveLocation(Location.HOST); + configureConfigDriveData(vm, nic, dest); + + // Create the config drive on dest host cache + createConfigDriveIsoOnHostCache(vm, dest.getHost().getId()); + } else { + vm.setConfigDriveLocation(getConfigDriveLocation(vm.getId())); + addPasswordAndUserdata(network, nic, vm, dest, context); + } } catch (InsufficientCapacityException | 
ResourceUnavailableException e) { LOG.error("Failed to add config disk drive due to: ", e); return false; @@ -349,10 +358,28 @@ public boolean prepareMigration(NicProfile nic, Network network, VirtualMachineP @Override public void rollbackMigration(NicProfile nic, Network network, VirtualMachineProfile vm, ReservationContext src, ReservationContext dst) { + try { + if (isConfigDriveIsoOnHostCache(vm.getId())) { + vm.setConfigDriveLocation(Location.HOST); + // Delete the config drive on dest host cache + deleteConfigDriveIsoOnHostCache(vm.getVirtualMachine(), vm.getHostId()); + } + } catch (ConcurrentOperationException | ResourceUnavailableException e) { + LOG.error("rollbackMigration failed.", e); + } } @Override public void commitMigration(NicProfile nic, Network network, VirtualMachineProfile vm, ReservationContext src, ReservationContext dst) { + try { + if (isConfigDriveIsoOnHostCache(vm.getId())) { + vm.setConfigDriveLocation(Location.HOST); + // Delete the config drive on src host cache + deleteConfigDriveIsoOnHostCache(vm.getVirtualMachine(), vm.getHostId()); + } + } catch (ConcurrentOperationException | ResourceUnavailableException e) { + LOG.error("commitMigration failed.", e); + } } private void recreateConfigDriveIso(NicProfile nic, Network network, VirtualMachineProfile vm, DeployDestination dest) throws ResourceUnavailableException { @@ -383,7 +410,8 @@ private boolean isWindows(long guestOSId) { private DataStore findDataStore(VirtualMachineProfile profile, DeployDestination dest) { DataStore dataStore = null; - if (VirtualMachineManager.VmConfigDriveOnPrimaryPool.value()) { + if (VirtualMachineManager.VmConfigDriveOnPrimaryPool.valueIn(dest.getDataCenter().getId()) || + VirtualMachineManager.VmConfigDriveForceHostCacheUse.valueIn(dest.getDataCenter().getId())) { if(MapUtils.isNotEmpty(dest.getStorageForDisks())) { dataStore = getPlannedDataStore(dest, dataStore); } @@ -472,12 +500,86 @@ private Long findAgentId(VirtualMachineProfile profile, 
DeployDestination dest, } else { agentId = dest.getHost().getId(); } - if (!VirtualMachineManager.VmConfigDriveOnPrimaryPool.value()) { + if (!VirtualMachineManager.VmConfigDriveOnPrimaryPool.valueIn(dest.getDataCenter().getId()) && + !VirtualMachineManager.VmConfigDriveForceHostCacheUse.valueIn(dest.getDataCenter().getId())) { agentId = findAgentIdForImageStore(dataStore); } return agentId; } + private Location getConfigDriveLocation(long vmId) { + final UserVmDetailVO vmDetailConfigDriveLocation = _userVmDetailsDao.findDetail(vmId, VmDetailConstants.CONFIG_DRIVE_LOCATION); + if (vmDetailConfigDriveLocation != null) { + if (Location.HOST.toString().equalsIgnoreCase(vmDetailConfigDriveLocation.getValue())) { + return Location.HOST; + } else if (Location.PRIMARY.toString().equalsIgnoreCase(vmDetailConfigDriveLocation.getValue())) { + return Location.PRIMARY; + } else { + return Location.SECONDARY; + } + } + return Location.SECONDARY; + } + + private boolean isConfigDriveIsoOnHostCache(long vmId) { + final UserVmDetailVO vmDetailConfigDriveLocation = _userVmDetailsDao.findDetail(vmId, VmDetailConstants.CONFIG_DRIVE_LOCATION); + if (vmDetailConfigDriveLocation != null && Location.HOST.toString().equalsIgnoreCase(vmDetailConfigDriveLocation.getValue())) { + return true; + } + return false; + } + + private boolean createConfigDriveIsoOnHostCache(VirtualMachineProfile profile, Long hostId) throws ResourceUnavailableException { + if (hostId == null) { + throw new ResourceUnavailableException("Config drive iso creation failed, dest host not available", + ConfigDriveNetworkElement.class, 0L); + } + + LOG.debug("Creating config drive ISO for vm: " + profile.getInstanceName() + " on host: " + hostId); + + final String isoFileName = ConfigDrive.configIsoFileName(profile.getInstanceName()); + final String isoPath = ConfigDrive.createConfigDrivePath(profile.getInstanceName()); + final String isoData = ConfigDriveBuilder.buildConfigDrive(profile.getVmData(), isoFileName, 
profile.getConfigDriveLabel()); + final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, isoData, null, false, true, true); + + final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(hostId, configDriveIsoCommand); + if (answer == null) { + throw new CloudRuntimeException("Unable to get an answer to handle config drive creation for vm: " + profile.getInstanceName() + " on host: " + hostId); + } + + if (!answer.getResult()) { + throw new ResourceUnavailableException(String.format("Config drive iso creation failed, details: %s", + answer.getDetails()), ConfigDriveNetworkElement.class, 0L); + } + + profile.setConfigDriveLocation(answer.getConfigDriveLocation()); + _userVmDetailsDao.addDetail(profile.getId(), VmDetailConstants.CONFIG_DRIVE_LOCATION, answer.getConfigDriveLocation().toString(), false); + addConfigDriveDisk(profile, null); + return true; + } + + private boolean deleteConfigDriveIsoOnHostCache(final VirtualMachine vm, final Long hostId) throws ResourceUnavailableException { + if (hostId == null) { + throw new ResourceUnavailableException("Config drive iso deletion failed, host not available", + ConfigDriveNetworkElement.class, 0L); + } + + LOG.debug("Deleting config drive ISO for vm: " + vm.getInstanceName() + " on host: " + hostId); + final String isoPath = ConfigDrive.createConfigDrivePath(vm.getInstanceName()); + final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, null, null, false, true, false); + + final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(hostId, configDriveIsoCommand); + if (answer == null) { + throw new CloudRuntimeException("Unable to get an answer to handle config drive deletion for vm: " + vm.getInstanceName() + " on host: " + hostId); + } + + if (!answer.getResult()) { + LOG.error("Failed to remove config drive for instance: " + vm.getInstanceName()); + return false; 
+ } + return true; + } + private boolean createConfigDriveIso(VirtualMachineProfile profile, DeployDestination dest, DiskTO disk) throws ResourceUnavailableException { DataStore dataStore = getDatastoreForConfigDriveIso(disk, profile, dest); @@ -492,13 +594,17 @@ private boolean createConfigDriveIso(VirtualMachineProfile profile, DeployDestin final String isoFileName = ConfigDrive.configIsoFileName(profile.getInstanceName()); final String isoPath = ConfigDrive.createConfigDrivePath(profile.getInstanceName()); final String isoData = ConfigDriveBuilder.buildConfigDrive(profile.getVmData(), isoFileName, profile.getConfigDriveLabel()); - final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, isoData, dataStore.getTO(), true); + boolean useHostCacheOnUnsupportedPool = VirtualMachineManager.VmConfigDriveUseHostCacheOnUnsupportedPool.valueIn(dest.getDataCenter().getId()); + boolean preferHostCache = VirtualMachineManager.VmConfigDriveForceHostCacheUse.valueIn(dest.getDataCenter().getId()); + final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, isoData, dataStore.getTO(), useHostCacheOnUnsupportedPool, preferHostCache, true); - final Answer answer = agentManager.easySend(agentId, configDriveIsoCommand); + final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(agentId, configDriveIsoCommand); if (!answer.getResult()) { throw new ResourceUnavailableException(String.format("Config drive iso creation failed, details: %s", answer.getDetails()), ConfigDriveNetworkElement.class, 0L); } + profile.setConfigDriveLocation(answer.getConfigDriveLocation()); + _userVmDetailsDao.addDetail(profile.getId(), VmDetailConstants.CONFIG_DRIVE_LOCATION, answer.getConfigDriveLocation().toString(), false); addConfigDriveDisk(profile, dataStore); return true; } @@ -526,28 +632,37 @@ private DataStore getDatastoreForConfigDriveIso(DiskTO disk, VirtualMachineProfi } private 
boolean deleteConfigDriveIso(final VirtualMachine vm) throws ResourceUnavailableException { - DataStore dataStore = _dataStoreMgr.getImageStoreWithFreeCapacity(vm.getDataCenterId()); - Long agentId = findAgentIdForImageStore(dataStore); + Long hostId = (vm.getHostId() != null) ? vm.getHostId() : vm.getLastHostId(); + Location location = getConfigDriveLocation(vm.getId()); + if (location == Location.HOST) { + return deleteConfigDriveIsoOnHostCache(vm, hostId); + } + + Long agentId = null; + DataStore dataStore = null; - if (VirtualMachineManager.VmConfigDriveOnPrimaryPool.value()) { + if (location == Location.SECONDARY) { + dataStore = _dataStoreMgr.getImageStoreWithFreeCapacity(vm.getDataCenterId()); + agentId = findAgentIdForImageStore(dataStore); + } else if (location == Location.PRIMARY) { List volumes = _volumeDao.findByInstanceAndType(vm.getId(), Volume.Type.ROOT); if (volumes != null && volumes.size() > 0) { dataStore = _dataStoreMgr.getDataStore(volumes.get(0).getPoolId(), DataStoreRole.Primary); } - agentId = (vm.getHostId() != null) ? 
vm.getHostId() : vm.getLastHostId(); + agentId = hostId; } if (agentId == null || dataStore == null) { - throw new ResourceUnavailableException("Config drive iso creation failed, agent or datastore not available", + throw new ResourceUnavailableException("Config drive iso deletion failed, agent or datastore not available", ConfigDriveNetworkElement.class, 0L); } LOG.debug("Deleting config drive ISO for vm: " + vm.getInstanceName()); final String isoPath = ConfigDrive.createConfigDrivePath(vm.getInstanceName()); - final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, null, dataStore.getTO(), false); + final HandleConfigDriveIsoCommand configDriveIsoCommand = new HandleConfigDriveIsoCommand(isoPath, null, dataStore.getTO(), false, false, false); - final Answer answer = agentManager.easySend(agentId, configDriveIsoCommand); + final HandleConfigDriveIsoAnswer answer = (HandleConfigDriveIsoAnswer) agentManager.easySend(agentId, configDriveIsoCommand); if (!answer.getResult()) { LOG.error("Failed to remove config drive for instance: " + vm.getInstanceName()); return false; @@ -566,11 +681,13 @@ private void addConfigDriveDisk(final VirtualMachineProfile profile, final DataS } if (!isoAvailable) { TemplateObjectTO dataTO = new TemplateObjectTO(); - if (dataStore == null) { + if (dataStore == null && !isConfigDriveIsoOnHostCache(profile.getId())) { throw new ResourceUnavailableException("Config drive disk add failed, datastore not available", ConfigDriveNetworkElement.class, 0L); + } else if (dataStore != null) { + dataTO.setDataStore(dataStore.getTO()); } - dataTO.setDataStore(dataStore.getTO()); + dataTO.setUuid(profile.getUuid()); dataTO.setPath(isoPath); dataTO.setFormat(Storage.ImageFormat.ISO); diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManager.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManager.java index 6edbb4400b11..5bf57f17d9d8 100644 --- 
a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManager.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManager.java @@ -94,7 +94,7 @@ public interface VirtualNetworkApplianceManager extends Manager, VirtualNetworkA false, ConfigKey.Scope.Global, null); static final ConfigKey RouterHealthChecksFailuresToRecreateVr = new ConfigKey(String.class, RouterHealthChecksFailuresToRecreateVrCK, "Advanced", "", "Health checks failures defined by this config are the checks that should cause router recreation. If empty the recreate is not attempted for any health check failure. Possible values are comma separated script names " + - "from systemvm’s /root/health_scripts/ (namely - cpu_usage_check.py, dhcp_check.py, disk_space_check.py, dns_check.py, gateways_check.py, haproxy_check.py, iptables_check.py, memory_usage_check.py, router_version_check.py), connectivity.test " + + "from systemvm’s /root/health_scripts/ (namely - cpu_usage_check.py, dhcp_check.py, disk_space_check.py, dns_check.py, gateways_check.py, haproxy_check.py, iptables_check.py, memory_usage_check.py, router_version_check.py), connectivity.test, filesystem.writable.test " + " or services (namely - loadbalancing.service, webserver.service, dhcp.service) ", true, ConfigKey.Scope.Zone, null); static final ConfigKey RouterHealthChecksToExclude = new ConfigKey(String.class, "router.health.checks.to.exclude", "Advanced", "", diff --git a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 0e70ea8ab7c3..63460db41334 100644 --- a/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/main/java/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -280,6 +280,8 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V Configurable, StateListener { 
private static final Logger s_logger = Logger.getLogger(VirtualNetworkApplianceManagerImpl.class); private static final String CONNECTIVITY_TEST = "connectivity.test"; + private static final String FILESYSTEM_WRITABLE_TEST = "filesystem.writable.test"; + private static final String READONLY_FILESYSTEM_ERROR = "Read-only file system"; private static final String BACKUP_ROUTER_EXCLUDED_TESTS = "gateways_check.py"; @Inject private EntityManager _entityMgr; @@ -1274,14 +1276,19 @@ private List getFailingChecks(DomainRouterVO router, GetRouterMonitorRes if (answer == null) { s_logger.warn("Unable to fetch monitor results for router " + router); - resetRouterHealthChecksAndConnectivity(router.getId(), false, "Communication failed"); + resetRouterHealthChecksAndConnectivity(router.getId(), false, false, "Communication failed"); return Arrays.asList(CONNECTIVITY_TEST); } else if (!answer.getResult()) { s_logger.warn("Failed to fetch monitor results from router " + router + " with details: " + answer.getDetails()); - resetRouterHealthChecksAndConnectivity(router.getId(), false, "Failed to fetch results with details: " + answer.getDetails()); - return Arrays.asList(CONNECTIVITY_TEST); + if (StringUtils.isNotBlank(answer.getDetails()) && answer.getDetails().equalsIgnoreCase(READONLY_FILESYSTEM_ERROR)) { + resetRouterHealthChecksAndConnectivity(router.getId(), true, false, "Failed to write: " + answer.getDetails()); + return Arrays.asList(FILESYSTEM_WRITABLE_TEST); + } else { + resetRouterHealthChecksAndConnectivity(router.getId(), false, false, "Failed to fetch results with details: " + answer.getDetails()); + return Arrays.asList(CONNECTIVITY_TEST); + } } else { - resetRouterHealthChecksAndConnectivity(router.getId(), true, "Successfully fetched data"); + resetRouterHealthChecksAndConnectivity(router.getId(), true, true, "Successfully fetched data"); updateDbHealthChecksFromRouterResponse(router.getId(), answer.getMonitoringResults()); return answer.getFailingChecks(); } @@ 
-1418,28 +1425,31 @@ private Map> getHealthChecksFromD return healthCheckResults; } - private RouterHealthCheckResultVO resetRouterHealthChecksAndConnectivity(final long routerId, boolean connected, String message) { + private void resetRouterHealthChecksAndConnectivity(final long routerId, boolean connected, boolean writable, String message) { routerHealthCheckResultDao.expungeHealthChecks(routerId); - boolean newEntry = false; - RouterHealthCheckResultVO connectivityVO = routerHealthCheckResultDao.getRouterHealthCheckResult(routerId, CONNECTIVITY_TEST, "basic"); + updateRouterHealthCheckResult(routerId, CONNECTIVITY_TEST, "basic", connected, connected ? "Successfully connected to router" : message); + updateRouterHealthCheckResult(routerId, FILESYSTEM_WRITABLE_TEST, "basic", writable, writable ? "Successfully written to file system" : message); + } + + private void updateRouterHealthCheckResult(final long routerId, String checkName, String checkType, boolean checkResult, String checkMessage) { + boolean newHealthCheckEntry = false; + RouterHealthCheckResultVO connectivityVO = routerHealthCheckResultDao.getRouterHealthCheckResult(routerId, checkName, checkType); if (connectivityVO == null) { - connectivityVO = new RouterHealthCheckResultVO(routerId, CONNECTIVITY_TEST, "basic"); - newEntry = true; + connectivityVO = new RouterHealthCheckResultVO(routerId, checkName, checkType); + newHealthCheckEntry = true; } - connectivityVO.setCheckResult(connected); + connectivityVO.setCheckResult(checkResult); connectivityVO.setLastUpdateTime(new Date()); - if (StringUtils.isNotEmpty(message)) { - connectivityVO.setCheckDetails(message.getBytes(com.cloud.utils.StringUtils.getPreferredCharset())); + if (StringUtils.isNotEmpty(checkMessage)) { + connectivityVO.setCheckDetails(checkMessage.getBytes(com.cloud.utils.StringUtils.getPreferredCharset())); } - if (newEntry) { + if (newHealthCheckEntry) { routerHealthCheckResultDao.persist(connectivityVO); } else { 
routerHealthCheckResultDao.update(connectivityVO.getId(), connectivityVO); } - - return routerHealthCheckResultDao.getRouterHealthCheckResult(routerId, CONNECTIVITY_TEST, "basic"); } private RouterHealthCheckResultVO parseHealthCheckVOFromJson(final long routerId, @@ -1596,12 +1606,18 @@ public boolean performRouterHealthChecks(long routerId) { } // Step 2: Update health checks values in database. We do this irrespective of new health check config. - if (answer == null || !answer.getResult()) { + if (answer == null) { success = false; - resetRouterHealthChecksAndConnectivity(routerId, false, - answer == null ? "Communication failed " : "Failed to fetch results with details: " + answer.getDetails()); + resetRouterHealthChecksAndConnectivity(routerId, false, false, "Communication failed"); + } else if (!answer.getResult()) { + success = false; + if (StringUtils.isNotBlank(answer.getDetails()) && answer.getDetails().equalsIgnoreCase(READONLY_FILESYSTEM_ERROR)) { + resetRouterHealthChecksAndConnectivity(routerId, true, false, "Failed to write: " + answer.getDetails()); + } else { + resetRouterHealthChecksAndConnectivity(routerId, false, false, "Failed to fetch results with details: " + answer.getDetails()); + } } else { - resetRouterHealthChecksAndConnectivity(routerId, true, "Successfully fetched data"); + resetRouterHealthChecksAndConnectivity(routerId, true, true, "Successfully fetched data"); updateDbHealthChecksFromRouterResponse(routerId, answer.getMonitoringResults()); } diff --git a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java index 52a11069dd0b..30c4d65722d4 100755 --- a/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/main/java/com/cloud/resource/ResourceManagerImpl.java @@ -3016,12 +3016,15 @@ public HashMap> getGPUStatistics(final Ho } @Override - public HostVO findOneRandomRunningHostByHypervisor(HypervisorType type) { + public HostVO 
findOneRandomRunningHostByHypervisor(final HypervisorType type, final Long dcId) { final QueryBuilder sc = QueryBuilder.create(HostVO.class); sc.and(sc.entity().getHypervisorType(), Op.EQ, type); sc.and(sc.entity().getType(),Op.EQ, Type.Routing); sc.and(sc.entity().getStatus(), Op.EQ, Status.Up); sc.and(sc.entity().getResourceState(), Op.EQ, ResourceState.Enabled); + if (dcId != null) { + sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId); + } sc.and(sc.entity().getRemoved(), Op.NULL); List hosts = sc.list(); if (CollectionUtils.isEmpty(hosts)) { diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 80b54c09dfa2..d24d176e0b4b 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -28,6 +28,7 @@ import java.util.List; import java.util.Map; import java.util.TimeZone; +import java.util.UUID; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -650,6 +651,7 @@ import com.cloud.storage.GuestOSVO; import com.cloud.storage.GuestOsCategory; import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.Volume; @@ -722,6 +724,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe static final ConfigKey vmPasswordLength = new ConfigKey("Advanced", Integer.class, "vm.password.length", "6", "Specifies the length of a randomly generated password", false); static final ConfigKey sshKeyLength = new ConfigKey("Advanced", Integer.class, "ssh.key.length", "2048", "Specifies custom SSH key length (bit)", true, ConfigKey.Scope.Global); static final ConfigKey humanReadableSizes = new ConfigKey("Advanced", Boolean.class, "display.human.readable.sizes", "true", "Enables outputting 
human readable byte sizes to logs and usage records.", false, ConfigKey.Scope.Global); + public static final ConfigKey customCsIdentifier = new ConfigKey("Advanced", String.class, "custom.cs.identifier", UUID.randomUUID().toString().split("-")[0].substring(4), "Custom identifier for the cloudstack installation", true, ConfigKey.Scope.Global); @Inject public AccountManager _accountMgr; @@ -1310,6 +1313,11 @@ public Ternary, Integer>, List, Map[] getConfigKeys() { - return new ConfigKey[] {vmPasswordLength, sshKeyLength, humanReadableSizes}; + return new ConfigKey[] {vmPasswordLength, sshKeyLength, humanReadableSizes, customCsIdentifier}; } protected class EventPurgeTask extends ManagedContextRunnable { diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java index 43784daf9c81..7459285913aa 100644 --- a/server/src/main/java/com/cloud/server/StatsCollector.java +++ b/server/src/main/java/com/cloud/server/StatsCollector.java @@ -109,6 +109,7 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.ImageStoreDetailsUtil; import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageManager; import com.cloud.storage.StorageStats; @@ -932,7 +933,7 @@ protected void runInContext() { List volumes = _volsDao.findByPoolId(pool.getId(), null); List volumeLocators = new ArrayList(); for (VolumeVO volume : volumes) { - if (volume.getFormat() == ImageFormat.QCOW2 || volume.getFormat() == ImageFormat.VHD) { + if (volume.getFormat() == ImageFormat.QCOW2 || volume.getFormat() == ImageFormat.VHD || (volume.getFormat() == ImageFormat.RAW && pool.getPoolType() == Storage.StoragePoolType.PowerFlex)) { volumeLocators.add(volume.getPath()); } else if (volume.getFormat() == ImageFormat.OVA) { volumeLocators.add(volume.getChainInfo()); diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java 
b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index c59a26d3bc66..62923252ad79 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -103,7 +103,12 @@ import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.agent.api.GetStorageStatsAnswer; +import com.cloud.agent.api.GetStorageStatsCommand; +import com.cloud.agent.api.GetVolumeStatsAnswer; +import com.cloud.agent.api.GetVolumeStatsCommand; import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.agent.api.VolumeStatsEntry; import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.manager.Commands; @@ -430,6 +435,12 @@ public Answer sendToPool(StoragePool pool, long[] hostIdsToTryFirst, Command cmd @Override public Answer sendToPool(StoragePool pool, Command cmd) throws StorageUnavailableException { + if (cmd instanceof GetStorageStatsCommand && pool.getPoolType() == StoragePoolType.PowerFlex) { + // Get stats from the pool directly instead of sending cmd to host + // Added support for ScaleIO/PowerFlex pool only + return getStoragePoolStats(pool, (GetStorageStatsCommand) cmd); + } + Answer[] answers = sendToPool(pool, new Commands(cmd)); if (answers == null) { return null; @@ -437,6 +448,52 @@ public Answer sendToPool(StoragePool pool, Command cmd) throws StorageUnavailabl return answers[0]; } + private GetStorageStatsAnswer getStoragePoolStats(StoragePool pool, GetStorageStatsCommand cmd) { + DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); + DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); + GetStorageStatsAnswer answer = null; + + if (storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canProvideStorageStats()) { + PrimaryDataStoreDriver primaryStoreDriver = 
(PrimaryDataStoreDriver)storeDriver; + Pair storageStats = primaryStoreDriver.getStorageStats(pool); + if (storageStats == null) { + answer = new GetStorageStatsAnswer((GetStorageStatsCommand) cmd, "Failed to get storage stats for pool: " + pool.getId()); + } else { + answer = new GetStorageStatsAnswer((GetStorageStatsCommand) cmd, storageStats.first(), storageStats.second()); + } + } + + return answer; + } + + @Override + public Answer getVolumeStats(StoragePool pool, Command cmd) { + if (!(cmd instanceof GetVolumeStatsCommand)) { + return null; + } + + DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); + DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); + + if (storeDriver instanceof PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canProvideVolumeStats()) { + PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver; + HashMap statEntry = new HashMap(); + GetVolumeStatsCommand getVolumeStatsCommand = (GetVolumeStatsCommand) cmd; + for (String volumeUuid : getVolumeStatsCommand.getVolumeUuids()) { + Pair volumeStats = primaryStoreDriver.getVolumeStats(pool, volumeUuid); + if (volumeStats == null) { + return new GetVolumeStatsAnswer(getVolumeStatsCommand, "Failed to get stats for volume: " + volumeUuid, null); + } else { + VolumeStatsEntry volumeStatsEntry = new VolumeStatsEntry(volumeUuid, volumeStats.first(), volumeStats.second()); + statEntry.put(volumeUuid, volumeStatsEntry); + } + } + return new GetVolumeStatsAnswer(getVolumeStatsCommand, "", statEntry); + } + + return null; + } + public Long chooseHostForStoragePool(StoragePoolVO poolVO, List avoidHosts, boolean sendToVmResidesOn, Long vmId) { if (sendToVmResidesOn) { if (vmId != null) { @@ -961,6 +1018,17 @@ public void connectHostToSharedPool(long hostId, long poolId) throws StorageUnav listener.hostConnect(hostId, pool.getId()); } + @Override + public void disconnectHostFromSharedPool(long 
hostId, long poolId) throws StorageUnavailableException, StorageConflictException { + StoragePool pool = (StoragePool)_dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary); + assert (pool.isShared()) : "Now, did you actually read the name of this method?"; + s_logger.debug("Removing pool " + pool.getName() + " from host " + hostId); + + DataStoreProvider provider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); + HypervisorHostListener listener = hostListeners.get(provider.getName()); + listener.hostDisconnected(hostId, pool.getId()); + } + @Override public BigDecimal getStorageOverProvisioningFactor(Long poolId) { return new BigDecimal(CapacityManager.StorageOverprovisioningFactor.valueIn(poolId)); @@ -1148,6 +1216,7 @@ public void cleanupStorage(boolean recurring) { try { VolumeInfo volumeInfo = volFactory.getVolume(vol.getId()); if (volumeInfo != null) { + volService.ensureVolumeIsExpungeReady(vol.getId()); volService.expungeVolumeAsync(volumeInfo); } else { s_logger.debug("Volume " + vol.getUuid() + " is already destroyed"); @@ -1283,6 +1352,9 @@ private void handleManagedStorage(Volume volume) { if (storagePool != null && storagePool.isManaged()) { VMInstanceVO vmInstanceVO = _vmInstanceDao.findById(instanceId); + if (vmInstanceVO == null) { + return; + } Long lastHostId = vmInstanceVO.getLastHostId(); @@ -1651,6 +1723,38 @@ public StoragePoolVO findLocalStorageOnHost(long hostId) { } } + @Override + @DB + public List findStoragePoolsConnectedToHost(long hostId) { + return _storagePoolHostDao.listByHostId(hostId); + } + + @Override + public boolean canHostAccessStoragePool(Host host, StoragePool pool) { + if (host == null || pool == null) { + return false; + } + + if (!pool.isManaged()) { + return true; + } + + DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); + DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); + + if (storeDriver instanceof 
PrimaryDataStoreDriver && ((PrimaryDataStoreDriver)storeDriver).canHostAccessStoragePool(host, pool)) { + return true; + } + + return false; + } + + @Override + @DB + public Host getHost(long hostId) { + return _hostDao.findById(hostId); + } + @Override public Host updateSecondaryStorage(long secStorageId, String newUrl) { HostVO secHost = _hostDao.findById(secStorageId); @@ -1726,7 +1830,8 @@ public HypervisorType getHypervisorTypeFromFormat(ImageFormat format) { private boolean checkUsagedSpace(StoragePool pool) { // Managed storage does not currently deal with accounting for physically used space (only provisioned space). Just return true if "pool" is managed. - if (pool.isManaged()) { + // StatsCollector gets the storage stats from the ScaleIO/PowerFlex pool directly, limit the usage based on the capacity disable threshold + if (pool.isManaged() && pool.getPoolType() != StoragePoolType.PowerFlex) { return true; } @@ -1854,14 +1959,14 @@ public boolean storagePoolHasEnoughSpace(List volumes, StoragePool pool, } @Override - public boolean storagePoolHasEnoughSpaceForResize(StoragePool pool, long currentSize, long newSiz) { + public boolean storagePoolHasEnoughSpaceForResize(StoragePool pool, long currentSize, long newSize) { if (!checkUsagedSpace(pool)) { return false; } if (s_logger.isDebugEnabled()) { s_logger.debug("Destination pool id: " + pool.getId()); } - long totalAskingSize = newSiz - currentSize; + long totalAskingSize = newSize - currentSize; if (totalAskingSize <= 0) { return true; @@ -1985,6 +2090,10 @@ private HypervisorType getHypervisorType(Volume volume) { } private long getBytesRequiredForTemplate(VMTemplateVO tmpl, StoragePool pool) { + if (tmplFactory.isTemplateMarkedForDirectDownload(tmpl.getId())) { + return tmpl.getSize(); + } + DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName()); DataStoreDriver storeDriver = storeProvider.getDataStoreDriver(); @@ -2523,6 +2632,8 @@ public ConfigKey[] 
getConfigKeys() { KvmStorageOnlineMigrationWait, KvmAutoConvergence, MaxNumberOfManagedClusteredFileSystems, + STORAGE_POOL_DISK_WAIT, + STORAGE_POOL_CLIENT_TIMEOUT, PRIMARY_STORAGE_DOWNLOAD_WAIT, SecStorageMaxMigrateSessions, MaxDataMigrationWaitTime diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index b5c33eb5fa11..e16ccc186663 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -67,6 +67,8 @@ import org.apache.cloudstack.framework.jobs.impl.OutcomeImpl; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; import org.apache.cloudstack.jobs.JobInfo; +import org.apache.cloudstack.resourcedetail.DiskOfferingDetailVO; +import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; import org.apache.cloudstack.storage.command.AttachAnswer; import org.apache.cloudstack.storage.command.AttachCommand; import org.apache.cloudstack.storage.command.DettachCommand; @@ -126,6 +128,7 @@ import com.cloud.storage.dao.StoragePoolTagsDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; +import com.cloud.storage.dao.VolumeDetailsDao; import com.cloud.storage.snapshot.SnapshotApiService; import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.template.TemplateManager; @@ -206,6 +209,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic @Inject private VolumeDao _volsDao; @Inject + private VolumeDetailsDao _volsDetailsDao; + @Inject private HostDao _hostDao; @Inject private SnapshotDao _snapshotDao; @@ -224,6 +229,8 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic @Inject private DiskOfferingDao _diskOfferingDao; @Inject + private DiskOfferingDetailsDao _diskOfferingDetailsDao; + @Inject private AccountDao _accountDao; @Inject private DataCenterDao _dcDao; @@ 
-585,6 +592,7 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept parentVolume = null; } + Map details = new HashMap<>(); if (cmd.getDiskOfferingId() != null) { // create a new volume diskOfferingId = cmd.getDiskOfferingId(); @@ -626,6 +634,15 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept size = diskOffering.getDiskSize(); } + DiskOfferingDetailVO bandwidthLimitDetail = _diskOfferingDetailsDao.findDetail(diskOfferingId, Volume.BANDWIDTH_LIMIT_IN_MBPS); + if (bandwidthLimitDetail != null) { + details.put(Volume.BANDWIDTH_LIMIT_IN_MBPS, bandwidthLimitDetail.getValue()); + } + DiskOfferingDetailVO iopsLimitDetail = _diskOfferingDetailsDao.findDetail(diskOfferingId, Volume.IOPS_LIMIT); + if (iopsLimitDetail != null) { + details.put(Volume.IOPS_LIMIT, iopsLimitDetail.getValue()); + } + Boolean isCustomizedIops = diskOffering.isCustomizedIops(); if (isCustomizedIops != null) { @@ -653,6 +670,9 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept minIops = diskOffering.getMinIops(); maxIops = diskOffering.getMaxIops(); } + } else { + minIops = diskOffering.getMinIops(); + maxIops = diskOffering.getMaxIops(); } if (!validateVolumeSizeRange(size)) {// convert size from mb to gb @@ -742,11 +762,11 @@ public VolumeVO allocVolume(CreateVolumeCmd cmd) throws ResourceAllocationExcept String userSpecifiedName = getVolumeNameFromCommand(cmd); return commitVolume(cmd, caller, owner, displayVolume, zoneId, diskOfferingId, provisioningType, size, minIops, maxIops, parentVolume, userSpecifiedName, - _uuidMgr.generateUuid(Volume.class, cmd.getCustomId())); + _uuidMgr.generateUuid(Volume.class, cmd.getCustomId()), details); } private VolumeVO commitVolume(final CreateVolumeCmd cmd, final Account caller, final Account owner, final Boolean displayVolume, final Long zoneId, final Long diskOfferingId, - final Storage.ProvisioningType provisioningType, final Long size, final Long minIops, 
final Long maxIops, final VolumeVO parentVolume, final String userSpecifiedName, final String uuid) { + final Storage.ProvisioningType provisioningType, final Long size, final Long minIops, final Long maxIops, final VolumeVO parentVolume, final String userSpecifiedName, final String uuid, final Map details) { return Transaction.execute(new TransactionCallback() { @Override public VolumeVO doInTransaction(TransactionStatus status) { @@ -778,6 +798,19 @@ public VolumeVO doInTransaction(TransactionStatus status) { Volume.class.getName(), volume.getUuid(), displayVolume); } + if (volume != null && details != null) { + List volumeDetailsVO = new ArrayList(); + if (details.containsKey(Volume.BANDWIDTH_LIMIT_IN_MBPS)) { + volumeDetailsVO.add(new VolumeDetailVO(volume.getId(), Volume.BANDWIDTH_LIMIT_IN_MBPS, details.get(Volume.BANDWIDTH_LIMIT_IN_MBPS), false)); + } + if (details.containsKey(Volume.IOPS_LIMIT)) { + volumeDetailsVO.add(new VolumeDetailVO(volume.getId(), Volume.IOPS_LIMIT, details.get(Volume.IOPS_LIMIT), false)); + } + if (!volumeDetailsVO.isEmpty()) { + _volsDetailsDao.saveDetails(volumeDetailsVO); + } + } + CallContext.current().setEventDetails("Volume Id: " + volume.getUuid()); // Increment resource count during allocation; if actual creation fails, @@ -951,7 +984,7 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep newMaxIops = volume.getMaxIops(); } - validateIops(newMinIops, newMaxIops); + validateIops(newMinIops, newMaxIops, volume.getPoolType()); } else { if (newDiskOffering.getRemoved() != null) { throw new InvalidParameterValueException("Requested disk offering has been removed."); @@ -996,7 +1029,7 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep newMinIops = cmd.getMinIops() != null ? cmd.getMinIops() : volume.getMinIops(); newMaxIops = cmd.getMaxIops() != null ? 
cmd.getMaxIops() : volume.getMaxIops(); - validateIops(newMinIops, newMaxIops); + validateIops(newMinIops, newMaxIops, volume.getPoolType()); } else { newMinIops = newDiskOffering.getMinIops(); newMaxIops = newDiskOffering.getMaxIops(); @@ -1139,7 +1172,12 @@ public VolumeVO resizeVolume(ResizeVolumeCmd cmd) throws ResourceAllocationExcep shrinkOk); } - private void validateIops(Long minIops, Long maxIops) { + private void validateIops(Long minIops, Long maxIops, Storage.StoragePoolType poolType) { + if (poolType == Storage.StoragePoolType.PowerFlex) { + // PowerFlex takes iopsLimit as input, skip minIops validation + minIops = (maxIops != null) ? Long.valueOf(0) : null; + } + if ((minIops == null && maxIops != null) || (minIops != null && maxIops == null)) { throw new InvalidParameterValueException("Either 'miniops' and 'maxiops' must both be provided or neither must be provided."); } @@ -1250,7 +1288,9 @@ private VolumeVO orchestrateResizeVolume(long volumeId, long currentSize, long n volume.setDiskOfferingId(newDiskOfferingId); } - if (currentSize != newSize) { + // Update size if volume has same size as before, else it is already updated + final VolumeVO volumeNow = _volsDao.findById(volumeId); + if (currentSize == volumeNow.getSize() && currentSize != newSize) { volume.setSize(newSize); } @@ -3020,6 +3060,7 @@ private VolumeVO sendAttachVolumeCommand(UserVmVO vm, VolumeVO volumeToAttach, L details.put(DiskTO.IQN, volumeToAttach.get_iScsiName()); details.put(DiskTO.MOUNT_POINT, volumeToAttach.get_iScsiName()); details.put(DiskTO.PROTOCOL_TYPE, (volumeToAttach.getPoolType() != null) ? 
volumeToAttach.getPoolType().toString() : null); + details.put(StorageManager.STORAGE_POOL_DISK_WAIT.toString(), String.valueOf(StorageManager.STORAGE_POOL_DISK_WAIT.valueIn(volumeToAttachStoragePool.getId()))); if (chapInfo != null) { details.put(DiskTO.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername()); diff --git a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java index 0b149189f419..eca96efff3e1 100644 --- a/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java +++ b/server/src/main/java/com/cloud/storage/listener/StoragePoolMonitor.java @@ -20,14 +20,13 @@ import javax.inject.Inject; -import org.apache.log4j.Logger; - import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; @@ -44,6 +43,7 @@ import com.cloud.storage.ScopeType; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageManagerImpl; +import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; public class StoragePoolMonitor implements Listener { @@ -137,7 +137,49 @@ public void processConnect(Host host, StartupCommand cmd, boolean forRebalance) @Override public synchronized boolean processDisconnect(long agentId, Status state) { - return true; + Host host = _storageManager.getHost(agentId); + if (host == null) { + s_logger.warn("Agent: " + agentId + " not found, not disconnecting pools"); + return false; + } + + if 
(host.getType() != Host.Type.Routing) { + return false; + } + + List storagePoolHosts = _storageManager.findStoragePoolsConnectedToHost(host.getId()); + if (storagePoolHosts == null) { + if (s_logger.isTraceEnabled()) { + s_logger.trace("No pools to disconnect for host: " + host.getId()); + } + return true; + } + + boolean disconnectResult = true; + for (StoragePoolHostVO storagePoolHost : storagePoolHosts) { + StoragePoolVO pool = _poolDao.findById(storagePoolHost.getPoolId()); + if (pool == null) { + continue; + } + + if (!pool.isShared()) { + continue; + } + + // Handle only PowerFlex pool for now, not to impact other pools behavior + if (pool.getPoolType() != StoragePoolType.PowerFlex) { + continue; + } + + try { + _storageManager.disconnectHostFromSharedPool(host.getId(), pool.getId()); + } catch (Exception e) { + s_logger.error("Unable to disconnect host " + host.getId() + " from storage pool id " + pool.getId() + " due to " + e.toString()); + disconnectResult = false; + } + } + + return disconnectResult; } @Override diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 35ec665b97d3..0e15a2a70cbd 100755 --- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -1287,7 +1287,7 @@ private DataStoreRole getDataStoreRole(Snapshot snapshot, SnapshotDataStoreDao s } StoragePoolVO storagePoolVO = _storagePoolDao.findById(storagePoolId); - if (storagePoolVO.getPoolType() == StoragePoolType.RBD && !BackupSnapshotAfterTakingSnapshot.value()) { + if ((storagePoolVO.getPoolType() == StoragePoolType.RBD || storagePoolVO.getPoolType() == StoragePoolType.PowerFlex) && !BackupSnapshotAfterTakingSnapshot.value()) { return DataStoreRole.Primary; } diff --git a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java 
b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java index 80ca46912f24..057f672b4ea8 100644 --- a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.template; +import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.LinkedList; @@ -136,12 +137,23 @@ public String getName() { * Validate on random running KVM host that URL is reachable * @param url url */ - private Long performDirectDownloadUrlValidation(final String url) { - HostVO host = resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM); + private Long performDirectDownloadUrlValidation(final String format, final String url, final List zoneIds) { + HostVO host = null; + if (zoneIds != null && !zoneIds.isEmpty()) { + for (Long zoneId : zoneIds) { + host = resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, zoneId); + if (host != null) { + break; + } + } + } else { + host = resourceManager.findOneRandomRunningHostByHypervisor(Hypervisor.HypervisorType.KVM, null); + } + if (host == null) { throw new CloudRuntimeException("Couldn't find a host to validate URL " + url); } - CheckUrlCommand cmd = new CheckUrlCommand(url); + CheckUrlCommand cmd = new CheckUrlCommand(format, url); s_logger.debug("Performing URL " + url + " validation on host " + host.getId()); Answer answer = _agentMgr.easySend(host.getId(), cmd); if (answer == null || !answer.getResult()) { @@ -158,7 +170,12 @@ public TemplateProfile prepare(RegisterIsoCmd cmd) throws ResourceAllocationExce UriUtils.validateUrl(ImageFormat.ISO.getFileExtension(), url); if (cmd.isDirectDownload()) { DigestHelper.validateChecksumString(cmd.getChecksum()); - Long templateSize = performDirectDownloadUrlValidation(url); + List zoneIds = null; + if (cmd.getZoneId() != null) { + zoneIds = new 
ArrayList<>(); + zoneIds.add(cmd.getZoneId()); + } + Long templateSize = performDirectDownloadUrlValidation(ImageFormat.ISO.getFileExtension(), url, zoneIds); profile.setSize(templateSize); } profile.setUrl(url); @@ -183,7 +200,7 @@ public TemplateProfile prepare(RegisterTemplateCmd cmd) throws ResourceAllocatio UriUtils.validateUrl(cmd.getFormat(), url); if (cmd.isDirectDownload()) { DigestHelper.validateChecksumString(cmd.getChecksum()); - Long templateSize = performDirectDownloadUrlValidation(url); + Long templateSize = performDirectDownloadUrlValidation(cmd.getFormat(), url, cmd.getZoneIds()); profile.setSize(templateSize); } profile.setUrl(url); @@ -575,6 +592,14 @@ public boolean delete(TemplateProfile profile) { // find all eligible image stores for this template List iStores = templateMgr.getImageStoreByTemplate(template.getId(), null); if (iStores == null || iStores.size() == 0) { + // remove any references from template_zone_ref + List templateZones = templateZoneDao.listByTemplateId(template.getId()); + if (templateZones != null) { + for (VMTemplateZoneVO templateZone : templateZones) { + templateZoneDao.remove(templateZone.getId()); + } + } + // Mark template as Inactive. 
template.setState(VirtualMachineTemplate.State.Inactive); _tmpltDao.update(template.getId(), template); @@ -592,7 +617,6 @@ public boolean delete(TemplateProfile profile) { } return success; - } @Override diff --git a/server/src/main/java/com/cloud/vm/UserVmManager.java b/server/src/main/java/com/cloud/vm/UserVmManager.java index 6edf7e6caec8..3b7708f884a0 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManager.java +++ b/server/src/main/java/com/cloud/vm/UserVmManager.java @@ -86,7 +86,7 @@ public interface UserVmManager extends UserVmService { HashMap> getVmDiskStatistics(long hostId, String hostName, List vmIds); - HashMap getVolumeStatistics(long clusterId, String poolUuid, StoragePoolType poolType, List volumeLocator, int timout); + HashMap getVolumeStatistics(long clusterId, String poolUuid, StoragePoolType poolType, List volumeLocator, int timeout); boolean deleteVmGroup(long groupId); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index ef7617699ced..02389c6ca8f9 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -255,6 +255,7 @@ import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.Storage.TemplateType; +import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.TemplateOVFPropertyVO; @@ -504,6 +505,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir private ResourceTagDao resourceTagDao; @Inject private TemplateOVFPropertiesDao templateOVFPropertiesDao; + @Inject + private StorageManager _storageManager; private ScheduledExecutorService _executor = null; private ScheduledExecutorService _vmIpFetchExecutor = null; @@ -1952,10 +1955,8 @@ public HashMap getVolumeStatistics(long clusterId, Str // apply 
filters: // - managed storage // - local storage - if (storagePool.isManaged() || storagePool.isLocal()) { - + if ((storagePool.isManaged() && poolType != StoragePoolType.PowerFlex) || storagePool.isLocal()) { volumeLocators = getVolumesByHost(neighbor, storagePool); - } // - zone wide storage for specific hypervisortypes @@ -1965,14 +1966,21 @@ public HashMap getVolumeStatistics(long clusterId, Str } GetVolumeStatsCommand cmd = new GetVolumeStatsCommand(poolType, poolUuid, volumeLocators); + Answer answer = null; - if (timeout > 0) { - cmd.setWait(timeout/1000); - } + if (poolType == StoragePoolType.PowerFlex) { + // Get volume stats from the pool directly instead of sending cmd to host + // Added support for ScaleIO/PowerFlex pool only + answer = _storageManager.getVolumeStats(storagePool, cmd); + } else { + if (timeout > 0) { + cmd.setWait(timeout/1000); + } - Answer answer = _agentMgr.easySend(neighbor.getId(), cmd); + answer = _agentMgr.easySend(neighbor.getId(), cmd); + } - if (answer instanceof GetVolumeStatsAnswer){ + if (answer != null && answer instanceof GetVolumeStatsAnswer) { GetVolumeStatsAnswer volstats = (GetVolumeStatsAnswer)answer; return volstats.getVolumeStats(); } diff --git a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java index a117af2bbab2..4a7840eb784f 100644 --- a/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java @@ -50,6 +50,8 @@ import org.apache.cloudstack.framework.jobs.impl.OutcomeImpl; import org.apache.cloudstack.framework.jobs.impl.VmWorkJobVO; import org.apache.cloudstack.jobs.JobInfo; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.utils.identity.ManagementServerNode; @@ 
-76,6 +78,7 @@ import com.cloud.storage.GuestOSVO; import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Volume; import com.cloud.storage.Volume.Type; @@ -109,12 +112,12 @@ import com.cloud.vm.UserVmDetailVO; import com.cloud.vm.UserVmManager; import com.cloud.vm.UserVmVO; -import com.cloud.vm.VmDetailConstants; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.VmDetailConstants; import com.cloud.vm.VmWork; import com.cloud.vm.VmWorkConstants; import com.cloud.vm.VmWorkJobHandler; @@ -166,6 +169,8 @@ public class VMSnapshotManagerImpl extends MutualExclusiveIdsManagerBase impleme protected UserVmDetailsDao _userVmDetailsDao; @Inject protected VMSnapshotDetailsDao _vmSnapshotDetailsDao; + @Inject + PrimaryDataStoreDao _storagePoolDao; VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this); @@ -358,9 +363,33 @@ public VMSnapshot allocVMSnapshot(Long vmId, String vsDisplayName, String vsDesc throw new InvalidParameterValueException("Can not snapshot memory when VM is not in Running state"); } + List rootVolumes = _volumeDao.findReadyRootVolumesByInstance(userVmVo.getId()); + if (rootVolumes == null || rootVolumes.isEmpty()) { + throw new CloudRuntimeException("Unable to find root volume for the user vm:" + userVmVo.getUuid()); + } + + VolumeVO rootVolume = rootVolumes.get(0); + StoragePoolVO rootVolumePool = _storagePoolDao.findById(rootVolume.getPoolId()); + if (rootVolumePool == null) { + throw new CloudRuntimeException("Unable to find root volume storage pool for the user vm:" + userVmVo.getUuid()); + } + // for KVM, only allow snapshot with memory when VM is in running state - if (userVmVo.getHypervisorType() == HypervisorType.KVM && userVmVo.getState() == 
State.Running && !snapshotMemory) { - throw new InvalidParameterValueException("KVM VM does not allow to take a disk-only snapshot when VM is in running state"); + if (userVmVo.getHypervisorType() == HypervisorType.KVM) { + if (rootVolumePool.getPoolType() != Storage.StoragePoolType.PowerFlex) { + if (userVmVo.getState() == State.Running && !snapshotMemory) { + throw new InvalidParameterValueException("KVM VM does not allow to take a disk-only snapshot when VM is in running state"); + } + } else { + if (snapshotMemory) { + throw new InvalidParameterValueException("Can not snapshot memory for PowerFlex storage pool"); + } + + // All volumes should be on the same PowerFlex storage pool for VM Snapshot + if (!isVolumesOfUserVmOnSameStoragePool(userVmVo.getId(), rootVolumePool.getId())) { + throw new InvalidParameterValueException("All volumes of the VM: " + userVmVo.getUuid() + " should be on the same PowerFlex storage pool"); + } + } } // check access @@ -379,8 +408,14 @@ public VMSnapshot allocVMSnapshot(Long vmId, String vsDisplayName, String vsDesc if (activeSnapshots.size() > 0) { throw new CloudRuntimeException("There is other active volume snapshot tasks on the instance to which the volume is attached, please try again later."); } - if (userVmVo.getHypervisorType() == HypervisorType.KVM && volume.getFormat() != ImageFormat.QCOW2) { - throw new CloudRuntimeException("We only support create vm snapshots from vm with QCOW2 image"); + if (userVmVo.getHypervisorType() == HypervisorType.KVM) { + if (volume.getPoolType() != Storage.StoragePoolType.PowerFlex) { + if (volume.getFormat() != ImageFormat.QCOW2) { + throw new CloudRuntimeException("We only support create vm snapshots from vm with QCOW2 image"); + } + } else if (volume.getFormat() != ImageFormat.RAW) { + throw new CloudRuntimeException("Only support create vm snapshots for volumes on PowerFlex with RAW image"); + } } } @@ -393,6 +428,10 @@ public VMSnapshot allocVMSnapshot(Long vmId, String vsDisplayName, 
String vsDesc if (snapshotMemory && userVmVo.getState() == VirtualMachine.State.Running) vmSnapshotType = VMSnapshot.Type.DiskAndMemory; + if (rootVolumePool.getPoolType() == Storage.StoragePoolType.PowerFlex) { + vmSnapshotType = VMSnapshot.Type.Disk; + } + try { return createAndPersistVMSnapshot(userVmVo, vsDescription, vmSnapshotName, vsDisplayName, vmSnapshotType); } catch (Exception e) { @@ -402,6 +441,21 @@ public VMSnapshot allocVMSnapshot(Long vmId, String vsDisplayName, String vsDesc return null; } + private boolean isVolumesOfUserVmOnSameStoragePool(Long userVmId, Long poolId) { + List volumesOfVm = _volumeDao.findCreatedByInstance(userVmId); + if (volumesOfVm == null || volumesOfVm.isEmpty()) { + throw new CloudRuntimeException("Unable to find volumes for the user vm:" + userVmId); + } + + for (VolumeVO volume : volumesOfVm) { + if (volume == null || volume.getPoolId() != poolId) { + return false; + } + } + + return true; + } + /** * Create, persist and return vm snapshot for userVmVo with given parameters. 
* Persistence and support for custom service offerings are done on the same transaction diff --git a/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java b/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java index a05c4b9e4aa5..c1896b761751 100644 --- a/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/direct/download/DirectDownloadManagerImpl.java @@ -20,32 +20,6 @@ import static com.cloud.storage.Storage.ImageFormat; -import com.cloud.agent.AgentManager; -import com.cloud.agent.api.Answer; -import com.cloud.dc.DataCenterVO; -import com.cloud.dc.dao.DataCenterDao; -import com.cloud.event.ActionEventUtils; -import com.cloud.event.EventTypes; -import com.cloud.event.EventVO; -import com.cloud.exception.AgentUnavailableException; -import com.cloud.exception.OperationTimedoutException; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.Status; -import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.DataStoreRole; -import com.cloud.storage.ScopeType; -import com.cloud.storage.Storage; -import com.cloud.storage.VMTemplateStoragePoolVO; -import com.cloud.storage.VMTemplateStorageResourceAssoc; -import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.dao.VMTemplateDao; -import com.cloud.storage.dao.VMTemplatePoolDao; -import com.cloud.utils.component.ManagerBase; -import com.cloud.utils.concurrency.NamedThreadFactory; -import com.cloud.utils.exception.CloudRuntimeException; - import java.net.URI; import java.net.URISyntaxException; import java.security.cert.Certificate; @@ -79,6 +53,9 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.managed.context.ManagedContextRunnable; @@ -86,12 +63,39 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.log4j.Logger; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.event.ActionEventUtils; +import com.cloud.event.EventTypes; +import com.cloud.event.EventVO; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageManager; +import com.cloud.storage.VMTemplateStoragePoolVO; +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.utils.component.ManagerBase; +import 
com.cloud.utils.concurrency.NamedThreadFactory; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.security.CertificateHelper; import sun.security.x509.X509CertImpl; @@ -126,6 +130,10 @@ public class DirectDownloadManagerImpl extends ManagerBase implements DirectDown private DataCenterDao dataCenterDao; @Inject private ConfigurationDao configDao; + @Inject + private TemplateDataFactory tmplFactory; + @Inject + private VolumeService volService; protected ScheduledExecutorService executorService; @@ -259,7 +267,14 @@ public void downloadTemplate(long templateId, long poolId, long hostId) { DownloadProtocol protocol = getProtocolFromUrl(url); DirectDownloadCommand cmd = getDirectDownloadCommandFromProtocol(protocol, url, templateId, to, checksum, headers); cmd.setTemplateSize(template.getSize()); - cmd.setIso(template.getFormat() == ImageFormat.ISO); + cmd.setFormat(template.getFormat()); + + if (tmplFactory.getTemplate(templateId, store) != null) { + cmd.setDestData((TemplateObjectTO) tmplFactory.getTemplate(templateId, store).getTO()); + } + + int cmdTimeOut = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); + cmd.setWait(cmdTimeOut); Answer answer = sendDirectDownloadCommand(cmd, template, poolId, host); @@ -277,6 +292,16 @@ public void downloadTemplate(long templateId, long poolId, long hostId) { sPoolRef.setLocalDownloadPath(ans.getInstallPath()); sPoolRef.setInstallPath(ans.getInstallPath()); vmTemplatePoolDao.persist(sPoolRef); + } else { + // For managed storage, update after template downloaded and copied to the disk + DirectDownloadAnswer ans = (DirectDownloadAnswer) answer; + sPoolRef.setDownloadPercent(100); + sPoolRef.setDownloadState(VMTemplateStorageResourceAssoc.Status.DOWNLOADED); + sPoolRef.setState(ObjectInDataStoreStateMachine.State.Ready); + sPoolRef.setTemplateSize(ans.getTemplateSize()); + sPoolRef.setLocalDownloadPath(ans.getInstallPath()); + sPoolRef.setInstallPath(ans.getInstallPath()); + 
vmTemplatePoolDao.update(sPoolRef.getId(), sPoolRef); } } @@ -294,20 +319,39 @@ private Answer sendDirectDownloadCommand(DirectDownloadCommand cmd, VMTemplateVO int retry = 3; StoragePoolVO storagePoolVO = primaryDataStoreDao.findById(poolId); + // TODO: Move the host retry attempts to upper layer Long[] hostsToRetry = getHostsToRetryOn(host, storagePoolVO); int hostIndex = 0; Answer answer = null; Long hostToSendDownloadCmd = hostsToRetry[hostIndex]; boolean continueRetrying = true; while (!downloaded && retry > 0 && continueRetrying) { - s_logger.debug("Sending Direct download command to host " + hostToSendDownloadCmd); - answer = agentManager.easySend(hostToSendDownloadCmd, cmd); - if (answer != null) { - DirectDownloadAnswer ans = (DirectDownloadAnswer)answer; - downloaded = answer.getResult(); - continueRetrying = ans.isRetryOnOtherHosts(); + PrimaryDataStore primaryDataStore = null; + TemplateInfo templateOnPrimary = null; + + try { + if (hostToSendDownloadCmd != host.getId() && storagePoolVO.isManaged()) { + primaryDataStore = (PrimaryDataStore) dataStoreManager.getPrimaryDataStore(poolId); + templateOnPrimary = primaryDataStore.getTemplate(template.getId()); + if (templateOnPrimary != null) { + volService.grantAccess(templateOnPrimary, host, primaryDataStore); + } + } + + s_logger.debug("Sending Direct download command to host " + hostToSendDownloadCmd); + answer = agentManager.easySend(hostToSendDownloadCmd, cmd); + if (answer != null) { + DirectDownloadAnswer ans = (DirectDownloadAnswer)answer; + downloaded = answer.getResult(); + continueRetrying = ans.isRetryOnOtherHosts(); + } + hostToSendDownloadCmd = hostsToRetry[(hostIndex + 1) % hostsToRetry.length]; + } finally { + if (templateOnPrimary != null) { + volService.revokeAccess(templateOnPrimary, host, primaryDataStore); + } } - hostToSendDownloadCmd = hostsToRetry[(hostIndex + 1) % hostsToRetry.length]; + retry --; } if (!downloaded) { @@ -488,6 +532,39 @@ public boolean uploadCertificate(long 
certificateId, long hostId) { return true; } + @Override + public boolean syncCertificatesToHost(long hostId, long zoneId) { + List zoneCertificates = directDownloadCertificateDao.listByZone(zoneId); + if (CollectionUtils.isEmpty(zoneCertificates)) { + if (s_logger.isTraceEnabled()) { + s_logger.trace("No certificates to sync on host: " + hostId); + } + return true; + } + + boolean syncCertificatesResult = true; + int certificatesSyncCount = 0; + s_logger.debug("Syncing certificates on host: " + hostId); + for (DirectDownloadCertificateVO certificateVO : zoneCertificates) { + DirectDownloadCertificateHostMapVO mapping = directDownloadCertificateHostMapDao.findByCertificateAndHost(certificateVO.getId(), hostId); + if (mapping == null) { + s_logger.debug("Syncing certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") on host: " + hostId + ", uploading it"); + if (!uploadCertificate(certificateVO.getId(), hostId)) { + String msg = "Could not sync certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") on host: " + hostId + ", upload failed"; + s_logger.error(msg); + syncCertificatesResult = false; + } else { + certificatesSyncCount++; + } + } else { + s_logger.debug("Certificate " + certificateVO.getId() + " (" + certificateVO.getAlias() + ") already synced on host: " + hostId); + } + } + + s_logger.debug("Synced " + certificatesSyncCount + " out of " + zoneCertificates.size() + " certificates on host: " + hostId); + return syncCertificatesResult; + } + @Override public boolean revokeCertificateAlias(String certificateAlias, String hypervisor, Long zoneId, Long hostId) { HypervisorType hypervisorType = HypervisorType.getType(hypervisor); diff --git a/server/src/test/java/com/cloud/network/element/ConfigDriveNetworkElementTest.java b/server/src/test/java/com/cloud/network/element/ConfigDriveNetworkElementTest.java index 5d206f4c16d6..ab3489f36b83 100644 --- 
a/server/src/test/java/com/cloud/network/element/ConfigDriveNetworkElementTest.java +++ b/server/src/test/java/com/cloud/network/element/ConfigDriveNetworkElementTest.java @@ -60,6 +60,7 @@ import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.HandleConfigDriveIsoAnswer; import com.cloud.agent.api.HandleConfigDriveIsoCommand; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; @@ -228,7 +229,7 @@ public void testExpunge() throws NoTransitionException, NoSuchFieldException, Il when(virtualMachine.getState()).thenReturn(VirtualMachine.State.Stopped); when(_vmInstanceDao.updateState(VirtualMachine.State.Stopped, VirtualMachine.Event.ExpungeOperation, VirtualMachine.State.Expunging, virtualMachine, null)).thenReturn(true); - final Answer answer = mock(Answer.class); + final HandleConfigDriveIsoAnswer answer = mock(HandleConfigDriveIsoAnswer.class); when(agentManager.easySend(anyLong(), any(HandleConfigDriveIsoCommand.class))).thenReturn(answer); when(answer.getResult()).thenReturn(true); @@ -267,10 +268,11 @@ public void testAddPasswordAndUserData() throws Exception { Method method = ReflectionUtils.getMethods(ConfigDriveBuilder.class, ReflectionUtils.withName("buildConfigDrive")).iterator().next(); PowerMockito.when(ConfigDriveBuilder.class, method).withArguments(Mockito.anyListOf(String[].class), Mockito.anyString(), Mockito.anyString()).thenReturn("content"); - final Answer answer = mock(Answer.class); + final HandleConfigDriveIsoAnswer answer = mock(HandleConfigDriveIsoAnswer.class); final UserVmDetailVO userVmDetailVO = mock(UserVmDetailVO.class); when(agentManager.easySend(anyLong(), any(HandleConfigDriveIsoCommand.class))).thenReturn(answer); when(answer.getResult()).thenReturn(true); + when(answer.getConfigDriveLocation()).thenReturn(NetworkElement.Location.PRIMARY); when(network.getTrafficType()).thenReturn(Networks.TrafficType.Guest); 
when(virtualMachine.getState()).thenReturn(VirtualMachine.State.Stopped); when(virtualMachine.getUuid()).thenReturn("vm-uuid"); diff --git a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java index 5fc9a4dcfdb8..4e1daa87c346 100755 --- a/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java +++ b/server/src/test/java/com/cloud/resource/MockResourceManagerImpl.java @@ -617,7 +617,7 @@ public HashMap> getGPUStatistics(final Ho } @Override - public HostVO findOneRandomRunningHostByHypervisor(HypervisorType type) { + public HostVO findOneRandomRunningHostByHypervisor(HypervisorType type, Long dcId) { // TODO Auto-generated method stub return null; } diff --git a/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java b/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java index 2cf763fe93c8..91cdbb5863ca 100644 --- a/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java +++ b/server/src/test/java/com/cloud/vm/snapshot/VMSnapshotManagerTest.java @@ -23,9 +23,9 @@ import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.util.ArrayList; import java.util.Arrays; @@ -33,6 +33,12 @@ import java.util.List; import java.util.Map; +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.acl.SecurityChecker.AccessType; +import org.apache.cloudstack.api.ResourceDetail; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.junit.Before; import org.junit.Test; 
import org.mockito.ArgumentCaptor; @@ -42,12 +48,6 @@ import org.mockito.MockitoAnnotations; import org.mockito.Spy; -import org.apache.cloudstack.acl.ControlledEntity; -import org.apache.cloudstack.acl.SecurityChecker.AccessType; -import org.apache.cloudstack.api.ResourceDetail; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; - import com.cloud.agent.AgentManager; import com.cloud.exception.AgentUnavailableException; import com.cloud.exception.ConcurrentOperationException; @@ -183,6 +183,7 @@ public void setup() { _vmSnapshotMgr._userVMDao = _userVMDao; _vmSnapshotMgr._vmSnapshotDao = _vmSnapshotDao; _vmSnapshotMgr._volumeDao = _volumeDao; + _vmSnapshotMgr._storagePoolDao = _storagePoolDao; _vmSnapshotMgr._accountMgr = _accountMgr; _vmSnapshotMgr._snapshotDao = _snapshotDao; _vmSnapshotMgr._guestOSDao = _guestOSDao; @@ -208,6 +209,8 @@ public void setup() { mockVolumeList.add(volumeMock); when(volumeMock.getInstanceId()).thenReturn(TEST_VM_ID); when(_volumeDao.findByInstance(anyLong())).thenReturn(mockVolumeList); + when(_volumeDao.findReadyRootVolumesByInstance(anyLong())).thenReturn(mockVolumeList); + when(_storagePoolDao.findById(anyLong())).thenReturn(mock(StoragePoolVO.class)); when(vmMock.getId()).thenReturn(TEST_VM_ID); when(vmMock.getServiceOfferingId()).thenReturn(SERVICE_OFFERING_ID); @@ -299,7 +302,6 @@ public void testAllocVMSnapshotF5() throws ResourceAllocationException { public void testCreateVMSnapshot() throws AgentUnavailableException, OperationTimedoutException, ResourceAllocationException, NoTransitionException { when(vmMock.getState()).thenReturn(State.Running); _vmSnapshotMgr.allocVMSnapshot(TEST_VM_ID, "", "", true); - } @Test diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java 
b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index dd002a9a2367..abc932319028 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -100,6 +100,7 @@ import com.cloud.agent.api.DeleteSnapshotsDirCommand; import com.cloud.agent.api.GetStorageStatsAnswer; import com.cloud.agent.api.GetStorageStatsCommand; +import com.cloud.agent.api.HandleConfigDriveIsoAnswer; import com.cloud.agent.api.HandleConfigDriveIsoCommand; import com.cloud.agent.api.PingCommand; import com.cloud.agent.api.PingStorageCommand; @@ -138,6 +139,7 @@ import com.cloud.host.Host; import com.cloud.host.Host.Type; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.network.element.NetworkElement; import com.cloud.resource.ServerResourceBase; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Storage; @@ -318,7 +320,7 @@ public Answer executeRequest(Command cmd) { private Answer execute(HandleConfigDriveIsoCommand cmd) { if (cmd.isCreate()) { if (cmd.getIsoData() == null) { - return new Answer(cmd, false, "Invalid config drive ISO data"); + return new HandleConfigDriveIsoAnswer(cmd, "Invalid config drive ISO data"); } String nfsMountPoint = getRootDir(cmd.getDestStore().getUrl(), _nfsVersion); File isoFile = new File(nfsMountPoint, cmd.getIsoFile()); @@ -331,7 +333,7 @@ private Answer execute(HandleConfigDriveIsoCommand cmd) { File tmpIsoFile = ConfigDriveBuilder.base64StringToFile(cmd.getIsoData(), tempDir.toAbsolutePath().toString(), cmd.getIsoFile()); copyLocalToNfs(tmpIsoFile, new File(cmd.getIsoFile()), cmd.getDestStore()); } catch (IOException | ConfigurationException e) { - return new Answer(cmd, false, "Failed due to exception: " + e.getMessage()); + return new 
HandleConfigDriveIsoAnswer(cmd, "Failed due to exception: " + e.getMessage()); } finally { try { if (tempDir != null) { @@ -341,7 +343,7 @@ private Answer execute(HandleConfigDriveIsoCommand cmd) { s_logger.warn("Failed to delete ConfigDrive temporary directory: " + tempDir.toString(), ioe); } } - return new Answer(cmd, true, "Successfully saved config drive at secondary storage"); + return new HandleConfigDriveIsoAnswer(cmd, NetworkElement.Location.SECONDARY, "Successfully saved config drive at secondary storage"); } else { DataStoreTO dstore = cmd.getDestStore(); if (dstore instanceof NfsTO) { @@ -352,11 +354,11 @@ private Answer execute(HandleConfigDriveIsoCommand cmd) { try { Files.deleteIfExists(tmpltPath.toPath()); } catch (IOException e) { - return new Answer(cmd, e); + return new HandleConfigDriveIsoAnswer(cmd, e); } - return new Answer(cmd); + return new HandleConfigDriveIsoAnswer(cmd); } else { - return new Answer(cmd, false, "Not implemented yet"); + return new HandleConfigDriveIsoAnswer(cmd, "Not implemented yet"); } } } diff --git a/systemvm/debian/opt/cloud/bin/filesystem_writable_check.py b/systemvm/debian/opt/cloud/bin/filesystem_writable_check.py new file mode 100644 index 000000000000..07e50d5f5e05 --- /dev/null +++ b/systemvm/debian/opt/cloud/bin/filesystem_writable_check.py @@ -0,0 +1,45 @@ +#!/usr/bin/python +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import sys, os + +def check_filesystem(): + ST_RDONLY = 1 + if os.ST_RDONLY is not None: + ST_RDONLY = os.ST_RDONLY + + stat1 = os.statvfs('/root') + readOnly1 = bool(stat1.f_flag & ST_RDONLY) + + if (readOnly1): + print "Read-only file system : monitor results (/root) file system is mounted as read-only" + exit(1) + + stat2 = os.statvfs('/var/cache/cloud') + readOnly2 = bool(stat2.f_flag & ST_RDONLY) + + if (readOnly2): + print "Read-only file system : config info (/var/cache/cloud) file system is mounted as read-only" + exit(1) + + print "file system is writable" + exit(0) + + +if __name__ == "__main__": + check_filesystem() \ No newline at end of file diff --git a/ui/scripts/instances.js b/ui/scripts/instances.js index 3a272d9cd243..91d8e6207322 100644 --- a/ui/scripts/instances.js +++ b/ui/scripts/instances.js @@ -4189,6 +4189,10 @@ allowedActions.push("storageSnapshot"); } + if (jsonObj.hypervisor == 'KVM' && jsonObj.pooltype == 'PowerFlex') { + allowedActions.push("snapshot"); + } + allowedActions.push("scaleUp"); //when vm is stopped, scaleUp is supported for all hypervisors allowedActions.push("changeAffinity"); diff --git a/utils/pom.xml b/utils/pom.xml index 8a745aa21a71..d2922ce6c37a 100755 --- a/utils/pom.xml +++ b/utils/pom.xml @@ -178,6 +178,11 @@ jackson-databind ${cs.jackson.version} + + org.apache.commons + commons-compress + ${cs.commons-compress.version} + diff --git a/utils/src/main/java/com/cloud/utils/SerialVersionUID.java b/utils/src/main/java/com/cloud/utils/SerialVersionUID.java index 21fdbb4cf111..363248c99a96 100644 --- 
a/utils/src/main/java/com/cloud/utils/SerialVersionUID.java +++ b/utils/src/main/java/com/cloud/utils/SerialVersionUID.java @@ -70,4 +70,5 @@ public interface SerialVersionUID { public static final long SnapshotBackupException = Base | 0x2e; public static final long UnavailableCommandException = Base | 0x2f; public static final long OriginDeniedException = Base | 0x30; + public static final long StorageAccessException = Base | 0x31; } diff --git a/utils/src/main/java/com/cloud/utils/storage/QCOW2Utils.java b/utils/src/main/java/com/cloud/utils/storage/QCOW2Utils.java index 3e08bd6634ed..b45d5b4e3835 100644 --- a/utils/src/main/java/com/cloud/utils/storage/QCOW2Utils.java +++ b/utils/src/main/java/com/cloud/utils/storage/QCOW2Utils.java @@ -19,14 +19,27 @@ package com.cloud.utils.storage; +import java.io.BufferedInputStream; import java.io.IOException; import java.io.InputStream; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; + +import org.apache.commons.compress.compressors.CompressorException; +import org.apache.commons.compress.compressors.CompressorInputStream; +import org.apache.commons.compress.compressors.CompressorStreamFactory; +import org.apache.log4j.Logger; import com.cloud.utils.NumbersUtil; public final class QCOW2Utils { + public static final Logger LOGGER = Logger.getLogger(QCOW2Utils.class.getName()); + private static final int VIRTUALSIZE_HEADER_LOCATION = 24; private static final int VIRTUALSIZE_HEADER_LENGTH = 8; + private static final int MAGIC_HEADER_LENGTH = 4; /** * Private constructor -> This utility class cannot be instantiated. 
@@ -57,4 +70,55 @@ public static long getVirtualSize(InputStream inputStream) throws IOException { return NumbersUtil.bytesToLong(bytes); } + + public static long getVirtualSize(String urlStr) { + InputStream inputStream = null; + + try { + URL url = new URL(urlStr); + BufferedInputStream bufferedInputStream = new BufferedInputStream(url.openStream()); + inputStream = bufferedInputStream; + + try { + CompressorInputStream compressorInputStream = new CompressorStreamFactory().createCompressorInputStream(bufferedInputStream); + inputStream = compressorInputStream; + } catch (CompressorException e) { + LOGGER.warn(e.getMessage()); + inputStream = bufferedInputStream; + } + + byte[] inputBytes = inputStream.readNBytes(VIRTUALSIZE_HEADER_LOCATION + VIRTUALSIZE_HEADER_LENGTH); + + ByteBuffer inputMagicBytes = ByteBuffer.allocate(MAGIC_HEADER_LENGTH); + inputMagicBytes.put(inputBytes, 0, MAGIC_HEADER_LENGTH); + + ByteBuffer qcow2MagicBytes = ByteBuffer.allocate(MAGIC_HEADER_LENGTH); + qcow2MagicBytes.put("QFI".getBytes(Charset.forName("UTF-8"))); + qcow2MagicBytes.put((byte)0xfb); + + long virtualSize = 0L; + // Validate the header magic bytes + if (qcow2MagicBytes.compareTo(inputMagicBytes) == 0) { + ByteBuffer virtualSizeBytes = ByteBuffer.allocate(VIRTUALSIZE_HEADER_LENGTH); + virtualSizeBytes.put(inputBytes, VIRTUALSIZE_HEADER_LOCATION, VIRTUALSIZE_HEADER_LENGTH); + virtualSize = virtualSizeBytes.getLong(0); + } + + return virtualSize; + } catch (MalformedURLException e) { + LOGGER.warn("Failed to validate for qcow2, malformed URL: " + urlStr + ", error: " + e.getMessage()); + throw new IllegalArgumentException("Invalid URL: " + urlStr); + } catch (IOException e) { + LOGGER.warn("Failed to validate for qcow2, error: " + e.getMessage()); + throw new IllegalArgumentException("Failed to connect URL: " + urlStr); + } finally { + if (inputStream != null) { + try { + inputStream.close(); + } catch (final IOException e) { + LOGGER.warn("Failed to close input stream due to: " 
+ e.getMessage()); + } + } + } + } } \ No newline at end of file