Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

ceph-iscsi: add erasure pool support #237

Open
wants to merge 4 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
51 changes: 39 additions & 12 deletions README
Original file line number Diff line number Diff line change
Expand Up @@ -17,38 +17,48 @@ o- / ...........................................................................
o- cluster .................................................................. [Clusters: 1]
| o- ceph ..................................................................... [HEALTH_OK]
| o- pools ................................................................... [Pools: 3]
| | o- ec ........................................ [(2+1), Commit: 0b/40G (0%), Used: 0b]
| | o- ec ....................................... [(2+2), Commit: 0b/40G (0%), Used: 24K]
| | o- iscsi ..................................... [(x3), Commit: 0b/20G (0%), Used: 18b]
| | o- rbd ....................................... [(x3), Commit: 8G/20G (40%), Used: 5K]
| | o- rep ....................................... [(x3), Commit: 8G/20G (40%), Used: 5K]
| o- topology ......................................................... [OSDs: 3,MONs: 3]
o- disks ................................................................... [8G, Disks: 5]
| o- rbd ....................................................................... [rbd (8G)]
| o- disk_1 ............................................................... [disk_1 (1G)]
| o- disk_2 ............................................................... [disk_2 (2G)]
| o- disk_3 ............................................................... [disk_3 (2G)]
| o- disk_4 ............................................................... [disk_4 (1G)]
| o- disk_5 ............................................................... [disk_5 (2G)]
| o- rep ....................................................................... [rep (8G)]
| o- disk_1 ........................................................... [rep/disk_1 (1G)]
| o- disk_2 ........................................................... [rep/disk_2 (2G)]
| o- disk_3 ........................................................... [rep/disk_3 (2G)]
| o- disk_4 ........................................................ [rep+ec/disk_4 (1G)]
| o- disk_5 ........................................................ [rep+ec/disk_5 (2G)]
o- iscsi-targets ............................................................. [Targets: 1]
o- iqn.2003-01.com.redhat.iscsi-gw:ceph-gw1 ................... [Auth: CHAP, Gateways: 2]
| o- disks ................................................................... [Disks: 1]
| | o- rbd/disk_1 .............................................. [Owner: rh7-gw2, Lun: 0]
| | o- rep/disk_1 .............................................. [Owner: rh7-gw2, Lun: 0]
| o- gateways ..................................................... [Up: 2/2, Portals: 2]
| | o- rh7-gw1 .................................................... [192.168.122.69 (UP)]
| | o- rh7-gw2 .................................................... [192.168.122.14 (UP)]
o- host-groups ........................................................... [Groups : 0]
o- hosts ................................................ [Auth: ACL_ENABLED, Hosts: 1]
| o- iqn.1994-05.com.redhat:rh7-client .......... [LOGGED-IN, Auth: CHAP, Disks: 1(2G)]
| o- lun 0 ......................................... [rbd.disk_1(2G), Owner: rh7-gw2]
| o- lun 0 ......................................... [rep/disk_1(2G), Owner: rh7-gw2]
o- iqn.2003-01.com.redhat.iscsi-gw:ceph-gw2 ................... [Auth: None, Gateways: 2]
o- disks ................................................................... [Disks: 1]
| o- rbd/disk_2 .............................................. [Owner: rh7-gw1, Lun: 0]
| o- rep/disk_2 .............................................. [Owner: rh7-gw1, Lun: 0]
o- gateways ..................................................... [Up: 2/2, Portals: 2]
| o- rh7-gw1 ................................................... [2006:ac81::1103 (UP)]
| o- rh7-gw2 ................................................... [2006:ac81::1104 (UP)]
o- host-groups ........................................................... [Groups : 0]
o- hosts ................................................ [Auth: ACL_ENABLED, Hosts: 1]
o- iqn.1994-05.com.redhat:rh7-client .......... [LOGGED-IN, Auth: None, Disks: 1(2G)]
o- lun 0 ......................................... [rbd.disk_2(2G), Owner: rh7-gw1]
o- lun 0 ......................................... [rep/disk_2(2G), Owner: rh7-gw1]
o- iqn.2003-01.com.redhat.iscsi-gw:ceph-gw3 ................... [Auth: None, Gateways: 2]
o- disks ................................................................... [Disks: 1]
| o- rep/disk_4 .............................................. [Owner: rh7-gw2, Lun: 0]
o- gateways ..................................................... [Up: 2/2, Portals: 2]
| o- rh7-gw1 ................................................... [2006:ac81::1103 (UP)]
| o- rh7-gw2 ................................................... [2006:ac81::1104 (UP)]
o- host-groups ........................................................... [Groups : 0]
o- hosts ................................................ [Auth: ACL_ENABLED, Hosts: 1]
o- iqn.1994-05.com.redhat:rh7-client .......... [LOGGED-IN, Auth: None, Disks: 1(1G)]
o- lun 0 ......................................... [rep/disk_4(1G), Owner: rh7-gw1]



Expand Down Expand Up @@ -95,6 +105,23 @@ curl --user admin:admin -d ip_address=2006:ac81::1104 \
NOTE: please make sure both the IPv4 and IPv6 addresses are in the trusted
ip list in iscsi-gateway.cfg.

Erasure Pool Support:
For the erasure pool, you need to specify the "datapool=<erasure pool name>" parameter to store the
data when creating a disk, and the "pool=<name>" will continue to be a replicated pool, which will
store the metadata only.

curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d datapool=ec -d count=5
-X PUT http://192.168.122.69:5000/api/disk/rbd/new0_
curl --user admin:admin -d mode=create -d size=1g -d pool=rbd -d datapool=ec -d create_image=false
-X PUT http://192.168.122.69:5000/api/disk/rbd/new1
curl --user admin:admin -X GET http://192.168.122.69:5000/api/disk/rbd/new2
curl --user admin:admin -X DELETE http://192.168.122.69:5000/api/disk/rbd/new3

curl --user admin:admin -d mode=create
-X PUT http://192.168.122.69:5000/api/disksnap/rbd/image/new1
curl --user admin:admin
-X DELETE http://192.168.122.69:5000/api/disksnap/rbd/image/new1


## Installation
### Via RPM
Expand Down
20 changes: 5 additions & 15 deletions ceph_iscsi_config/gateway.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
import subprocess
import netifaces

from rtslib_fb.utils import RTSLibError
Expand All @@ -11,7 +10,7 @@
from ceph_iscsi_config.lun import LUN
from ceph_iscsi_config.client import GWClient
from ceph_iscsi_config.lio import LIO
from ceph_iscsi_config.utils import this_host, CephiSCSIError
from ceph_iscsi_config.utils import this_host, CephiSCSIError, run_shell_cmd

__author__ = '[email protected]'

Expand All @@ -26,15 +25,6 @@ def __init__(self, logger, config, name=None):
else:
self.hostname = this_host()

def _run_ceph_cmd(self, cmd, stderr=None, shell=True):
if not stderr:
stderr = subprocess.STDOUT
try:
result = subprocess.check_output(cmd, stderr=stderr, shell=shell)
except subprocess.CalledProcessError as err:
return None, err
return result, None

def ceph_rm_blocklist(self, blocklisted_ip):
"""
Issue a ceph osd blocklist rm command for a given IP on this host
Expand All @@ -46,13 +36,13 @@ def ceph_rm_blocklist(self, blocklisted_ip):
"{}".format(blocklisted_ip))

conf = settings.config
result, err = self._run_ceph_cmd(
result, err = run_shell_cmd(
"ceph -n {client_name} --conf {cephconf} osd blocklist rm "
"{blocklisted_ip}".format(blocklisted_ip=blocklisted_ip,
client_name=conf.cluster_client_name,
cephconf=conf.cephconf))
if err:
result, err = self._run_ceph_cmd(
result, err = run_shell_cmd(
"ceph -n {client_name} --conf {cephconf} osd blacklist rm "
"{blocklisted_ip}".format(blocklisted_ip=blocklisted_ip,
client_name=conf.cluster_client_name,
Expand Down Expand Up @@ -86,13 +76,13 @@ def osd_blocklist_cleanup(self):

# NB. Need to use the stderr override to catch the output from
# the command
blocklist, err = self._run_ceph_cmd(
blocklist, err = run_shell_cmd(
"ceph -n {client_name} --conf {cephconf} osd blocklist ls".
format(client_name=conf.cluster_client_name,
cephconf=conf.cephconf))

if err:
blocklist, err = self._run_ceph_cmd(
blocklist, err = run_shell_cmd(
"ceph -n {client_name} --conf {cephconf} osd blacklist ls".
format(client_name=conf.cluster_client_name,
cephconf=conf.cephconf))
Expand Down
89 changes: 71 additions & 18 deletions ceph_iscsi_config/lun.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import json
import rados
import rbd
import re
Expand All @@ -13,8 +14,8 @@
from ceph_iscsi_config.backstore import USER_RBD
from ceph_iscsi_config.utils import (convert_2_bytes, gen_control_string,
valid_size, get_pool_id, ip_addresses,
get_pools, get_rbd_size, this_host,
human_size, CephiSCSIError)
get_pools, get_rbd_size, run_shell_cmd,
human_size, CephiSCSIError, this_host)
from ceph_iscsi_config.gateway_object import GWObject
from ceph_iscsi_config.target import GWTarget
from ceph_iscsi_config.client import GWClient, CHAP
Expand Down Expand Up @@ -46,13 +47,14 @@ class RBDDev(object):
]
}

def __init__(self, image, size, backstore, pool=None):
def __init__(self, image, size, backstore, pool=None, datapool=None):
self.image = image
self.size_bytes = convert_2_bytes(size)
self.backstore = backstore
if pool is None:
pool = settings.config.pool
self.pool = pool
self.datapool = datapool
self.pool_id = get_pool_id(pool_name=self.pool)
self.error = False
self.error_msg = ''
Expand All @@ -74,14 +76,14 @@ def create(self):
self.image,
self.size_bytes,
features=RBDDev.default_features(self.backstore),
old_format=False)
old_format=False,
data_pool=self.datapool)

except (rbd.ImageExists, rbd.InvalidArgument) as err:
self.error = True
self.error_msg = ("Failed to create rbd image {} in "
"pool {} : {}".format(self.image,
self.pool,
err))
self.error_msg = ("Failed to create rbd image {} in pool {}, "
"datapool {} : {}".format(self.image, self.pool,
self.datapool, err))

def delete(self):
"""
Expand Down Expand Up @@ -289,11 +291,12 @@ class LUN(GWObject):
USER_RBD: TCMU_SETTINGS
}

def __init__(self, logger, pool, image, size, allocating_host,
def __init__(self, logger, pool, datapool, image, size, allocating_host,
backstore, backstore_object_name):
self.logger = logger
self.image = image
self.pool = pool
self.datapool = datapool
self.pool_id = 0
self.size_bytes = convert_2_bytes(size)
self.config_key = '{}/{}'.format(self.pool, self.image)
Expand Down Expand Up @@ -351,7 +354,7 @@ def remove_lun(self, preserve_image):
if self.error:
return

rbd_image = RBDDev(self.image, '0G', self.backstore, self.pool)
rbd_image = RBDDev(self.image, '0G', self.backstore, self.pool, self.datapool)

if local_gw == self.allocating_host:
# by using the allocating host we ensure the delete is not
Expand Down Expand Up @@ -574,6 +577,42 @@ def activate(self):
if client_err:
raise CephiSCSIError(client_err)

def _erasure_pool_check(self):
# skip the check and return True if no datapool was specified
if not self.datapool:
return True
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

A data pool doesn't have to be an EC pool. A user may want to create an image with a separate replicated pool. So two things:

  • _erasure_pool_check() should check the pool type and, only if the pool is EC, check whether allow_ec_overwrites is enabled
  • The objectstore check should be removed. As I already noted in ceph-iscsi: add erasure pool support #237 (comment), enabling allow_ec_overwrites on any object store other than bluestore isn't possible in normal configurations so allow_ec_overwrites check would be sufficient.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sure, will fix it.


# if the datapool is not erasure code pool return True
data, err = run_shell_cmd(
"ceph -n {name} --conf {conf} osd dump --format=json".
format(name=settings.config.cluster_client_name,
conf=settings.config.cephconf))
if err:
self.logger.error("Cannot get the pool type for datapool")
return False
for _pool in json.loads(data)['pools']:
if _pool['pool_name'] == self.datapool:
if _pool['type'] == 3:
break
else:
self.logger.debug(f"datapool {self.datapool} is not an erasure pool")
return True

# check whether allow_ec_overwrites is enabled for erasure code pool
data, err = run_shell_cmd(
"ceph -n {name} --conf {conf} osd pool get {pool} allow_ec_overwrites -f json".
format(name=settings.config.cluster_client_name,
conf=settings.config.cephconf, pool=self.datapool))
if err:
self.logger.error(f"Cannot get allow_ec_overwrites from pool ({self.datapool})")
return False
result = json.loads(data)
if result['allow_ec_overwrites']:
self.logger.debug(f"erasure pool ({self.datapool}) allow_ec_overwrites is enabled")
return True
self.logger.debug(f"erasure pool ({self.datapool}) allow_ec_overwrites is disabled")
return False

def allocate(self, keep_dev_in_lio=True, in_wwn=None):
"""
Create image and add to LIO and config.
Expand All @@ -583,6 +622,9 @@ def allocate(self, keep_dev_in_lio=True, in_wwn=None):
:return: LIO storage object if successful and keep_dev_in_lio=True
else None.
"""
if not self._erasure_pool_check():
return None

self.logger.debug("LUN.allocate starting, listing rbd devices")
disk_list = RBDDev.rbd_list(pool=self.pool)
self.logger.debug("rados pool '{}' contains the following - "
Expand All @@ -593,7 +635,8 @@ def allocate(self, keep_dev_in_lio=True, in_wwn=None):
"allocations is {}".format(local_gw,
self.allocating_host))

rbd_image = RBDDev(self.image, self.size_bytes, self.backstore, self.pool)
rbd_image = RBDDev(self.image, self.size_bytes, self.backstore, self.pool,
self.datapool)
self.pool_id = rbd_image.pool_id

# if the image required isn't defined, create it!
Expand Down Expand Up @@ -703,6 +746,7 @@ def allocate(self, keep_dev_in_lio=True, in_wwn=None):
disk_attr = {"wwn": wwn,
"image": self.image,
"pool": self.pool,
"datapool": self.datapool,
"allocating_host": self.allocating_host,
"pool_id": rbd_image.pool_id,
"controls": self.controls,
Expand Down Expand Up @@ -963,7 +1007,10 @@ def valid_disk(ceph_iscsi_config, logger, **kwargs):

:param ceph_iscsi_config: Config object
:param logger: logger object
:param image_id: (str) <pool>.<image> format
:param pool: (str) pool name
:param datapool: (str) datapool name
:param image: (str) image name
:param size: (str) size
:return: (str) either 'ok' or an error description
"""

Expand Down Expand Up @@ -993,12 +1040,16 @@ def valid_disk(ceph_iscsi_config, logger, **kwargs):

config = ceph_iscsi_config.config

datapool = kwargs.get('datapool', None)
disk_key = "{}/{}".format(kwargs['pool'], kwargs['image'])

if mode in ['create', 'resize']:

if kwargs['pool'] not in get_pools():
return "pool name is invalid"
_pools = get_pools()
if kwargs['pool'] not in _pools:
return "pool '{}' doesn't exist".format(kwargs['pool'])
if datapool and datapool not in _pools:
return "datapool '{}' doesn't exist".format(datapool)

if mode == 'create':
if kwargs['size'] and not valid_size(kwargs['size']):
Expand All @@ -1010,6 +1061,8 @@ def valid_disk(ceph_iscsi_config, logger, **kwargs):
disk_regex = re.compile(r"^[a-zA-Z0-9\-_\.]+$")
if not disk_regex.search(kwargs['pool']):
return "Invalid pool name (use alphanumeric, '_', '.', or '-' characters)"
if datapool and not disk_regex.search(datapool):
return "Invalid datapool name (use alphanumeric, '_', '.', or '-' characters)"
if not disk_regex.search(kwargs['image']):
return "Invalid image name (use alphanumeric, '_', '.', or '-' characters)"

Expand Down Expand Up @@ -1040,9 +1093,7 @@ def valid_disk(ceph_iscsi_config, logger, **kwargs):
if mode in ["resize", "delete", "reconfigure"]:
# disk must exist in the config
if disk_key not in config['disks']:
return ("rbd {}/{} is not defined to the "
"configuration".format(kwargs['pool'],
kwargs['image']))
return ("rbd {} is not defined to the configuration".format(disk_key))

if mode == 'resize':

Expand Down Expand Up @@ -1231,13 +1282,15 @@ def define_luns(logger, config, target):
for disk_key in pool_disks:

pool, image_name = disk_key.split('/')

with rbd.Image(ioctx, image_name) as rbd_image:

disk_config = config.config['disks'][disk_key]
datapool = disk_config.get('datapool', None)
backstore = disk_config['backstore']
backstore_object_name = disk_config['backstore_object_name']

lun = LUN(logger, pool, image_name,
lun = LUN(logger, pool, datapool, image_name,
rbd_image.size(), local_gw, backstore,
backstore_object_name)

Expand Down
14 changes: 7 additions & 7 deletions ceph_iscsi_config/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,14 +28,14 @@ class CephiSCSIInval(CephiSCSIError):
pass


def shellcommand(command_string):

def run_shell_cmd(cmd, stderr=None, shell=True):
if not stderr:
stderr = subprocess.STDOUT
try:
response = subprocess.check_output(command_string, shell=True)
except subprocess.CalledProcessError:
return None
else:
return response
result = subprocess.check_output(cmd, stderr=stderr, shell=shell)
except subprocess.CalledProcessError as err:
return None, err
return result, None


def normalize_ip_address(ip_address):
Expand Down
Loading