feat(storage/linstor): Add provisioning type support and multi-disk enhancements for Linstor SR tests

- Introduced a `--provisioning` pytest option to select the provisioning type (`thin` or `thick`) for the Linstor SR; see the usage sketch below the change summary.
- Added a session-scoped `sr_disks_for_all_hosts` fixture supporting multiple disks, with validation for both "auto" selection and explicitly listed disks.
- Modified `lvm_disk` to handle multiple devices, running `pvcreate` on each disk and a single `vgcreate` over all of them.
- Added `provisioning_type` and `storage_pool_name` pytest fixtures for dynamic SR configuration.
- Updated Linstor SR test cases to use `provisioning_type` and `storage_pool_name` fixtures.
- Improved disk validation and logging for multi-disk configurations.

Signed-off-by: Rushikesh Jadhav <[email protected]>
rushikeshjadhav committed Feb 5, 2025
1 parent b015d4a commit e0d1dfe
Showing 3 changed files with 89 additions and 29 deletions.
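
Before the per-file diffs, a minimal, hedged sketch of how the new option could be exercised from a local runner. The test path, the disk names, and the assumption that --sr-disk may be repeated (implied by the list handling in the new fixture) are illustrative, not documented CLI of this repository:

# Hypothetical local runner for the Linstor storage tests with the new option.
# Everything environment-specific below (paths, disk names) is an assumption.
import sys
import pytest

args = [
    "tests/storage/linstor",      # assumed location of the Linstor SR tests
    "--sr-disk=sdb",              # first free disk, or "auto" for auto-detection
    "--sr-disk=sdc",              # repeating the flag is assumed to append to the list
    "--provisioning=thick",       # new option from this commit; defaults to "thin"
]

sys.exit(pytest.main(args))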
42 changes: 42 additions & 0 deletions conftest.py
@@ -75,6 +75,13 @@ def pytest_addoption(parser):
              "4KiB blocksize to be formatted and used in storage tests. "
              "Set it to 'auto' to let the fixtures auto-detect available disks."
     )
+    parser.addoption(
+        "--provisioning",
+        action="store",
+        default="thin",
+        choices=["thin", "thick"],
+        help="Set the provisioning type for Linstor SR (thin or thick). Default is thin.",
+    )
 
 def pytest_configure(config):
     global_config.ignore_ssh_banner = config.getoption('--ignore-ssh-banner')
@@ -329,6 +336,41 @@ def sr_disk_for_all_hosts(pytestconfig, request, host):
     logging.info(f">> Disk or block device {disk} is present and free on all pool members")
     yield candidates[0]
 
+@pytest.fixture(scope='session')
+def sr_disks_for_all_hosts(pytestconfig, request, host):
+    disks = pytestconfig.getoption("sr_disk")
+    assert len(disks) > 0, "This test requires at least one --sr-disk parameter"
+    # Fetch available disks on the master host
+    master_disks = host.available_disks()
+    assert len(master_disks) > 0, "a free disk device is required on the master host"
+
+    if "auto" not in disks:
+        # Validate that all specified disks exist on the master host
+        for disk in disks:
+            assert disk in master_disks, \
+                f"Disk or block device {disk} is either not present or already used on the master host"
+        master_disks = [disk for disk in disks if disk in master_disks]
+
+    candidates = list(master_disks)
+
+    # Check if all disks are available on all hosts in the pool
+    for h in host.pool.hosts[1:]:
+        other_disks = h.available_disks()
+        candidates = [d for d in candidates if d in other_disks]
+
+    if "auto" in disks:
+        # Automatically select disks if "auto" is passed
+        assert len(candidates) > 0, \
+            f"Free disk devices are required on all pool members. Pool master has: {' '.join(master_disks)}."
+        logging.info(f">> Found free disk device(s) on all pool hosts: {' '.join(candidates)}. "
+                     f"Using: {', '.join(candidates)}")
+    else:
+        # Ensure specified disks are free on all hosts
+        assert len(candidates) == len(disks), \
+            f"Some specified disks ({', '.join(disks)}) are not free or available on all hosts."
+        logging.info(f">> Disk(s) {', '.join(candidates)} are present and free on all pool members")
+    yield candidates
+
 @pytest.fixture(scope='module')
 def vm_ref(request):
     ref = request.param
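For orientation, a hedged sketch of a test consuming the new session-scoped fixture; the test body and names are illustrative and not part of this commit:

# Illustrative consumer of sr_disks_for_all_hosts: the fixture yields bare
# device names (e.g. "sdb") that are free on every pool member.
import logging

def test_log_selected_disks(host, sr_disks_for_all_hosts):
    devices = [f"/dev/{disk}" for disk in sr_disks_for_all_hosts]
    assert len(devices) >= 1, "the fixture guarantees at least one free disk"
    logging.info("Disks selected on pool of %s: %s",
                 host.hostname_or_ip, ", ".join(devices))
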
50 changes: 31 additions & 19 deletions tests/storage/linstor/conftest.py
@@ -12,31 +12,43 @@
 LINSTOR_PACKAGE = 'xcp-ng-linstor'
 
 @pytest.fixture(scope='package')
-def lvm_disk(host, sr_disk_for_all_hosts):
-    device = '/dev/' + sr_disk_for_all_hosts
+def lvm_disk(host, sr_disks_for_all_hosts, provisioning_type):
+    devices = [f"/dev/{disk}" for disk in sr_disks_for_all_hosts]
     hosts = host.pool.hosts
 
     for host in hosts:
-        try:
-            host.ssh(['pvcreate', '-ff', '-y', device])
-        except commands.SSHCommandFailed as e:
-            if e.stdout.endswith('Mounted filesystem?'):
-                host.ssh(['vgremove', '-f', GROUP_NAME, '-y'])
-            elif e.stdout.endswith('excluded by a filter.'):
-                host.ssh(['wipefs', '-a', device])
-                host.ssh(['pvcreate', '-ff', '-y', device])
-            else:
-                raise e
+        for device in devices:
+            try:
+                host.ssh(['pvcreate', '-ff', '-y', device])
+            except commands.SSHCommandFailed as e:
+                if e.stdout.endswith('Mounted filesystem?'):
+                    host.ssh(['vgremove', '-f', GROUP_NAME, '-y'])
+                    host.ssh(['pvcreate', '-ff', '-y', device])
+                elif e.stdout.endswith('excluded by a filter.'):
+                    host.ssh(['wipefs', '-a', device])
+                    host.ssh(['pvcreate', '-ff', '-y', device])
+                else:
+                    raise e
 
-        host.ssh(['vgcreate', GROUP_NAME, device])
-        host.ssh(['lvcreate', '-l', '100%FREE', '-T', STORAGE_POOL_NAME])
+        device_list = " ".join(devices)
+        host.ssh(['vgcreate', GROUP_NAME] + devices)
+        if provisioning_type == 'thin':
+            host.ssh(['lvcreate', '-l', '100%FREE', '-T', STORAGE_POOL_NAME])
 
-    yield device
+    yield devices
 
     for host in hosts:
         host.ssh(['vgremove', '-f', GROUP_NAME])
-        host.ssh(['pvremove', device])
+        for device in devices:
+            host.ssh(['pvremove', device])
 
+@pytest.fixture(scope="package")
+def storage_pool_name(provisioning_type):
+    return GROUP_NAME if provisioning_type == "thick" else STORAGE_POOL_NAME
+
+@pytest.fixture(scope="package")
+def provisioning_type(request):
+    return request.config.getoption("--provisioning")
 
 @pytest.fixture(scope='package')
 def pool_with_linstor(hostA2, lvm_disk, pool_with_saved_yum_state):
@@ -58,11 +70,11 @@ def pool_with_linstor(hostA2, lvm_disk, pool_with_saved_yum_state):
     yield pool
 
 @pytest.fixture(scope='package')
-def linstor_sr(pool_with_linstor):
+def linstor_sr(pool_with_linstor, provisioning_type, storage_pool_name):
     sr = pool_with_linstor.master.sr_create('linstor', 'LINSTOR-SR-test', {
-        'group-name': STORAGE_POOL_NAME,
+        'group-name': storage_pool_name,
         'redundancy': str(min(len(pool_with_linstor.hosts), 3)),
-        'provisioning': 'thin'
+        'provisioning': provisioning_type
     }, shared=True)
     yield sr
     sr.destroy()
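The two new fixtures above tie the CLI option to the SR configuration. Below is a condensed sketch of the resulting sr_create parameters under each mode; the GROUP_NAME and STORAGE_POOL_NAME values are assumed placeholders, the real constants live at the top of tests/storage/linstor/conftest.py:

# Condensed view of how provisioning_type drives the SR config in this file.
GROUP_NAME = "linstor_group"                      # assumed value of the module constant
STORAGE_POOL_NAME = GROUP_NAME + "/thin_device"   # assumed value of the module constant

def sr_config(provisioning_type, n_hosts):
    # Mirrors the storage_pool_name fixture plus the linstor_sr fixture's arguments.
    group_name = GROUP_NAME if provisioning_type == "thick" else STORAGE_POOL_NAME
    return {
        'group-name': group_name,
        'redundancy': str(min(n_hosts, 3)),
        'provisioning': provisioning_type,
    }

assert sr_config("thick", 3)['group-name'] == "linstor_group"
assert sr_config("thin", 2)['group-name'] == "linstor_group/thin_device"
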
26 changes: 16 additions & 10 deletions tests/storage/linstor/test_linstor_sr.py
@@ -18,15 +18,15 @@ class TestLinstorSRCreateDestroy:
     and VM import.
     """
 
-    def test_create_sr_without_linstor(self, host, lvm_disk):
+    def test_create_sr_without_linstor(self, host, lvm_disk, provisioning_type, storage_pool_name):
         # This test must be the first in the series in this module
         assert not host.is_package_installed('python-linstor'), \
             "linstor must not be installed on the host at the beginning of the tests"
         try:
            sr = host.sr_create('linstor', 'LINSTOR-SR-test', {
-                'group-name': STORAGE_POOL_NAME,
-                'redundancy': '1',
-                'provisioning': 'thin'
+                'group-name': storage_pool_name,
+                'redundancy': '2',
+                'provisioning': provisioning_type
             }, shared=True)
             try:
                 sr.destroy()
@@ -36,13 +36,13 @@ def test_create_sr_without_linstor(self, host, lvm_disk):
         except SSHCommandFailed as e:
             logging.info("SR creation failed, as expected: {}".format(e))
 
-    def test_create_and_destroy_sr(self, pool_with_linstor):
+    def test_create_and_destroy_sr(self, pool_with_linstor, provisioning_type, storage_pool_name):
         # Create and destroy tested in the same test to leave the host as unchanged as possible
         master = pool_with_linstor.master
         sr = master.sr_create('linstor', 'LINSTOR-SR-test', {
-            'group-name': STORAGE_POOL_NAME,
-            'redundancy': '1',
-            'provisioning': 'thin'
+            'group-name': storage_pool_name,
+            'redundancy': '2',
+            'provisioning': provisioning_type
         }, shared=True)
         # import a VM in order to detect vm import issues here rather than in the vm_on_linstor_sr fixture used in
         # the next tests, because errors in fixtures break teardown
@@ -147,7 +147,7 @@ def _ensure_resource_remain_diskless(host, controller_option, volume_name, diskless)
 
 class TestLinstorDisklessResource:
     @pytest.mark.small_vm
-    def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr):
+    def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr, provisioning_type):
         vm = vm_on_linstor_sr
         vdi_uuids = vm.vdi_uuids(sr_uuid=linstor_sr.uuid)
         vdi_uuid = vdi_uuids[0]
@@ -157,10 +157,16 @@ def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr):
         for member in host.pool.hosts:
             controller_option += f"{member.hostname_or_ip},"
 
+        # Determine the correct group name based on provisioning type
+        if provisioning_type == "thick":
+            group_name = "xcp-sr-linstor_group_device"
+        else:
+            group_name = "xcp-sr-linstor_group_thin_device"
+
         # Get volume name from VDI uuid
         # "xcp/volume/{vdi_uuid}/volume-name": "{volume_name}"
         output = host.ssh([
-            "linstor-kv-tool", "--dump-volumes", "-g", "xcp-sr-linstor_group_thin_device",
+            "linstor-kv-tool", "--dump-volumes", "-g", group_name,
             "|", "grep", "volume-name", "|", "grep", vdi_uuid
         ])
         volume_name = output.split(': ')[1].split('"')[1]
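The test above extracts the volume name by piping linstor-kv-tool output through grep on the host. As a side note, a hedged, self-contained sketch of the same lookup done client-side; the JSON key shape is taken from the comment in the test and the sample values are invented:

# Illustrative parsing of a linstor-kv-tool --dump-volumes dump, assuming it is
# JSON of the form {"xcp/volume/<vdi_uuid>/volume-name": "<volume_name>", ...}.
import json

def volume_name_for_vdi(dump: str, vdi_uuid: str) -> str:
    for key, value in json.loads(dump).items():
        if key.endswith("/volume-name") and vdi_uuid in key:
            return value
    raise KeyError(f"no volume-name entry found for VDI {vdi_uuid}")

sample = '{"xcp/volume/1234-abcd/volume-name": "xcp-volume-deadbeef"}'
assert volume_name_for_vdi(sample, "1234-abcd") == "xcp-volume-deadbeef"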
