From 710279cfb44ce737ed3a26b370e0428878d38a57 Mon Sep 17 00:00:00 2001
From: Rushikesh Jadhav
Date: Wed, 12 Feb 2025 19:16:34 +0530
Subject: [PATCH] feat(storage/linstor): Enhance multi-disk support and
 provisioning flexibility in Linstor SR tests

- Added `sr_disks_for_all_hosts` fixture to support multiple disks, ensuring
  the selected disks are available on every host in the pool and resolving
  "auto" selection dynamically.
- Renamed the `lvm_disk` fixture to `lvm_disks` and updated it to provision
  multiple disks collectively, refining the `pvcreate` and `vgcreate` logic.
- Introduced `provisioning_type` and `storage_pool_name` fixtures to configure
  storage provisioning dynamically (thin or thick).
- Refactored the Linstor SR test cases to use the new fixtures, extending
  coverage to both provisioning types.
- Sped up Linstor installation and cleanup with concurrent execution across
  pool hosts, reducing setup time.
- Improved validation and logging of disk selection.

Signed-off-by: Rushikesh Jadhav
---
 conftest.py                              | 37 +++++++++++-
 tests/storage/linstor/conftest.py        | 72 ++++++++++++++++--------
 tests/storage/linstor/test_linstor_sr.py | 24 ++++----
 3 files changed, 99 insertions(+), 34 deletions(-)
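Reviewer note: the disk selection added to conftest.py below reduces to an
intersection of the free disks reported by each pool member. A minimal,
self-contained sketch of that logic (the function name and inputs are ours,
for illustration only, not part of the patch):

    def select_candidates(requested, disks_per_host):
        # disks_per_host[0] is the master host; "auto" means "use whatever
        # is free everywhere", explicit names must be free everywhere.
        master_disks = disks_per_host[0]
        if "auto" not in requested:
            master_disks = [d for d in requested if d in master_disks]
        candidates = list(master_disks)
        for other_disks in disks_per_host[1:]:
            candidates = [d for d in candidates if d in other_disks]
        return candidates

    assert select_candidates(["auto"], [["sdb", "sdc"], ["sdb", "sdc", "sdd"]]) == ["sdb", "sdc"]
    assert select_candidates(["sdb"], [["sdb", "sdc"], ["sdb", "sdd"]]) == ["sdb"]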
diff --git a/conftest.py b/conftest.py
index 67955a20d..cd3906659 100644
--- a/conftest.py
+++ b/conftest.py
@@ -307,7 +307,7 @@ def sr_disk_for_all_hosts(pytestconfig, request, host):
         pytest.fail("This test requires exactly one --sr-disk parameter")
     disk = disks[0]
     master_disks = host.available_disks()
-    assert len(master_disks) > 0, "a free disk device is required on the master host"
+    assert len(master_disks) > 0, "A free disk device is required on the master host"
 
     if disk != "auto":
         assert disk in master_disks, \
@@ -329,6 +329,41 @@ def sr_disk_for_all_hosts(pytestconfig, request, host):
     logging.info(f">> Disk or block device {disk} is present and free on all pool members")
     yield candidates[0]
 
+@pytest.fixture(scope='session')
+def sr_disks_for_all_hosts(pytestconfig, request, host):
+    disks = pytestconfig.getoption("sr_disk")
+    assert len(disks) > 0, "This test requires at least one --sr-disk parameter"
+    # Fetch available disks on the master host
+    master_disks = host.available_disks()
+    assert len(master_disks) > 0, "A free disk device is required on the master host"
+
+    if "auto" not in disks:
+        # Validate that all specified disks exist on the master host
+        for disk in disks:
+            assert disk in master_disks, \
+                f"Disk or block device {disk} is either not present or already used on the master host"
+        master_disks = [disk for disk in disks if disk in master_disks]
+
+    candidates = list(master_disks)
+
+    # Keep only the disks that are available on all hosts in the pool
+    for h in host.pool.hosts[1:]:
+        other_disks = h.available_disks()
+        candidates = [d for d in candidates if d in other_disks]
+
+    if "auto" in disks:
+        # Automatically select disks if "auto" is passed
+        assert len(candidates) > 0, \
+            f"Free disk devices are required on all pool members. Pool master has: {' '.join(master_disks)}."
+        logging.info(f">> Found free disk device(s) on all pool hosts: {' '.join(candidates)}. "
+                     f"Using: {', '.join(candidates)}")
+    else:
+        # Ensure the specified disks are free on all hosts
+        assert len(candidates) == len(disks), \
+            f"Some specified disks ({', '.join(disks)}) are not free or available on all hosts."
+        logging.info(f">> Disk(s) {', '.join(candidates)} are present and free on all pool members")
+    yield candidates
+
 @pytest.fixture(scope='module')
 def vm_ref(request):
     ref = request.param
diff --git a/tests/storage/linstor/conftest.py b/tests/storage/linstor/conftest.py
index 66a145867..193649a52 100644
--- a/tests/storage/linstor/conftest.py
+++ b/tests/storage/linstor/conftest.py
@@ -12,42 +12,60 @@
 LINSTOR_PACKAGE = 'xcp-ng-linstor'
 
 @pytest.fixture(scope='package')
-def lvm_disk(host, sr_disk_for_all_hosts):
-    device = '/dev/' + sr_disk_for_all_hosts
+def lvm_disks(host, sr_disks_for_all_hosts, provisioning_type):
+    devices = [f"/dev/{disk}" for disk in sr_disks_for_all_hosts]
     hosts = host.pool.hosts
 
     for host in hosts:
-        try:
-            host.ssh(['pvcreate', '-ff', '-y', device])
-        except commands.SSHCommandFailed as e:
-            if e.stdout.endswith('Mounted filesystem?'):
-                host.ssh(['vgremove', '-f', GROUP_NAME, '-y'])
-                host.ssh(['pvcreate', '-ff', '-y', device])
-            elif e.stdout.endswith('excluded by a filter.'):
-                host.ssh(['wipefs', '-a', device])
-                host.ssh(['pvcreate', '-ff', '-y', device])
-            else:
-                raise e
+        for device in devices:
+            try:
+                host.ssh(['pvcreate', '-ff', '-y', device])
+            except commands.SSHCommandFailed as e:
+                if e.stdout.endswith('Mounted filesystem?'):
+                    host.ssh(['vgremove', '-f', GROUP_NAME, '-y'])
+                    host.ssh(['pvcreate', '-ff', '-y', device])
+                elif e.stdout.endswith('excluded by a filter.'):
+                    host.ssh(['wipefs', '-a', device])
+                    host.ssh(['pvcreate', '-ff', '-y', device])
+                else:
+                    raise e
 
-        host.ssh(['vgcreate', GROUP_NAME, device])
-        host.ssh(['lvcreate', '-l', '100%FREE', '-T', STORAGE_POOL_NAME])
+        host.ssh(['vgcreate', GROUP_NAME] + devices)
+        if provisioning_type == 'thin':
+            host.ssh(['lvcreate', '-l', '100%FREE', '-T', STORAGE_POOL_NAME])
 
-    yield device
+    yield devices
 
     for host in hosts:
         host.ssh(['vgremove', '-f', GROUP_NAME])
-        host.ssh(['pvremove', device])
+        for device in devices:
+            host.ssh(['pvremove', device])
+
+@pytest.fixture(scope="package")
+def storage_pool_name(provisioning_type):
+    return GROUP_NAME if provisioning_type == "thick" else STORAGE_POOL_NAME
+
+@pytest.fixture(params=["thin", "thick"], scope="session")
+def provisioning_type(request):
+    return request.param
 
 @pytest.fixture(scope='package')
-def pool_with_linstor(hostA2, lvm_disk, pool_with_saved_yum_state):
+def pool_with_linstor(hostA2, lvm_disks, pool_with_saved_yum_state):
+    import concurrent.futures
     pool = pool_with_saved_yum_state
-    for host in pool.hosts:
+
+    def ensure_linstor_not_installed(host):
         if host.is_package_installed(LINSTOR_PACKAGE):
             raise Exception(
                 f'{LINSTOR_PACKAGE} is already installed on host {host}. This should not be the case.'
             )
-    for host in pool.hosts:
+
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        # Consume the map() iterator so worker exceptions propagate
+        list(executor.map(ensure_linstor_not_installed, pool.hosts))
+
+    def install_linstor(host):
+        logging.info(f"Installing {LINSTOR_PACKAGE} on host {host}...")
         host.yum_install([LINSTOR_RELEASE_PACKAGE])
         host.yum_install([LINSTOR_PACKAGE], enablerepo="xcp-ng-linstor-testing")
         # Needed because the linstor driver is not in the xapi sm-plugins list
@@ -55,14 +73,24 @@ def pool_with_linstor(hostA2, lvm_disks, pool_with_saved_yum_state):
         host.ssh(["systemctl", "restart", "multipathd"])
         host.restart_toolstack(verify=True)
 
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        list(executor.map(install_linstor, pool.hosts))
+
     yield pool
 
+    def remove_linstor(host):
+        logging.info(f"Cleaning up {LINSTOR_PACKAGE} from host {host}...")
+        host.yum_remove([LINSTOR_PACKAGE])
+
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        list(executor.map(remove_linstor, pool.hosts))
+
 @pytest.fixture(scope='package')
-def linstor_sr(pool_with_linstor):
+def linstor_sr(pool_with_linstor, provisioning_type, storage_pool_name):
     sr = pool_with_linstor.master.sr_create('linstor', 'LINSTOR-SR-test', {
-        'group-name': STORAGE_POOL_NAME,
+        'group-name': storage_pool_name,
         'redundancy': str(min(len(pool_with_linstor.hosts), 3)),
-        'provisioning': 'thin'
+        'provisioning': provisioning_type
     }, shared=True)
     yield sr
     sr.destroy()
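Reviewer note: Executor.map() returns a lazy iterator and only re-raises a
worker's exception once the corresponding result is consumed, which is why the
executor.map() calls above are wrapped in list(); an unconsumed map() would
silently drop failures from the install/cleanup helpers. A runnable
demonstration of the pitfall (boom() is ours, for illustration only):

    import concurrent.futures

    def boom(x):
        raise RuntimeError(f"failed on {x}")

    with concurrent.futures.ThreadPoolExecutor() as executor:
        executor.map(boom, [1, 2])            # no exception surfaces here

    with concurrent.futures.ThreadPoolExecutor() as executor:
        try:
            list(executor.map(boom, [1, 2]))  # RuntimeError propagates
        except RuntimeError as e:
            print(f"caught: {e}")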
diff --git a/tests/storage/linstor/test_linstor_sr.py b/tests/storage/linstor/test_linstor_sr.py
index 3f05b5d37..3e65c6621 100644
--- a/tests/storage/linstor/test_linstor_sr.py
+++ b/tests/storage/linstor/test_linstor_sr.py
@@ -2,13 +2,13 @@
 import pytest
 import time
 
-from .conftest import STORAGE_POOL_NAME, LINSTOR_PACKAGE
+from .conftest import LINSTOR_PACKAGE
 from lib.commands import SSHCommandFailed
 from lib.common import wait_for, vm_image
 from tests.storage import vdi_is_open
 
 # Requirements:
-# - one XCP-ng host >= 8.2 with an additional unused disk for the SR
+# - two or more XCP-ng hosts >= 8.2 with additional unused disk(s) for the SR
 # - access to XCP-ng RPM repository from the host
 
 class TestLinstorSRCreateDestroy:
@@ -18,15 +18,15 @@ class TestLinstorSRCreateDestroy:
     and VM import.
     """
 
-    def test_create_sr_without_linstor(self, host, lvm_disk):
+    def test_create_sr_without_linstor(self, host, lvm_disks, provisioning_type, storage_pool_name):
         # This test must be the first in the series in this module
         assert not host.is_package_installed('python-linstor'), \
             "linstor must not be installed on the host at the beginning of the tests"
         try:
             sr = host.sr_create('linstor', 'LINSTOR-SR-test', {
-                'group-name': STORAGE_POOL_NAME,
+                'group-name': storage_pool_name,
                 'redundancy': '1',
-                'provisioning': 'thin'
+                'provisioning': provisioning_type
             }, shared=True)
             try:
                 sr.destroy()
@@ -36,13 +36,13 @@ def test_create_sr_without_linstor(self, host, lvm_disk):
         except SSHCommandFailed as e:
             logging.info("SR creation failed, as expected: {}".format(e))
 
-    def test_create_and_destroy_sr(self, pool_with_linstor):
+    def test_create_and_destroy_sr(self, pool_with_linstor, provisioning_type, storage_pool_name):
         # Create and destroy tested in the same test to leave the host as unchanged as possible
         master = pool_with_linstor.master
         sr = master.sr_create('linstor', 'LINSTOR-SR-test', {
-            'group-name': STORAGE_POOL_NAME,
+            'group-name': storage_pool_name,
             'redundancy': '1',
-            'provisioning': 'thin'
+            'provisioning': provisioning_type
         }, shared=True)
         # import a VM in order to detect vm import issues here rather than in the vm_on_linstor_sr fixture used in
         # the next tests, because errors in fixtures break teardown
@@ -65,7 +65,7 @@ def test_start_and_shutdown_VM(self, vm_on_linstor_sr):
         vm = vm_on_linstor_sr
         vm.start()
         vm.wait_for_os_booted()
-        vm.shutdown(verify=True)
+        vm.shutdown(verify=True, force_if_fails=True)
 
     @pytest.mark.small_vm
     @pytest.mark.big_vm
@@ -147,7 +147,7 @@ def _ensure_resource_remain_diskless(host, controller_option, volume_name, diskl
 
 class TestLinstorDisklessResource:
     @pytest.mark.small_vm
-    def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr):
+    def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr, storage_pool_name):
         vm = vm_on_linstor_sr
         vdi_uuids = vm.vdi_uuids(sr_uuid=linstor_sr.uuid)
         vdi_uuid = vdi_uuids[0]
@@ -157,10 +157,12 @@ def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr):
         for member in host.pool.hosts:
             controller_option += f"{member.hostname_or_ip},"
 
+        sr_group_name = "xcp-sr-" + storage_pool_name.replace("/", "_")
+
         # Get volume name from VDI uuid
         # "xcp/volume/{vdi_uuid}/volume-name": "{volume_name}"
         output = host.ssh([
-            "linstor-kv-tool", "--dump-volumes", "-g", "xcp-sr-linstor_group_thin_device",
+            "linstor-kv-tool", "--dump-volumes", "-g", sr_group_name,
             "|", "grep", "volume-name", "|", "grep", vdi_uuid
         ])
         volume_name = output.split(': ')[1].split('"')[1]