feat(storage/linstor): Enhance multi-disk support and provisioning flexibility in Linstor SR tests #270

Open · wants to merge 1 commit into base: master
35 changes: 35 additions & 0 deletions conftest.py

@@ -329,6 +329,41 @@ def sr_disk_for_all_hosts(pytestconfig, request, host):
logging.info(f">> Disk or block device {disk} is present and free on all pool members")
yield candidates[0]

@pytest.fixture(scope='session')
def sr_disks_for_all_hosts(pytestconfig, request, host):
disks = pytestconfig.getoption("sr_disk")
assert len(disks) > 0, "This test requires at least one --sr-disk parameter"
# Fetch available disks on the master host
master_disks = host.available_disks()
assert len(master_disks) > 0, "a free disk device is required on the master host"

if "auto" not in disks:
# Validate that all specified disks exist on the master host
for disk in disks:
assert disk in master_disks, \
f"Disk or block device {disk} is either not present or already used on the master host"
master_disks = [disk for disk in disks if disk in master_disks]

candidates = list(master_disks)

# Check if all disks are available on all hosts in the pool
for h in host.pool.hosts[1:]:
other_disks = h.available_disks()
candidates = [d for d in candidates if d in other_disks]

if "auto" in disks:
# Automatically select disks if "auto" is passed
assert len(candidates) > 0, \
f"Free disk devices are required on all pool members. Pool master has: {' '.join(master_disks)}."
logging.info(f">> Found free disk device(s) on all pool hosts: {' '.join(candidates)}. "
f"Using: {', '.join(candidates)}")
else:
# Ensure specified disks are free on all hosts
assert len(candidates) == len(disks), \
f"Some specified disks ({', '.join(disks)}) are not free or available on all hosts."
logging.info(f">> Disk(s) {', '.join(candidates)} are present and free on all pool members")
yield candidates

@pytest.fixture(scope='module')
def vm_ref(request):
ref = request.param
Expand Down
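
Usage note (the command lines are illustrative, not part of this diff): the fixture reuses the repeated --sr-disk option, so a run either names the disks explicitly, e.g. pytest tests/storage/linstor --sr-disk=sdb --sr-disk=sdc, in which case every named disk must be free on all pool members, or passes --sr-disk=auto to select every disk that is free on every host. Unlike sr_disk_for_all_hosts, which yields a single disk name, sr_disks_for_all_hosts yields the full list of candidates.
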
70 changes: 49 additions & 21 deletions tests/storage/linstor/conftest.py

@@ -12,57 +12,85 @@
 LINSTOR_PACKAGE = 'xcp-ng-linstor'
 
 @pytest.fixture(scope='package')
-def lvm_disk(host, sr_disk_for_all_hosts):
-    device = '/dev/' + sr_disk_for_all_hosts
+def lvm_disk(host, sr_disks_for_all_hosts, provisioning_type):
+    devices = [f"/dev/{disk}" for disk in sr_disks_for_all_hosts]
     hosts = host.pool.hosts
 
     for host in hosts:
-        try:
-            host.ssh(['pvcreate', '-ff', '-y', device])
-        except commands.SSHCommandFailed as e:
-            if e.stdout.endswith('Mounted filesystem?'):
-                host.ssh(['vgremove', '-f', GROUP_NAME, '-y'])
-                host.ssh(['pvcreate', '-ff', '-y', device])
-            elif e.stdout.endswith('excluded by a filter.'):
-                host.ssh(['wipefs', '-a', device])
-                host.ssh(['pvcreate', '-ff', '-y', device])
-            else:
-                raise e
+        for device in devices:
+            try:
+                host.ssh(['pvcreate', '-ff', '-y', device])
+            except commands.SSHCommandFailed as e:
+                if e.stdout.endswith('Mounted filesystem?'):
+                    host.ssh(['vgremove', '-f', GROUP_NAME, '-y'])
+                    host.ssh(['pvcreate', '-ff', '-y', device])
+                elif e.stdout.endswith('excluded by a filter.'):
+                    host.ssh(['wipefs', '-a', device])
+                    host.ssh(['pvcreate', '-ff', '-y', device])
+                else:
+                    raise e
 
-        host.ssh(['vgcreate', GROUP_NAME, device])
-        host.ssh(['lvcreate', '-l', '100%FREE', '-T', STORAGE_POOL_NAME])
+        # One volume group spanning all selected disks
+        host.ssh(['vgcreate', GROUP_NAME] + devices)
+        if provisioning_type == 'thin':
+            host.ssh(['lvcreate', '-l', '100%FREE', '-T', STORAGE_POOL_NAME])
 
-    yield device
+    yield devices
 
     for host in hosts:
         host.ssh(['vgremove', '-f', GROUP_NAME])
-        host.ssh(['pvremove', device])
+        for device in devices:
+            host.ssh(['pvremove', device])
 
+@pytest.fixture(scope="package")
+def storage_pool_name(provisioning_type):
+    return GROUP_NAME if provisioning_type == "thick" else STORAGE_POOL_NAME
+
+@pytest.fixture(params=["thin", "thick"], scope="session")
+def provisioning_type(request):
+    return request.param
+
 @pytest.fixture(scope='package')
 def pool_with_linstor(hostA2, lvm_disk, pool_with_saved_yum_state):
+    import concurrent.futures
     pool = pool_with_saved_yum_state
-    for host in pool.hosts:
+
+    def is_linstor_installed(host):
         if host.is_package_installed(LINSTOR_PACKAGE):
             raise Exception(
                 f'{LINSTOR_PACKAGE} is already installed on host {host}. This should not be the case.'
             )
 
-    for host in pool.hosts:
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        # list() consumes the lazy map iterator so worker exceptions propagate here
+        list(executor.map(is_linstor_installed, pool.hosts))
+
+    def install_linstor(host):
         logging.info(f"Installing {LINSTOR_PACKAGE} on host {host}...")
         host.yum_install([LINSTOR_RELEASE_PACKAGE])
         host.yum_install([LINSTOR_PACKAGE], enablerepo="xcp-ng-linstor-testing")
         # Needed because the linstor driver is not in the xapi sm-plugins list
         # before installing the LINSTOR packages.
         host.ssh(["systemctl", "restart", "multipathd"])
         host.restart_toolstack(verify=True)
 
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        list(executor.map(install_linstor, pool.hosts))
+
     yield pool
 
+    def remove_linstor(host):
+        logging.info(f"Cleaning up {LINSTOR_PACKAGE} from host {host}...")
+        host.yum_remove([LINSTOR_PACKAGE])
+
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        list(executor.map(remove_linstor, pool.hosts))
+
 @pytest.fixture(scope='package')
-def linstor_sr(pool_with_linstor):
+def linstor_sr(pool_with_linstor, provisioning_type, storage_pool_name):
     sr = pool_with_linstor.master.sr_create('linstor', 'LINSTOR-SR-test', {
-        'group-name': STORAGE_POOL_NAME,
+        'group-name': storage_pool_name,
         'redundancy': str(min(len(pool_with_linstor.hosts), 3)),
-        'provisioning': 'thin'
+        'provisioning': provisioning_type
     }, shared=True)
     yield sr
     sr.destroy()
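
For concreteness, with thin provisioning and two disks the reworked lvm_disk fixture boils down to the following calls on each host. This is a sketch: the device names are invented, and the concrete values of GROUP_NAME and STORAGE_POOL_NAME ('linstor_group' and 'linstor_group/thin_device') are assumptions inferred from the hard-coded group names in test_linstor_sr.py below.

    # Sketch of the effective LVM setup for devices ['/dev/sdb', '/dev/sdc'] (invented names)
    host.ssh(['pvcreate', '-ff', '-y', '/dev/sdb'])
    host.ssh(['pvcreate', '-ff', '-y', '/dev/sdc'])
    # A single volume group now spans both physical volumes:
    host.ssh(['vgcreate', 'linstor_group', '/dev/sdb', '/dev/sdc'])
    # Thin provisioning only: a thin pool on top of the VG; thick provisioning
    # leaves the raw VG for LINSTOR to carve logical volumes from.
    host.ssh(['lvcreate', '-l', '100%FREE', '-T', 'linstor_group/thin_device'])

Because provisioning_type is parametrized with ["thin", "thick"], every test that depends on it, directly or through linstor_sr and storage_pool_name, now runs once per provisioning mode.
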
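A side note on the concurrency pattern in pool_with_linstor: concurrent.futures.Executor.map returns a lazy iterator, and an exception raised in a worker only propagates once its result is consumed, hence the list(...) around each map call above. A minimal, self-contained illustration:

    import concurrent.futures

    def check(host_name):
        raise RuntimeError(f"failure on {host_name}")

    with concurrent.futures.ThreadPoolExecutor() as executor:
        results = executor.map(check, ["host1", "host2"])  # returns at once, nothing raised yet
        list(results)  # raises RuntimeError from the first worker
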
31 changes: 19 additions & 12 deletions tests/storage/linstor/test_linstor_sr.py

@@ -8,7 +8,7 @@
 from tests.storage import vdi_is_open
 
 # Requirements:
-# - one XCP-ng host >= 8.2 with an additional unused disk for the SR
+# - two or more XCP-ng hosts >= 8.2 with additional unused disk(s) for the SR
 # - access to XCP-ng RPM repository from the host

@@ -18,15 +18,15 @@ class TestLinstorSRCreateDestroy:
     and VM import.
     """
 
-    def test_create_sr_without_linstor(self, host, lvm_disk):
+    def test_create_sr_without_linstor(self, host, lvm_disk, provisioning_type, storage_pool_name):
         # This test must be the first in the series in this module
         assert not host.is_package_installed('python-linstor'), \
             "linstor must not be installed on the host at the beginning of the tests"
         try:
             sr = host.sr_create('linstor', 'LINSTOR-SR-test', {
-                'group-name': STORAGE_POOL_NAME,
-                'redundancy': '1',
-                'provisioning': 'thin'
+                'group-name': storage_pool_name,
+                'redundancy': '2',
+                'provisioning': provisioning_type
             }, shared=True)
             try:
                 sr.destroy()

@@ -36,13 +36,13 @@ def test_create_sr_without_linstor(self, host, lvm_disk):
         except SSHCommandFailed as e:
             logging.info("SR creation failed, as expected: {}".format(e))
 
-    def test_create_and_destroy_sr(self, pool_with_linstor):
+    def test_create_and_destroy_sr(self, pool_with_linstor, provisioning_type, storage_pool_name):
         # Create and destroy tested in the same test to leave the host as unchanged as possible
         master = pool_with_linstor.master
         sr = master.sr_create('linstor', 'LINSTOR-SR-test', {
-            'group-name': STORAGE_POOL_NAME,
-            'redundancy': '1',
-            'provisioning': 'thin'
+            'group-name': storage_pool_name,
+            'redundancy': '2',
+            'provisioning': provisioning_type
         }, shared=True)
         # Import a VM here to detect VM import issues in this test rather than in the
         # vm_on_linstor_sr fixture used by the next tests, because errors in fixtures break teardown.

@@ -65,7 +65,7 @@ def test_start_and_shutdown_VM(self, vm_on_linstor_sr):
         vm = vm_on_linstor_sr
         vm.start()
         vm.wait_for_os_booted()
-        vm.shutdown(verify=True)
+        vm.shutdown(verify=True, force_if_fails=True)
 
     @pytest.mark.small_vm
     @pytest.mark.big_vm

@@ -147,7 +147,7 @@ def _ensure_resource_remain_diskless(host, controller_option, volume_name, diskl

 class TestLinstorDisklessResource:
     @pytest.mark.small_vm
-    def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr):
+    def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr, provisioning_type):
         vm = vm_on_linstor_sr
         vdi_uuids = vm.vdi_uuids(sr_uuid=linstor_sr.uuid)
         vdi_uuid = vdi_uuids[0]

@@ -157,10 +157,17 @@ def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr):
         for member in host.pool.hosts:
             controller_option += f"{member.hostname_or_ip},"
 
+        # Determine the LINSTOR group name for the current provisioning type
+        group_mapping = {
+            "thick": "xcp-sr-linstor_group_device",
+            "thin": "xcp-sr-linstor_group_thin_device"
+        }
+        group_name = group_mapping.get(provisioning_type, "unknown")
+
         # Get volume name from VDI uuid
         # "xcp/volume/{vdi_uuid}/volume-name": "{volume_name}"
         output = host.ssh([
-            "linstor-kv-tool", "--dump-volumes", "-g", "xcp-sr-linstor_group_thin_device",
+            "linstor-kv-tool", "--dump-volumes", "-g", group_name,
            "|", "grep", "volume-name", "|", "grep", vdi_uuid
         ])
         volume_name = output.split(': ')[1].split('"')[1]
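
Two details of the lookup above are worth spelling out. The "|" tokens only work because host.ssh presumably joins its arguments into a single remote shell command line, so the grep filters run on the host rather than being passed as literal arguments to linstor-kv-tool. And the final parsing is terse; given a dump line of the shape quoted in the comment, it behaves like this (UUID and volume name invented for illustration):

    # One matching line from `linstor-kv-tool --dump-volumes` (values invented):
    line = '"xcp/volume/6c2b14ac-57cb-4e5e-9324-e29ec48f8c07/volume-name": "xcp-volume-a1b2c3d4",'

    volume_name = line.split(': ')[1]        # -> '"xcp-volume-a1b2c3d4",'
    volume_name = volume_name.split('"')[1]  # -> 'xcp-volume-a1b2c3d4'
    assert volume_name == "xcp-volume-a1b2c3d4"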