From 4ecefac504b0b74352dfd247fe903c87b8e91185 Mon Sep 17 00:00:00 2001 From: Guillaume Date: Mon, 11 Sep 2023 15:02:00 +0200 Subject: [PATCH] Add kalray plugin to configure DPU This plugin allows the creation of a raid, a logical volume store and logical volumes on the Kalray DPU. It also allows the deletion of a volume. Currently restrictions are due to the Kalray DPU. The next generation of the DPU will allow more volumes and won't be restricted on volume name. See the README for details. The unittest mocks the RPC server and it checks that required parameters are well set and that all parameters used are valid. Signed-off-by: Guillaume --- README.md | 98 +++++++++++++++ SOURCES/etc/xapi.d/plugins/kalray_dpu.py | 152 +++++++++++++++++++++++ tests/conftest.py | 3 + tests/mocked_kalray_rpc.py | 55 ++++++++ tests/test_kalray_dpu.py | 69 ++++++++++ 5 files changed, 377 insertions(+) create mode 100755 SOURCES/etc/xapi.d/plugins/kalray_dpu.py create mode 100644 tests/mocked_kalray_rpc.py create mode 100644 tests/test_kalray_dpu.py diff --git a/README.md b/README.md index a8e8385..7f308bc 100644 --- a/README.md +++ b/README.md @@ -194,6 +194,104 @@ $ xe host-call-plugin host-uuid= plugin=hyperthreading.py fn=get_hyperthre true ``` +## XCP-ng Kalray DPU + +A xapi plugin to get information about raids, logical volume store (LVS) and +devices that are present on the Kalray DPU. It also allows the management of +logical volumes (LV): creation and deletion. 
Parameters depend on the name of +the command; some are always available: + - *username*: username to use to connect to DPU (required) + - *password*: password to connect to DPU (required) + - *server*: IP of the server for configuring the DPU (default: localhost) + - *port*: Port to use (default: 8080) + - *timeout*: timeout in seconds (default: 60.0) + +### Command details + +- Currently the Kalray DPU is still in development and there are some +restrictions: + - To be able to expose virtual functions the Kalray poller expects + specific names for the logical volume store and for the volume. It + depends on the configuration used in `/etc/kalray/0000:XX:00.1.conf`. + - By default the logical volume store name **must be** `lvs`. + - By default the volume must start with `volume_`. + - With the current DPU only four virtual functions (and so only four NVMe + disks) can be created and so you can only use the following names: + - `volume_09` + - `volume_10` + - `volume_11` + - `volume_12` + - Only volumes can be deleted. 
+ +#### Block devices + +##### Get the list of devices on the Kalray DPU +``` +$ xe host-call-plugin host-uuid= plugin=kalray_dpu.py fn=get_devices \ + args:username= args:password= +[{"name": "HotInNvmeWDS500AFY0-22050C800415n1", "aliases": [], "product_name": "NVMe disk", "block_size": 512, "num_blocks": 976773168, "uuid": "e8238fa6-bf53-0001-001b-448b45afa6a7", "assigned_rate_limits": {"rw_ios_per_sec": 0, "rw_mbytes_per_sec": 0, "r_mbytes_per_sec": 0, "w_mbytes_per_sec": 0}, "claimed": false, "zoned": false, "supported_io_types": {"read": true, "write": true, "unmap": true, "write_zeroes": true, "flush": true, "reset": true, "nvme_admin": true, "nvme_io": true}, "driver_specific": {"nvme": [{"pci_address": "0000:00:00.0", "trid": {"trtype": "PCIe", "traddr": "0000:00:00.0"}, "ctrlr_data": {"cntlid": 8224, "vendor_id": "0x15b7", "model_number": "WDS500G1X0E-00AFY0", "serial_number": "22050C800415", "firmware_revision": "614900WD", "subnqn": "nqn.2018-01.com.wdc:nguid:E8238FA6BF53-0001-001B448B45AFA6A7", "oacs": {"security": 1, "format": 1, "firmware": 1, "ns_manage": 0}, "multi_ctrlr": false, "ana_reporting": false}, "vs": {"nvme_version": "1.4"}, "ns_data": {"id": 1, "can_share": false}, "security": {"opal": false}}], "mp_policy": "active_passive"}}, {"name": "HotInNvmeWDS500AFY0-22050C800378n1", "aliases": [], "product_name": "NVMe disk", "block_size": 512, "num_blocks": 976773168, "uuid": "e8238fa6-bf53-0001-001b-448b45afe330", "assigned_rate_limits": {"rw_ios_per_sec": 0, "rw_mbytes_per_sec": 0, "r_mbytes_per_sec": 0, "w_mbytes_per_sec": 0}, "claimed": false, "zoned": false, "supported_io_types": {"read": true, "write": true, "unmap": true, "write_zeroes": true, "flush": true, "reset": true, "nvme_admin": true, "nvme_io": true}, "driver_specific": {"nvme": [{"pci_address": "0000:00:01.0", "trid": {"trtype": "PCIe", "traddr": "0000:00:01.0"}, "ctrlr_data": {"cntlid": 8224, "vendor_id": "0x15b7", "model_number": "WDS500G1X0E-00AFY0", "serial_number": 
"22050C800378", "firmware_revision": "614900WD", "subnqn": "nqn.2018-01.com.wdc:nguid:E8238FA6BF53-0001-001B448B45AFE330", "oacs": {"security": 1, "format": 1, "firmware": 1, "ns_manage": 0}, "multi_ctrlr": false, "ana_reporting": false}, "vs": {"nvme_version": "1.4"}, "ns_data": {"id": 1, "can_share": false}, "security": {"opal": false}}], "mp_policy": "active_passive"}}] +``` + +#### RAID + +##### Create a raid on the Kalray DPU +- Supported RAID are raid0, raid1 and raid10 +``` +$ xe host-call-plugin host-uuid= plugin=kalray_dpu.py fn=raid_create \ + args:username= args:password= \ + args:base_bdevs=HotInNvmeWDS500AFY0-22050C800415n1,HotInNvmeWDS500AFY0-22050C800378n1 \ + args:raid_name=raid0 \ + args:raid_level=raid0 +true +``` + +##### Get the list of raids on the Kalray DPU +``` +$ xe host-call-plugin host-uuid= plugin=kalray_dpu.py fn=get_raids \ + args:username= args:password= +["raid0"] +``` + +#### Logical Volume Store (LVS) +##### Create an LVS on the Kalray DPU +``` +$ xe host-call-plugin host-uuid= plugin=kalray_dpu.py fn=lvs_create \ + args:username= args:password= \ + args:lvs_name=lvs \ + args:bdev_name=raid0 +"6fb90332-56e4-4d03-aa6a-f858a2c2ca97" +``` + +##### Get the list of LVS on the Kalray DPU +``` +$ xe host-call-plugin host-uuid= plugin=kalray_dpu.py fn=get_lvs \ + args:username= args:password= +[{"uuid": "6fb90332-56e4-4d03-aa6a-f858a2c2ca97", "name": "lvs", "passive": false, "base_bdev": "raid0", "total_data_clusters": 29804, "free_clusters": 29772, "block_size": 512, "cluster_size": 33554432}] +``` + +#### Logical Volume (LVOL) +##### Create a new logical volume +``` +$ xe host-call-plugin host-uuid= plugin=kalray_dpu.py fn=lvol_create \ + args:username= args:password= \ + args:lvol_name=volume_09 \ + args:lvol_size_in_bytes=1073741824 \ + args:lvs_name=lvs +"6c84b44c-a61b-41a4-8b19-32ab643b57d9" +``` + +##### Delete a logical volume +- The name of the volume to be deleted is not the same than the one used to +create it. 
You need to prepend the name of the logical volume store as shown +in the example: + +``` +$ xe host-call-plugin host-uuid= plugin=kalray_dpu.py fn=lvol_delete \ + args:username= args:password= \ + args:lvol_name=lvs/volume_09 +true +``` + ## Tests To run the plugins' unit tests you'll need to install `pytest`, `pyfakefs` and `mock`. diff --git a/SOURCES/etc/xapi.d/plugins/kalray_dpu.py b/SOURCES/etc/xapi.d/plugins/kalray_dpu.py new file mode 100755 index 0000000..95e7297 --- /dev/null +++ b/SOURCES/etc/xapi.d/plugins/kalray_dpu.py @@ -0,0 +1,152 @@ +#!/usr/bin/python3 +"""XAPI plugin to manage Kalray DPU.""" + +import json +import XenAPIPlugin # pylint: disable=import-error + +from kalray.acs.spdk.rpc.client import HTTPJSONRPCClient, JSONRPCException # pylint: disable=import-error +from xcpngutils import error_wrapped + +class KalrayCmd: + """Describe a command to be ran on the Kalray DPU.""" + + def __init__(self, rpc_name: str, updates: dict): + self.server = 'localhost' + self.port = 8080 + self.username = None + self.password = None + self.timeout = 60.0 + self.rpc_name = rpc_name + self.rpc_params = {} # will be updated using add_rpc_params + + for k, v in updates.items(): + if hasattr(self, k): + setattr(self, k, v) + + # Check that username & password are well set + if self.username is None: + raise XenAPIPlugin.Failure("-1", ["'username' is required"]) + if self.password is None: + raise XenAPIPlugin.Failure("-1", ["'password' is required"]) + + def add_rpc_params(self, key, value): + """Adds a parameter that will be passed to the RPC.""" + self.rpc_params[key] = value + + def call_rpc(self): + """Do the RPC call.""" + try: + client = HTTPJSONRPCClient( + self.server, + self.port, + self.timeout, + self.username, + self.password, + log_level="ERROR") + message = client.call(self.rpc_name, self.rpc_params) + except JSONRPCException as exc: + raise XenAPIPlugin.Failure("-1", [exc.message]) + + return json.dumps(message) + +@error_wrapped +def 
get_devices(_session, args): + """Get the list of devices available on the Kalray DPU.""" + kc = KalrayCmd("bdev_get_bdevs", args) + return kc.call_rpc() + +@error_wrapped +def get_raids(_session, args): + """Get the list of raids available on the Kalray DPU.""" + kc = KalrayCmd("bdev_raid_get_bdevs", args) + kc.add_rpc_params("category", "all") + return kc.call_rpc() + +@error_wrapped +def get_lvs(_session, args): + """Get the list of logical volume stores available on the Kalray DPU.""" + kc = KalrayCmd("bdev_lvol_get_lvstores", args) + return kc.call_rpc() + +@error_wrapped +def raid_create(_session, args): + """Create a raid.""" + kc = KalrayCmd("bdev_raid_create", args) + try: + raid_name = args["raid_name"] + raid_level = args["raid_level"] + base_bdevs = args["base_bdevs"].split(',') + except KeyError as msg: + raise XenAPIPlugin.Failure("-1", [f"Key {msg} is missing"]) + + # Check supported raids + if raid_level not in ["raid0", "raid1", "raid10"]: + raise XenAPIPlugin.Failure("-1", ["Only raid0, raid1 and raid10 are supported"]) + + kc.add_rpc_params("name", raid_name) + kc.add_rpc_params("raid_level", raid_level) + kc.add_rpc_params("base_bdevs", base_bdevs) + kc.add_rpc_params("strip_size_kb", 128) + kc.add_rpc_params("persist", True) + kc.add_rpc_params("split_dp", True) + return kc.call_rpc() + +@error_wrapped +def lvs_create(_session, args): + """Create a logical volume store.""" + kc = KalrayCmd("bdev_lvol_create_lvstore", args) + try: + lvs_name = args["lvs_name"] + bdev_name = args["bdev_name"] + except KeyError as msg: + raise XenAPIPlugin.Failure("-1", [f"Key {msg} is missing"]) + + kc.add_rpc_params("lvs_name", lvs_name) + kc.add_rpc_params("bdev_name", bdev_name) + + return kc.call_rpc() + +@error_wrapped +def lvol_create(_session, args): + """Create a new lvol on the Kalray DPU.""" + kc = KalrayCmd("bdev_lvol_create", args) + + try: + lvol_name = args["lvol_name"] + lvol_size = int(args["lvol_size_in_bytes"]) + lvs_name = args["lvs_name"] + 
except KeyError as msg: + raise XenAPIPlugin.Failure("-1", [f"Key {msg} is missing"]) + except ValueError as msg: + raise XenAPIPlugin.Failure("-1", [f"Wrong size: {msg}"]) + + kc.add_rpc_params("lvol_name", lvol_name) + # size is deprecated but Kalray DPU uses an old version of SPDK that + # does not provide the new 'size_in_mib' parameter. + kc.add_rpc_params("size", lvol_size) + kc.add_rpc_params("lvs_name", lvs_name) + return kc.call_rpc() + +@error_wrapped +def lvol_delete(_session, args): + """Delete the lvol passed as parameter on the Kalray DPU if exists.""" + kc = KalrayCmd("bdev_lvol_delete", args) + + try: + lvol_name = args["lvol_name"] + except KeyError as msg: + raise XenAPIPlugin.Failure("-1", [f"Key {msg} is missing"]) + + kc.add_rpc_params("name", lvol_name) + return kc.call_rpc() + +if __name__ == "__main__": + XenAPIPlugin.dispatch({ + "get_devices": get_devices, + "get_raids": get_raids, + "get_lvs": get_lvs, + "raid_create": raid_create, + "lvs_create": lvs_create, + "lvol_create": lvol_create, + "lvol_delete": lvol_delete, + }) diff --git a/tests/conftest.py b/tests/conftest.py index c1454b3..299e8a2 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -5,6 +5,7 @@ import mocked_configparser import mocked_xen_api_plugin import mocked_yum +import mocked_kalray_rpc def pytest_configure(): pytest.plugins_lock_file = '/var/lib/xcp-ng-xapi-plugins/pytest.lock' @@ -18,6 +19,8 @@ def pytest_configure(): # Mock yum globally, module is not necessarily present on the system. 
sys.modules['yum'] = mocked_yum +sys.modules['kalray.acs.spdk.rpc.client'] = mocked_kalray_rpc + sys.path.append(str(pathlib.Path(__file__).parent.resolve()) + '/../SOURCES/etc/xapi.d/plugins') pytest_plugins = ("pyfakefs",) diff --git a/tests/mocked_kalray_rpc.py b/tests/mocked_kalray_rpc.py new file mode 100644 index 0000000..b9917fc --- /dev/null +++ b/tests/mocked_kalray_rpc.py @@ -0,0 +1,55 @@ +class HTTPJSONRPCClient(object): + def __init__(self, addr, port=None, timeout=60.0, user='admin', password='admin', **kwargs): + pass + + def call(self, method, params=None): + """We will juste check that parameters are ok.""" + parameters = { + "bdev_get_bdevs": { + "required": [], + "optional": ['name', 'timeout'], + }, + "bdev_raid_get_bdevs": { + "required": ['category'], + "optional": [], + }, + "bdev_lvol_get_lvstores": { + "required": [], + "optional": ['uuid', 'lvs_name'], + }, + "bdev_raid_create": { + "required": ['name', 'strip_size_kb', 'raid_level', 'base_bdevs'], + "optional": ['persist', 'split_dp'], + }, + "bdev_lvol_create_lvstore": { + "required": ['bdev_name', 'lvs_name'], + "optional": ['cluster_sz', 'clear_method', 'num_md_pages_per_cluster_ratio'], + }, + "bdev_lvol_create": { + "required": ['lvol_name'], + "optional": ['size', 'size_in_mib', 'thin_provision', 'uuid', 'lvs_name', 'clear_method'], + }, + "bdev_lvol_delete": { + "required": ['name'], + "optional": [], + }, + } + + # Check that method is mocked + try: + p = parameters[method] + except KeyError: + assert False, f"{method} is not mocked" + + # Check that required parameters are given + for k in p['required']: + assert k in params, f"Required parameter '{k}' is missing for {method}" + + # Check that params passed to method are valid + for k in params: + assert k in p['required'] or k in p['optional'], f"Invalid parameter '{k}' for {method}" + + +class JSONRPCException(BaseException): + def __init__(self, message): + assert False, "Mock me!" 
diff --git a/tests/test_kalray_dpu.py b/tests/test_kalray_dpu.py new file mode 100644 index 0000000..f22224d --- /dev/null +++ b/tests/test_kalray_dpu.py @@ -0,0 +1,69 @@ +from kalray_dpu import ( + get_devices, + get_raids, + get_lvs, + raid_create, + lvs_create, + lvol_create, + lvol_delete) + +def test_get_devices(): + args = { + "username": "user", + "password": "pass", + } + get_devices(None, args) + +def test_get_raids(): + args = { + "username": "user", + "password": "pass", + } + get_raids(None, args) + +def test_get_lvs(): + args = { + "username": "user", + "password": "pass", + } + get_lvs(None, args) + +def test_raid_create(): + args = { + "username": "user", + "password": "pass", + "raid_name": "raid_test", + "raid_level": "raid0", + "base_bdevs": "bdev0,bdev1", + "strip_size_kb": 128, + "persist": True, + "slip_dp": True, + } + raid_create(None, args) + +def test_lvs_create(): + args = { + "username": "user", + "password": "pass", + "lvs_name": "lvs_test", + "bdev_name": "raid_test", + } + lvs_create(None, args) + +def test_lvol_create(): + args = { + "username": "user", + "password": "pass", + "lvol_name": "lvol_test", + "lvol_size_in_bytes": 1234, + "lvs_name": "lvs_test", + } + lvol_create(None, args) + +def test_lvol_delete(): + args = { + "username": "user", + "password": "pass", + "lvol_name": "lvol_test", + } + lvol_delete(None, args)