Merge pull request #105 from japokorn/master-mdraid_support_volumes
MDRAID support for volumes
japokorn authored Jun 11, 2020
2 parents 98fd8bd + 3adcee1 commit 576f710
Showing 12 changed files with 421 additions and 13 deletions.
19 changes: 18 additions & 1 deletion README.md
@@ -49,7 +49,7 @@ This specifies the name of the volume.

##### `type`
This specifies the type of volume on which the file system will reside.
Valid values for `type`: `lvm` or `disk`.
Valid values for `type`: `lvm`, `disk` or `raid`.
The default is determined according to the OS and release (currently `lvm`).

##### `disks`
@@ -78,6 +78,23 @@ The `mount_point` specifies the directory on which the file system will be mount
##### `mount_options`
The `mount_options` specifies custom mount options as a string, e.g.: 'ro'.

##### `raid_level`
Specifies the RAID level when the volume type is `raid`.
Accepted values are: `linear`, `striped`, `raid0`, `raid1`, `raid4`, `raid5`, `raid6`, `raid10`.

##### `raid_device_count`
When type is `raid`, specifies the number of active RAID devices.

##### `raid_spare_count`
When type is `raid`, specifies the number of spare RAID devices.

##### `raid_metadata_version`
When type is `raid`, specifies the RAID metadata version as a string, e.g.: '1.0'.

##### `raid_chunk_size`
When type is `raid`, specifies the RAID chunk size as a string, e.g.: '512 KiB'.
The chunk size has to be a multiple of 4 KiB.
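
For example, a RAID volume can be requested like this (a minimal sketch, assuming the role is installed as `linux-system-roles.storage`; the disk names, RAID settings and mount point are illustrative):

```yaml
- hosts: all
  roles:
    - name: linux-system-roles.storage
      storage_volumes:
        - name: data
          type: raid
          disks: ['sdb', 'sdc', 'sdd']
          raid_level: raid1
          raid_device_count: 2
          raid_spare_count: 1
          fs_type: xfs
          mount_point: /opt/test
```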

##### `encryption`
This specifies whether or not the volume will be encrypted using LUKS.
__WARNING__: Toggling encryption for a volume is a destructive operation, meaning
6 changes: 6 additions & 0 deletions defaults/main.yml
@@ -25,6 +25,12 @@ storage_volume_defaults:
mount_passno: 0
mount_device_identifier: "uuid" # uuid|label|path

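# RAID options (used only when the volume type is 'raid')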
raid_level: null
raid_device_count: null
raid_spare_count: null
raid_chunk_size: null
raid_metadata_version: null

encryption: false
encryption_passphrase: null
encryption_key_file: null
132 changes: 127 additions & 5 deletions library/blivet.py
@@ -95,6 +95,7 @@
from blivet3 import Blivet
from blivet3.callbacks import callbacks
from blivet3 import devices
from blivet3.devicelibs.mdraid import MD_CHUNK_SIZE
from blivet3.flags import flags as blivet_flags
from blivet3.formats import get_format
from blivet3.partitioning import do_partitioning
@@ -107,6 +108,7 @@
from blivet import Blivet
from blivet.callbacks import callbacks
from blivet import devices
from blivet.devicelibs.mdraid import MD_CHUNK_SIZE
from blivet.flags import flags as blivet_flags
from blivet.formats import get_format
from blivet.partitioning import do_partitioning
@@ -374,7 +376,6 @@ def _look_up_device(self):
raise BlivetAnsibleError("unable to resolve disk specified for volume '%s' (%s)" % (self._volume['name'], self._volume['disks']))



class BlivetPartitionVolume(BlivetVolume):
blivet_device_class = devices.PartitionDevice

@@ -446,10 +447,130 @@ def _create(self):
self._device = device


class BlivetMDRaidVolume(BlivetVolume):

def _process_device_numbers(self, members_count, requested_actives, requested_spares):
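# Derive (members_count, active_count) from the requested active/spare counts.
# Illustrative example: 4 member disks with raid_spare_count=1 gives
# active_count=3; if both counts are given they must add up to members_count.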

active_count = members_count
spare_count = 0

if requested_actives is not None and requested_spares is not None:
if (requested_actives + requested_spares != members_count or
requested_actives < 0 or requested_spares < 0):
raise BlivetAnsibleError("failed to set up volume '%s': cannot create RAID "
"with %s members (%s active and %s spare)"
% (self._volume['name'], members_count,
requested_actives, requested_spares))

if requested_actives is not None:
active_count = requested_actives
spare_count = members_count - active_count

if requested_spares is not None:
spare_count = requested_spares
active_count = members_count - spare_count

return members_count, active_count

def _create_raid_members(self, member_names):
members = list()

for member_name in member_names:
member_disk = self._blivet.devicetree.resolve_device(member_name)
if member_disk is not None:
if use_partitions:
# create partition table
label = get_format("disklabel", device=member_disk.path)
self._blivet.format_device(member_disk, label)

# create new partition
member = self._blivet.new_partition(parents=[member_disk], grow=True)
self._blivet.create_device(member)
self._blivet.format_device(member, fmt=get_format("mdmember"))
members.append(member)
else:
self._blivet.format_device(member_disk, fmt=get_format("mdmember"))
members.append(member_disk)

return members

def _create(self):
global safe_mode

if self._device:
return

raid_name = self._volume["name"]
member_names = self._volume["disks"]
raid_level = self._volume["raid_level"]
members_count, active_count = self._process_device_numbers(len(member_names),
self._volume.get("raid_device_count"),
self._volume.get("raid_spare_count"))

# fall back to the blivet/mdadm defaults when these options are omitted or null
chunk_size = Size(self._volume.get("raid_chunk_size") or MD_CHUNK_SIZE)
metadata_version = self._volume.get("raid_metadata_version") or "default"

# the chunk size has to be a multiple of 4 KiB (mdadm itself does not enforce this)
if chunk_size % Size("4 KiB") != Size(0):
raise BlivetAnsibleError("chunk size must be a multiple of 4 KiB")

if safe_mode:
raise BlivetAnsibleError("cannot create new RAID '%s' in safe mode" % raid_name)

# begin creating the devices
members = self._create_raid_members(member_names)

if use_partitions:
try:
do_partitioning(self._blivet)
except Exception as e:
raise BlivetAnsibleError("failed to allocate partitions for mdraid '%s': %s" % (raid_name, str(e)))

try:
raid_array = self._blivet.new_mdarray(name=raid_name,
level=raid_level,
member_devices=active_count,
total_devices=members_count,
parents=members,
chunk_size=chunk_size,
metadata_version=metadata_version,
fmt=self._get_format())
except ValueError as e:
raise BlivetAnsibleError("cannot create RAID '%s': %s" % (raid_name, str(e)))

self._blivet.create_device(raid_array)

self._device = raid_array

def _destroy(self):
""" Schedule actions as needed to ensure the pool does not exist. """

if self._device is None:
return

ancestors = self._device.ancestors
self._blivet.devicetree.recursive_remove(self._device)
ancestors.remove(self._device)

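# tear down the remaining ancestors bottom-up: whole disks are wiped via
# recursive_remove, anything else (e.g. member partitions) is destroyed directly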
leaves = [a for a in ancestors if a.isleaf]
while leaves:
for ancestor in leaves:
log.info("scheduling destruction of %s", ancestor.name)
if ancestor.is_disk:
self._blivet.devicetree.recursive_remove(ancestor)
else:
self._blivet.destroy_device(ancestor)

ancestors.remove(ancestor)

leaves = [a for a in ancestors if a.isleaf]


_BLIVET_VOLUME_TYPES = {
"disk": BlivetDiskVolume,
"lvm": BlivetLVMVolume,
"partition": BlivetPartitionVolume
"partition": BlivetPartitionVolume,
"raid": BlivetMDRaidVolume
}


@@ -989,12 +1110,12 @@ def action_dict(action):
action.format.teardown()

if scheduled:
## execute the scheduled actions, committing changes to disk
# execute the scheduled actions, committing changes to disk
callbacks.action_executed.add(record_action)
try:
b.devicetree.actions.process(devices=b.devicetree.devices, dry_run=module.check_mode)
except Exception:
module.fail_json(msg="Failed to commit changes to disk", **result)
except Exception as e:
module.fail_json(msg="Failed to commit changes to disk: %s" % str(e), **result)
finally:
result['changed'] = True
result['actions'] = [action_dict(a) for a in actions]
@@ -1011,6 +1132,7 @@ def action_dict(action):
# success - return result
module.exit_json(**result)


def main():
run_module()

1 change: 1 addition & 0 deletions tests/provision.fmf
@@ -3,3 +3,4 @@ standard-inventory-qcow2:
drive:
- size: 10737418240
- size: 10737418240
- size: 10737418240
34 changes: 32 additions & 2 deletions tests/test-verify-volume-device.yml
@@ -1,4 +1,25 @@
---

#
# MDRAID device paths are returned as symlinks, but sometimes we need their targets (e.g. /dev/md/test1 -> /dev/md127).
#
- name: (1/3) Process device path (set initial value)
set_fact:
storage_test_device_path: "{{ storage_test_volume._raw_device }}"

# realpath fails when given an empty string; the task would then return a completely different dict.
- block:
- name: (2/3) Process device path (get device file info)
command: realpath "{{ storage_test_volume._raw_device }}"
register: storage_test_realpath

- name: (3/3) Process device path (replace device with its target if it is a symlink)
set_fact:
storage_test_device_path: "{{ storage_test_realpath.stdout }}"
when: storage_test_device_path not in storage_test_blkinfo.info

when: storage_test_volume._device != ""

# name/path
- name: See whether the device node is present
stat:
@@ -16,13 +37,22 @@

- name: Make sure we got info about this volume
assert:
that: "{{ storage_test_volume._raw_device in storage_test_blkinfo.info }}"
that: "{{ storage_test_device_path in storage_test_blkinfo.info }}"
msg: "Failed to gather info about volume '{{ storage_test_volume.name }}'"
when: _storage_test_volume_present

- name: (1/2) Process volume type (set initial value)
set_fact:
st_volume_type: "{{ storage_test_volume.type }}"

- name: (2/2) Process volume type (get RAID value)
set_fact:
st_volume_type: "{{ storage_test_volume.raid_level }}"
when: storage_test_volume.type == "raid"

- name: Verify the volume's device type
assert:
that: "{{ storage_test_blkinfo.info[storage_test_volume._raw_device].type == storage_test_volume.type }}"
that: "{{ storage_test_blkinfo.info[storage_test_device_path].type == st_volume_type }}"
when: _storage_test_volume_present

# disks
25 changes: 23 additions & 2 deletions tests/test-verify-volume-fs.yml
@@ -1,12 +1,33 @@
---

#
# MDRAID device paths are returned as symlinks, but sometimes we need their targets.
#
- name: (1/3) Process device path (set initial value)
set_fact:
storage_test_device_path: "{{ storage_test_volume._device }}"

# realpath fails when given an empty string; the task would then return a completely different dict.
- block:
- name: (2/3) Process device path (get device file info)
command: realpath "{{ storage_test_volume._device }}"
register: storage_test_realpath

- name: (3/3) Process device path (replace device with its target if it is a symlink)
set_fact:
storage_test_device_path: "{{ storage_test_realpath.stdout }}"
when: storage_test_device_path not in storage_test_blkinfo.info

when: storage_test_volume._device != ""

# type
- name: Verify fs type
assert:
that: "{{ storage_test_blkinfo.info[storage_test_volume._device].fstype == storage_test_volume.fs_type }}"
that: "{{ storage_test_blkinfo.info[storage_test_device_path].fstype == storage_test_volume.fs_type }}"
when: storage_test_volume.fs_type and _storage_test_volume_present

# label
- name: Verify fs label
assert:
that: "{{ storage_test_blkinfo.info[storage_test_volume._device].label == storage_test_volume.fs_label }}"
that: "{{ storage_test_blkinfo.info[storage_test_device_path].label == storage_test_volume.fs_label }}"
when: _storage_test_volume_present
43 changes: 43 additions & 0 deletions tests/test-verify-volume-md.yml
@@ -0,0 +1,43 @@
---

# Only run when the volume type is raid
- name: check RAID options
block:

- name: get information about RAID
command: "mdadm --detail {{ storage_test_volume._raw_device }}"
register: storage_test_mdadm
changed_when: false

# pre-chew regex search patterns
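# (illustrative) these patterns are matched against `mdadm --detail` output,
# which contains lines like "Active Devices : 2", "Spare Devices : 1" and
# "Version : 1.2"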
- set_fact:
storage_test_md_active_devices_re: "{{('Active Devices : ' ~ storage_test_volume.raid_device_count ~ '\n')|regex_escape()}}"
when: storage_test_volume.raid_device_count is defined

- set_fact:
storage_test_md_spare_devices_re: "{{('Spare Devices : ' ~ storage_test_volume.raid_spare_count ~ '\n')|regex_escape()}}"
when: storage_test_volume.raid_spare_count is defined

- set_fact:
storage_test_md_metadata_version_re: "{{('Version : ' ~ storage_test_volume.raid_metadata_version ~ '\n')|regex_escape()}}"
when: storage_test_volume.raid_metadata_version is defined

- name: check RAID active devices count
assert:
that: "storage_test_mdadm.stdout is regex(storage_test_md_active_devices_re)"
msg: "Expected {{ storage_test_volume.raid_device_count }} active RAID devices."
when: storage_test_volume.raid_device_count is not none

- name: check RAID spare devices count
assert:
that: "storage_test_mdadm.stdout is regex(storage_test_md_spare_devices_re)"
msg: "Expected {{ storage_test_volume.raid_spare_count }} spare RAID devices."
when: storage_test_volume.raid_spare_count is not none

- name: check RAID metadata version
assert:
that: "storage_test_mdadm.stdout is regex(storage_test_md_metadata_version_re)"
msg: "Expected {{ storage_test_volume.raid_metadata_version }} RAID metadata version."
when: storage_test_volume.raid_metadata_version is not none

when: storage_test_volume.type == 'raid' and storage_test_volume._device != ""
23 changes: 22 additions & 1 deletion tests/test-verify-volume-mount.yml
@@ -1,9 +1,30 @@
---

#
# MDRAID device paths are returned as symlinks, but sometimes we need their targets.
#
- name: (1/3) Process device path (set initial value)
set_fact:
storage_test_device_path: "{{ storage_test_volume._device }}"

# realpath fails when given an empty string; the task would then return a completely different dict.
- block:
- name: (2/3) Process device path (get device file info)
command: realpath "{{ storage_test_volume._device }}"
register: storage_test_realpath

- name: (3/3) Process device path (replace device with its target if it is a symlink)
set_fact:
storage_test_device_path: "{{ storage_test_realpath.stdout }}"
when: storage_test_device_path not in storage_test_blkinfo.info

when: storage_test_volume._device != ""

- name: Set some facts
set_fact:
# json_query(...) used instead of "|selectattr('device', 'equalto', storage_test_volume._device)|list"
# as that expression wouldn't work with Jinja versions <2.8
storage_test_mount_device_matches: "{{ ansible_mounts|json_query('[?device==`\"{}\"`]'.format(storage_test_volume._device))}}"
storage_test_mount_device_matches: "{{ ansible_mounts|json_query('[?device==`\"{}\"`]'.format(storage_test_device_path))}}"
storage_test_mount_point_matches: "{{ ansible_mounts|json_query('[?mount==`\"{}\"`]'.format(storage_test_volume.mount_point))}}"
storage_test_mount_expected_match_count: "{{ 1 if _storage_test_volume_present and storage_test_volume.mount_point else 0 }}"
storage_test_swap_expected_matches: "{{ 1 if _storage_test_volume_present and storage_test_volume.fs_type == 'swap' else 0 }}"