diff --git a/README.md b/README.md index 859c3a79..9d9adcc1 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,27 @@ Accepted values are: `linear`, `striped`, `raid0`, `raid1`, `raid4`, `raid5`, `r This is a list of volumes that belong to the current pool. It follows the same pattern as the `storage_volumes` variable, explained below. +##### `encryption` +This specifies whether or not the pool will be encrypted using LUKS. +__WARNING__: Toggling encryption for a pool is a destructive operation, meaning + the pool itself will be removed as part of the process of + adding/removing the encryption layer. + +##### `encryption_passphrase` +This string specifies a passphrase used to unlock/open the LUKS volume(s). + +##### `encryption_key_file` +This string specifies the full path to the key file used to unlock the LUKS volume(s). + +##### `encryption_cipher` +This string specifies a non-default cipher to be used by LUKS. + +##### `encryption_key_size` +This integer specifies the LUKS key size (in bits). + +##### `encryption_luks_version` +This integer specifies the LUKS version to use. + +#### `storage_volumes` The `storage_volumes` variable is a list of volumes to manage. 
Each volume has the following diff --git a/defaults/main.yml b/defaults/main.yml index 5fe229fb..79b630e2 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -9,6 +9,15 @@ storage_pool_defaults: state: "present" type: lvm + encryption: false + encryption_passphrase: null + encryption_key_file: null + encryption_cipher: null + encryption_key_size: null + encryption_luks_version: null + + raid_level: null + storage_volume_defaults: state: "present" type: lvm diff --git a/library/blivet.py b/library/blivet.py index 12d0a3e5..22ded133 100644 --- a/library/blivet.py +++ b/library/blivet.py @@ -148,15 +148,72 @@ class BlivetAnsibleError(Exception): pass -class BlivetVolume(object): +class BlivetBase(object): blivet_device_class = None + _type = None - def __init__(self, blivet_obj, volume, bpool=None): + def __init__(self, blivet_obj, spec_dict): self._blivet = blivet_obj - self._volume = volume - self._blivet_pool = bpool + self._spec_dict = spec_dict self._device = None + def _manage_one_encryption(self, device): + ret = device + # Make sure to handle adjusting both existing stacks and future stacks. 
+ if device == device.raw_device and self._spec_dict['encryption']: + # add luks + luks_name = "luks-%s" % device._name + if not device.format.exists: + fmt = device.format + else: + fmt = get_format(None) + + self._blivet.format_device(device, + get_format("luks", + name=luks_name, + cipher=self._spec_dict.get('encryption_cipher'), + key_size=self._spec_dict.get('encryption_key_size'), + luks_version=self._spec_dict.get('encryption_luks_version'), + passphrase=self._spec_dict.get('encryption_passphrase') or None, + key_file=self._spec_dict.get('encryption_key_file') or None)) + + if not device.format.has_key: + raise BlivetAnsibleError("encrypted %s '%s' missing key/passphrase" % (self._type, self._spec_dict['name'])) + + luks_device = devices.LUKSDevice(luks_name, + fmt=fmt, + parents=[device]) + self._blivet.create_device(luks_device) + ret = luks_device + elif device != device.raw_device and not self._spec_dict['encryption']: + # remove luks + if not device.format.exists: + fmt = device.format + else: + fmt = get_format(None) + + ret = self._device.raw_device + self._blivet.destroy_device(device) + if fmt.type is not None: + self._blivet.format_device(ret, fmt) + + # XXX: blivet has to store cipher, key_size, luks_version for existing before we + # can support re-encrypting based on changes to those parameters + + return ret + + +class BlivetVolume(BlivetBase): + _type = "volume" + + def __init__(self, blivet_obj, volume, bpool=None): + super(BlivetVolume, self).__init__(blivet_obj, volume) + self._blivet_pool = bpool + + @property + def _volume(self): + return self._spec_dict + @property def required_packages(self): packages = list() @@ -243,46 +300,7 @@ def _destroy(self): self._blivet.devicetree.recursive_remove(self._device.raw_device) def _manage_encryption(self): - # Make sure to handle adjusting both existing stacks and future stacks. 
- if self._device == self._device.raw_device and self._volume['encryption']: - # add luks - luks_name = "luks-%s" % self._device._name - if not self._device.format.exists: - fmt = self._device.format - else: - fmt = get_format(None) - - self._blivet.format_device(self._device, - get_format("luks", - name=luks_name, - cipher=self._volume.get('encryption_cipher'), - key_size=self._volume.get('encryption_key_size'), - luks_version=self._volume.get('encryption_luks_version'), - passphrase=self._volume.get('encryption_passphrase') or None, - key_file=self._volume.get('encryption_key_file') or None)) - - if not self._device.format.has_key: - raise BlivetAnsibleError("encrypted volume '%s' missing key/passphrase" % self._volume['name']) - - luks_device = devices.LUKSDevice(luks_name, - fmt=fmt, - parents=[self._device]) - self._blivet.create_device(luks_device) - self._device = luks_device - elif self._device != self._device.raw_device and not self._volume['encryption']: - # remove luks - if not self._device.format.exists: - fmt = self._device.format - else: - fmt = get_format(None) - - self._device = self._device.raw_device - self._blivet.destroy_device(self._device.children[0]) - if fmt.type is not None: - self._blivet.format_device(self._device, fmt) - - # XXX: blivet has to store cipher, key_size, luks_version for existing before we - # can support re-encrypting based on changes to those parameters + self._device = self._manage_one_encryption(self._device) def _resize(self): """ Schedule actions as needed to ensure the device has the desired size. 
""" @@ -462,16 +480,18 @@ def _get_blivet_volume(blivet_obj, volume, bpool=None): return _BLIVET_VOLUME_TYPES[volume_type](blivet_obj, volume, bpool=bpool) -class BlivetPool(object): - blivet_device_class = None +class BlivetPool(BlivetBase): + _type = "pool" def __init__(self, blivet_obj, pool): - self._blivet = blivet_obj - self._pool = pool - self._device = None + super(BlivetPool, self).__init__(blivet_obj, pool) self._disks = list() self._blivet_volumes = list() + @property + def _pool(self): + return self._spec_dict + @property def required_packages(self): packages = list() @@ -489,6 +509,9 @@ def ultimately_present(self): def _is_raid(self): return self._pool.get('raid_level') not in [None, "null", ""] + def _member_management_is_destructive(self): + return False + def _create(self): """ Schedule actions as needed to ensure the pool exists. """ pass @@ -515,6 +538,8 @@ def _destroy(self): leaves = [a for a in ancestors if a.isleaf] + self._device = None + def _type_check(self): # pylint: disable=no-self-use return True @@ -624,10 +649,12 @@ def manage(self): self._look_up_device() # schedule destroy if appropriate, including member type change - if not self.ultimately_present: # TODO: member type changes - self._manage_volumes() + if not self.ultimately_present or self._member_management_is_destructive(): + if not self.ultimately_present: + self._manage_volumes() self._destroy() - return + if not self.ultimately_present: + return # schedule create if appropriate self._create() @@ -660,6 +687,17 @@ class BlivetLVMPool(BlivetPool): def _type_check(self): return self._device.type == "lvmvg" + def _member_management_is_destructive(self): + if self._device is None: + return False + + if self._pool['encryption'] and not all(m.encrypted for m in self._device.parents): + return True + elif not self._pool['encryption'] and any(m.encrypted for m in self._device.parents): + return True + + return False + def _get_format(self): fmt = get_format("lvmpv") if not 
fmt.supported or not fmt.formattable: @@ -667,12 +705,18 @@ def _get_format(self): return fmt + def _manage_encryption(self, members): + managed_members = list() + for member in members: + managed_members.append(self._manage_one_encryption(member)) + + return managed_members + def _create(self): if self._device: return - members = self._create_members() - + members = self._manage_encryption(self._create_members()) try: pool_device = self._blivet.new_vg(name=self._pool['name'], parents=members) except Exception as e: diff --git a/library/blockdev_info.py b/library/blockdev_info.py index 87916a8f..7d8bc41c 100644 --- a/library/blockdev_info.py +++ b/library/blockdev_info.py @@ -31,12 +31,32 @@ type: dict ''' +import os import shlex from ansible.module_utils.basic import AnsibleModule LSBLK_DEVICE_TYPES = {"part": "partition"} +DEV_MD_DIR = '/dev/md' + + +def fixup_md_path(path): + if not path.startswith("/dev/md"): + return path + + if not os.path.exists(DEV_MD_DIR): + return path + + ret = path + for md in os.listdir(DEV_MD_DIR): + md_path = "%s/%s" % (DEV_MD_DIR, md) + if os.path.realpath(md_path) == os.path.realpath(path): + ret = md_path + break + + return ret + def get_block_info(run_cmd): buf = run_cmd(["lsblk", "-o", "NAME,FSTYPE,LABEL,UUID,TYPE", "-p", "-P", "-a"])[1] @@ -50,6 +70,9 @@ def get_block_info(run_cmd): print(pair) raise if key: + if key.lower() == "name": + value = fixup_md_path(value) + dev[key.lower()] = LSBLK_DEVICE_TYPES.get(value, value) if dev: info[dev['name']] = dev diff --git a/tests/test-verify-pool-members.yml b/tests/test-verify-pool-members.yml new file mode 100644 index 00000000..791f966c --- /dev/null +++ b/tests/test-verify-pool-members.yml @@ -0,0 +1,57 @@ +- set_fact: + _storage_test_pool_pvs_lvm: "{{ ansible_lvm.pvs|dict2items(key_name='path', value_name='info')|json_query('[?info.vg==`\"{}\"`].path'.format(storage_test_pool.name))|list }}" + _storage_test_pool_pvs: [] + _storage_test_expected_pv_count: "{{ 0 if 
storage_test_pool.state == 'absent' else (storage_test_pool.raid_level | ternary(1, storage_test_pool.disks|length)) }}" + when: storage_test_pool.type == 'lvm' + +- name: Get the canonical device path for each member device + resolve_blockdev: + spec: "{{ pv }}" + loop: "{{ _storage_test_pool_pvs_lvm }}" + loop_control: + loop_var: pv + register: pv_paths + when: storage_test_pool.type == 'lvm' + +- set_fact: + _storage_test_pool_pvs: "{{ _storage_test_pool_pvs }} + [ '{{ pv_paths.results[idx].device }}' ]" + loop: "{{ _storage_test_pool_pvs_lvm }}" + loop_control: + index_var: idx + when: storage_test_pool.type == 'lvm' + +- name: Verify PV count + assert: + that: "{{ ansible_lvm.pvs|dict2items|json_query('[?value.vg==`\"{}\"`]'.format(storage_test_pool.name))|length == _storage_test_expected_pv_count|int }}" + msg: "Unexpected PV count for pool {{ storage_test_pool.name }}" + when: storage_test_pool.type == 'lvm' + +- set_fact: + _storage_test_expected_pv_type: "{{ 'crypt' if storage_test_pool.encryption else 'disk' }}" + when: storage_test_pool.type == 'lvm' + +- set_fact: + _storage_test_expected_pv_type: "{{ 'partition' if storage_use_partitions|default(false) else 'disk' }}" + when: storage_test_pool.type == 'lvm' and not storage_test_pool.encryption + +- set_fact: + _storage_test_expected_pv_type: "{{ storage_test_pool.raid_level }}" + when: storage_test_pool.type == 'lvm' and storage_test_pool.raid_level + +- name: Check the type of each PV + assert: + that: "{{ storage_test_blkinfo.info[pv]['type'] == _storage_test_expected_pv_type }}" + msg: "Incorrect type for PV {{ pv }} in pool {{ storage_test_pool.name }}" + loop: "{{ _storage_test_pool_pvs }}" + loop_control: + loop_var: pv + when: storage_test_pool.type == 'lvm' + +- name: Check member encryption + include_tasks: verify-pool-members-encryption.yml + +- set_fact: + _storage_test_expected_pv_type: null + _storage_test_expected_pv_count: null + _storage_test_pool_pvs_lvm: [] + _storage_test_pool_pvs: 
[] diff --git a/tests/test-verify-pool.yml b/tests/test-verify-pool.yml index f37089ec..e8b40b6d 100644 --- a/tests/test-verify-pool.yml +++ b/tests/test-verify-pool.yml @@ -3,20 +3,21 @@ # Verify the pool configuration. # - set_fact: - _storage_pool_tests: ['name', 'type', 'size', 'members'] # members: disks, types + _storage_pool_tests: ['members'] # future: + # name + # type + # size # members: - # encryption + # disks # raid # compression # deduplication -# -# Verify the pool's volumes are configured correctly. -# -- name: Verify the volumes in this pool were correctly managed - include_tasks: "test-verify-volume.yml" - loop: "{{ storage_test_pool.volumes }}" +- name: + include_tasks: "test-verify-pool-{{ storage_test_pool_subset }}.yml" + loop: "{{ _storage_pool_tests }}" loop_control: - loop_var: storage_test_volume - when: storage_test_pool is defined and storage_test_pool.volumes | length > 0 + loop_var: storage_test_pool_subset + + diff --git a/tests/tests_luks_pool.yml b/tests/tests_luks_pool.yml new file mode 100644 index 00000000..e5d57268 --- /dev/null +++ b/tests/tests_luks_pool.yml @@ -0,0 +1,135 @@ +--- +- hosts: all + become: true + vars: + storage_safe_mode: false + mount_location: '/opt/test1' + mount_location_2: '/opt/test2' + volume_size: '5g' + + tasks: + - include_role: + name: storage + + - include_tasks: get_unused_disk.yml + vars: + min_size: "{{ volume_size }}" + max_return: 1 + + ## + ## LVM Pool + ## + + - name: Test for correct handling of new encrypted pool w/ no key + block: + - name: Create an encrypted lvm pool + include_role: + name: storage + vars: + storage_pools: + - name: foo + type: lvm + disks: "{{ unused_disks }}" + encryption: true + volumes: + - name: test1 + mount_point: "{{ mount_location }}" + size: 4g + + - name: unreachable task + fail: + msg: UNREACH + + rescue: + - name: Check that we failed in the role + assert: + that: + - ansible_failed_result.msg != 'UNREACH' + msg: "Role has not failed when it should have" + + 
- name: Verify the output of the keyless luks test + assert: + that: "blivet_output.failed and + blivet_output.msg|regex_search('encrypted pool.*missing key') and + not blivet_output.changed" + msg: "Unexpected behavior w/ encrypted pool w/ no key" + + - name: Create an encrypted lvm volume w/ default fs + include_role: + name: storage + vars: + storage_pools: + - name: foo + type: lvm + disks: "{{ unused_disks }}" + encryption: true + encryption_passphrase: 'yabbadabbadoo' + volumes: + - name: test1 + mount_point: "{{ mount_location }}" + size: 4g + + - include_tasks: verify-role-results.yml + + - name: Remove the encryption layer + include_role: + name: storage + vars: + storage_pools: + - name: foo + type: lvm + disks: "{{ unused_disks }}" + encryption: false + encryption_passphrase: 'yabbadabbadoo' + volumes: + - name: test1 + mount_point: "{{ mount_location }}" + size: 4g + + - include_tasks: verify-role-results.yml + + - name: Add encryption to the volume + include_role: + name: storage + vars: + storage_pools: + - name: foo + type: lvm + disks: "{{ unused_disks }}" + encryption: true + encryption_passphrase: 'yabbadabbadoo' + volumes: + - name: test1 + mount_point: "{{ mount_location }}" + size: 4g + + - include_tasks: verify-role-results.yml + + - name: Change the mountpoint, leaving encryption in place + include_role: + name: storage + vars: + storage_pools: + - name: foo + type: lvm + disks: "{{ unused_disks }}" + encryption: true + encryption_passphrase: 'yabbadabbadoo' + volumes: + - name: test1 + mount_point: "{{ mount_location_2 }}" + size: 4g + + - include_tasks: verify-role-results.yml + + - name: Clean up + include_role: + name: storage + vars: + storage_volumes: + - name: foo + type: disk + disks: "{{ unused_disks }}" + state: absent + + - include_tasks: verify-role-results.yml diff --git a/tests/verify-pool-member-crypttab.yml b/tests/verify-pool-member-crypttab.yml new file mode 100644 index 00000000..ac49348f --- /dev/null +++ 
b/tests/verify-pool-member-crypttab.yml @@ -0,0 +1,28 @@ +- set_fact: + _storage_test_crypttab_entries: "{{ storage_test_crypttab.stdout_lines|map('regex_search', '^' + _storage_test_pool_member_path|basename + ' .*$')|select('string')|list }}" + +- name: Check for /etc/crypttab entry + assert: + that: "{{ _storage_test_crypttab_entries|length == _storage_test_expected_crypttab_entries|int }}" + msg: "Incorrect number of crypttab entries found for pool {{ storage_test_pool.name }} member {{ _storage_test_pool_member_path|basename }}" + +- name: Validate the format of the crypttab entry + assert: + that: "{{ _storage_test_crypttab_entries[0].split()|length >= 3 }}" + msg: "Incorrectly formatted crypttab line for volume {{ storage_test_pool.name }} member {{ _storage_test_pool_member_path|basename }}" + when: _storage_test_expected_crypttab_entries|int == 1 + +- name: Check backing device of crypttab entry + assert: + that: "{{ _storage_test_crypttab_entries[0].split()[1] == storage_test_volume._raw_device }}" + msg: "Incorrect backing device in crypttab entry for volume {{ storage_test_volume.name }}" + when: false and _storage_test_expected_crypttab_entries|int == 1 + +- name: Check key file of crypttab entry + assert: + that: "{{ _storage_test_crypttab_entries[0].split()[2] == _storage_test_expected_crypttab_key_file }}" + msg: "Incorrect key file in crypttab entry for volume {{ storage_test_pool.name }} member {{ _storage_test_pool_member_path|basename }}" + when: _storage_test_expected_crypttab_entries|int == 1 + +- set_fact: + _storage_test_crypttab_entries: null diff --git a/tests/verify-pool-members-encryption.yml b/tests/verify-pool-members-encryption.yml new file mode 100644 index 00000000..8adde5ba --- /dev/null +++ b/tests/verify-pool-members-encryption.yml @@ -0,0 +1,17 @@ +# +# /etc/crypttab +# +- set_fact: + _storage_test_expected_crypttab_entries: "{{ (storage_test_pool.encryption and storage_test_pool.state == 'present')|ternary(1, 0) }}" + 
_storage_test_expected_crypttab_key_file: "{{ storage_test_pool.encryption_key_file or '-' }}" + +- name: Validate pool member crypttab entries + include_tasks: verify-pool-member-crypttab.yml + loop: "{{ _storage_test_pool_pvs }}" + loop_control: + loop_var: _storage_test_pool_member_path + when: storage_test_pool.type == 'lvm' + +- set_fact: + _storage_test_crypttab_entries: null + _storage_test_expected_crypttab_key_file: null