Example #1
0
def delete(resource_name, uri_list):
    """
    Remove a resource together with all of its snapshots.

    :param str resource_name: name of the resource
    :param str uri_list: linstor uris string
    :return: True
    """
    controllers = MultiLinstor.controller_uri_list(uri_list)
    with MultiLinstor(controllers) as lin:
        snapshot_list = lin.snapshot_dfn_list()[0]
        own_snapshots = [s for s in snapshot_list.snapshots
                         if s.rsc_name == resource_name]
        for snapshot in own_snapshots:
            util.log_info("Deleting snapshot '{r}/{s}'".format(
                r=resource_name, s=snapshot.snapshot_name))
            lin.snapshot_delete(rsc_name=resource_name,
                                snapshot_name=snapshot.snapshot_name)

        # python-linstor 0.9.5 has a regression: deleting a non-existing
        # resource raises an exception with a None value.  The low level
        # api call still works, and opennebula does not need the external
        # name feature anyway.
        util.log_info("Deleting resource '{r}'".format(r=resource_name))
        replies = lin.resource_dfn_delete(name=resource_name)
        if not replies[0].is_success():
            raise LinstorError('Could not delete resource {}: {}'.format(
                resource_name, replies[0]))
    return True
Example #2
0
def deploy(linstor_controllers,
           resource_name,
           vlm_size_str,
           resource_group=None,
           prefer_node=None):
    """
    Create a new resource from a resource group, optionally preferring a node.

    :param str linstor_controllers:
    :param str resource_name: Name of the new resource definition
    :param str vlm_size_str: volume size string
    :param str resource_group: Name of the resource group to use
    :param Optional[str] prefer_node: Tries to place a diskful on this node(if autoplace)
    :return: Resource object of the new deployment
    :rtype: Resource
    """
    util.log_info(
        "Deploying resource '{}' using resource group '{}', prefer node: {n}".
        format(resource_name, resource_group, n=prefer_node))
    # with a preferred node only the definitions are created here; placement
    # happens below via diskful() + autoplace()
    definitions_only = bool(prefer_node)
    new_resource = Resource.from_resource_group(
        linstor_controllers,
        resource_group,
        resource_name,
        [vlm_size_str],
        definitions_only=definitions_only)
    if prefer_node:
        # None makes autoplace use the resource group's redundancy
        # (the library default would be 2)
        new_resource.placement.redundancy = None
        new_resource.diskful(prefer_node)
        new_resource.autoplace()
    return new_resource
Example #3
0
def wait_resource_ready(resource, timeout=1200):
    """
    Block until the DRBD resource has finished syncing.

    :param Resource resource: Resource object to check
    :param int timeout: timeout in seconds
    :return: True once the resource is in sync
    """
    uri_list = resource.client.uri_list
    with MultiLinstor(uri_list) as lin:
        util.log_info(
            "Waiting for resource '{r}' to be ready.".format(r=resource.name))
        lin.resource_dfn_wait_synced(resource.name, timeout=timeout)
        util.log_info("Resource '{r}' ready.".format(r=resource.name))
        return True
Example #4
0
    def _run_command(self, command):
        """
        Run a linstor CLI subcommand and return its combined output.

        :param list[str] command: linstor subcommand and its arguments
        :return: raw stdout+stderr bytes of the command
        :raises subprocess.CalledProcessError: if the command exits non-zero
        """
        client_opts = ["linstor", "--no-color"]
        if self._controllers:
            client_opts += ["--controllers", self._controllers]
        final = client_opts + command

        util.log_info("running linstor {}".format(" ".join(command)))

        try:
            # pass the argument list directly instead of joining it into a
            # shell=True string: arguments containing spaces or shell
            # metacharacters (e.g. resource names) are no longer re-split
            # or interpreted by the shell
            return subprocess.check_output(final, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as cpe:
            util.error_message(cpe.output)
            raise
Example #5
0
def resize_disk(resource, target_vm, disk_id, new_size):
    """
    Grow the linstor volume and, if necessary, the qcow2 image on top of it.

    :param Resource resource:
    :param vm.Vm target_vm:
    :param str disk_id:
    :param int new_size: new size in mega bytes
    :return:
    """
    util.log_info("Resizing resource {r} new size: {s}MiB".format(
        r=resource.name, s=new_size))
    size_bytes = SizeCalc.convert(new_size, SizeCalc.UNIT_MiB, SizeCalc.UNIT_B)
    resource.volumes[0].size = size_bytes

    resize_if_qcow2(resource, target_vm, disk_id, new_size)
Example #6
0
    def delete(self):
        """
        Delete this resource definition, removing its snapshots first.

        Snapshot deletion appears to be asynchronous on the controller, so
        poll up to 10 times (1s apart) until all snapshots are gone before
        removing the resource definition itself.

        :raises RuntimeError: if snapshots are still present after 10 tries
        """
        # Resource definitions cannot be removed if they contain snapshots.
        for snap in self.snapshots():
            self.snap_delete(snap)

        # Looks like deleting snapshots is actually async, poll to give it
        # time to clear.
        for _ in range(10):
            snaps = self.snapshots()
            if not snaps:
                break
            # fixed typo in log message ("remainting" -> "remaining")
            util.log_info("snapshots still remaining on {}: {}".format(
                self.name, snaps))
            time.sleep(1)
        else:
            raise RuntimeError(
                "Failed to remove snapshots from image after 10 tries. Unable to delete"
            )

        self._run_command(["resource-definition", "delete", self.name])
Example #7
0
def delete(resource):
    """
    Remove a resource and every snapshot that belongs to it.

    :param Resource resource:
    :return: True
    """
    with MultiLinstor(resource.client.uri_list) as lin:
        all_snap_dfns = lin.snapshot_dfn_list()[0]
        own_snaps = [s for s in all_snap_dfns.proto_msg.snapshot_dfns
                     if s.rsc_name == resource.name]
        for snapshot in own_snaps:
            util.log_info("Deleting snapshot '{r}/{s}'".format(
                r=resource.name, s=snapshot.snapshot_name))
            lin.snapshot_delete(rsc_name=resource.name,
                                snapshot_name=snapshot.snapshot_name)

    util.log_info("Deleting resource '{r}'".format(r=resource.name))
    resource.delete()
    return True
Example #8
0
def get_rsc_name(target_vm, disk_id):
    """
    Tries to detect the correct resource name.

    :param Vm target_vm: Vm object
    :param int disk_id: Id of the disk vm
    :return: The linstor resource name
    :rtype: str
    """
    disk_source = target_vm.disk_source(disk_id)

    if not disk_source:
        # volatile disk: no backing image, derive the name from vm/disk ids
        return consts.VOLATILE_PREFIX + "-vm{}-disk{}".format(
            target_vm.ID, disk_id)

    res_name = disk_source
    if target_vm.disk_persistent(disk_id):
        util.log_info("{} is a persistent OS or DATABLOCK image".format(res_name))
    elif target_vm.disk_type(disk_id) == "CDROM":
        util.log_info("{} is a non-persistent CDROM image".format(res_name))
    else:
        # non-persistent image: each vm/disk gets its own derived resource
        res_name = "{}-vm{}-disk{}".format(res_name, target_vm.ID, disk_id)
        util.log_info(
            "{} is a non-persistent OS or DATABLOCK image".format(res_name)
        )

    return res_name
Example #9
0
def deploy(linstor_controllers,
           resource_name,
           storage_pool,
           vlm_size_str,
           deployment_nodes,
           auto_place_count,
           resource_group=None):
    """
    Create a resource via a resource group, an explicit node list or autoplace.

    :param str linstor_controllers:
    :param str resource_name: Name of the new resource definition
    :param str storage_pool: Name of the storage pool to use
    :param str vlm_size_str: volume size string
    :param list[str] deployment_nodes: list of node names
    :param int auto_place_count:
    :param Optional[str] resource_group: Name of the resource group to use
    :return: Resource object of the new deployment
    :rtype: Resource
    """
    # resource group takes precedence over node list / autoplace
    if resource_group:
        util.log_info(
            "Deploying resource '{}' using resource group '{}'".format(
                resource_name, resource_group))
        return Resource.from_resource_group(linstor_controllers,
                                            resource_group, resource_name,
                                            [vlm_size_str])

    resource = Resource(resource_name, linstor_controllers)
    resource.placement.storage_pool = storage_pool
    resource.volumes[0] = Volume(vlm_size_str)

    if deployment_nodes:
        util.log_info(
            "Deploying resource '{}' using deployment_nodes '{}'".format(
                resource_name, deployment_nodes))
        for node in deployment_nodes:
            resource.diskful(node)
    elif auto_place_count:
        util.log_info(
            "Deploying resource '{}' using auto-place-count {}".format(
                resource_name, auto_place_count))
        resource.placement.redundancy = auto_place_count
        resource.autoplace()
    else:
        raise RuntimeError(
            "No deploy mode selected. nodes: {n}, resource_group: {a}".
            format(n=deployment_nodes, a=resource_group))
    return resource
Example #10
0
def clone(resource, clone_name, place_nodes, auto_place_count, mode=CloneMode.SNAPSHOT):
    """
    Clones a resource to a new resource.

    :param Resource resource: resource object to clone
    :param str clone_name: name of the new resource
    :param list[str] place_nodes: deployment nodes string, e.g. "alpha bravo charly"
    :param int auto_place_count:
    :param int mode: CloneMode.SNAPSHOT or CloneMode.COPY
    :return: True if clone was successful
    :rtype: bool
    """
    return_code = 0
    util.log_info("Cloning from resource '{src}' to '{tgt}' clone mode {m}.".format(
        src=resource.name, tgt=clone_name, m=CloneMode.to_str(mode))
    )
    if mode == CloneMode.SNAPSHOT:
        snap_name = "for-" + clone_name
        try:
            resource.snapshot_create(snap_name)
            resource.restore_from_snapshot(snap_name, clone_name)
            time.sleep(1)  # wait a second for deletion, here is a potential race condition
        finally:
            # always try to get rid of the temporary snapshot
            try:
                resource.snapshot_delete(snap_name)
            except LinstorError as le:
                #  the snapshot delete will always fail for zfs storage pools (parent-child relation)
                util.log_info("Snapshot '{s}' delete failed: {ex}".format(s=snap_name, ex=le))
    elif mode == CloneMode.COPY:
        clone_res = Resource(
            name=clone_name,
            uri=",".join(resource.client.uri_list)
        )
        clone_res.placement.storage_pool = resource.volumes[0].storage_pool_name
        clone_res.volumes[0] = Volume(str(resource.volumes[0].size))
        deploy(clone_res, place_nodes, auto_place_count)

        # use copy source on the current primary node or on one with a disk, if all secondary
        copy_node = get_in_use_node(resource)
        if copy_node is None:
            nodes = resource.diskful_nodes()
            copy_node = nodes[0]
        clone_res.activate(copy_node)

        from_dev_path = get_device_path(resource)
        to_dev_path = get_device_path(clone_res)

        # number of 64KiB blocks to copy, computed with pure integer
        # arithmetic: the previous "// 1024 / 64 + 1" produced a float in
        # Python 3 (e.g. count=2.5), which dd rejects as a count operand.
        # +1 covers a trailing partial block.
        block_count = int(resource.volumes[0].size) // (1024 * 64) + 1

        conv_opts = ["sync"]
        if clone_res.is_thin():
            conv_opts.append("sparse")

        dd_cmd = '"dd if={_if} of={_of} bs=64K count={c} conv={conv}"'.format(
            _if=from_dev_path,
            _of=to_dev_path,
            c=block_count,
            conv=",".join(conv_opts)
        )
        # dd on the node
        return_code = util.ssh_exec_and_log(
            " ".join([
                '"{}"'.format(copy_node),
                dd_cmd,
                '"error copying image data from {_if} to {_of}"'.format(_if=from_dev_path, _of=to_dev_path)
            ])
        )

        time.sleep(0.5)  # wait a bit until we are sure dd closed the block device

        clone_res.deactivate(copy_node)

    return return_code == 0
Example #11
0
def clone(resource,
          clone_name,
          resource_group=None,
          prefer_node=None,
          new_size=None,
          allow_dependent_clone=False):
    """
    Clones a resource to a new resource.

    :param Resource resource: resource object to clone
    :param str clone_name: name of the new resource
    :param str resource_group: resource group to use
    :param Optional[str] prefer_node: try to place resource on this node
    :param Optional[int] new_size: new volume size, None to keep original size
    :param bool allow_dependent_clone: allow the clone to depend on source resource
    :return: Tuple, first item if success, second if linstor clone was used
    :rtype: Tuple[bool, bool]
    """
    return_code = 0
    util.log_info("Cloning from resource '{src}' to '{tgt}'.".format(
        src=resource.name, tgt=clone_name))

    use_linstor_clone = True

    if resource.resource_group_name and resource.resource_group_name != resource_group:
        # maybe check if resource group is using same storage pools
        util.log_info(
            "Deployment storage pool '{dp}' in different storage pool '{sp}', fall back to clone mode COPY"
            .format(dp=resource.placement.storage_pool,
                    sp=resource.volumes[0].storage_pool_name))
        use_linstor_clone = False

    linstor_controllers = ",".join(resource.client.uri_list)

    if use_linstor_clone:
        clone_res = resource.clone(clone_name,
                                   use_zfs_clone=allow_dependent_clone)
        if prefer_node:
            clone_res.diskful(prefer_node)
    else:
        vol_size_str = str(new_size) + "MiB" if new_size else str(
            resource.volumes[0].size) + "b"
        clone_res = deploy(linstor_controllers=linstor_controllers,
                           resource_name=clone_name,
                           vlm_size_str=vol_size_str,
                           resource_group=resource_group,
                           prefer_node=prefer_node)

        # use copy source on the current primary node or on one with a disk, if all secondary
        copy_node = get_in_use_node(resource)
        if copy_node is None:
            if prefer_node in resource.diskful_nodes():
                copy_node = prefer_node
            else:
                copy_node = resource.diskful_nodes()[0]
        clone_res.activate(copy_node)

        from_dev_path = get_device_path(resource)
        to_dev_path = get_device_path(clone_res)

        block_size_kb = 64

        block_count = resource.volumes[
            0].size / 1024.0 / block_size_kb  # float division
        # round up to a whole block: dd's count= operand must be an integer.
        # The previous "block_count + 1" kept the fractional part (e.g. 2.5).
        block_count_int = int(block_count) if block_count.is_integer() else (
            int(block_count) + 1)

        conv_opts = ["fsync"]
        if clone_res.is_thin():
            conv_opts.append("sparse")

        dd_cmd = 'dd if={_if} of={_of} bs={bs}K count={c} conv={conv}'.format(
            _if=from_dev_path,
            _of=to_dev_path,
            bs=block_size_kb,
            c=block_count_int,
            conv=",".join(conv_opts))
        # dd on the node
        return_code = util.ssh_exec_and_log(
            host=copy_node,
            cmd=dd_cmd,
            error_msg='error copying image data from {_if} to {_of}'.format(
                _if=from_dev_path, _of=to_dev_path))

        time.sleep(
            0.5)  # wait a bit until we are sure dd closed the block device

        clone_res.deactivate(copy_node)

    return return_code == 0, use_linstor_clone
Example #12
0
def clone(resource,
          clone_name,
          place_nodes,
          auto_place_count,
          mode=CloneMode.SNAPSHOT):
    """
    Clones a resource to a new resource, via snapshot restore or block copy.

    :param Resource resource: resource object to clone
    :param str clone_name: name of the new resource
    :param place_nodes: deployment nodes for the copy target
    :param int auto_place_count: autoplace redundancy for the copy target
    :param int mode: CloneMode.SNAPSHOT or CloneMode.COPY
    :return: True if clone was successful
    :rtype: bool
    """
    return_code = 0
    util.log_info("Cloning from resource '{src}' to '{tgt}'.".format(
        src=resource.name, tgt=clone_name))
    if mode == CloneMode.SNAPSHOT:
        snap_name = "for-" + clone_name
        resource.snapshot_create(snap_name)
        resource.restore_from_snapshot(snap_name, clone_name)
        time.sleep(
            1
        )  # wait a second for deletion, here is a potential race condition
        resource.snapshot_delete(snap_name)
    elif mode == CloneMode.COPY:
        clone_res = Resource(name=clone_name,
                             uri=",".join(resource.client.uri_list))
        clone_res.placement.storage_pool = resource.volumes[
            0].storage_pool_name
        clone_res.volumes[0] = Volume(str(resource.volumes[0].size))
        deploy(clone_res, place_nodes, auto_place_count)
        nodes = resource.diskful_nodes()
        copy_node = nodes[0]
        clone_res.activate(copy_node)

        # fall back to the drbd minor device when no device path is reported
        from_dev_path = resource.volumes[0].device_path if resource.volumes[0].device_path \
            else "/dev/drbd{minor}".format(minor=resource.volumes[0].minor)
        to_dev_path = clone_res.volumes[0].device_path if clone_res.volumes[0].device_path \
            else "/dev/drbd{minor}".format(minor=clone_res.volumes[0].minor)

        # number of 64KiB blocks to copy, computed with pure integer
        # arithmetic: the previous "// 1024 / 64 + 1" produced a float in
        # Python 3 (e.g. count=2.5), which dd rejects as a count operand.
        # +1 covers a trailing partial block.
        block_count = int(resource.volumes[0].size) // (1024 * 64) + 1

        conv_opts = ["sync"]
        if clone_res.is_thin():
            conv_opts.append("sparse")

        dd_cmd = '"dd if={_if} of={_of} bs=64K count={c} conv={conv}"'.format(
            _if=from_dev_path,
            _of=to_dev_path,
            c=block_count,
            conv=",".join(conv_opts))
        # dd on the node
        return_code = util.ssh_exec_and_log(" ".join([
            '"{}"'.format(copy_node), dd_cmd,
            '"error copying image data from {_if} to {_of}"'.format(
                _if=from_dev_path, _of=to_dev_path)
        ]))

        clone_res.deactivate(copy_node)

    return return_code == 0