Code example #1 (score: 0)
File: utils.py — Project: gitsridhar/ocs-ci
def cleanup_ceph_nodes(osp_cred, pattern=None, timeout=300):
    """Destroy matching OpenStack nodes, then release their IPs and volumes.

    Nodes and volumes whose names contain *pattern* (or, by default, the
    current login user wrapped in dashes) are destroyed; floating IPs left
    without a node afterwards are released.

    Args:
        osp_cred: OpenStack credential mapping passed to
            ``get_openstack_driver``.
        pattern (str): Substring matched against node/volume names.
            Defaults to ``-<login user>-``.
        timeout (int): Seconds to keep retrying a single node destroy.

    Raises:
        RuntimeError: If a node cannot be destroyed within *timeout*, or
            if any volume deletion failed (failures are logged first).
    """
    user = os.getlogin()
    # Default match: names containing "-<login user>-".
    name = pattern if pattern else "-{user}-".format(user=user)
    driver = get_openstack_driver(osp_cred)
    # Rebind the int parameter as a timedelta for elapsed-time comparisons.
    timeout = datetime.timedelta(seconds=timeout)
    with parallel() as p:
        for node in driver.list_nodes():
            if name in node.name:
                # Detach floating IPs first so they become orphaned and can
                # be released in the pass below.
                for ip in node.public_ips:
                    log.info("removing ip %s from node %s", ip, node.name)
                    driver.ex_detach_floating_ip_from_node(node, ip)
                starttime = datetime.datetime.now()
                log.info(
                    "Destroying node {node_name} with {timeout} timeout".format(
                        node_name=node.name, timeout=timeout
                    )
                )
                # NOTE(review): spawning destroy is retried on AttributeError
                # — presumably a transient driver/node state; retried once a
                # second until it succeeds or the timeout budget is spent.
                while True:
                    try:
                        p.spawn(node.destroy)
                        break
                    except AttributeError:
                        if datetime.datetime.now() - starttime > timeout:
                            raise RuntimeError(
                                "Failed to destroy node {node_name} with {timeout} timeout:\n{stack_trace}".format(
                                    node_name=node.name,
                                    timeout=timeout,
                                    stack_trace=traceback.format_exc(),
                                )
                            )
                        else:
                            sleep(1)
                sleep(5)  # pause between scheduling node destroys
    # Release floating IPs that no longer belong to any node.
    with parallel() as p:
        for fips in driver.ex_list_floating_ips():
            if fips.node_id is None:
                log.info("Releasing ip %s", fips.ip_address)
                driver.ex_delete_floating_ip(fips)
    # Delete matching volumes, collecting failures so one bad volume does
    # not stop the rest; raise at the end if anything failed.
    with parallel() as p:
        errors = {}
        for volume in driver.list_volumes():
            if volume.name is None:
                log.info("Volume has no name, skipping")
            elif name in volume.name:
                log.info("Removing volume %s", volume.name)
                sleep(10)  # give the node teardown time to detach the volume
                try:
                    volume.destroy()
                except BaseHTTPError as e:
                    log.error(e, exc_info=True)
                    # .message presumably carries the libcloud HTTP error
                    # body — TODO confirm against the libcloud version used.
                    errors.update({volume.name: e.message})
        if errors:
            for vol, err in errors.items():
                log.error("Error destroying {vol}: {err}".format(vol=vol, err=err))
            raise RuntimeError(
                "Encountered errors during volume deletion. Volume names and messages have been logged."
            )
Code example #2 (score: 0)
File: utils.py — Project: MeridianExplorer/ocs-ci
def setup_cdn_repos(ceph_nodes, build=None):
    """Enable the CDN repos matching the Ceph *build* on every node.

    Args:
        ceph_nodes (iterable): Nodes to configure; ``set_cdn_repo`` is
            spawned in parallel for each.
        build (str): Ceph build version string; the major version digit
            ("1", "2" or "3") selects the repo set.

    Raises:
        ValueError: If *build* is missing or does not map to a known repo
            set.  (Previously ``build=None`` crashed with an unhelpful
            AttributeError, and an unknown build silently passed
            ``repos=None`` down to ``set_cdn_repo``.)
    """
    repos_13x = [
        "rhel-7-server-rhceph-1.3-mon-rpms",
        "rhel-7-server-rhceph-1.3-osd-rpms",
        "rhel-7-server-rhceph-1.3-calamari-rpms",
        "rhel-7-server-rhceph-1.3-installer-rpms",
        "rhel-7-server-rhceph-1.3-tools-rpms",
    ]

    repos_20 = [
        "rhel-7-server-rhceph-2-mon-rpms",
        "rhel-7-server-rhceph-2-osd-rpms",
        "rhel-7-server-rhceph-2-tools-rpms",
        "rhel-7-server-rhscon-2-agent-rpms",
        "rhel-7-server-rhscon-2-installer-rpms",
        "rhel-7-server-rhscon-2-main-rpms",
    ]

    repos_30 = [
        "rhel-7-server-rhceph-3-mon-rpms",
        "rhel-7-server-rhceph-3-osd-rpms",
        "rhel-7-server-rhceph-3-tools-rpms",
        "rhel-7-server-extras-rpms",
    ]

    # Dispatch on the leading major-version digit instead of an
    # if/elif chain of startswith() calls.
    repo_map = {"1": repos_13x, "2": repos_20, "3": repos_30}
    if not build:
        raise ValueError("A build version string is required to select CDN repos")
    repos = repo_map.get(build[0])
    if repos is None:
        raise ValueError("No CDN repos known for build {build}".format(build=build))
    with parallel() as p:
        for node in ceph_nodes:
            p.spawn(set_cdn_repo, node, repos)
Code example #3 (score: 0)
def create_ebs_volumes(
    worker_pattern,
    size=100,
    region_name=None,
):
    """
    Create volumes on workers

    Args:
        worker_pattern (string): Worker name pattern e.g.:
            cluster-55jx2-worker*
        size (int): Size in GB (default: 100)
        region_name (str): Region name (default: config.ENV_DATA['region'])
    """
    # BUG FIX: resolve the default region BEFORE constructing the AWS
    # client — previously AWS() was instantiated with region_name still
    # None when the caller did not pass a region.
    region_name = region_name or config.ENV_DATA['region']
    aws = AWS(region_name)
    worker_instances = aws.get_instances_by_name_pattern(worker_pattern)
    with parallel() as p:
        for worker in worker_instances:
            log.info(
                f"Creating and attaching {size} GB volume to {worker['name']}")
            # Fan the create+attach calls out in parallel, one per worker.
            p.spawn(
                aws.create_volume_and_attach,
                availability_zone=worker['avz'],
                instance_id=worker['id'],
                name=f"{worker['name']}_extra_volume",
                size=size,
            )
Code example #4 (score: 0)
File: aws.py — Project: gobindadas/ocs-ci
    def create_ebs_volumes(self, worker_pattern, size=100):
        """
        Create and attach an extra EBS volume to each matching worker.

        Args:
            worker_pattern (str): Worker name pattern e.g.:
                cluster-55jx2-worker*
            size (int): Volume size in GB (default: 100)
        """
        workers = self.aws.get_instances_by_name_pattern(worker_pattern)
        # One create+attach call per worker, spawned in parallel.
        with parallel() as pool:
            for instance in workers:
                logger.info(
                    f"Creating and attaching {size} GB "
                    f"volume to {instance['name']}"
                )
                pool.spawn(
                    self.aws.create_volume_and_attach,
                    availability_zone=instance['avz'],
                    instance_id=instance['id'],
                    name=f"{instance['name']}_extra_volume",
                    size=size,
                )
Code example #5 (score: 0)
File: utils.py — Project: raghavendra-talur/ocs-ci
def create_ceph_nodes(cluster_conf, inventory, osp_cred, run_id, instances_name=None):
    """Provision Ceph VMs on OpenStack as described by the cluster config.

    Args:
        cluster_conf (dict): Cluster description; its 'ceph-cluster' entry
            holds per-node ('node1'..'node99') settings.
        inventory (dict): Instance inventory; replaced by the file named in
            ceph-cluster['inventory'] when that key is present.
        osp_cred (dict): Credentials; 'globals'/'openstack-credentials'
            supplies the OpenStack auth parameters.
        run_id: Unique run identifier embedded in every node name.
        instances_name (str): Optional fragment used in node names instead
            of the local login user.

    Returns:
        dict: Mapping populated by ``setup_vm_node`` for each spawned node
            (empty when the inventory does not request instance creation).
    """
    osp_glbs = osp_cred.get('globals')
    os_cred = osp_glbs.get('openstack-credentials')
    params = dict()
    ceph_cluster = cluster_conf.get('ceph-cluster')
    # A cluster-level inventory file overrides the inventory argument.
    if ceph_cluster.get('inventory'):
        inventory_path = os.path.abspath(ceph_cluster.get('inventory'))
        with open(inventory_path, 'r') as inventory_stream:
            inventory = yaml.safe_load(inventory_stream)
    params['cloud-data'] = inventory.get('instance').get('setup')
    params['username'] = os_cred['username']
    params['password'] = os_cred['password']
    params['auth-url'] = os_cred['auth-url']
    params['auth-version'] = os_cred['auth-version']
    params['tenant-name'] = os_cred['tenant-name']
    params['service-region'] = os_cred['service-region']
    params['keypair'] = os_cred.get('keypair', None)
    ceph_nodes = dict()
    if inventory.get('instance').get('create'):
        # Cluster-level image-name wins over the inventory default.
        if ceph_cluster.get('image-name'):
            params['image-name'] = ceph_cluster.get('image-name')
        else:
            params['image-name'] = inventory.get('instance').get('create').get('image-name')
        params['cluster-name'] = ceph_cluster.get('name')
        params['vm-size'] = inventory.get('instance').get('create').get('vm-size')
        # NOTE(review): 'root-login' is never assigned into params above, so
        # this check always falls through to True — confirm intent.
        if params.get('root-login') is False:
            params['root-login'] = False
        else:
            params['root-login'] = True
        # Loop-invariant: hoisted out of the per-node loop below, where the
        # original called os.getlogin() on every iteration.
        user = os.getlogin()
        with parallel() as p:
            # Node entries are named node1..node99; stop at the first gap.
            for node in range(1, 100):
                node = "node" + str(node)
                if not ceph_cluster.get(node):
                    break
                node_dict = ceph_cluster.get(node)
                node_params = params.copy()
                node_params['role'] = RolesContainer(node_dict.get('role'))
                role = node_params['role']
                # Node name: <cluster>-<instances_name|user>-<run>-<node>-<roles>
                if instances_name:
                    node_params['node-name'] = "{}-{}-{}-{}-{}".format(
                        node_params.get('cluster-name', 'ceph'), instances_name, run_id, node, '+'.join(role))
                else:
                    node_params['node-name'] = "{}-{}-{}-{}-{}".format(
                        node_params.get('cluster-name', 'ceph'), user, run_id, node, '+'.join(role))
                if node_dict.get('no-of-volumes'):
                    node_params['no-of-volumes'] = node_dict.get('no-of-volumes')
                    node_params['size-of-disks'] = node_dict.get('disk-size')
                # Per-node overrides of the shared parameters.
                if node_dict.get('image-name'):
                    node_params['image-name'] = node_dict.get('image-name')
                if node_dict.get('cloud-data'):
                    node_params['cloud-data'] = node_dict.get('cloud-data')
                p.spawn(setup_vm_node, node, ceph_nodes, **node_params)
    log.info("Done creating nodes")
    return ceph_nodes
Code example #6 (score: 0)
File: utils.py — Project: MeridianExplorer/ocs-ci
def create_ceph_nodes(cluster_conf,
                      inventory,
                      osp_cred,
                      run_id,
                      instances_name=None):
    """Provision Ceph VMs on OpenStack as described by the cluster config.

    Args:
        cluster_conf (dict): Cluster description; its 'ceph-cluster' entry
            holds per-node ('node1'..'node99') settings.
        inventory (dict): Instance inventory; replaced by the file named in
            ceph-cluster['inventory'] when that key is present.
        osp_cred (dict): Credentials; 'globals'/'openstack-credentials'
            supplies the OpenStack auth parameters.
        run_id: Unique run identifier embedded in every node name.
        instances_name (str): Optional fragment used in node names instead
            of the local login user.

    Returns:
        dict: Mapping populated by ``setup_vm_node`` for each spawned node
            (empty when the inventory does not request instance creation).
    """
    osp_glbs = osp_cred.get("globals")
    os_cred = osp_glbs.get("openstack-credentials")
    params = dict()
    ceph_cluster = cluster_conf.get("ceph-cluster")
    # A cluster-level inventory file overrides the inventory argument.
    if ceph_cluster.get("inventory"):
        inventory_path = os.path.abspath(ceph_cluster.get("inventory"))
        with open(inventory_path, "r") as inventory_stream:
            inventory = yaml.safe_load(inventory_stream)
    params["cloud-data"] = inventory.get("instance").get("setup")
    params["username"] = os_cred["username"]
    params["password"] = os_cred["password"]
    params["auth-url"] = os_cred["auth-url"]
    params["auth-version"] = os_cred["auth-version"]
    params["tenant-name"] = os_cred["tenant-name"]
    params["service-region"] = os_cred["service-region"]
    params["keypair"] = os_cred.get("keypair", None)
    ceph_nodes = dict()
    if inventory.get("instance").get("create"):
        # Cluster-level image-name wins over the inventory default.
        if ceph_cluster.get("image-name"):
            params["image-name"] = ceph_cluster.get("image-name")
        else:
            params["image-name"] = (
                inventory.get("instance").get("create").get("image-name"))
        params["cluster-name"] = ceph_cluster.get("name")
        params["vm-size"] = inventory.get("instance").get("create").get(
            "vm-size")
        # NOTE(review): 'root-login' is never assigned into params above, so
        # this check always falls through to True — confirm intent.
        if params.get("root-login") is False:
            params["root-login"] = False
        else:
            params["root-login"] = True
        with parallel() as p:
            # Node entries are named node1..node99; stop at the first gap.
            for node in range(1, 100):
                node = "node" + str(node)
                if not ceph_cluster.get(node):
                    break
                node_dict = ceph_cluster.get(node)
                node_params = params.copy()
                node_params["role"] = RolesContainer(node_dict.get("role"))
                role = node_params["role"]
                # NOTE(review): loop-invariant; could be hoisted above the loop.
                user = os.getlogin()
                # Node name: <cluster>-<instances_name|user>-<run>-<node>-<roles>
                if instances_name:
                    node_params["node-name"] = "{}-{}-{}-{}-{}".format(
                        node_params.get("cluster-name", "ceph"),
                        instances_name,
                        run_id,
                        node,
                        "+".join(role),
                    )
                else:
                    node_params["node-name"] = "{}-{}-{}-{}-{}".format(
                        node_params.get("cluster-name", "ceph"),
                        user,
                        run_id,
                        node,
                        "+".join(role),
                    )
                if node_dict.get("no-of-volumes"):
                    node_params["no-of-volumes"] = node_dict.get(
                        "no-of-volumes")
                    node_params["size-of-disks"] = node_dict.get("disk-size")
                # Per-node overrides of the shared parameters.
                if node_dict.get("image-name"):
                    node_params["image-name"] = node_dict.get("image-name")
                if node_dict.get("cloud-data"):
                    node_params["cloud-data"] = node_dict.get("cloud-data")
                p.spawn(setup_vm_node, node, ceph_nodes, **node_params)
    log.info("Done creating nodes")
    return ceph_nodes