def join_cluster(vapp, template_name, template_revision, target_nodes=None):
    script = "#!/usr/bin/env bash\n" \
             "kubeadm token create\n" \
             "ip route get 1 | awk '{print $NF;exit}'\n"
    node_names = get_node_names(vapp, NodeType.MASTER)
    master_result = execute_script_in_nodes(vapp=vapp,
                                            node_names=node_names,
                                            script=script)
    errors = _get_script_execution_errors(master_result)
    if errors:
        raise ScriptExecutionError(
            f"Join cluster script execution failed on master node "
            f"{node_names}:{errors}")
    init_info = master_result[0][1].content.decode().split()

    node_names = get_node_names(vapp, NodeType.WORKER)
    if target_nodes is not None:
        node_names = [name for name in node_names if name in target_nodes]
    tmp_script_filepath = get_local_script_filepath(template_name,
                                                    template_revision,
                                                    ScriptFile.NODE)
    tmp_script = utils.read_data_file(tmp_script_filepath, logger=LOGGER)
    script = tmp_script.format(token=init_info[0], ip=init_info[1])
    worker_results = execute_script_in_nodes(vapp=vapp,
                                             node_names=node_names,
                                             script=script)
    errors = _get_script_execution_errors(worker_results)
    if errors:
        raise ScriptExecutionError(
            f"Join cluster script execution failed on worker node "
            f"{node_names}:{errors}")
    for result in worker_results:
        if result[0] != 0:
            raise ClusterJoiningError(f"Couldn't join cluster:"
                                      f"\n{result[2].content.decode()}")


def init_cluster(vapp, template_name, template_revision):
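    """Run the master initialization script on the vApp's master node.

    Loads the master script for the given template name/revision from the
    local script files and executes it on the master node.

    :raises ScriptExecutionError: if script execution reports errors.
    :raises ClusterInitializationError: if the script exits non-zero.
    """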
    script_filepath = get_local_script_filepath(template_name,
                                                template_revision,
                                                ScriptFile.MASTER)
    script = utils.read_data_file(script_filepath, logger=LOGGER)
    node_names = get_node_names(vapp, NodeType.MASTER)
    result = execute_script_in_nodes(vapp=vapp,
                                     node_names=node_names,
                                     script=script)
    errors = _get_script_execution_errors(result)
    if errors:
        raise ScriptExecutionError(
            f"Initialize cluster script execution failed on node "
            f"{node_names}:{errors}")
    if result[0][0] != 0:
        raise ClusterInitializationError(
            f"Couldn\'t initialize cluster:\n{result[0][2].content.decode()}")


def delete_nodes_from_cluster(vapp, node_names):
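    """Remove the given nodes from the Kubernetes cluster.

    Executes ``kubectl delete node <name> ...`` on the master node
    (without waiting on the VMware tools check).

    :raises ScriptExecutionError: if script execution reports errors.
    :raises DeleteNodeError: if ``kubectl delete node`` exits non-zero.
    """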
    script = "#!/usr/bin/env bash\n" \
             "kubectl delete node "
    for node_name in node_names:
        script += f' {node_name}'
    script += '\n'
    master_node_names = get_node_names(vapp, NodeType.MASTER)
    result = execute_script_in_nodes(vapp=vapp,
                                     node_names=master_node_names,
                                     script=script,
                                     check_tools=False)
    errors = _get_script_execution_errors(result)
    if errors:
        raise ScriptExecutionError(
            f"Delete node from cluster script execution failed on node "
            f"{master_node_names}:{errors}")
    if result[0][0] != 0:
        raise DeleteNodeError(f"Couldn't delete node(s):"
                              f"\n{result[0][2].content.decode()}")


def get_master_ip(vapp):
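    """Return the IP address of the vApp's master node.

    Runs ``ip route get 1 | awk '{print $NF;exit}'`` on the master node
    and returns the address it reports.

    :raises ScriptExecutionError: if script execution reports errors.
    """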
    LOGGER.debug(f"Getting master IP for vapp: "
                 f"{vapp.get_resource().get('name')}")
    script = "#!/usr/bin/env bash\n" \
             "ip route get 1 | awk '{print $NF;exit}'\n" \

    node_names = get_node_names(vapp, NodeType.MASTER)
    result = execute_script_in_nodes(vapp=vapp,
                                     node_names=node_names,
                                     script=script,
                                     check_tools=False)
    errors = _get_script_execution_errors(result)
    if errors:
        raise ScriptExecutionError(
            f"Get master IP script execution failed on master node "
            f"{node_names}:{errors}")
    master_ip = result[0][1].content.decode().split()[0]
    LOGGER.debug(f"Retrieved master IP for vapp: "
                 f"{vapp.get_resource().get('name')}, ip: {master_ip}")
    return master_ip
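

# A minimal usage sketch (illustrative only; `my_vapp`, TEMPLATE_NAME and
# TEMPLATE_REVISION are hypothetical names, not part of this module):
#
#     init_cluster(my_vapp, TEMPLATE_NAME, TEMPLATE_REVISION)
#     master_ip = get_master_ip(my_vapp)
#     join_cluster(my_vapp, TEMPLATE_NAME, TEMPLATE_REVISION)
#     # scale down later by removing nodes on the master:
#     delete_nodes_from_cluster(my_vapp, ['node-ab12'])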


def add_nodes(client,
              num_nodes,
              node_type,
              org,
              vdc,
              vapp,
              catalog_name,
              template,
              network_name,
              num_cpu=None,
              memory_in_mb=None,
              storage_profile=None,
              ssh_key=None):
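    """Add ``num_nodes`` VMs of ``node_type`` to the cluster vApp.

    Resolves the template's catalog item href with a sys admin client (see
    the DEV NOTE below), clones the source VM into uniquely named VMs,
    applies CPU/memory overrides (falling back to the template defaults),
    powers the VMs on, and runs the NFSD setup script on NFS nodes.

    :return: dict with the final task and the VM specs, or None if
        ``num_nodes`` is less than 1.
    :raises NodeCreationError: on any failure, carrying the names of the
        nodes whose creation was attempted.
    """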
    specs = []
    try:
        if num_nodes < 1:
            return None

        # DEV NOTE: With api v33.0 and onwards, the get_catalog operation
        # fails for non admin users of an org that is not hosting the
        # catalog, even if the catalog is explicitly shared with the org in
        # question. This happens because, from api v33.0 onwards, the Org
        # XML no longer returns the hrefs of catalogs accessible to the org,
        # and typed queries hide the catalog link from non admin users.
        # As a workaround, we use a sys admin client to get the href and
        # pass it forward. Note that the catalog itself can still be
        # accessed by these non admin users; they just can't find its href
        # on their own.

        sys_admin_client = None
        try:
            sys_admin_client = vcd_utils.get_sys_admin_client()
            org_name = org.get_name()
            org_resource = sys_admin_client.get_org_by_name(org_name)
            org_sa = Org(sys_admin_client, resource=org_resource)
            catalog_item = org_sa.get_catalog_item(
                catalog_name, template[LocalTemplateKey.CATALOG_ITEM_NAME])
            catalog_item_href = catalog_item.Entity.get('href')
        finally:
            if sys_admin_client:
                sys_admin_client.logout()

        source_vapp = VApp(client, href=catalog_item_href)
        source_vm = source_vapp.get_all_vms()[0].get('name')
        if storage_profile is not None:
            storage_profile = vdc.get_storage_profile(storage_profile)

        cust_script = None
        if ssh_key is not None:
            cust_script = \
                "#!/usr/bin/env bash\n" \
                "if [ x$1=x\"postcustomization\" ];\n" \
                "then\n" \
                "mkdir -p /root/.ssh\n" \
                f"echo '{ssh_key}' >> /root/.ssh/authorized_keys\n" \
                "chmod -R go-rwx /root/.ssh\n" \
                "fi"

        for n in range(num_nodes):
            name = None
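            # vapp.get_vm raises when no VM has this name, which means the
            # randomly generated name is free; otherwise try another one.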
            while True:
                name = f"{node_type}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=4))}"  # noqa: E501
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'password_auto': True,
                'network': network_name,
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)

        task = vapp.add_vms(specs, power_on=False)
        client.get_task_monitor().wait_for_status(task)
        vapp.reload()

        if not num_cpu:
            num_cpu = template[LocalTemplateKey.CPU]
        if not memory_in_mb:
            memory_in_mb = template[LocalTemplateKey.MEMORY]
        for spec in specs:
            vm_name = spec['target_vm_name']
            vm_resource = vapp.get_vm(vm_name)
            vm = VM(client, resource=vm_resource)

            task = vm.modify_cpu(num_cpu)
            client.get_task_monitor().wait_for_status(task)

            task = vm.modify_memory(memory_in_mb)
            client.get_task_monitor().wait_for_status(task)

            task = vm.power_on()
            client.get_task_monitor().wait_for_status(task)
            vapp.reload()

            if node_type == NodeType.NFS:
                LOGGER.debug(f"Enabling NFS server on {vm_name}")
                script_filepath = get_local_script_filepath(
                    template[LocalTemplateKey.NAME],
                    template[LocalTemplateKey.REVISION], ScriptFile.NFSD)
                script = utils.read_data_file(script_filepath, logger=LOGGER)
                exec_results = execute_script_in_nodes(vapp=vapp,
                                                       node_names=[vm_name],
                                                       script=script)
                errors = _get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"VM customization script execution failed on node "
                        f"{vm_name}:{errors}")
    except Exception as e:
        # TODO: get details of the exception to determine cause of failure,
        # e.g. not enough resources available.
        node_list = [entry.get('target_vm_name') for entry in specs]
        raise NodeCreationError(node_list, str(e))

    vapp.reload()
    return {'task': task, 'specs': specs}


def add_nodes(client,
              num_nodes,
              node_type,
              org,
              vdc,
              vapp,
              catalog_name,
              template,
              network_name,
              num_cpu=None,
              memory_in_mb=None,
              storage_profile=None,
              ssh_key_filepath=None):
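    """Variant of add_nodes that resolves the catalog item via ``org``.

    Same flow as above, but looks up the catalog item directly through the
    caller's org. Note that ``ssh_key_filepath`` is written verbatim into
    ``/root/.ssh/authorized_keys``, so callers must pass the public key
    contents, not a path to a key file.
    """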
    specs = []
    try:
        if num_nodes < 1:
            return None
        catalog_item = org.get_catalog_item(
            catalog_name, template[LocalTemplateKey.CATALOG_ITEM_NAME])
        source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
        source_vm = source_vapp.get_all_vms()[0].get('name')
        if storage_profile is not None:
            storage_profile = vdc.get_storage_profile(storage_profile)

        cust_script = None
        if ssh_key_filepath is not None:
            cust_script = \
                "#!/usr/bin/env bash\n" \
                "if [ x$1=x\"postcustomization\" ];\n" \
                "then\n" \
                "mkdir -p /root/.ssh\n" \
                f"echo '{ssh_key_filepath}' >> /root/.ssh/authorized_keys\n" \
                "chmod -R go-rwx /root/.ssh\n" \
                "fi"

        for n in range(num_nodes):
            name = None
            while True:
                name = f"{node_type}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=4))}"  # noqa: E501
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'password_auto': True,
                'network': network_name,
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)

        task = vapp.add_vms(specs, power_on=False)
        client.get_task_monitor().wait_for_status(task)
        vapp.reload()

        if not num_cpu:
            num_cpu = template[LocalTemplateKey.CPU]
        if not memory_in_mb:
            memory_in_mb = template[LocalTemplateKey.MEMORY]
        for spec in specs:
            vm_name = spec['target_vm_name']
            vm_resource = vapp.get_vm(vm_name)
            vm = VM(client, resource=vm_resource)

            task = vm.modify_cpu(num_cpu)
            client.get_task_monitor().wait_for_status(task)

            task = vm.modify_memory(memory_in_mb)
            client.get_task_monitor().wait_for_status(task)

            task = vm.power_on()
            client.get_task_monitor().wait_for_status(task)
            vapp.reload()

            if node_type == NodeType.NFS:
                LOGGER.debug(f"Enabling NFS server on {vm_name}")
                script_filepath = get_local_script_filepath(
                    template[LocalTemplateKey.NAME],
                    template[LocalTemplateKey.REVISION], ScriptFile.NFSD)
                script = utils.read_data_file(script_filepath, logger=LOGGER)
                exec_results = execute_script_in_nodes(vapp=vapp,
                                                       node_names=[vm_name],
                                                       script=script)
                errors = _get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"VM customization script execution failed on node "
                        f"{vm_name}:{errors}")
    except Exception as e:
        # TODO: get details of the exception to determine cause of failure,
        # e.g. not enough resources available.
        node_list = [entry.get('target_vm_name') for entry in specs]
        raise NodeCreationError(node_list, str(e))

    vapp.reload()
    return {'task': task, 'specs': specs}


def add_nodes(qty, template, node_type, config, client, org, vdc, vapp,
              req_spec):
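    """Variant of add_nodes driven by a ``req_spec`` request dictionary.

    Assembles the guest customization script from init/common/end
    fragments, clones the VMs, reconfigures CPU/memory before the first
    power-on when requested, resets the root password via ``chpasswd``,
    and enables NFS on NFS nodes.
    """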
    try:
        if qty < 1:
            return None
        specs = []
        catalog_item = org.get_catalog_item(config['broker']['catalog'],
                                            template['catalog_item_name'])
        source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
        source_vm = source_vapp.get_all_vms()[0].get('name')
        storage_profile = req_spec.get(RequestKey.STORAGE_PROFILE_NAME)
        if storage_profile is not None:
            storage_profile = vdc.get_storage_profile(storage_profile)

        cust_script_common = ''

        # The shebang must be the very first line of the generated script.
        cust_script_init = \
"""#!/usr/bin/env bash
if [ x$1 = x"postcustomization" ];
then
""" # noqa: E128

        cust_script_end = \
"""
fi
"""  # noqa: E128

        ssh_key_filepath = req_spec.get(RequestKey.SSH_KEY_FILEPATH)
        if ssh_key_filepath is not None:
            cust_script_common += \
f"""
mkdir -p /root/.ssh
echo '{ssh_key_filepath}' >> /root/.ssh/authorized_keys
chmod -R go-rwx /root/.ssh
""" # noqa

        if cust_script_common == '':
            cust_script = None
        else:
            cust_script = cust_script_init + cust_script_common + \
                cust_script_end
        for n in range(qty):
            name = None
            while True:
                name = f"{node_type}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=4))}"  # noqa: E501
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'password_auto': True,
                'network': req_spec.get(RequestKey.NETWORK_NAME),
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)

        num_cpu = req_spec.get(RequestKey.NUM_CPU)
        mb_memory = req_spec.get(RequestKey.MB_MEMORY)
        configure_hw = bool(num_cpu or mb_memory)
        task = vapp.add_vms(specs, power_on=not configure_hw)
        # TODO(get details of the exception like not enough resources avail)
        client.get_task_monitor().wait_for_status(task)
        vapp.reload()
        if configure_hw:
            for spec in specs:
                vm_resource = vapp.get_vm(spec['target_vm_name'])
                if num_cpu:
                    vm = VM(client, resource=vm_resource)
                    task = vm.modify_cpu(num_cpu)
                    client.get_task_monitor().wait_for_status(task)
                if mb_memory:
                    vm = VM(client, resource=vm_resource)
                    task = vm.modify_memory(mb_memory)
                    client.get_task_monitor().wait_for_status(task)
                vm = VM(client, resource=vm_resource)
                task = vm.power_on()
                client.get_task_monitor().wait_for_status(task)
            vapp.reload()

        for spec in specs:
            vm_resource = vapp.get_vm(spec['target_vm_name'])
            # Look up this VM's auto-generated admin password here rather
            # than relying on the loop variable leaking from the block above.
            password = vapp.get_admin_password(spec['target_vm_name'])
            command = \
                f"/bin/echo \"root:{template['admin_password']}\" | chpasswd"
            nodes = [vm_resource]
            execute_script_in_nodes(config,
                                    vapp,
                                    password,
                                    command,
                                    nodes,
                                    check_tools=True,
                                    wait=False)
            if node_type == NodeType.NFS:
                LOGGER.debug(
                    f"enabling NFS server on {spec['target_vm_name']}")
                script_filepath = get_local_script_filepath(
                    template['name'], template['revision'], ScriptFile.NFSD)
                script = read_data_file(script_filepath, logger=LOGGER)
                exec_results = execute_script_in_nodes(
                    config, vapp, template['admin_password'], script, nodes)
                errors = get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"Script execution failed on node "
                        f"{spec['target_vm_name']}:{errors}")
    except Exception as e:
        node_list = [entry.get('target_vm_name') for entry in specs]
        raise NodeCreationError(node_list, str(e))
    return {'task': task, 'specs': specs}


def add_nodes(qty, template, node_type, config, client, org, vdc, vapp, body):
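    """Variant of add_nodes driven by the raw request ``body`` dictionary.

    Follows the same flow as the ``req_spec`` variant, reading network,
    storage profile, CPU/memory, and SSH key settings directly from
    ``body``.
    """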
    try:
        if qty < 1:
            return None
        specs = []
        catalog_item = org.get_catalog_item(config['broker']['catalog'],
                                            template['catalog_item'])
        source_vapp = VApp(client, href=catalog_item.Entity.get('href'))
        source_vm = source_vapp.get_all_vms()[0].get('name')
        storage_profile = None
        if 'storage_profile' in body and body['storage_profile'] is not None:
            storage_profile = vdc.get_storage_profile(body['storage_profile'])
        cust_script_init = \
"""#!/usr/bin/env bash
if [ x$1 = x"postcustomization" ];
then
""" # NOQA
        cust_script_common = ''
        cust_script_end = \
"""
fi
"""  # NOQA
        if 'ssh_key' in body and body['ssh_key'] is not None:
            cust_script_common += \
"""
mkdir -p /root/.ssh
echo '{ssh_key}' >> /root/.ssh/authorized_keys
chmod -R go-rwx /root/.ssh
""".format(ssh_key=body['ssh_key'])  # NOQA

        if cust_script_common == '':
            cust_script = None
        else:
            cust_script = cust_script_init + cust_script_common + cust_script_end
        for n in range(qty):
            name = None
            while True:
                name = '%s-%s' % (node_type, ''.join(
                    random.choices(string.ascii_lowercase + string.digits,
                                   k=4)))
                try:
                    vapp.get_vm(name)
                except Exception:
                    break
            spec = {
                'source_vm_name': source_vm,
                'vapp': source_vapp.resource,
                'target_vm_name': name,
                'hostname': name,
                'network': body['network'],
                'ip_allocation_mode': 'pool'
            }
            if cust_script is not None:
                spec['cust_script'] = cust_script
            if storage_profile is not None:
                spec['storage_profile'] = storage_profile
            specs.append(spec)
        reconfigure_hw = body.get('cpu') is not None or \
            body.get('memory') is not None
        task = vapp.add_vms(specs, power_on=not reconfigure_hw)
        # TODO(get details of the exception like not enough resources avail)
        client.get_task_monitor().wait_for_status(task)
        if reconfigure_hw:
            vapp.reload()
            for spec in specs:
                vm_resource = vapp.get_vm(spec['target_vm_name'])
                if 'cpu' in body and body['cpu'] is not None:
                    vm = VM(client, resource=vm_resource)
                    task = vm.modify_cpu(body['cpu'])
                    client.get_task_monitor().wait_for_status(task)
                if 'memory' in body and body['memory'] is not None:
                    vm = VM(client, resource=vm_resource)
                    task = vm.modify_memory(body['memory'])
                    client.get_task_monitor().wait_for_status(task)
                vm = VM(client, resource=vm_resource)
                task = vm.power_on()
                client.get_task_monitor().wait_for_status(task)
        password = source_vapp.get_admin_password(source_vm)
        vapp.reload()
        for spec in specs:
            vm_resource = vapp.get_vm(spec['target_vm_name'])
            command = '/bin/echo "root:{password}" | chpasswd'.format(
                password=template['admin_password'])
            nodes = [vm_resource]
            execute_script_in_nodes(config,
                                    vapp,
                                    password,
                                    command,
                                    nodes,
                                    check_tools=True,
                                    wait=False)
            if node_type == TYPE_NFS:
                LOGGER.debug('Enabling NFS server on %s' %
                             spec['target_vm_name'])
                script = get_data_file('nfsd-%s.sh' % template['name'])
                exec_results = execute_script_in_nodes(
                    config, vapp, template['admin_password'], script, nodes)
                errors = get_script_execution_errors(exec_results)
                if errors:
                    raise ScriptExecutionError(
                        f"Script execution failed on node {spec['target_vm_name']}:{errors}"
                    )
    except Exception as e:
        node_list = [entry.get('target_vm_name') for entry in specs]
        raise NodeCreationError(node_list, str(e))
    return {'task': task, 'specs': specs}