Example #1
def create_server_group(name=None,
                        policy='affinity',
                        rule=None,
                        fail_ok=False,
                        auth_info=None,
                        con_ssh=None,
                        rtn_exist=False,
                        field='id'):
    """
    Create a server group with given criteria

    Args:
        name (str): name of the server group
        policy (str): affinity or anti_affinity
        rule (str|None): max_server_per_host can be specified when
        policy=anti-affinity
        fail_ok (bool):
        auth_info (dict):
        con_ssh (SSHClient):
        rtn_exist (bool): Whether to return existing server group that
        matches the given name
        field (str): id or name

    Returns (tuple): (rtn_code (int), err_msg_or_srv_grp_id (str))
        - (0, <server_group_id>)    # server group created successfully
        - (1, <stderr>)     # create server group cli rejected
        - (-1, <server_group_id>)   # existing server group with matching name returned (rtn_exist=True)

    """
    # process server group metadata
    if name and rtn_exist:
        existing_grp = get_server_groups(name=name,
                                         strict=False,
                                         con_ssh=con_ssh,
                                         auth_info=auth_info,
                                         field=field)
        if existing_grp:
            LOG.debug("Returning existing server group {}".format(
                existing_grp[0]))
            return -1, existing_grp[0]

    # process server group name and policy
    if not name:
        name = 'grp_{}'.format(policy.replace('-', '_'))
    name = common.get_unique_name(name_str=name)
    args = '{}{} {}'.format('--rule {} '.format(rule) if rule else '', name,
                            policy.replace('_', '-'))

    LOG.info("Creating server group with args: {}...".format(args))
    exit_code, output = cli.nova('server-group-create',
                                 args,
                                 ssh_client=con_ssh,
                                 fail_ok=fail_ok,
                                 auth_info=auth_info)
    if exit_code > 0:
        return 1, output

    table_ = table_parser.table(output)
    srv_grp_id = table_parser.get_values(table_, field)[0]
    LOG.info("Server group {} created successfully.".format(name))
    return 0, srv_grp_id
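
# Usage sketch (not part of the original helper; the default auth/ssh context
# being usable when auth_info/con_ssh are None is an assumption): create or
# reuse an anti-affinity server group.
def _example_create_server_group_usage(con_ssh=None):
    code, srv_grp = create_server_group(name='grp_anti_affinity',
                                        policy='anti_affinity',
                                        rtn_exist=True,
                                        con_ssh=con_ssh)
    # -1: an existing group with a matching name was reused; 0: newly created
    assert code in (-1, 0), "Server group create rejected: {}".format(srv_grp)
    return srv_grp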
Example #2
def test_orphan_audit(orphan_audit_setup, clear_virsh_vms):
    """
    Tests the orphan audit by booting an instance directly on a compute node to bypass nova, waiting up to 5.5
    minutes and ensuring that it gets cleaned up (TC2990 on rally)

    Test setup:
        - SCP two files to a compute node:
            - The DEFAULT_GUEST image currently on the controller node
            - An XML file that is on the test server (orphan_guest.xml) that will be used to define and start a VM
              with virsh
        - Change domain type in XML file to qemu if the test is being run in a vbox

    Test steps:
        - Change the vm name in the XML file to an auto-generated name
        - SSH onto the node hosting the VM and run virsh define orphan_guest.xml and then virsh start Orphan_VM
          to start the VM
            - Assert that vm creation was successful by checking output of virsh start. Output of virsh list
              is logged as well
        - Check virsh list output to make sure that openstack has automatically cleaned up the orphan instance by
          5.5 minutes. This check is periodically done every 10 seconds to a maximum of 5.5 minutes. The test
          immediately passes if any of the checks reports the absence of the orphan_vm and fails if the vm is
          still present in the list after 5.5 minutes.

    Test Teardown:
        - Delete created VMs

    """
    vm_host = orphan_audit_setup

    # Create standalone vm
    vm_name = common.get_unique_name('orphan', resource_type='vm')

    LOG.tc_step("Change orphan_vm name to an auto-generated name")
    with host_helper.ssh_to_host(vm_host) as host_ssh:
        host_ssh.exec_sudo_cmd(
            "sed -r -i 's#<name>.*</name>#<name>{}</name>#g' orphan_guest.xml".
            format(vm_name))

        LOG.tc_step("Create a simple orphan vm")
        create_simple_orphan(host_ssh, vm_host, vm_name)

        list_cmd = 'virsh list --all'
        host_ssh.exec_sudo_cmd(list_cmd)

        # wait and check for deletion
        LOG.tc_step("Check for deletion of vm")
        assert wait_for_deletion(
            host_ssh,
            vm_name), "{} is still in virsh list after 330 seconds".format(
                vm_name)

    global generated_vm_dict
    generated_vm_dict[vm_host].remove(vm_name)
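
# The wait_for_deletion helper used above is not shown on this page; the sketch
# below is a hypothetical equivalent of the polling described in the docstring
# (check virsh list every 10 seconds, up to 330 seconds), assuming
# exec_sudo_cmd returns a (rc, output) pair like exec_cmd does.
def _wait_for_deletion_sketch(host_ssh, vm_name, timeout=330, check_interval=10):
    import time
    end_time = time.time() + timeout
    while time.time() < end_time:
        output = host_ssh.exec_sudo_cmd('virsh list --all')[1]
        if vm_name not in output:
            return True
        time.sleep(check_interval)
    return False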
Example #3
def test_dynamic_vxlan_functional(version, mode):
    """
        Vxlan feature test cases

        Test Steps:
            - Make sure Vxlan provider net is configured only on Internal net
            - Find out an internal network that matches the vxlan mode and IP version
            - Use the mgmt-net and the internal net to create vms for tenant-1 and tenant-2
            - Make sure the vms are placed on separate hosts, achieved with host-aggregates
            - ssh to the compute where the vm is hosted to check the vshell stats
            - Ping from the vm and check the stats for known-vtep on the compute
            - Ping from the vm to an unknown IP and check the compute for stats


        Test Teardown:
            - Delete vms, volumes created

    """
    vxlan_provider_name = 'group0-data0b'
    vif_model = 'avp'
    providernets = system_helper.get_data_networks(field='name', network_type='vxlan')
    if not providernets or (len(providernets) > 1) or (vxlan_provider_name not in providernets):
        skip("Vxlan provider-net not configured or Vxlan provider-net configured on more than one provider net\
         or not configurd on internal net")

    # get the id of the provider net
    vxlan_provider_net_id = system_helper.get_data_networks(field='id', network_type='vxlan')
    vm_ids = []

    # get 2 computes so we can create the aggregate and force vm occupancy
    computes = host_helper.get_up_hypervisors()

    if len(computes) < 2:
        skip(" Need at least 2 computes to run the Vxlan test cases")

    aggregate_name = 'vxlan'
    vxlan_computes = computes[0:2]

    # create aggregate with 2 computes
    ret_val = nova_helper.create_aggregate(name=aggregate_name, avail_zone=aggregate_name)[1]
    assert ret_val == aggregate_name, "Aggregate is not created as expected."
    ResourceCleanup.add('aggregate', aggregate_name)

    nova_helper.add_hosts_to_aggregate(aggregate=aggregate_name, hosts=vxlan_computes)

    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    LOG.tc_step("Getting Internal net ids.")
    internal_net_ids = network_helper.get_internal_net_ids_on_vxlan(vxlan_provider_net_id=vxlan_provider_net_id,
                                                                    ip_version=version, mode=mode)
    if not internal_net_ids:
        skip("No networks found for ip version {} on the vxlan provider net".format(version))

    LOG.tc_step("Creating vms for both tenants.")
    primary_tenant = Tenant.get_primary()
    other_tenant = Tenant.get_secondary()

    for auth_info, vm_host in zip([primary_tenant, other_tenant], vxlan_computes):
        mgmt_net_id = network_helper.get_mgmt_net_id(auth_info=auth_info)
        nics = [{'net-id': mgmt_net_id},
                {'net-id': internal_net_ids[0], 'vif-model': vif_model}]
        vm_name = common.get_unique_name(name_str='vxlan')
        vm_ids.append(vm_helper.boot_vm(name=vm_name, vm_host=vm_host, nics=nics, avail_zone=aggregate_name,
                                        auth_info=auth_info, cleanup='function')[1])

    # make sure VMs are not on the same compute (double checking, just in case):
    if vm_helper.get_vm_host(vm_id=vm_ids[0]) == vm_helper.get_vm_host(vm_id=vm_ids[1]):
        vm_helper.cold_migrate_vm(vm_id=vm_ids[0])

    filter_known_vtep = 'packets-unicast'
    filter_stat_at_boot = 'packets-multicast'
    filter_unknown_vtep = 'packets-multicast'

    if mode == 'static':
        filter_stat_at_boot = 'packets-unicast'
        filter_unknown_vtep = 'packets-unicast'

    LOG.tc_step("Checking stats on computes after vms are launched.")
    for compute in computes:
        stats_after_boot_vm = get_vxlan_endpoint_stats(compute, field=filter_stat_at_boot)
        if len(stats_after_boot_vm) == 3:
            stats = int(stats_after_boot_vm[1]) + int(stats_after_boot_vm[2])
            LOG.info("Got the stats for packets {} after vm launch: {}".format(filter_stat_at_boot, stats))
        elif len(stats_after_boot_vm) == 2:
            stats = int(stats_after_boot_vm[1])
            LOG.info("Got the stats for packets {} after vm launch: {}".format(filter_stat_at_boot, stats))
        else:
            assert 0, "Failed to get stats from compute"
        assert 0 < int(stats), "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # Ping b/w vm over Internal nets and check stats, ping from 2nd vm
    LOG.tc_step("Ping between two vms over internal network")
    vm_helper.ping_vms_from_vm(to_vms=vm_ids[0], from_vm=vm_ids[1], net_types=['internal'])

    stats_after_ping = get_vxlan_endpoint_stats(computes[0], field=filter_known_vtep)
    if not stats_after_ping:
        assert "Compute stats are empty"

    LOG.tc_step("Checking stats on computes after vm ping over the internal net.")
    if len(stats_after_ping) == 3:
        stats_known_vtep = int(stats_after_ping[1]) + int(stats_after_ping[2])
        LOG.info("Got the stats for packets {} after ping: {}".format(filter_known_vtep, stats_known_vtep))
    elif len(stats_after_ping) == 2:
        stats_known_vtep = int(stats_after_ping[1])
        LOG.info("Got the stats for packets {} after ping: {}".format(filter_known_vtep, stats_known_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < int(stats_known_vtep), "stats are not incremented as expected"

    # clear stats
    LOG.tc_step("Clearing vxlan-endpoint-stats on computes: {}".format(computes))
    for compute in computes:
        assert 0 == clear_vxlan_endpoint_stats(compute), "clear stats failed"

    # ping unknown IP over the internal net and check stats
    LOG.tc_step("Ping to an unknown IP from vms over internal network")
    unknown_ip = '10.10.10.30'
    with vm_helper.ssh_to_vm_from_natbox(vm_ids[1]) as vm2_ssh:
        LOG.tc_step("Ping unknown ip from guest")
        cmd = 'ping -I eth1 -c 5 {}'.format(unknown_ip)
        code, output = vm2_ssh.exec_cmd(cmd=cmd, expect_timeout=60)
        assert int(code) > 0, "Expected to see 100% ping failure"

    LOG.tc_step("Checking stats on computes after vm ping on unknown IP.")
    stats_after_ping_unknown_vtep = get_vxlan_endpoint_stats(computes[1], field=filter_unknown_vtep)
    if not stats_after_ping_unknown_vtep:
        assert 0, "Compute stats are empty"

    if len(stats_after_ping_unknown_vtep) == 3:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1]) + int(stats_after_ping_unknown_vtep[2])
        LOG.info("Got the stats for packets {} after ping unknown vtep: {}".format(filter_unknown_vtep,
                                                                                   stats_unknown_vtep))
    elif len(stats_after_ping_unknown_vtep) == 2:
        stats_unknown_vtep = int(stats_after_ping_unknown_vtep[1])
        LOG.info("Got the stats for packets {} after ping unknown vtep: {}".format(filter_unknown_vtep,
                                                                                   stats_unknown_vtep))
    else:
        assert 0, "Failed to get stats from compute"
    assert 0 < int(stats_unknown_vtep), "stats are not incremented as expected"
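
# The three stats checks in this test repeat the same 2-vs-3 element handling;
# a hypothetical helper (not part of the original test, and assuming index 0 of
# the returned row is a label while the remaining entries are numeric counters)
# could factor it out:
def _sum_vxlan_stat_counters(stats_row):
    assert len(stats_row) in (2, 3), "Failed to get stats from compute"
    return sum(int(val) for val in stats_row[1:])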
Example #4
def create_user(name=None, field='name', domain=None, project=None,
                project_domain=None, rtn_exist=None,
                password=None, email=None,
                description=None, enable=None,
                auth_info=Tenant.get('admin'), fail_ok=False, con_ssh=None):
    """
    Create an openstack user
    Args:
        name (str|None):
        field: name or id
        domain:
        project (str|None): default project
        project_domain:
        rtn_exist (bool)
        password:
        email:
        description:
        enable:
        auth_info:
        fail_ok:
        con_ssh:

    Returns (tuple):
        (0, <user>)
        (1, <std_err>)

    """
    if not password:
        password = HostLinuxUser.get_password()

    if not name:
        name = common.get_unique_name(name_str='user')

    LOG.info("Create/Show openstack user {}".format(name))
    arg_dict = {
        'domain': domain,
        'project': project,
        'project-domain': project_domain,
        'password': password,
        'email': email,
        'description': description,
        'enable': True if enable is True else None,
        'disable': True if enable is False else None,
        'or-show': rtn_exist,
    }

    arg_str = '{} {}'.format(common.parse_args(args_dict=arg_dict), name)

    code, output = cli.openstack('user create', arg_str, ssh_client=con_ssh,
                                 fail_ok=fail_ok, auth_info=auth_info)
    if code > 0:
        return 1, output

    table_ = table_parser.table(output)
    username = table_parser.get_value_two_col_table(table_, field='name')
    user = username if field == 'name' else table_parser.get_value_two_col_table(table_,
                                                                                 field=field)

    is_platform = auth_info and auth_info.get('platform')
    keystone = 'platform' if is_platform else 'containerized'
    dictname = user + '_platform' if is_platform else user
    existing_auth = Tenant.get(dictname)
    if existing_auth:
        if existing_auth['user'] != username:
            raise ValueError('Tenant.{} already exists for a different user {}'.format(
                dictname, existing_auth['user']))
        Tenant.update(dictname, username=username, password=password, tenant=project,
                      platform=is_platform)
    else:
        Tenant.add(username=username, tenantname=project, dictname=dictname, password=password,
                   platform=is_platform)
        LOG.info('Tenant.{} for {} keystone user {} is added'.format(dictname, keystone, user))

    LOG.info("{} keystone user {} successfully created/showed".format(keystone, user))
    return 0, user
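
# Usage sketch (illustrative only; the project name is an assumption): create
# the user if missing, or show it when it already exists (--or-show).
def _example_create_user_usage():
    code, user = create_user(name='tester', project='tenant1', rtn_exist=True)
    assert code == 0, user
    return user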
Example #5
def verify_basic_template(template_name=None,
                          con_ssh=None,
                          auth_info=None,
                          delete_after_swact=False):
    """
        Create/Delete heat stack and verify the resources are created/deleted as expected
            - Create a heat stack with the given template
            - Verify heat stack is created successfully
            - Verify heat resources are created
            - Delete Heat stack and verify resource deletion
        Args:
            con_ssh (SSHClient): If None, active controller ssh will be used.
            template_name (str): template to be used to create heat stack.
            auth_info (dict): Tenant dict. If None, primary tenant will be used.
            delete_after_swact
    """

    t_name, yaml = template_name.split('.')
    params = getattr(Heat, t_name)['params']
    heat_user = getattr(Heat, t_name)['heat_user']
    to_verify = getattr(Heat, t_name)['verify']
    if heat_user == 'admin':
        auth_info = Tenant.get('admin')

    table_ = table_parser.table(cli.heat('stack-list', auth_info=auth_info)[1])
    names = table_parser.get_values(table_, 'stack_name')
    stack_name = common.get_unique_name(t_name, existing_names=names)
    template_path = os.path.join(ProjVar.get_var('USER_FILE_DIR'), HEAT_PATH,
                                 template_name)
    if params:
        params = {
            param_: heat_helper.get_heat_params(param_name=param_)
            for param_ in params
        }

    LOG.tc_step("Creating Heat Stack using template %s", template_name)
    heat_helper.create_stack(stack_name=stack_name,
                             template=template_path,
                             parameters=params,
                             cleanup='function',
                             auth_info=auth_info,
                             con_ssh=con_ssh)

    for item in to_verify:
        LOG.tc_step("Verifying Heat created resources %s for stack %s", item,
                    stack_name)
        verify_heat_resource(to_verify=item,
                             template_name=t_name,
                             stack_name=stack_name,
                             auth_info=auth_info)
    LOG.info("Stack {} resources are created as expected.".format(stack_name))

    if hasattr(HeatUpdate, t_name):
        LOG.tc_step("Updating stack %s", stack_name)
        update_stack(stack_name,
                     template_name,
                     ssh_client=con_ssh,
                     auth_info=auth_info,
                     fail_ok=False)

    if delete_after_swact:
        host_helper.swact_host()

    LOG.tc_step("Delete heat stack {} ".format(stack_name))
    heat_helper.delete_stack(stack=stack_name,
                             auth_info=auth_info,
                             fail_ok=False)

    LOG.info("Stack {} deleted successfully.".format(stack_name))

    LOG.tc_step(
        "Verifying resource deletion after heat stack {} is deleted".format(
            stack_name))
    for item in to_verify:
        LOG.tc_step("Verifying Heat resources deletion %s for stack %s", item,
                    stack_name)
        code, msg = verify_heat_resource(to_verify=item,
                                         template_name=t_name,
                                         stack_name=stack_name,
                                         fail_ok=True,
                                         auth_info=auth_info)
        assert 1 == code, "Heat resource {} still exist after stack {} deletion".format(
            item, stack_name)
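
# Usage sketch (illustrative only; the template file name below is an
# assumption, not taken from the Heat constants used by this keyword):
def _example_verify_basic_template():
    verify_basic_template(template_name='OS_Cinder_Volume.yaml',
                          delete_after_swact=False)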
Example #6
def create_aggregate(field='name',
                     name=None,
                     avail_zone=None,
                     properties=None,
                     check_first=True,
                     fail_ok=False,
                     con_ssh=None,
                     auth_info=Tenant.get('admin')):
    """
    Add an aggregate with given name and availability zone.

    Args:
        field (str): name or id
        name (str): name for aggregate to create
        avail_zone (str|None):
        properties (dict|None)
        check_first (bool)
        fail_ok (bool):
        con_ssh (SSHClient):
        auth_info (dict):

    Returns (tuple):
        (0, <rtn_val>)          -- aggregate successfully created
        (1, <stderr>)           -- cli rejected
        (2, "Created aggregate is not as specified")    -- name and/or availability zone mismatch

    """
    if not name:
        existing_names = get_aggregates(field='name')
        name = common.get_unique_name(name_str='cgcsauto',
                                      existing_names=existing_names)

    args_dict = {
        '--zone': avail_zone,
        '--property': properties,
    }
    args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True), name)

    if check_first:
        aggregates_ = get_aggregates(field=field,
                                     name=name,
                                     avail_zone=avail_zone)
        if aggregates_:
            LOG.warning(
                "Aggregate {} already exists. Do nothing.".format(name))
            return -1, aggregates_[0]

    LOG.info("Adding aggregate {}".format(name))
    res, out = cli.openstack('aggregate create',
                             args,
                             ssh_client=con_ssh,
                             fail_ok=fail_ok,
                             auth_info=auth_info)
    if res == 1:
        return res, out

    out_tab = table_parser.table(out)

    succ_msg = "Aggregate {} is successfully created".format(name)
    LOG.info(succ_msg)
    return 0, table_parser.get_value_two_col_table(out_tab, field)
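
# Usage sketch (illustrative only): create an aggregate together with a
# matching availability zone, reusing it when one already exists
# (check_first=True returns -1 plus the existing value in that case).
def _example_create_aggregate_usage():
    code, aggregate = create_aggregate(name='cgcsauto_vxlan',
                                       avail_zone='cgcsauto_vxlan',
                                       check_first=True)
    assert code in (-1, 0), aggregate
    return aggregate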
Example #7
def create_flavor(name=None,
                  flavor_id=None,
                  vcpus=1,
                  ram=1024,
                  root_disk=None,
                  ephemeral=None,
                  swap=None,
                  is_public=None,
                  rxtx_factor=None,
                  project=None,
                  project_domain=None,
                  description=None,
                  guest_os=None,
                  fail_ok=False,
                  auth_info=Tenant.get('admin'),
                  con_ssh=None,
                  storage_backing=None,
                  rtn_id=True,
                  cleanup=None,
                  add_default_specs=True,
                  properties=None):
    """
    Create a flavor with given criteria.

    Args:
        name (str): substring of flavor name. Whole name will be <name>-<auto_count>.
            e.g., 'myflavor-1'. If None, name will be set to 'flavor'.
        flavor_id (str): auto generated by default unless specified.
        vcpus (int):
        ram (int):
        root_disk (int):
        ephemeral (int):
        swap (int|None):
        is_public (bool):
        rxtx_factor (str):
        project
        project_domain
        description
        guest_os (str|None): guest name such as 'tis-centos-guest' or None - default tis
            guest assumed
        fail_ok (bool): whether it's okay to fail to create a flavor. Default to False.
        auth_info (dict): This is set to Admin by default. Can be set to other tenant for
            negative test.
        con_ssh (SSHClient):
        storage_backing (str): storage backing in extra flavor. Auto set storage backing based on
            system config if None. Valid values: 'local_image', 'remote'
        rtn_id (bool): return id or name
        cleanup (str|None): cleanup scope. function, class, module, or session
        add_default_specs (bool): Whether to automatically add extra specs that are needed to
            launch vm
        properties (str|list|dict)

    Returns (tuple):
        (0, <flavor_id/name>, <storage_backing>): flavor created successfully
        (1, <stderr>): create flavor cli rejected

    """

    table_ = table_parser.table(
        cli.openstack('flavor list', ssh_client=con_ssh,
                      auth_info=auth_info)[1])
    existing_names = table_parser.get_column(table_, 'Name')

    if name is None:
        name = 'flavor'
    flavor_name = common.get_unique_name(name_str=name,
                                         existing_names=existing_names,
                                         resource_type='flavor')

    if root_disk is None:
        if not guest_os:
            guest_os = GuestImages.DEFAULT['guest']
        root_disk = GuestImages.IMAGE_FILES[guest_os][1]

    args_dict = {
        '--ephemeral': ephemeral,
        '--swap': swap,
        '--rxtx-factor': rxtx_factor,
        '--disk': root_disk,
        '--ram': ram,
        '--vcpus': vcpus,
        '--id': flavor_id,
        '--project': project,
        '--project-domain': project_domain,
        '--description': description,
        '--public': True if is_public else None,
        '--private': True if is_public is False else None,
        '--property': properties,
    }
    args = '{} {}'.format(common.parse_args(args_dict, repeat_arg=True),
                          flavor_name)

    LOG.info("Creating flavor {}...".format(flavor_name))
    LOG.info("openstack flavor create option: {}".format(args))
    exit_code, output = cli.openstack('flavor create',
                                      args,
                                      ssh_client=con_ssh,
                                      fail_ok=fail_ok,
                                      auth_info=auth_info)
    if exit_code > 1:
        return 1, output

    table_ = table_parser.table(output)
    flavor_id = table_parser.get_value_two_col_table(table_, 'id')
    LOG.info("Flavor {} created successfully.".format(flavor_name))

    if cleanup:
        ResourceCleanup.add('flavor', flavor_id, scope=cleanup)

    if add_default_specs:
        extra_specs = {FlavorSpec.MEM_PAGE_SIZE: 'large'}
        # extra_specs = {FlavorSpec.MEM_PAGE_SIZE: 'small'}
        default_flavor_backing = ProjVar.get_var('DEFAULT_INSTANCE_BACKING')
        sys_inst_backing = ProjVar.get_var('INSTANCE_BACKING')
        if not default_flavor_backing:
            from keywords import host_helper
            sys_inst_backing = host_helper.get_hosts_per_storage_backing(
                auth_info=auth_info, con_ssh=con_ssh, refresh=True)
        configured_backings = [
            backing for backing in sys_inst_backing
            if sys_inst_backing.get(backing)
        ]
        LOG.debug(
            "configured backing:{} sys inst backing: {}, required storage backing: {}"
            .format(configured_backings, sys_inst_backing, storage_backing))

        if storage_backing and storage_backing not in configured_backings:
            raise ValueError(
                'Required local_storage {} is not configured on any nova hypervisor'
                .format(storage_backing))

        if len(configured_backings) > 1:
            extra_specs[FlavorSpec.STORAGE_BACKING] = storage_backing if storage_backing else \
                ProjVar.get_var('DEFAULT_INSTANCE_BACKING')

        if extra_specs:
            LOG.info("Setting flavor specs: {}".format(extra_specs))
            set_flavor(flavor_id,
                       con_ssh=con_ssh,
                       auth_info=auth_info,
                       **extra_specs)

    flavor = flavor_id if rtn_id else flavor_name
    return 0, flavor, storage_backing
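
# Usage sketch (illustrative only). Note that the success path returns a
# 3-tuple (code, flavor, storage_backing), while a rejected cli call with
# fail_ok=True returns (1, <stderr>).
def _example_create_flavor_usage():
    code, flavor, storage_backing = create_flavor(name='myflavor', vcpus=2,
                                                  ram=2048, cleanup='function')
    assert code == 0, flavor
    return flavor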
Example #8
def create_image(name=None,
                 image_id=None,
                 source_image_file=None,
                 volume=None,
                 visibility='public',
                 force=None,
                 store=None,
                 disk_format=None,
                 container_format=None,
                 min_disk=None,
                 min_ram=None,
                 tags=None,
                 protected=None,
                 project=None,
                 project_domain=None,
                 timeout=ImageTimeout.CREATE,
                 con_ssh=None,
                 auth_info=Tenant.get('admin'),
                 fail_ok=False,
                 ensure_sufficient_space=True,
                 sys_con_for_dc=True,
                 wait_for_subcloud_sync=True,
                 cleanup=None,
                 hw_vif_model=None,
                 **properties):
    """
    Create an image with given criteria.

    Args:
        name (str): string to be included in image name
        image_id (str): id for the image to be created
        source_image_file (str|None): local image file to create image from.
            The default guest image will be used if unset
        volume (str)
        disk_format (str): One of these: ami, ari, aki, vhd, vmdk, raw,
            qcow2, vdi, iso
        container_format (str):  One of these: ami, ari, aki, bare, ovf
        min_disk (int): Minimum size of disk needed to boot image (in gigabytes)
        min_ram (int): Minimum amount of ram needed to boot image (in
            megabytes)
        visibility (str): public|private|shared|community
        protected (bool): Prevent image from being deleted.
        store (str): Store to upload image to
        force (bool)
        tags (str|tuple|list)
        project (str|None)
        project_domain (str|None)
        timeout (int): max seconds to wait for cli return
        con_ssh (SSHClient):
        auth_info (dict|None):
        fail_ok (bool):
        ensure_sufficient_space (bool)
        sys_con_for_dc (bool): create image on system controller if it's
            distributed cloud
        wait_for_subcloud_sync (bool)
        cleanup (str|None): add to teardown list. 'function', 'class',
            'module', 'session', or None
        hw_vif_model (None|str): if this is set, 'hw_vif_model' in properties
            will be overridden
        **properties: key=value pair(s) of properties to associate with the
            image

    Returns (tuple): (rtn_code (int), image_id (str), message (str))
        # 1, 2, 3 only applicable if fail_ok=True
        - (0, <id>, "Image <id> is created successfully")
        - (1, <id or ''>, <stderr>)     # openstack image create cli rejected
        - (2, <id>, "Image status is not active.")
        - (3, <id>, <err_msg>)          # actual image id differs from the requested image_id
    """

    # Use source image url if url is provided. Else use local img file.

    default_guest_img = GuestImages.IMAGE_FILES[
        GuestImages.DEFAULT['guest']][2]

    file_path = source_image_file
    if not file_path and not volume:
        img_dir = GuestImages.DEFAULT['image_dir']
        file_path = "{}/{}".format(img_dir, default_guest_img)

    if file_path:
        if file_path.startswith('~/'):
            file_path = file_path.replace('~', HostLinuxUser.get_home(), 1)
        file_path = os.path.normpath(file_path)
        if 'win' in file_path and 'os_type' not in properties:
            properties['os_type'] = 'windows'
        elif 'ge_edge' in file_path and 'hw_firmware_type' not in properties:
            properties['hw_firmware_type'] = 'uefi'

    if hw_vif_model:
        properties[ImageMetadata.VIF_MODEL] = hw_vif_model

    if sys_con_for_dc and ProjVar.get_var('IS_DC'):
        con_ssh = ControllerClient.get_active_controller('RegionOne')
        create_auth = Tenant.get(tenant_dictname=auth_info['tenant'],
                                 dc_region='SystemController').copy()
        image_host_ssh = get_cli_client(central_region=True)
    else:
        if not con_ssh:
            con_ssh = ControllerClient.get_active_controller()
        image_host_ssh = get_cli_client()
        create_auth = auth_info

    if ensure_sufficient_space and not volume:
        if not is_image_storage_sufficient(img_file_path=file_path,
                                           con_ssh=con_ssh,
                                           image_host_ssh=image_host_ssh)[0]:
            skip('Insufficient image storage for creating glance image '
                 'from {}'.format(file_path))

    source_str = file_path if file_path else ''
    known_imgs = [
        'cgcs-guest', 'tis-centos-guest', 'ubuntu', 'cirros', 'opensuse',
        'rhel', 'centos', 'win', 'ge_edge', 'vxworks', 'debian-8-m-agent'
    ]
    name = name if name else 'auto'
    for img_str in known_imgs:
        if img_str in name:
            break
        elif img_str in source_str:
            name = img_str + '_' + name
            break
    else:
        if source_str:
            name_prefix = str(source_str.split(sep='/')[-1]).split(sep='.')[0]
            name = name_prefix + '_' + name

    name = common.get_unique_name(name_str=name,
                                  existing_names=get_images(),
                                  resource_type='image')

    LOG.info("Creating glance image: {}".format(name))

    if not disk_format:
        if not source_image_file:
            # default tis-centos-guest image is raw
            disk_format = 'raw'
        else:
            disk_format = 'qcow2'

    args_dict = {
        '--id': image_id,
        '--store': store,
        '--disk-format': disk_format,
        '--container-format': container_format if container_format else 'bare',
        '--min-disk': min_disk,
        '--min-ram': min_ram,
        '--file': file_path,
        '--force': True if force else None,
        '--protected': True if protected else None,
        '--unprotected': True if protected is False else None,
        '--tag': tags,
        '--property': properties,
        '--project': project,
        '--project-domain': project_domain,
        '--volume': volume,
    }
    if visibility:
        args_dict['--{}'.format(visibility)] = True
    args_ = '{} {}'.format(
        common.parse_args(args_dict, repeat_arg=True, vals_sep=','), name)

    try:
        LOG.info("Creating image {} with args: {}".format(name, args_))
        code, output = cli.openstack('image create',
                                     args_,
                                     ssh_client=con_ssh,
                                     fail_ok=fail_ok,
                                     auth_info=create_auth,
                                     timeout=timeout)
    except:
        # This is added to help debugging image create failure in case of
        # insufficient space
        con_ssh.exec_cmd('df -h', fail_ok=True, get_exit_code=False)
        raise

    table_ = table_parser.table(output)
    field = 'image_id' if volume else 'id'
    actual_id = table_parser.get_value_two_col_table(table_, field)
    if cleanup and actual_id:
        ResourceCleanup.add('image', actual_id, scope=cleanup)

    if code > 1:
        return 1, actual_id, output

    in_active = wait_for_image_status(actual_id,
                                      con_ssh=con_ssh,
                                      auth_info=create_auth,
                                      fail_ok=fail_ok)
    if not in_active:
        return 2, actual_id, "Image status is not active."

    if image_id and image_id != actual_id:
        msg = "Actual image id - {} is different than requested id - {}.".\
            format(actual_id, image_id)
        if fail_ok:
            return 3, actual_id, msg
        raise exceptions.ImageError(msg)

    if wait_for_subcloud_sync:
        wait_for_image_sync_on_subcloud(image_id=actual_id)

    msg = "Image {} is created successfully".format(actual_id)
    LOG.info(msg)
    return 0, actual_id, msg
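
# Usage sketch (illustrative only): create a glance image from the framework's
# default guest image file (source_image_file left as None) and register it
# for cleanup at the end of the test function.
def _example_create_image_usage():
    code, image_id, msg = create_image(name='demo', cleanup='function')
    assert code == 0, msg
    return image_id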
Example #9
def create_user(name=None,
                field='name',
                domain=None,
                project=None,
                project_domain=None,
                rtn_exist=None,
                password=HostLinuxUser.get_password(),
                email=None,
                description=None,
                enable=None,
                auth_info=Tenant.get('admin'),
                fail_ok=False,
                con_ssh=None):
    """
    Create an openstack user
    Args:
        name (str|None):
        field: name or id
        domain:
        project (str|None): default project
        project_domain:
        rtn_exist (bool)
        password:
        email:
        description:
        enable:
        auth_info:
        fail_ok:
        con_ssh:

    Returns (tuple):
        (0, <user>)
        (1, <std_err>)

    """

    if not name:
        name = common.get_unique_name(name_str='user')

    LOG.info("Create/Show openstack user {}".format(name))
    arg_dict = {
        'domain': domain,
        'project': project,
        'project-domain': project_domain,
        'password': password,
        'email': email,
        'description': description,
        'enable': True if enable is True else None,
        'disable': True if enable is False else None,
        'or-show': rtn_exist,
    }

    arg_str = '{} {}'.format(common.parse_args(args_dict=arg_dict), name)

    code, output = cli.openstack('user create',
                                 arg_str,
                                 ssh_client=con_ssh,
                                 fail_ok=fail_ok,
                                 auth_info=auth_info)
    if code > 0:
        return 1, output

    user = table_parser.get_value_two_col_table(table_parser.table(output),
                                                field=field)
    LOG.info("Openstack user {} successfully created/showed".format(user))

    return 0, user
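
# Usage sketch (illustrative only): same keyword as Example #4 but returning
# the user's id instead of its name.
def _example_create_user_by_id():
    code, user_id = create_user(name='tester', field='id', rtn_exist=True)
    assert code == 0, user_id
    return user_id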