Exemplo n.º 1
0
    def _reload(self):
        """Re-read topology elements, lines and legends from the current page.

        Populates ``self._elements``, ``self._lines`` and one attribute per
        legend (attribute name derived via ``attributize_string``).  Legend
        attribute names are tracked in ``self._legends`` so the next reload
        can remove them.
        """
        # Remove legend attributes created by a previous reload BEFORE the
        # bookkeeping list is reset.  The original code reset self._legends
        # first, which made this loop iterate over an empty list and leak
        # stale legend attributes forever.
        for legend_id in getattr(self, '_legends', []):
            try:
                delattr(self, legend_id)
            except AttributeError:
                pass
        self._legends = []
        self._elements = []
        self._lines = []
        self.search_box = TopologySearchBox()
        self.display_names = TopologyDisplayNames()
        # load elements
        # we have to wait few seconds, initial few seconds elements are moving
        if len(sel.elements(self.ELEMENTS)) > 0:
            self._el_ref = TopologyElement(o=self, element=sel.elements(self.ELEMENTS)[-1])
            wait_for(lambda: self._is_el_movement_stopped(), delay=2, num_sec=30)

            for element in sel.elements(self.ELEMENTS):
                self._elements.append(TopologyElement(o=self, element=element))
            # load lines
            for line in sel.elements(self.LINES):
                self._lines.append(TopologyLine(element=line))
        # load available legends
        for legend in sel.elements(self.LEGENDS):
            legend_text = sel.text_sane(legend.find_element_by_tag_name('label'))
            legend_id = attributize_string(legend_text.strip())
            legend_object = TopologyLegend(name=legend_text, element=legend)
            setattr(self, legend_id, legend_object)
            self._legends.append(legend_id)
def test_sdn_nsg_firewall_rules(provider, appliance, secgroup_with_rule):
    """ Pulls the list of firewall ports from Provider API and from appliance. Compare the 2
    results. If same, then test is successful.

    Metadata:
        test_flag: sdn, inventory

    Polarion:
        assignee: mmojzis
        initialEstimate: 1/4h
    """
    # Navigate to network provider.
    network_provider = appliance.collections.network_providers.filter(
        {'provider': provider}).all()[0]
    network_provider.refresh_provider_relationships()
    wait_for(network_provider.is_refreshed, func_kwargs=dict(refresh_delta=10), timeout=600)
    details_view = navigate_to(network_provider, 'Details')
    assert details_view.entities.relationships.get_text_of(
        "Parent Cloud Provider") == provider.name

    # Find the security group created by the fixture and open its details.
    matching = [g for g in appliance.collections.network_security_groups.all()
                if g.name == secgroup_with_rule]
    secgroup_view = navigate_to(matching[0], 'Details')

    if appliance.version < '5.10':
        # One header row; the first non-header row carries the column names.
        assert secgroup_view.entities.firewall_rules[1][3].text == 'Port'
        assert secgroup_view.entities.firewall_rules[2][3].text == '22'
    else:
        # Two header rows; widgetastic cannot reach the second one, so we
        # rely on column index 3 being the Port Range column.
        assert secgroup_view.entities.firewall_rules[1][3].text == '22'
def test_appliance_console_restore_pg_basebackup_ansible(get_appliance_with_ansible):
    """Restore a pg_basebackup database dump via appliance_console and verify
    that the embedded Ansible (Tower) API and playbook repositories still work
    after the restore and a reboot.
    """
    appl1 = get_appliance_with_ansible
    # Restore DB on the second appliance
    appl1.evmserverd.stop()
    appl1.db.restart_db_service()
    # appliance_console: '4' restore DB, '1' local file, path to the backup,
    # 'y' confirms (waits up to 60s), '' finishes.
    command_set = ('ap', '', '4', '1', '/tmp/backup/base.tar.gz', TimedCommand('y', 60), '')
    appl1.appliance_console.run_commands(command_set)
    # NOTE(review): 'manager' is not defined in this block — presumably a
    # module-level console/session manager; confirm this call belongs here.
    manager.quit()
    appl1.start_evm_service()
    appl1.wait_for_web_ui()
    appl1.reboot()
    appl1.start_evm_service()
    appl1.wait_for_web_ui()
    # Embedded Ansible API must respond after the restore
    appl1.ssh_client.run_command(
        'curl -kL https://localhost/ansibleapi | grep "Ansible Tower REST API"')
    repositories = appl1.collections.ansible_repositories
    repository = repositories.create('example', REPOSITORIES[0], description='example')
    view = navigate_to(repository, "Details")
    # bound method, passed (not called) so wait_for can use it as fail_func
    refresh = view.toolbar.refresh.click
    wait_for(
        lambda: view.entities.summary("Properties").get_text_of("Status") == "successful",
        timeout=60,
        fail_func=refresh,
        message="Check if playbook repo added"
    )
Exemplo n.º 4
0
def test_appliance_console_dedicated_db(unconfigured_appliance, app_creds):
    """ Commands:
    1. 'ap' launch appliance_console,
    2. RETURN clear info screen,
    3. '7' setup db,
    4. '1' Creates v2_key,
    5. '1' selects internal db,
    6. '2' use /dev/vdb partition,
    7. 'y' create dedicated db,
    8. 'pwd' db password,
    9. 'pwd' confirm db password + wait 360 secs
    10. RETURN finish.

    Polarion:
        assignee: mnadeem
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/3h
    """
    db_password = app_creds['password']
    unconfigured_appliance.appliance_console.run_commands(
        ('ap', RETURN, '7', '1', '1', '2', 'y', db_password,
         TimedCommand(db_password, 360), RETURN))
    wait_for(lambda: unconfigured_appliance.db.is_dedicated_active)
Exemplo n.º 5
0
    def delete_vm(self, vm_name):
        """Delete a RHEV VM, stopping it first if it is running, and wait
        until it no longer exists.

        Args:
            vm_name: name of the VM to delete.
        Returns: True once the VM is gone (waits up to 600s for the delete to
            be accepted and a further 300s for the VM to disappear).
        """
        self.wait_vm_steady(vm_name)
        if not self.is_vm_stopped(vm_name):
            self.stop_vm(vm_name)
        self.logger.debug(' Deleting RHEV VM %s' % vm_name)

        def _do_delete():
            """Returns True if you have to retry"""
            if not self.does_vm_exist(vm_name):
                return False
            try:
                vm = self._get_vm(vm_name)
                vm.delete()
            except RequestError as e:
                # Handle some states that can occur and can be circumvented
                if e.status == 409 and "Related operation" in e.detail:
                    # Another operation is still running on the VM; retry later
                    self.logger.info("Waiting for RHEV: {}:{} ({})".format(
                        e.status, e.reason, e.detail))
                    return True
                else:
                    raise  # Raise other so we can see them and eventually add them into handling
                # TODO: handle 400 - but I haven't seen the error message, it was empty.
            else:
                return False

        # fail_condition=True: keep retrying while _do_delete asks for a retry
        wait_for(_do_delete, fail_condition=True, num_sec=600, delay=15, message="execute delete")

        # fail_condition=True: wait until does_vm_exist stops returning True
        wait_for(
            lambda: self.does_vm_exist(vm_name),
            fail_condition=True,
            message="wait for RHEV VM %s deleted" % vm_name,
            num_sec=300
        )
        return True
Exemplo n.º 6
0
def client():
    """Return a cattle client for the QA catalog, waiting until at least one
    template is present in the ``qa-catalog`` catalog.
    """
    url = 'http://localhost:8088/v1-catalog/schemas'
    # Re-query inside the lambda on every poll.  The original fetched the
    # template list once before wait_for, so the waited-on condition could
    # never change: it either passed immediately or was a guaranteed timeout.
    wait_for(
        lambda: len(cattle.from_env(url=url).list_template(catalogId='qa-catalog')) > 0
    )
    return cattle.from_env(url=url)
Exemplo n.º 7
0
def test_appliance_console_ipa(ipa_crud, configured_appliance):
    """ Commands:
    1. 'ap' launches appliance_console,
    2. RETURN clears info screen,
    3. '12' setup IPA, + wait 40 secs,
    4. RETURN finish.

    Polarion:
        assignee: mnadeem
        caseimportance: high
        casecomponent: Auth
        initialEstimate: 1/4h
    """

    # Option '12' configures IPA: host, domain, realm, principal, password,
    # then 'y' confirms (waits up to 60s) and two RETURNs finish.
    command_set = ('ap', RETURN, '12', ipa_crud.host1, ipa_crud.ipadomain, ipa_crud.iparealm,
                   ipa_crud.ipaprincipal, ipa_crud.bind_password, TimedCommand('y', 60),
                   RETURN, RETURN)
    configured_appliance.appliance_console.run_commands(command_set, timeout=20)
    configured_appliance.sssd.wait_for_running()
    # enable_ra=True in /etc/ipa/default.conf indicates the IPA client is set up
    assert configured_appliance.ssh_client.run_command("cat /etc/ipa/default.conf |"
                                                       "grep 'enable_ra = True'")

    # Unconfigure to cleanup
    # When setup_ipa option selected, will prompt to unconfigure, then to proceed with new config
    command_set = ('ap', RETURN, '12', TimedCommand('y', 40), TimedCommand('n', 5), RETURN, RETURN)
    configured_appliance.appliance_console.run_commands(command_set)
    # sssd stopping signals IPA was unconfigured
    wait_for(lambda: not configured_appliance.sssd.running)
Exemplo n.º 8
0
 def restart_vm(self, instance_name):
     """Reset (hard-restart) a Google Cloud instance and wait for the
     reset operation to complete.

     Args:
         instance_name: name of the GCE instance to reset.
     Returns: True once the operation has finished.
     """
     self.logger.info("Restarting Google Cloud instance {}".format(instance_name))
     operation = self._instances.reset(
         project=self._project, zone=self._zone, instance=instance_name).execute()
     # Poll the named GCE operation until it reports done
     wait_for(lambda: self._nested_operation_wait(operation['name']),
         message="Restart {}".format(instance_name))
     return True
Exemplo n.º 9
0
def test_appliance_console_dhcp(unconfigured_appliance, soft_assert):
    """ Commands:
    1. 'ap' launches appliance_console,
    2. RETURN clears info screen,
    3. '1' configure network,
    4. '1' configure DHCP,
    5. 'y' confirm IPv4 configuration,
    6. 'y' IPv6 configuration.

    Polarion:
        assignee: mnadeem
        casecomponent: Appliance
        caseimportance: critical
        initialEstimate: 1/6h
    """
    command_set = ('ap', RETURN, '1', '1', 'y', TimedCommand('y', 90), RETURN, RETURN)
    unconfigured_appliance.appliance_console.run_commands(command_set)

    # Reconfiguring the network can drop SSH; retry until a command succeeds.
    # NOTE(review): the closure has no explicit return — relies on wait_for
    # treating a non-exception (None) result as success; confirm against the
    # wait_for fail_condition semantics in use.
    def appliance_is_connective():
        unconfigured_appliance.ssh_client.run_command("true")
    wait_for(appliance_is_connective, handle_exception=True, delay=1, timeout=30)

    # eth0 should have picked up both a dynamic IPv4 and IPv6 address
    soft_assert(unconfigured_appliance.ssh_client.run_command(
        r"ip a show dev eth0 | grep 'inet\s.*dynamic'").success)
    soft_assert(unconfigured_appliance.ssh_client.run_command(
        r"ip a show dev eth0 | grep 'inet6\s.*dynamic'").success)
Exemplo n.º 10
0
def test_appliance_console_static_ipv4(unconfigured_appliance, soft_assert):
    """ Commands:
    1. 'ap' launches appliance_console,
    2. RETURN clears info screen,
    3. '1' configure network,
    4. '2' configure static IPv4,
    5. RETURN confirm default IPv4 addr,
    6. RETURN confirm default netmask,
    7. RETURN confirm default gateway,
    8. RETURN confirm default primary DNS,
    9. RETURN confirm default secondary DNS,
    10. RETURN confirm default search order,
    11. 'y' apply static configuration.

    Polarion:
        assignee: mnadeem
        casecomponent: Appliance
        caseimportance: critical
        initialEstimate: 1/6h
    """
    console_input = ('ap', RETURN, '1', '2', RETURN, RETURN, RETURN, RETURN, RETURN, RETURN, 'y')
    unconfigured_appliance.appliance_console.run_commands(console_input)

    def appliance_is_connective():
        unconfigured_appliance.ssh_client.run_command("true")
    wait_for(appliance_is_connective, handle_exception=True, delay=1, timeout=30)

    # Verify the static address is applied and a default route exists.
    for check in ("ip -4 a show dev eth0 | grep 'inet .*scope global eth0'",
                  "ip -4 r show dev eth0 | grep 'default via'"):
        soft_assert(unconfigured_appliance.ssh_client.run_command(check))
Exemplo n.º 11
0
def test_appliance_console_static_ipv6(unconfigured_appliance, soft_assert):
    """ Commands:
    1. 'ap' launches appliance_console,
    2. RETURN clears info screen,
    3. '1' configure network,
    4. '3' configure static IPv6,
    5. '1::1' set IPv6 addr,
    6. RETURN set default prefix length,
    7. '1::f' set IPv6 gateway,
    8. RETURN confirm default primary DNS,
    9. RETURN confirm default secondary DNS,
    10. RETURN confirm default search order,
    11. 'y' apply static configuration.

    Polarion:
        assignee: mnadeem
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    command_set = ('ap', RETURN, '1', '3', '1::1', RETURN, '1::f', RETURN, RETURN, RETURN, 'y', '')
    unconfigured_appliance.appliance_console.run_commands(command_set, timeout=30)

    # Reconfiguring the network can drop SSH; retry until a command succeeds
    def appliance_is_connective():
        unconfigured_appliance.ssh_client.run_command("true")
    wait_for(appliance_is_connective, handle_exception=True, delay=1, timeout=30)

    # Verify the configured IPv6 address and default gateway are in effect
    soft_assert(unconfigured_appliance.ssh_client.run_command(
        "ip -6 a show dev eth0 | grep 'inet6 1::1.*scope global'"))
    soft_assert(unconfigured_appliance.ssh_client.run_command(
        "ip -6 r show dev eth0 | grep 'default via 1::f'"))
Exemplo n.º 12
0
def test_appliance_console_set_hostname(configured_appliance):
    """ Commands:
    1. 'ap' launch appliance_console,
    2. RETURN clear info screen,
    3. '1' loads network settings,
    4. '5' gives access to set hostname,
    5. 'hostname' sets new hostname.

    Polarion:
        assignee: mnadeem
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/6h
    """
    hostname = 'test.example.com'
    configured_appliance.appliance_console.run_commands(
        ('ap', RETURN, '1', '5', hostname, RETURN, RETURN), timeout=30)

    def is_hostname_set(appliance):
        return appliance.ssh_client.run_command(
            "hostname -f | grep {hostname}".format(hostname=hostname)).success

    wait_for(is_hostname_set, func_args=[configured_appliance])
    result = configured_appliance.ssh_client.run_command("hostname -f")
    assert result.success
    assert result.output.strip() == hostname
def test_appliance_console_cli_datetime(temp_appliance_preconfig_funcscope):
    """Grab fresh appliance and set time and date through appliance_console_cli and check result"""
    appliance = temp_appliance_preconfig_funcscope
    appliance.ssh_client.run_command("appliance_console_cli --datetime 2020-10-20T09:59:00")

    # Wait until the clock rolls over to the minute after the value we set.
    wait_for(lambda: appliance.ssh_client.run_command(
        "date +%F-%T | grep 2020-10-20-10:00").success)
Exemplo n.º 14
0
 def _wait_template_ok(self, template_name):
     """Block until the RHEV template reaches the "ok" state (up to 30 min).

     Raises:
         VMInstanceNotFound: if the template disappears while waiting.
     """
     try:
         wait_for(
             lambda:
             self.api.templates.get(name=template_name).get_status().state == "ok",
             num_sec=30 * 60, message="template is OK", delay=45)
     except AttributeError:  # .get() returns None when template not found
         raise VMInstanceNotFound("Template {} not found!".format(template_name))
Exemplo n.º 15
0
    def suspend_vm(self, instance_name):
        """Suspend the named OpenStack instance and wait until it reports suspended.

        Returns True immediately if the instance is already suspended.
        """
        self.logger.info(" Suspending OpenStack instance %s" % instance_name)
        if self.is_vm_suspended(instance_name):
            return True

        self._find_instance_by_name(instance_name).suspend()
        wait_for(lambda: self.is_vm_suspended(instance_name),
                 message="suspend %s" % instance_name)
Exemplo n.º 16
0
    def pause_vm(self, instance_name):
        """Pause the named OpenStack instance and wait until it reports paused.

        Returns True immediately if the instance is already paused.
        """
        self.logger.info(" Pausing OpenStack instance %s" % instance_name)
        if self.is_vm_paused(instance_name):
            return True

        self._find_instance_by_name(instance_name).pause()
        wait_for(lambda: self.is_vm_paused(instance_name),
                 message="pause %s" % instance_name)
Exemplo n.º 17
0
def test_db_migrate_replication(temp_appliance_remote, dbversion, temp_appliance_global_region):
    """Restore and migrate an older DB dump onto a remote appliance, then
    verify pglogical replication of its providers into a global region.

    Polarion:
        assignee: jhenner
        initialEstimate: 1/4h
        casecomponent: Appliance
    """
    app = temp_appliance_remote
    app2 = temp_appliance_global_region
    # Download the database
    logger.info("Downloading database: {}".format(dbversion))
    db_url = cfme_data['db_backups'][dbversion]['url']
    url_basename = os_path.basename(db_url)
    result = app.ssh_client.run_command(
        'curl -o "/tmp/{}" "{}"'.format(url_basename, db_url), timeout=30)
    assert result.success, "Failed to download database: {}".format(result.output)
    # The v2_key is potentially here
    v2key_url = os_path.join(os_path.dirname(db_url), "v2_key")
    # Stop EVM service and drop vmdb_production DB
    app.evmserverd.stop()
    app.db.drop()
    app.db.create()
    # restore new DB and migrate it
    result = app.ssh_client.run_command(
        'pg_restore -v --dbname=vmdb_production /tmp/{}'.format(url_basename), timeout=600)
    assert result.success, "Failed to restore new database: {}".format(result.output)
    app.db.migrate()
    # fetch v2_key
    try:
        result = app.ssh_client.run_command(
            'curl "{}"'.format(v2key_url), timeout=15)
        assert result.success, "Failed to download v2_key: {}".format(result.output)
        assert ":key:" in result.output, "Not a v2_key file: {}".format(result.output)
        result = app.ssh_client.run_command(
            'curl -o "/var/www/miq/vmdb/certs/v2_key" "{}"'.format(v2key_url), timeout=15)
        assert result.success, "Failed to download v2_key: {}".format(result.output)
    # or change all invalid (now unavailable) passwords to 'invalid'
    except AssertionError:
        app.db.fix_auth_key()
    app.db.fix_auth_dbyml()
    # start evmserverd, wait for web UI to start and try to log in
    try:
        app.evmserverd.start()
    except ApplianceException:
        # fall back to starting EVM via rake when the service wrapper fails
        result = app.ssh_client.run_rake_command("evm:start")
        assert result.success, "Couldn't start evmserverd: {}".format(result.output)
    app.wait_for_web_ui(timeout=600)
    # Reset user's password, just in case (necessary for customer DBs)
    app.db.reset_user_pass()
    app.server.login(app.user)

    # Set up pglogical replication: 'app' publishes, 'app2' subscribes
    app.set_pglogical_replication(replication_type=':remote')
    app2.set_pglogical_replication(replication_type=':global')
    app2.add_pglogical_replication_subscription(app.hostname)

    # Replication is done once both appliances see the same provider set
    def is_provider_replicated(app, app2):
        return set(app.managed_provider_names) == set(app2.managed_provider_names)
    wait_for(is_provider_replicated, func_args=[app, app2], timeout=30)
Exemplo n.º 18
0
def test_appliance_console_datetime(temp_appliance_preconfig_funcscope):
    """Grab fresh appliance and set time and date through appliance_console and check result"""
    appliance = temp_appliance_preconfig_funcscope
    # '3' opens date/time config; 'y' confirms, then date, time, final 'y'
    appliance.appliance_console.run_commands(
        ('ap', '', '3', 'y', '2020-10-20', '09:58:00', 'y', ''))

    # Wait until the clock rolls over to the minute after the value we set
    wait_for(lambda: appliance.ssh_client.run_command(
        "date +%F-%T | grep 2020-10-20-10:00").success)
Exemplo n.º 19
0
    def stop_vm(self, instance_name):
        """Stop the named OpenStack instance, waiting until it reports stopped.

        Returns: True (immediately when the instance is already stopped).
        """
        self.logger.info(" Stopping OpenStack instance %s" % instance_name)
        if self.is_vm_stopped(instance_name):
            return True

        self._find_instance_by_name(instance_name).stop()
        wait_for(lambda: self.is_vm_stopped(instance_name),
                 message="stop %s" % instance_name)
        return True
Exemplo n.º 20
0
def test_appliance_console_extend_storage(unconfigured_appliance):
    """'ap' launches appliance_console, '' clears info screen, '9' extend storage, '1' select
    disk, 'y' confirm configuration and '' complete."""
    command_set = ('ap', '', '9', '1', 'y', '')
    unconfigured_appliance.appliance_console.run_commands(command_set)

    # NOTE(review): the closure asserts rather than returning a bool; without
    # handle_exception the AssertionError propagates straight through wait_for
    # on the first failed check — confirm fail-fast is intended here.
    def is_storage_extended():
        assert unconfigured_appliance.ssh_client.run_command("df -h | grep /var/www/miq_tmp")
    wait_for(is_storage_extended)
Exemplo n.º 21
0
def test_appliance_console_dedicated_db(unconfigured_appliance, app_creds):
    """'ap' launch appliance_console, '' clear info screen, '5' setup db, '1' Creates v2_key,
    '1' selects internal db, 'y' continue, '1' use partition, 'y' create dedicated db, 'pwd'
    db password, 'pwd' confirm db password + wait 360 secs and '' finish."""

    pwd = app_creds['password']
    command_set = ('ap', '', '5', '1', '1', 'y', '1', 'y', pwd, TimedCommand(pwd, 360), '')
    unconfigured_appliance.appliance_console.run_commands(command_set)
    # is_dedicated_active flips once the appliance reports a dedicated DB role
    wait_for(lambda: unconfigured_appliance.db.is_dedicated_active)
Exemplo n.º 22
0
 def delete_volume(self, *ids, **kwargs):
     """Delete the volumes with the given ids.

     Args:
         *ids: volume ids to delete.
     Keyword Args:
         wait: block until the volumes are gone (default True).
         timeout: max seconds to wait for deletion (default 180).
     """
     wait = kwargs.get("wait", True)
     timeout = kwargs.get("timeout", 180)
     # 'volume_id' instead of 'id' to avoid shadowing the builtin
     for volume_id in ids:
         self.capi.volumes.find(id=volume_id).delete()
     if not wait:
         return
     # Wait until none of the requested volumes exist any more
     wait_for(
         lambda: all(not self.volume_exists(volume_id) for volume_id in ids),
         delay=0.5, num_sec=timeout)
Exemplo n.º 23
0
 def wait_for_connect(self, timeout=30):
     """Wait for as long as the specified/default timeout for the console to be connected.

     Returns:
         True if the console connected within ``timeout`` seconds, else False.
     """
     try:
         logger.info('Waiting for console connection (timeout={})'.format(timeout))
         # NOTE(review): the kwarg is spelled 'handle_exceptions' here while
         # sibling code uses 'handle_exception' — verify the wait_for
         # implementation in use actually recognizes this spelling.
         wait_for(func=lambda: self.is_connected(),
                  delay=1, handle_exceptions=True,
                  num_sec=timeout)
         return True
     except TimedOutError:
         return False
Exemplo n.º 24
0
def test_black_console_dedicated_db(temp_appliance_unconfig_funcscope, app_creds):
    """'ap' launch appliance_console, '' clear info screen, '5/8' setup db, '1' Creates v2_key,
    '1' selects internal db, 'y' continue, '1' use partition, 'y' create dedicated db, 'pwd'
    db password, 'pwd' confirm db password + wait 360 secs and '' finish."""

    pwd = app_creds['password']
    # Menu numbering changed in 5.8: the db-setup option moved from '8' to '5'
    opt = '5' if temp_appliance_unconfig_funcscope.version >= "5.8" else '8'
    command_set = ('ap', '', opt, '1', '1', 'y', '1', 'y', pwd, TimedCommand(pwd, 360), '')
    temp_appliance_unconfig_funcscope.appliance_console.run_commands(command_set)
    # is_dedicated_active flips once the appliance reports a dedicated DB role
    wait_for(lambda: temp_appliance_unconfig_funcscope.db.is_dedicated_active)
def test_appliance_console_cli_db_maintenance_hourly(appliance_with_preset_time):
    """Test database hourly re-indexing through appliance console"""
    appliance = appliance_with_preset_time
    appliance.ssh_client.run_command("appliance_console_cli --db-hourly-maintenance")

    # A REINDEX entry in the maintenance log proves the hourly job ran.
    wait_for(
        lambda: appliance.ssh_client.run_command(
            "grep REINDEX /var/www/miq/vmdb/log/hourly_continuous_pg_maint_stdout.log").success,
        timeout=300)
Exemplo n.º 26
0
 def delete_template(self, template_name):
     """Delete a RHEV template, waiting until it is fully removed.

     No-op if the template is already absent.  Waits for the template to be
     in the "ok" state before deleting (deletion of an in-flight template
     would fail), then up to 15 minutes for it to disappear.
     """
     template = self.api.templates.get(name=template_name)
     if template is None:
         self.logger.info(
             " Template {} is already not present on the RHEV-M provider".format(template_name))
         return
     self._wait_template_ok(template_name)
     template.delete()
     wait_for(
         lambda: not self.does_template_exist(template_name),
         num_sec=15 * 60, delay=20)
Exemplo n.º 27
0
 def delete_vm(self, instance_name, delete_fip=True):
     """Delete an OpenStack instance, dealing with its floating IP first.

     Args:
         instance_name: name of the instance to delete.
         delete_fip: if True, delete the floating IP; otherwise only unassign
             it (leaving it allocated in the project).
     Returns: True once the instance no longer exists (waits up to 3 min).
     """
     self.logger.info(" Deleting OpenStack instance {}".format(instance_name))
     instance = self._find_instance_by_name(instance_name)
     if delete_fip:
         self.unassign_and_delete_floating_ip(instance)
     else:
         self.unassign_floating_ip(instance)
     self.logger.info(" Deleting OpenStack instance {} in progress now.".format(instance_name))
     instance.delete()
     wait_for(lambda: not self.does_vm_exist(instance_name), timeout="3m", delay=5)
     return True
Exemplo n.º 28
0
def test_black_console_extend_storage(fqdn_appliance):
    """'ap' launches appliance_console, '' clears info screen, '10/13' extend storage, '1' select
    disk, 'y' confirm configuration and '' complete."""

    # Menu numbering changed in 5.8: extend-storage moved from '13' to '10'
    opt = '10' if fqdn_appliance.version >= "5.8" else '13'
    command_set = ('ap', '', opt, '1', 'y', '')
    fqdn_appliance.appliance_console.run_commands(command_set)

    # NOTE(review): the closure asserts rather than returning a bool, so an
    # AssertionError propagates through wait_for on the first failed check —
    # confirm fail-fast is intended.
    def is_storage_extended(fqdn_appliance):
        assert fqdn_appliance.ssh_client.run_command("df -h | grep /var/www/miq_tmp")
    wait_for(is_storage_extended, func_args=[fqdn_appliance])
Exemplo n.º 29
0
    def delete_vm(self, instance_name, timeout=180):
        """Delete a Google Cloud instance and wait for the operation to finish.

        Args:
            instance_name: name of the instance to delete.
            timeout: seconds to wait for the delete operation (default 180).
        Returns: True once deleted (or immediately if the instance is absent).
        """
        if not self.does_vm_exist(instance_name):
            # fixed log message grammar ("is not exists" -> "does not exist")
            self.logger.info("The {} instance does not exist, skipping".format(instance_name))
            return True

        self.logger.info("Deleting Google Cloud instance {}".format(instance_name))
        operation = self._instances.delete(
            project=self._project, zone=self._zone, instance=instance_name).execute()
        # Poll the named GCE operation until it reports done
        wait_for(lambda: self._nested_operation_wait(operation['name']), delay=0.5,
            num_sec=timeout, message="Delete {}".format(instance_name))
        return True
Exemplo n.º 30
0
    def stop_vm(self, instance_name):
        """Stop a Google Cloud instance and wait for the operation to finish.

        Args:
            instance_name: name of the instance to stop.
        Returns: True once stopped (or immediately if already stopped/absent).
        """
        if self.is_vm_stopped(instance_name) or not self.does_vm_exist(instance_name):
            self.logger.info("The {} instance is already stopped or doesn't exist, skip termination"
               .format(instance_name))
            return True

        # fixed log message typo ("Stoping" -> "Stopping")
        self.logger.info("Stopping Google Cloud instance {}".format(instance_name))
        operation = self._instances.stop(
            project=self._project, zone=self._zone, instance=instance_name).execute()
        # Poll the named GCE operation until it reports done
        wait_for(lambda: self._nested_operation_wait(operation['name']),
            message="Stop {}".format(instance_name))
        return True
Exemplo n.º 31
0
    def test_positive_deploy_configure_by_id(self, form_data, virtwho_config):
        """ Verify "POST /foreman_virt_who_configure/api/v2/configs"

        :id: f5228e01-bb8d-4c8e-877e-cd8bc494f00e

        :expectedresults: Config can be created and deployed

        :CaseLevel: Integration

        :CaseImportance: High
        """
        assert virtwho_config.status == 'unknown'
        command = get_configure_command(virtwho_config.id)
        hypervisor_name, guest_name = deploy_configure_by_command(
            command, form_data['hypervisor_type'], debug=True
        )
        virt_who_instance = (
            entities.VirtWhoConfig()
            .search(query={'search': f'name={virtwho_config.name}'})[0]
            .status
        )
        assert virt_who_instance == 'ok'
        # hypervisor gets a physical VDC sub; its guest a stack-derived one
        hosts = [
            (hypervisor_name, f'product_id={virtwho.sku.vdc_physical} and type=NORMAL',),
            (guest_name, f'product_id={virtwho.sku.vdc_physical} and type=STACK_DERIVED',),
        ]
        for hostname, sku in hosts:
            # NOTE(review): vdc_id would be unbound if a sku matched neither
            # branch; both entries above always match exactly one.
            if 'type=NORMAL' in sku:
                subscriptions = entities.Subscription().search(query={'search': sku})
                vdc_id = subscriptions[0].id
            if 'type=STACK_DERIVED' in sku:
                vdc_id = self._get_guest_bonus(hypervisor_name, sku)
            # retry host lookup until it is registered (empty list = not yet)
            host, time = wait_for(
                entities.Host().search,
                func_args=(None, {'search': hostname}),
                fail_condition=[],
                timeout=5,
                delay=1,
            )
            entities.HostSubscription(host=host[0].id).add_subscriptions(
                data={'subscriptions': [{'id': vdc_id, 'quantity': 1}]}
            )
            result = entities.Host().search(query={'search': hostname})[0].read_json()
            assert result['subscription_status_label'] == 'Fully entitled'
        virtwho_config.delete()
        assert not entities.VirtWhoConfig().search(query={'search': f"name={form_data['name']}"})
Exemplo n.º 32
0
    def wait_for_element(
            self, locator, parent=None, visible=False, timeout=5, delay=0.2, exception=True,
            ensure_page_safe=False):
        """Wait for presence or visibility of elements specified by a locator.

        Args:
            locator, parent: Arguments for :py:meth:`elements`
            visible: If False, then it only checks presence not considering visibility. If True, it
                     also checks visibility.
            timeout: How long to wait for.
            delay: How often to check.
            exception: If True (default), in case of element not being found an exception will be
                       raised. If False, it returns None.
            ensure_page_safe: Whether to call the ``ensure_page_safe`` hook on repeat.

        Returns:
            :py:class:`selenium.webdriver.remote.webelement.WebElement` if element found according
            to params. ``None`` if not found and ``exception=False``.

        Raises:
            :py:class:`selenium.common.exceptions.NoSuchElementException` if element not found.
        """
        def _element_lookup():
            # Returns the matched elements, or False when none are present yet
            try:
                return self.elements(locator,
                                     parent=parent,
                                     check_visibility=visible,
                                     check_safe=ensure_page_safe)
            # allow other exceptions through to caller on first wait
            except NoSuchElementException:
                return False
        # turn the timeout into NoSuchElement
        try:
            # fail_condition: keep polling while the lookup yields nothing
            result = wait_for(_element_lookup,
                              num_sec=timeout,
                              delay=delay,
                              fail_condition=lambda elements: not bool(elements),
                              fail_func=self.plugin.ensure_page_safe if ensure_page_safe else None)
        except TimedOutError:
            if exception:
                raise NoSuchElementException('Failed waiting for element with {} in {}'
                                             .format(locator, parent))
            else:
                return None
        # wait_for returns NamedTuple, return first item from 'out', the WebElement
        return result.out[0]
Exemplo n.º 33
0
    def wait_persistent_volume_claim_exist(self, namespace, name, wait=30):
        """Checks whether Persistent Volume Claim exists within some time.

        Args:
            name: entity name
            namespace: openshift namespace where entity should exist
            wait: entity should appear for this time then - True, otherwise False
        Return: True/False
        """
        lookup_kwargs = {
            'func': self.k_api.read_namespaced_persistent_volume_claim,
            'name': name,
            'namespace': namespace,
        }
        exists, _ = wait_for(self._does_exist, num_sec=wait, func_kwargs=lookup_kwargs)
        return exists
Exemplo n.º 34
0
    def wait_stateful_set_exist(self, namespace, name, wait=600):
        """Checks whether StatefulSet exists within some time.

        Args:
            name: entity name
            namespace: openshift namespace where entity should exist
            wait: entity should appear for this time then - True, otherwise False
        Return: True/False
        """
        lookup_kwargs = {
            'func': self.kclient.AppsV1beta1Api().read_namespaced_stateful_set,
            'name': name,
            'namespace': namespace,
        }
        exists, _ = wait_for(self._does_exist, num_sec=wait, func_kwargs=lookup_kwargs)
        return exists
Exemplo n.º 35
0
    def save_downloaded_file(self, file_uri=None, save_path=None):
        """Save local or remote browser's automatically downloaded file to
        specified local path. Useful when you don't know exact file name or
        path where file was downloaded or you're using remote driver with no
        access to worker's filesystem (e.g. saucelabs).

        Usage example::

            view.widget_which_triggers_file_download.click()
            path = self.browser.save_downloaded_file()
            with open(file_path, newline='') as csvfile:
                reader = csv.DictReader(csvfile)
                for row in reader:
                    # process file contents


        :param str optional file_uri: URI of file. If not specified - browser's
            latest downloaded file will be selected
        :param str optional save_path: local path where the file should be
            saved. If not specified - ``temp_dir`` from airgun settings will be
            used in case of remote session or just path to saved file in case
            local one.
        :return str: local path to the (possibly re-saved) downloaded file.
        """
        # Wait until the browser reports at least one completed download
        files, _ = wait_for(
            self.browser.get_downloads_list,
            timeout=60,
            delay=1,
        )
        if not file_uri:
            file_uri = files[0]
        if (not save_path and settings.selenium.browser == 'selenium'):
            # if test is running locally, there's no need to save the file once
            # again except when explicitly asked to
            file_path = urllib.parse.unquote(
                urllib.parse.urlparse(file_uri).path)
        else:
            # remote session (or explicit save_path): copy the file content
            # over to the local filesystem
            if not save_path:
                save_path = settings.airgun.tmp_dir
            content = self.get_file_content(file_uri)
            filename = urllib.parse.unquote(os.path.basename(file_uri))
            with open(os.path.join(save_path, filename), 'wb') as f:
                f.write(content)
            file_path = os.path.join(save_path, filename)
        # Leave the downloads page and make sure the app is idle again
        self.selenium.back()
        self.plugin.ensure_page_safe()
        return file_path
Exemplo n.º 36
0
    def get_ip_address(self, vm_name, timeout=600):
        """Return the first non-loopback IP address of the given VM.

        Args:
            vm_name: The name of the vm to obtain the IP for.
            timeout: Maximum number of seconds to wait for an address.
        Returns: the first discovered IP string, or ``None`` on timeout.
        """
        try:
            found_ip, _ = wait_for(
                lambda: self.current_ip_address(vm_name),
                fail_condition=None,
                delay=5,
                num_sec=timeout,
                message="get_ip_address from vsphere")
        except TimedOutError:
            # no address showed up within the allotted time
            found_ip = None
        return found_ip
Exemplo n.º 37
0
    def wait_deployment_config_exist(self, namespace, name, wait=600):
        """Poll until a Deployment Config shows up within the given time.

        Args:
            name: entity name
            namespace: openshift namespace where entity should exist
            wait: seconds to keep polling before giving up
        Return: True if the entity appeared in time, otherwise False
        """
        return wait_for(
            self._does_exist,
            num_sec=wait,
            func_kwargs=dict(
                func=self.o_api.read_namespaced_deployment_config,
                name=name,
                namespace=namespace,
            ),
        )[0]
Exemplo n.º 38
0
    def wait_for_element(self,
                         locator,
                         parent=None,
                         visible=False,
                         timeout=5,
                         delay=0.2,
                         exception=True,
                         ensure_page_safe=False):
        """Wait for presence or visibility of elements specified by a locator.

        Args:
            locator, parent: Arguments for :py:meth:`elements`
            visible: If False, only presence is checked; if True, visibility
                     is checked as well.
            timeout: How long to wait for.
            delay: How often to check.
            exception: If True (default), raise when the element is not found;
                       if False, return ``None`` instead.
            ensure_page_safe: Whether to call the ``ensure_page_safe`` hook on
                              each retry.

        Returns:
            :py:class:`selenium.webdriver.remote.webelement.WebElement` if an
            element matched; ``None`` if not found and ``exception=False``.

        Raises:
            :py:class:`selenium.common.exceptions.NoSuchElementException` if
            the element is not found and ``exception=True``.
        """
        def _lookup():
            return self.elements(locator, parent=parent, check_visibility=visible)

        # re-run the page-safety hook between polls only when requested
        retry_hook = self.plugin.ensure_page_safe if ensure_page_safe else None
        try:
            waited = wait_for(
                _lookup,
                num_sec=timeout,
                delay=delay,
                fail_condition=lambda found: not bool(found),
                fail_func=retry_hook)
        except TimedOutError:
            if not exception:
                return None
            raise NoSuchElementException(
                'Could not wait for element {!r}'.format(locator))
        # wait_for returns a named tuple; the matched elements are in .out
        return waited.out[0]
Exemplo n.º 39
0
    def wait_vm_steady(self, vm_name):
        """Wait up to ``STEADY_WAIT_MINS`` minutes for a VM to settle into a
        steady state, polling every 2 seconds.

        Args:
            vm_name: VM name
        """
        deadline = self.STEADY_WAIT_MINS * 60
        try:
            return wait_for(
                lambda: self.in_steady_state(vm_name),
                num_sec=deadline,
                delay=2,
                message="VM %s in steady state" % vm_name
            )
        except TimedOutError:
            # record the state the VM got stuck in before re-raising
            stuck_msg = "VM {} got stuck in {} state when waiting for steady state.".format(
                vm_name, self.vm_status(vm_name))
            self.logger.exception(stuck_msg)
            raise
Exemplo n.º 40
0
def sync_all():
    """Trigger a full sync via the REST API and assert that it started."""
    api = rest_api()

    def _attempt_sync():
        # Returns False (so wait_for retries) while another sync is running.
        try:
            # pylint: disable=no-member
            return api.sync_all()
        except exceptions.ClientError as err:
            if 'Another sync task already in progress' in err.response.body[
                    'msg']:
                return False
            raise

    outcome, __ = wait_for(_attempt_sync, num_sec=10)
    outcome.response_check()
    outcome, = outcome
    assert 'sync task started' in outcome.msg
Exemplo n.º 41
0
    def wait_for_steady_state(self, timeout=None, delay=5):
        """
        Waits for the system's steady_wait_time for VM to reach a steady state

        Args:
            timeout: Time (seconds) to wait, overriding the system's default
                ``steady_wait_time``
            delay: Polling interval in seconds
        """
        try:
            return wait_for(
                lambda: self.in_steady_state,
                timeout=timeout if timeout else self.system.steady_wait_time,
                delay=delay,
                message="VM/Instance '{}' in steady state".format(self._log_id)
            )
        except TimedOutError:
            # log the state the VM is stuck in before propagating the timeout
            self.logger.exception(
                "VM %s stuck in '%s' while waiting for steady state.", self._log_id, self.state)
            raise
Exemplo n.º 42
0
    def wait_for_element(
            self, locator, parent=None, visible=False, timeout=5, delay=0.2,
            exception=True, ensure_page_safe=False):
        """Wait for presence or visibility of elements specified by a locator.
        todo: wrap default wait_for_element method once #132 issue is fixed in widgetastic.core

        :param locator: Elements locator
        :param parent: Elements parent locator
        :param visible: If False, only presence is checked; if True,
            visibility is checked as well.
        :param timeout: How long to wait for.
        :param delay: How often to check.
        :param exception: If True (default), raise when the element is not
            found. If False, return ``None`` instead.
        :param ensure_page_safe: Whether to call the ``ensure_page_safe`` hook on repeat.
        :returns: WebElement if element found according to params.
        :raises: NoSuchElementException: if element not found.
        """
        def _find():
            # Swallow lookup misses so wait_for keeps polling; other
            # exceptions propagate to the caller on the first attempt.
            try:
                return self.elements(
                    locator, parent=parent, check_visibility=visible,
                    check_safe=ensure_page_safe)
            except NoSuchElementException:
                return False

        retry_hook = self.plugin.ensure_page_safe if ensure_page_safe else None
        # turn the timeout into NoSuchElement (or None) per `exception`
        try:
            waited = wait_for(
                _find, num_sec=timeout, delay=delay,
                fail_condition=lambda found: not bool(found),
                fail_func=retry_hook)
        except TimedOutError:
            if not exception:
                return None
            raise NoSuchElementException(
                'Failed waiting for element with {} in {}'.format(locator, parent))
        # wait_for returns NamedTuple, return first item from 'out', the WebElement
        return waited.out[0]
Exemplo n.º 43
0
def test_positive_schedule_entitlements_report(setup_content, default_sat):
    """Schedule a report using the Subscription - Entitlement Report template.

    :id: 5152c518-b0da-4c27-8268-2be78289249f

    :setup: Installed Satellite with Organization, Activation key,
            Content View, Content Host, and Subscriptions.

    :steps:

        1. POST /api/report_templates/130-Subscription - Entitlement Report/schedule_report/

    :expectedresults: Report is scheduled and contains all necessary
                      information for entitlements.

    :CaseImportance: High
    """
    with VMBroker(nick='rhel7', host_classes={'host': ContentHost}) as client:
        activation_key, org = setup_content
        # register the content host so it shows up in the report
        client.install_katello_ca(default_sat)
        client.register_contenthost(org.label, activation_key.name)
        assert client.subscribed
        template = (
            entities.ReportTemplate()
            .search(query={'search': 'name="Subscription - Entitlement Report"'})[0]
            .read()
        )
        schedule = template.schedule_report(
            data={
                'id': f'{template.id}-Subscription - Entitlement Report',
                'organization_id': org.id,
                'report_format': 'csv',
                "input_values": {"Days from Now": "no limit"},
            }
        )
        # report generation is asynchronous; poll until data is available
        report, _ = wait_for(
            template.report_data,
            func_kwargs={'data': {'id': template.id, 'job_id': schedule['job_id']}},
            fail_condition=None,
            timeout=300,
            delay=10,
        )
        assert client.hostname in report
        assert DEFAULT_SUBSCRIPTION_NAME in report
Exemplo n.º 44
0
    def create(cls, options=None, timeout=None):
        """
        Create a new record using the arguments passed via dictionary.

        Returns the parsed CSV output of the ``create`` command; when the
        output contains the new object's id, the freshly fetched object is
        returned instead.
        """

        cls.command_sub = 'create'

        if options is None:
            options = {}

        result = cls.execute(
            cls._construct_command(options), output_format='csv', timeout=timeout)

        # Nothing more to do unless the CLI reported a newly created id
        if not result or 'id' not in result[0]:
            return result

        # Fetch the new object.
        # Some Katello objects require the organization-id for subcommands.
        info_options = {'id': result[0]['id']}
        if cls.command_requires_org:
            if 'organization-id' not in options:
                tmpl = 'organization-id option is required for {0}.create'
                raise CLIError(tmpl.format(cls.__name__))
            info_options['organization-id'] = options['organization-id']

        if cls.command_base == 'organization':
            # organization creation can take some time
            new_obj, _ = wait_for(
                lambda: cls.info(info_options),
                timeout=300,
                delay=5,
                silent_failure=True,
                handle_exception=True,
            )
        else:
            new_obj = cls.info(info_options)

        # stdout should be a dictionary containing the object
        if len(new_obj) > 0:
            result = new_obj

        return result
Exemplo n.º 45
0
def test_appliance_console_cli_ha_crud(unconfigured_appliances, app_creds):
    """Tests the configuration of HA with three appliances including failover to standby node

    Roles: appliance 0 is the primary dedicated DB, appliance 1 the standby
    DB, and appliance 2 the EVM/web-UI node that runs the failover monitor.

    Polarion:
        assignee: sbulage
        caseimportance: high
        casecomponent: appl
        initialEstimate: 1h
    """
    apps = unconfigured_appliances
    app0_ip = apps[0].hostname
    app1_ip = apps[1].hostname
    # Configure primary database
    apps[0].appliance_console_cli.configure_appliance_dedicated_db(
        app_creds['username'], app_creds['password'], 'vmdb_production',
        apps[0].unpartitioned_disks[0]
    )
    wait_for(lambda: apps[0].db.is_dedicated_active)
    # Configure webui access on EVM appliance
    apps[2].appliance_console_cli.configure_appliance_external_create(1,
        app0_ip, app_creds['username'], app_creds['password'], 'vmdb_production', app0_ip,
        app_creds['sshlogin'], app_creds['sshpass'])
    apps[2].evmserverd.wait_for_running()
    apps[2].wait_for_web_ui()
    # Configure primary node
    apps[0].appliance_console_cli.configure_appliance_dedicated_ha_primary(
        app_creds['username'], app_creds['password'], 'primary', app0_ip, '1', 'vmdb_production'
    )
    # Configure standby node
    apps[1].appliance_console_cli.configure_appliance_dedicated_ha_standby(
        app_creds['username'], app_creds['password'], 'standby', app0_ip, app1_ip, '2',
        'vmdb_production', apps[1].unpartitioned_disks[0]
    )
    # Configure automatic failover on EVM appliance
    # NOTE(review): the menu indices below depend on the appliance_console
    # version in use — confirm against the target build
    command_set = ('ap', '', '8', '1', '')
    apps[2].appliance_console.run_commands(command_set)

    def is_ha_monitor_started(appliance):
        # failover_databases.yml lists the standby once the monitor is up
        return appliance.ssh_client.run_command(
            "grep {} /var/www/miq/vmdb/config/failover_databases.yml".format(app1_ip)).success
    wait_for(is_ha_monitor_started, func_args=[apps[2]], timeout=300, handle_exception=True)
    # Cause failover to occur
    result = apps[0].ssh_client.run_command('systemctl stop $APPLIANCE_PG_SERVICE', timeout=15)
    assert result.success, "Failed to stop APPLIANCE_PG_SERVICE: {}".format(result.output)

    def is_failover_started(appliance):
        # ha_admin.log records when the monitor starts promoting the standby
        return appliance.ssh_client.run_command(
            "grep 'Starting to execute failover' /var/www/miq/vmdb/log/ha_admin.log").success
    wait_for(is_failover_started, func_args=[apps[2]], timeout=450, handle_exception=True)
    apps[2].evmserverd.wait_for_running()
    apps[2].wait_for_web_ui()
Exemplo n.º 46
0
 def test_project_create(self, provider, gen_project):
     """Create a project and wait until the provider reports it exists."""
     if MOCKED:
         # Pretend the API accepted the project and now reports it present
         provider.api.post.return_value = [
             201, {
                 "apiVersion": "v1",
                 "kind": "Project",
                 "metadata": {
                     "name": gen_project.name
                 }
             }
         ]
         provider.api.get.return_value = [200, {}]
     gen_project.create()
     wait_msg = "Waiting for project {} to be created...".format(gen_project.name)
     assert wait_for(
         lambda: gen_project.exists(),
         message=wait_msg,
         delay=5,
         timeout='1M')
Exemplo n.º 47
0
 def run_analysis(self):
     """Kick off an analysis run and wait for it to complete"""
     view = navigate_to(self, "AnalysisResultsPage")
     # the button may render asynchronously; give it up to 30s to appear
     wait_for(lambda: view.run_analysis_button.is_displayed, delay=5, timeout=30)
     view.run_analysis_button.click()
     # first wait for the run to start, then for it to finish
     wait_for(lambda: view.analysis_results.in_progress(), delay=0.2, timeout=450)
     wait_for(lambda: view.analysis_results.is_analysis_complete(), delay=0.2, timeout=450)
     assert view.analysis_results.is_analysis_complete()
Exemplo n.º 48
0
    def wait_for_text(self, timeout=45, text_to_find="", to_disappear=False):
        """Wait up to ``timeout`` seconds for text to appear (or disappear) on screen.

        Args:
            timeout: seconds before the wait gives up.
            text_to_find: value passed to find_text_on_screen function
            to_disappear: if set to True, wait for text_to_find to vanish
                          from the screen instead.
        Returns: the wait_for output on success; ``None`` on timeout or when
            no text was given.
        """
        if not text_to_find:
            return None
        if to_disappear:
            logger.info("Waiting for {} to disappear from screen".format(text_to_find))
        try:
            # found-state differs from to_disappear exactly when we are done
            outcome = wait_for(
                func=lambda: to_disappear != self.find_text_on_screen(text_to_find),
                delay=5,
                num_sec=timeout)
        except TimedOutError:
            return None
        return outcome.out
Exemplo n.º 49
0
def base__test_label_delete(resource, label_key):
    """Delete a label from a resource and verify it is really gone."""
    provider = resource.provider
    if MOCKED:
        # first report the label as present so delete_label has work to do
        provider.api.patch.return_value = \
            provider.o_api.patch.return_value = [200, {}]
        provider.api.get.return_value = \
            provider.o_api.get.return_value = [200, {
                'metadata': {'labels': {label_key: 'doesntmatter'}}}]
    status = resource.delete_label(label_key)
    assert status[0] == 200
    if MOCKED:
        # now report an empty label set so the wait below can succeed
        provider.api.get.return_value = \
            provider.o_api.get.return_value = [200, {
                'metadata': {'labels': {}}}]
    wait_msg = "Waiting for label {} of {} {} to be deleted...".format(
        label_key,
        type(resource).__name__, resource.name)
    assert wait_for(
        lambda: label_key not in resource.list_labels(),
        message=wait_msg,
        delay=5,
        timeout='1M').out
Exemplo n.º 50
0
def test_alternative_route(tree_mesh):
    """Attach a dual-homed node and verify traffic survives losing either link."""
    extra_node = Node("nodeX",
                      connections=["node4", "node3"],
                      stats_enable=True,
                      stats_port=random_port())
    tree_mesh.add_node(extra_node)
    extra_node.start()
    wait_for(tree_mesh.validate_all_node_routes, delay=6, num_sec=30)
    assert extra_node.ping(1) != "Failed"
    wait_for(tree_mesh.validate_all_node_routes, delay=6, num_sec=30)
    assert "nodeX" in str(tree_mesh.nodes["controller"].get_routes())
    assert tree_mesh.validate_all_node_routes()
    # drop one of the two links; traffic must re-route over the other
    tree_mesh.nodes["node3"].stop()
    time.sleep(7)
    wait_for(tree_mesh.validate_all_node_routes, num_sec=30)
    # TODO make ping return quicker if it can't ping then reenable to ensure node3 is dead
    # assert tree_mesh.nodes['node3'].ping() != "Failed"
    assert extra_node.ping(1) != "Failed"
    tree_mesh.nodes["node3"].start()
    wait_for(tree_mesh.validate_all_node_routes, num_sec=30)
    # now drop the other link and check the restored one carries traffic
    tree_mesh.nodes["node4"].stop()
    time.sleep(7)
    assert extra_node.ping(1) != "Failed"
    extra_node.stop()
Exemplo n.º 51
0
 def _wait_for_process_to_finish(self, name, has_manifest=False):
     """Helper ensuring that task (upload / delete manifest / subscription)
     has finished. Run after action invoking task to leave Satellite
     in usable state.
     Currently waits for three events. Since page is written asynchronously,
     they can happen in any order.
     :param name: Name of running task
     :param has_manifest: Should manifest exist after task ended?
     """
     view = SubscriptionListView(self.browser, logger=self.browser.logger)
     ten_minutes = 60 * 10
     # 1) task-completed flash message shows up
     wait_for(
         lambda: view.flash.assert_message(
             "Task {} completed".format(name), partial=True),
         handle_exception=True,
         timeout=ten_minutes,
         logger=view.flash.logger)
     # 2) the progress bar disappears
     wait_for(
         lambda: not view.progressbar.is_displayed,
         handle_exception=True,
         timeout=ten_minutes,
         logger=view.progressbar.logger)
     # 3) manifest presence matches the expected post-task state
     wait_for(
         lambda: self.has_manifest == has_manifest,
         handle_exception=True,
         timeout=10,
         logger=view.logger)
     view.flash.dismiss()
Exemplo n.º 52
0
 def delete(self):
     """Remove the backing object and block until it no longer exists."""
     self.raw.delete()

     def _gone():
         return not self.exists

     wait_for(_gone, num_sec=120, delay=10)
Exemplo n.º 53
0
 def create_volume(self, size_gb, **kwargs):
     """Create a volume of ``size_gb`` GB and wait until it is available.

     Returns the new volume's id.
     """
     new_id = self.capi.volumes.create(size_gb, **kwargs).id

     def _available():
         return self.capi.volumes.get(new_id).status == "available"

     wait_for(_available, num_sec=60, delay=0.5)
     return new_id
Exemplo n.º 54
0
    def add_disk(self, capacity_in_kb, provision_type=None, unit=None):
        """
        Create a disk on the given datastore (by name)

        Community Example used
        https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/add_disk_to_vm.py

        Return task type from Task.result or Task.error
        https://github.com/vmware/pyvmomi/blob/master/docs/vim/TaskInfo.rst

        Args:
            capacity_in_kb (int): capacity of the new drive in Kilobytes
            provision_type (string): 'thin' or 'thick', will default to thin if invalid option
            unit (int): The unit number of the disk to add, use to override existing disk. Will
                search for next available unit number by default

        Returns:
            (bool, task_result): Tuple containing boolean True if task ended in success,
                                 and the contents of task.result or task.error depending on state

        Raises:
            ValueError: if the VM has no existing VirtualDisk to derive a
                controller / unit number from
        """
        provision_type = provision_type if provision_type in ['thick', 'thin'] else 'thin'
        self.refresh()

        # if passed unit matches existing device unit, match these values too
        key = None
        controller_key = None
        unit_number = None
        virtual_disk_devices = [
            device for device
            in self.raw.config.hardware.device if isinstance(device, vim.vm.device.VirtualDisk)]
        for dev in virtual_disk_devices:
            if unit == int(dev.unitNumber):
                # user specified unit matching existing disk, match key too
                key = dev.key
            # BUGFIX: explicit None check — ``unit or ...`` silently ignored a
            # caller-requested unit of 0 (falsy) and picked the next number
            unit_number = unit if unit is not None else int(dev.unitNumber) + 1
            if unit_number == 7:  # unit 7 is reserved
                unit_number += 1
            # NOTE(review): this keeps the *last* disk's controller; assumes a
            # single disk controller on the VM — confirm for multi-controller VMs
            controller_key = dev.controllerKey

        if not (controller_key or unit_number):
            raise ValueError('Could not identify VirtualDisk device on given vm')

        # create disk backing specification
        backing_spec = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
        backing_spec.diskMode = 'persistent'
        backing_spec.thinProvisioned = (provision_type == 'thin')

        # create disk specification, attaching backing
        disk_spec = vim.vm.device.VirtualDisk()
        disk_spec.backing = backing_spec
        disk_spec.unitNumber = unit_number
        if key:  # only set when overriding existing disk
            disk_spec.key = key
        disk_spec.controllerKey = controller_key
        disk_spec.capacityInKB = capacity_in_kb

        # create device specification, attaching disk
        device_spec = vim.vm.device.VirtualDeviceSpec()
        device_spec.fileOperation = 'create'
        device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        device_spec.device = disk_spec

        # create vm specification for device changes
        vm_spec = vim.vm.ConfigSpec()
        vm_spec.deviceChange = [device_spec]

        # start vm reconfigure task
        task = self.raw.ReconfigVM_Task(spec=vm_spec)

        try:
            wait_for(lambda: task.info.state not in ['running', 'queued'])
        except TimedOutError:
            self.logger.exception('Task did not go to success state: %s', task)
        finally:
            # classify the final task state even if the wait timed out
            if task.info.state == 'success':
                result = (True, task.info.result)
            elif task.info.state == 'error':
                result = (False, task.info.error)
            else:  # shouldn't happen
                result = (None, None)
        return result
Exemplo n.º 55
0
def test_black_console_cli_configure_dedicated_db(unconfigured_appliance,
                                                  app_creds):
    """Configure a dedicated DB via the appliance console CLI and wait for
    the database to become active."""
    appliance = unconfigured_appliance
    appliance.appliance_console_cli.configure_appliance_dedicated_db(
        0, app_creds['username'], app_creds['password'], 'vmdb_production',
        appliance.unpartitioned_disks[0])
    wait_for(lambda: appliance.db.is_dedicated_active)
Exemplo n.º 56
0
    def _clone(self, destination, resourcepool=None, datastore=None, power_on=True,
               sparse=False, template=False, provision_timeout=1800, progress_callback=None,
               allowed_datastores=None, cpu=None, ram=None, **kwargs):
        """
        Clone this template to a VM

        Returns a VMWareVirtualMachine object (or VMWareTemplate when
        ``template=True``)

        Args:
            destination: name for the clone; must not already exist
            resourcepool: resource pool name or a ``vim.ResourcePool``
            datastore: datastore name or ``vim.Datastore``; when ``None`` one
                is picked from ``allowed_datastores`` or the source's datastore
                is reused
            power_on: power the clone on after creation
            sparse: use the sparse relocate transformation instead of flat
            template: mark the clone as a template
            provision_timeout: seconds to wait for the clone task
            progress_callback: callable taking a progress-message string;
                defaults to logging via ``_progress_log_callback``
            allowed_datastores: candidate datastores to pick from by space
            cpu: override the clone's CPU count
            ram: override the clone's memory (MB)

        Raises:
            Exception: if an entity named ``destination`` already exists
            VMInstanceNotCloned: if the clone task does not end in success
        """
        # fail fast if the destination name is already taken
        try:
            vm = self.system.get_vm(destination)
        except VMInstanceNotFound:
            vm = None
        if vm:
            raise Exception("VM/template of the name {} already present!".format(destination))

        if progress_callback is None:
            progress_callback = partial(
                self._progress_log_callback, self.logger, self.name, destination)

        source_template = self.raw

        vm_clone_spec = vim.VirtualMachineCloneSpec()
        vm_reloc_spec = vim.VirtualMachineRelocateSpec()
        # DATASTORE
        if isinstance(datastore, six.string_types):
            vm_reloc_spec.datastore = self.system.get_obj(vim.Datastore, name=datastore)
        elif isinstance(datastore, vim.Datastore):
            vm_reloc_spec.datastore = datastore
        elif datastore is None:
            if allowed_datastores is not None:
                # Pick a datastore by space
                vm_reloc_spec.datastore = self._pick_datastore(allowed_datastores)
            else:
                # Use the same datastore
                datastores = source_template.datastore
                if isinstance(datastores, (list, tuple)):
                    vm_reloc_spec.datastore = datastores[0]
                else:
                    vm_reloc_spec.datastore = datastores
        else:
            raise NotImplementedError("{} not supported for datastore".format(datastore))
        progress_callback("Picked datastore `{}`".format(vm_reloc_spec.datastore.name))

        # RESOURCE POOL
        if isinstance(resourcepool, vim.ResourcePool):
            vm_reloc_spec.pool = resourcepool
        else:
            vm_reloc_spec.pool = self._get_resource_pool(resourcepool)
        progress_callback("Picked resource pool `{}`".format(vm_reloc_spec.pool.name))

        vm_reloc_spec.host = None
        if sparse:
            vm_reloc_spec.transform = vim.VirtualMachineRelocateTransformation().sparse
        else:
            vm_reloc_spec.transform = vim.VirtualMachineRelocateTransformation().flat

        vm_clone_spec.powerOn = power_on
        vm_clone_spec.template = template
        vm_clone_spec.location = vm_reloc_spec
        vm_clone_spec.snapshot = None

        if cpu is not None:
            vm_clone_spec.config.numCPUs = int(cpu)
        if ram is not None:
            vm_clone_spec.config.memoryMB = int(ram)

        # derive the destination folder from the source template's parents
        try:
            folder = source_template.parent.parent.vmParent
        except AttributeError:
            folder = source_template.parent
        progress_callback("Picked folder `{}`".format(folder.name))

        task = source_template.CloneVM_Task(folder=folder, name=destination, spec=vm_clone_spec)

        # store=[task] is a deliberate mutable default: the closure swaps in a
        # refreshed task object (store[0]) on every poll
        def _check(store=[task]):
            try:
                if hasattr(store[0].info, 'progress') and store[0].info.progress is not None:
                    progress_callback("{}/{}%".format(store[0].info.state, store[0].info.progress))
                else:
                    progress_callback("{}".format(store[0].info.state))
            except AttributeError:
                pass
            if store[0].info.state not in {"queued", "running"}:
                return True
            store[0] = self.system.get_updated_obj(store[0])
            return False

        wait_for(_check, num_sec=provision_timeout, delay=4)

        if task.info.state != 'success':
            self.logger.error(
                "Clone VM from VM/template '%s' failed: %s",
                self.name, get_task_error_message(task)
            )
            raise VMInstanceNotCloned(destination)
        if template:
            entity_cls = VMWareTemplate
        else:
            entity_cls = VMWareVirtualMachine
        return entity_cls(system=self.system, name=destination)
Exemplo n.º 57
0
 def wait_for_ok_status(self):
     """Poll the API until the template status becomes OK (up to 30 minutes)."""
     def _is_ok():
         return self.api.get().status == types.TemplateStatus.OK

     wait_for(_is_ok,
              num_sec=30 * 60,
              message="template is OK",
              delay=45)
Exemplo n.º 58
0
                            storage_domain_name,
                            cluster_name,
                            temp_template_name,
                            template_name,
                            async=True,
                            import_as_template=True):
        image_service = self._get_image_service(storage_domain_name,
                                                template_name)
        image_service.import_(
            async=async,
            import_as_template=import_as_template,
            template=types.Template(name=temp_template_name),
            cluster=types.Cluster(name=cluster_name),
            storage_domain=types.StorageDomain(name=storage_domain_name))
        wait_for(self.does_template_exist,
                 func_args=[temp_template_name],
                 delay=5,
                 num_sec=240)

    def _get_disk_service(self, disk_name):
        """Return the service handle for the disk with the given name.

        Raises ItemNotFound when no disk matches ``disk_name``.
        """
        disks_service = self.api.system_service().disks_service()
        matches = disks_service.list(search="name={}".format(disk_name))
        if not matches:
            raise ItemNotFound(disk_name, 'disk')
        return disks_service.service(matches[0].id)

    def does_disk_exist(self, disk_name):
        try:
            return bool(self._get_disk_service(disk_name))
        except ItemNotFound:
def test_webmks_vm_console(request, appliance, provider, vm_obj,
                           configure_websocket, configure_console_webmks,
                           take_screenshot, ssh_client):
    """Test the VMware WebMKS console support for a particular provider.

    The supported providers are:
        VMware vSphere6 and vSphere6.5

    For a given provider, and a given VM, the console will be opened, and then:

        - The console's status will be checked.
        - A command that creates a file will be sent through the console.
        - Using ssh we will check that the command worked (i.e. that the file
          was created.)

    Polarion:
        assignee: apagac
        casecomponent: Infra
        caseimportance: medium
        initialEstimate: 1/4h
    """
    # Credentials for the guest OS come from the provider's console template.
    console_vm_username = credentials[provider.data.templates.get(
        'console_template')['creds']].get('username')
    console_vm_password = credentials[provider.data.templates.get(
        'console_template')['creds']].get('password')

    vm_obj.open_console(console='VM Console', invokes_alert=True)
    assert vm_obj.vm_console, 'VMConsole object should be created'
    vm_console = vm_obj.vm_console

    # Ensure the console window and the appliance session are cleaned up
    # even if an assertion below fails.
    request.addfinalizer(vm_console.close_console_window)
    request.addfinalizer(appliance.server.logout)
    try:
        if appliance.version >= '5.9':
            # Since connection status element is only available in latest 5.9
            assert vm_console.wait_for_connect(
                180), "VM Console did not reach 'connected' state"
        # Get the login screen image, and make sure it is a jpeg file:
        screen = vm_console.get_screen(180)
        assert imghdr.what('', screen) == 'jpeg'

        # FIX: this assertion was garbled in the source ("login:"******
        # artifact, a syntax error); reconstructed to assert the console
        # shows a login prompt before we type the username.
        assert vm_console.wait_for_text(text_to_find="login:", timeout=200), \
            "VM Console didn't prompt for Login"

        def _get_user_count_before_login():
            # Returns (success, result) on a clean ssh run, or False to make
            # wait_for retry after a transient connection reset.
            try:
                result = ssh_client.run_command("who --count",
                                                ensure_user=True)
                return result.success, result
            except socket.error as e:
                if e.errno == socket.errno.ECONNRESET:
                    logger.exception(
                        "Socket Error Occured: [104] Connection Reset by peer."
                    )
                logger.info("Trying again to perform 'who --count' over ssh.")
                return False

        # wait_for returns (func_return, duration); func_return is the
        # (success, result) pair, so take its second element.
        result_before_login, _ = wait_for(func=_get_user_count_before_login,
                                          timeout=300,
                                          delay=5)
        result_before_login = result_before_login[1]
        logger.info("Output of who --count is {} before login".format(
            result_before_login))
        # Enter Username:
        vm_console.send_keys(console_vm_username)

        assert vm_console.wait_for_text(text_to_find="Password", timeout=200),\
            "VM Console didn't prompt for Password"
        # Enter Password:
        vm_console.send_keys("{}\n".format(console_vm_password))

        logger.info("Wait to get the '$' prompt")

        vm_console.wait_for_text(text_to_find=provider.data.templates.get(
            'console_template')['prompt_text'],
                                 timeout=200)

        def _validate_login():
            # the following try/except is required to handle the exception thrown by SSH
            # while connecting to VMware VM.It throws "[Error 104]Connection reset by Peer".
            try:
                result_after_login = ssh_client.run_command("who --count",
                                                            ensure_user=True)
                logger.info("Output of 'who --count' is {} after login".format(
                    result_after_login))
                return result_before_login < result_after_login
            except socket.error as e:
                if e.errno == socket.errno.ECONNRESET:
                    logger.exception(
                        "Socket Error Occured: [104] Connection Reset by peer."
                    )
                logger.info("Trying again to perform 'who --count' over ssh.")
                return False

        # The logged-in user count must grow after a successful console
        # login; if the assertion below never passes, login failed.
        wait_for(func=_validate_login, timeout=300, delay=5)

        # create file on system
        vm_console.send_keys("touch blather\n")
        vm_console.send_keys("\n\n")

        if appliance.version >= '5.9':
            # Since these buttons are only available in latest 5.9
            vm_console.send_ctrl_alt_delete()
            # FIX: this assertion was garbled in the source ("login:"******
            # artifact); reconstructed to verify Ctrl+Alt+Del brought the
            # console back to the login prompt.
            assert vm_console.wait_for_text(
                text_to_find="login:", timeout=200), (
                "Text 'login:' not found! VM Console didn't prompt for Login")
            assert vm_console.send_fullscreen(), (
                "VM Console Toggle Full Screen button does"
                " not work")

        # Confirm over ssh that the file created via the console exists.
        wait_for(func=ssh_client.run_command,
                 func_args=["ls blather"],
                 func_kwargs={'ensure_user': True},
                 handle_exception=True,
                 fail_condition=lambda result: result.rc != 0,
                 delay=1,
                 num_sec=60)
        # if file was created in previous steps it will be removed here
        # we will get instance of SSHResult
        # Sometimes Openstack drops characters from word 'blather' hence try to remove
        # file using partial file name. Known issue, being worked on.
        command_result = ssh_client.run_command("rm blather", ensure_user=True)
        assert command_result

    except Exception:
        # Take a screenshot if an exception occurs
        vm_console.switch_to_console()
        take_screenshot("ConsoleScreenshot")
        vm_console.switch_to_appliance()
        # FIX: bare `raise` re-raises with the original traceback intact
        # (`raise e` would reset it to this frame).
        raise
Exemplo n.º 60
0
 def test_post_create_gce_cr_and_host(self, arch_os_domain, delete_host):
     """Provision a GCE host on a previously created compute resource,
     verify the host reaches 'Installed' build status, then rename and
     re-zone the compute resource and verify the updates persisted.

     Relies on entity names stashed by an earlier test via
     ``get_entity_data`` (cr_name, org, loc).
     """
     arch, os, domain_name = arch_os_domain
     hostname = gen_string('alpha')
     # Stash the FQDN on the class so later tests in this class can reuse it.
     self.__class__.fullhost = f'{hostname}.{domain_name}'.lower()
     preentities = get_entity_data(self.__class__.__name__)
     gce_cr = entities.GCEComputeResource().search(
         query={'search': f'name={preentities["cr_name"]}'})[0]
     org = entities.Organization().search(
         query={'search': f'name={preentities["org"]}'})[0]
     loc = entities.Location().search(
         query={'search': f'name={preentities["loc"]}'})[0]
     # Compute profile for the GCE instance; image-based provisioning.
     compute_attrs = {
         'machine_type': 'g1-small',
         'network': 'default',
         'associate_external_ip': True,
         'volumes_attributes': {
             '0': {
                 'size_gb': '10'
             }
         },
         'image_id': LATEST_RHEL7_GCE_IMG_UUID,
     }
     # Host Provisioning Tests
     try:
         # Patch the finish template so provisioning skips yum update;
         # the finally block always restores the template.
         skip_yum_update_during_provisioning(
             template='Kickstart default finish')
         gce_hst = entities.Host(
             name=hostname,
             organization=org,
             location=loc,
             root_pass=gen_string('alphanumeric'),
             architecture=arch,
             compute_resource=gce_cr,
             domain=entities.Domain().search(
                 query={'search': f'name={domain_name}'})[0],
             compute_attributes=compute_attrs,
             operatingsystem=os,
             provision_method='image',
         ).create()
     finally:
         skip_yum_update_during_provisioning(
             template='Kickstart default finish', reverse=True)
     # Poll (up to ~400s) until the host reports 'Installed'; failures are
     # swallowed here and caught by the explicit assertions below.
     wait_for(
         lambda: entities.Host().search(query={
             'search': f'name={self.fullhost}'
         })[0].build_status_label == 'Installed',
         timeout=400,
         delay=15,
         silent_failure=True,
         handle_exception=True,
     )
     assert gce_hst.name == self.fullhost
     # Re-read from the API to get the final build status.
     gce_hst = entities.Host(id=gce_hst.id).read()
     assert gce_hst.build_status_label == 'Installed'
     # CR Manipulation Tests
     newgce_name = gen_string('alpha')
     newgce_zone = random.choice(VALID_GCE_ZONES)
     gce_cr.name = newgce_name
     gce_cr.zone = newgce_zone
     gce_cr.update(['name', 'zone'])
     # Re-read the compute resource to confirm the update was persisted.
     gce_cr = entities.GCEComputeResource(id=gce_cr.id).read()
     assert gce_cr.name == newgce_name
     assert gce_cr.zone == newgce_zone