def auth_user_data(auth_provider, user_type):
    """Grab user data attrdict from auth provider's user data in yaml

    Expected formatting of yaml containing user data:
        test_users:
        -
          username: ldapuser2
          password: mysecretpassworddontguess
          fullname: Ldap User2
          groupname: customgroup1
          providers:
            - freeipa01
          user_types:
            - uid

    Only include user data for users where the user_type matches that under test

    Assert the data isn't empty, and skip the test if so
    """
    try:
        data = [user
                for user in auth_provider.user_data
                if user_type in user.user_types]
        assert data
    except (KeyError, AttributeError, AssertionError):
        logger.exception('Exception fetching auth_user_data from yaml')
        pytest.skip('No yaml data for auth_prov {} under "auth_data.test_data"'
                    .format(auth_provider))
    return data
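# Hedged usage sketch (the fixture/test names and the `auth_provider` fixture wiring are
# illustrative, not part of the original module): how a test might consume auth_user_data
# for the 'uid' user_type. pytest is assumed to be imported already, as it is above.
@pytest.fixture
def ldap_user_data(auth_provider):
    # auth_user_data skips the test itself if there is no matching yaml data
    return auth_user_data(auth_provider, 'uid')


def test_login_as_ldap_user(ldap_user_data):
    user = ldap_user_data[0]
    assert user.username and user.password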
    def test_assign_and_unassign_tag(self, appliance, tags_mod, provider, services_mod,
            service_templates, tenants, vm, collection_name):
        """Tests assigning and unassigning tags.

        Metadata:
            test_flag: rest

        Polarion:
            assignee: pvala
            casecomponent: Rest
            caseimportance: high
            initialEstimate: 1/5h
        """
        collection = getattr(appliance.rest_api.collections, collection_name)
        collection.reload()
        if not collection.all:
            pytest.skip("No available entity in {} to assign tag".format(collection_name))
        entity = collection[-1]
        tag = tags_mod[0]
        try:
            entity.tags.action.assign(tag)
        except AttributeError:
            msg = ('Missing tag attribute in parametrized REST collection {} for entity: {}'
                   .format(collection_name, entity))
            logger.exception(msg)
            pytest.fail(msg)
        assert_response(appliance)
        entity.reload()
        assert tag.id in [t.id for t in entity.tags.all]
        entity.tags.action.unassign(tag)
        assert_response(appliance)
        entity.reload()
        assert tag.id not in [t.id for t in entity.tags.all]
    def main(self):
        track = False
        teardown = False
        try:
            if self.stream in self.blocked_streams:
                logger.info('This stream (%s) is blocked for the given provider type, %s',
                            self.stream, self.provider_type)
                return True
            if self.provider_type != 'openshift' and self.mgmt.does_template_exist(
                    self.template_name):
                logger.info("(template-upload) [%s:%s:%s] Template already exists",
                            self.log_name, self.provider_key, self.template_name)
                track = True
            else:
                teardown = True
                if self.decorated_run():
                    track = True
            if track and self.provider_type != 'openshift':
                # openshift run will call track_template since it needs custom_data kwarg
                self.track_template()

            return True

        except TemplateUploadException:
            logger.exception('TemplateUploadException, failed upload')
            return False
        except Exception:
            logger.exception('non-TemplateUploadException, failed interaction with provider')
            return False

        finally:
            if teardown:
                self.teardown()
def delete_stacks(provider_mgmt, excluded_stacks, stack_template, output):
    stack_list = []
    provider_name = provider_mgmt.kwargs['name']
    try:
        for stack in provider_mgmt.list_stacks():
            if ((excluded_stacks and stack.name in excluded_stacks) or
                    not stack.name.startswith(stack_template)):
                logger.info("  Excluding Stack name: %r", stack.name)
                continue
            else:
                today = datetime.utcnow().replace(tzinfo=None)
                some_date = today - timedelta(days=1)
                if stack.creation_time < some_date:
                    stack_list.append([provider_name, stack.name])
                    try:
                        stack.cleanup()
                    except Exception as e:
                        logger.error(e)
                        continue
        logger.info("  Deleted CloudFormation Stacks: %r", stack_list)
        with open(output, 'a+') as report:
            if stack_list:
                report.write(tabulate(tabular_data=stack_list,
                                      headers=['Provider Key', 'Stack Name'],
                                      tablefmt='orgtbl'))
    except Exception:
        # TODO don't diaper this whole method
        logger.exception('Exception in %r', delete_stacks.__name__)
def delete_disassociated_addresses(provider_mgmt, excluded_eips, output):
    ip_list = []
    provider_name = provider_mgmt.kwargs['name']
    try:
        for ip in provider_mgmt.get_all_disassociated_addresses():
            if ip.allocation_id:
                if excluded_eips and ip.allocation_id in excluded_eips:
                    logger.info("  Excluding allocation ID: %r", ip.allocation_id)
                    continue
                else:
                    ip_list.append([provider_name, ip.public_ip, ip.allocation_id])
                    provider_mgmt.release_vpc_address(alloc_id=ip.allocation_id)
            else:
                if excluded_eips and ip.public_ip in excluded_eips:
                    logger.info("  Excluding IP: %r", ip.public_ip)
                    continue
                else:
                    ip_list.append([provider_name, ip.public_ip, 'N/A'])
                    provider_mgmt.release_address(address=ip.public_ip)
        logger.info("  Released Addresses: %r", ip_list)
        with open(output, 'a+') as report:
            if ip_list:
                # tabulate ip_list and write it
                report.write(tabulate(tabular_data=ip_list,
                                      headers=['Provider Key', 'Public IP', 'Allocation ID'],
                                      tablefmt='orgtbl'))

    except Exception:
        # TODO don't diaper this whole method
        logger.exception('Exception in %r', delete_disassociated_addresses.__name__)
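# Hypothetical driver sketch, assuming the same get_mgmt helper used elsewhere in this
# module and yaml-backed provider keys; it only shows how the two EC2 cleanup helpers
# above might be chained for one provider (the wrapper name and defaults are made up).
def cleanup_ec2_provider(provider_key, output='cleanup_report.txt'):
    provider_mgmt = get_mgmt(provider_key)
    delete_disassociated_addresses(provider_mgmt, excluded_eips=None, output=output)
    delete_stacks(provider_mgmt, excluded_stacks=None, stack_template='test-', output=output)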
def parse_template(template_name):
    """Given a template name, attempt to extract its group name and upload date

    Returns:
        * TemplateInfo('unknown', None, False) if no matchers matched
        * group_name, datestamp of the first matching group. group name will be a string,
          datestamp will be a :py:class:`datetime.date <python:datetime.date>`, or None if
          a date can't be derived from the template name
    """
    for group_name, regex in stream_matchers:
        matches = re.match(regex, template_name)
        if matches:
            groups = matches.groupdict()
            # hilarity may ensue if this code is run right before the new year
            today = date.today()
            year = int(groups.get('year', today.year))
            month, day = int(groups['month']), int(groups['day'])
            # validate the template date by turning into a date obj
            try:
                # year, month, day might have been parsed incorrectly with loose regex
                template_date = futurecheck(date(year, month, day))
            except ValueError:
                logger.exception('Failed to parse year: %s, month: %s, day: %s correctly '
                                 'from template %s with regex %s',
                                 year, month, day, template_name, regex)
                continue
            return TemplateInfo(group_name, template_date, True)
    for group_name, regex in generic_matchers:
        matches = re.match(regex, template_name)
        if matches:
            return TemplateInfo(group_name, None, False)
    # If no match, unknown
    return TemplateInfo('unknown', None, False)
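# A minimal sketch, assuming the parsed date field on TemplateInfo is named `datestamp`
# (as the docstring above suggests); the helper name and the `days` default are
# illustrative only. It collects template names whose parsed date is older than a cutoff.
def old_templates(template_names, days=7):
    from datetime import timedelta
    cutoff = date.today() - timedelta(days=days)
    old = []
    for name in template_names:
        info = parse_template(name)
        if info.datestamp and info.datestamp < cutoff:
            old.append(name)
    return old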
    def remove_tag(self, items, tag, cancel=False, reset=False, entity_filter_key='name'):
        """ Remove tag of tested item

        Args:
            items: list of entity objects; a list of entity names can also be passed
            tag: Tag object
            cancel: set True to cancel tag deletion
            reset: set True to reset tag changes
            entity_filter_key: used when items are objects, this is the attribute name to filter on

        """
        view = navigate_to(self, 'All')
        for item in items:
            entity_kwargs = {}
            try:
                # set up an entities widget filter with the given key, using the item if
                # it's a string, or the item's attribute (given key) otherwise
                entity_kwargs[entity_filter_key] = (item if isinstance(item, six.string_types)
                                                    else getattr(item, entity_filter_key))
            except AttributeError:
                logger.exception('TaggableCollection item does not have attribute to search by: %s',
                                 entity_filter_key)
                raise
            # checkbox for given entity
            view.entities.get_entity(surf_pages=True, **entity_kwargs).check()
        view = self._open_edit_tag_page(view)
        self._unassign_tag_action(view, tag)
        self._tags_action(view, cancel, reset)
def appliance_preupdate(old_version, appliance):
    """Requests appliance from sprout based on old_versions, edits partitions and adds
    repo file for update"""

    series = appliance.version.series()
    update_url = "update_url_{}".format(series.replace('.', ''))

    usable = []
    sp = SproutClient.from_config()
    available_versions = set(sp.call_method('available_cfme_versions'))
    for a in available_versions:
        if a.startswith(old_version):
            usable.append(Version(a))
    usable.sort(reverse=True)
    try:
        apps, pool_id = sp.provision_appliances(count=1, preconfigured=True,
            lease_time=180, version=str(usable[0]))
    except Exception as e:
        logger.exception("Couldn't provision appliance with following error:{}".format(e))
        raise SproutException('No provision available')

    apps[0].db.extend_partition()
    urls = process_url(conf.cfme_data['basic_info'][update_url])
    output = build_file(urls)
    with tempfile.NamedTemporaryFile('w') as f:
        f.write(output)
        f.flush()
        os.fsync(f.fileno())
        apps[0].ssh_client.put_file(
            f.name, '/etc/yum.repos.d/update.repo')
    yield apps[0]
    apps[0].ssh_client.close()
    sp.destroy_pool(pool_id)
def cleanup(api, qcowname, provider, temp_template_name, temp_vm_name):
    """Cleans up all the mess that the previous functions left behind.

    Args:
        api: API to chosen RHEVM provider.
        qcowname: Name of the .qcow2 file to delete.
        provider: Provider key, used in log messages.
        temp_template_name: Name of the temporary template to delete.
        temp_vm_name: Name of the temporary VM to delete.
    """
    try:
        logger.info("RHEVM:%r Deleting the  .qcow2 file...", provider)
        rc = subprocess.call(
            ['rm', qcowname])
        if rc != 0:
            print('Failure deleting qcow2 file')

        logger.info("RHEVM:%r Deleting the temp_vm on sdomain...", provider)
        temporary_vm = api.vms.get(temp_vm_name)
        if temporary_vm:
            temporary_vm.delete()

        logger.info("RHEVM:%r Deleting the temp_template on sdomain...", provider)
        temporary_template = api.templates.get(temp_template_name)
        if temporary_template:
            temporary_template.delete()

    except Exception:
        logger.exception("RHEVM:%r Exception occurred in cleanup method:", provider)
        return False
def host_creds(request, v2v_providers):
    """Add credentials to VMware and RHV hosts."""
    if len(v2v_providers) > 2:
        pytest.skip(
            "There are more than two providers in v2v_providers fixture,"
            "which is invalid, skipping."
        )
    try:
        vmware_provider = v2v_providers.vmware_provider
        rhv_provider = v2v_providers.rhv_provider
        vmware_hosts = vmware_provider.hosts.all()
        try:
            for host in vmware_hosts:
                host_data, = [
                    data for data in vmware_provider.data["hosts"] if data["name"] == host.name
                ]
                host.update_credentials_rest(credentials=host_data["credentials"])

            rhv_hosts = rhv_provider.hosts.all()
            for host in rhv_hosts:
                host_data, = [data for data in rhv_provider.data["hosts"]
                              if data["name"] == host.name]
                host.update_credentials_rest(credentials=host_data["credentials"])
        except ValueError as error:
            pytest.skip('Host data invalid for provider: {}'.format(error))
    except Exception:
        # if above throws ValueError or TypeError or other exception, just skip the test
        logger.exception("Exception when trying to add the host credentials.")
        pytest.skip("No data for hosts in providers, failed to retrieve hosts and add creds.")
    # only yield RHV hosts as they will be required to tag with conversion_tags fixture
    yield rhv_hosts
    for host in itertools.chain(rhv_hosts, vmware_hosts):
        host.remove_credentials_rest()
    def __getattr__(self, name):
        if name not in self._availiable_collections:
            sorted_collection_keys = sorted(self._availiable_collections)
            raise AttributeError('Collection [{}] not known to object, available collections: {}'
                                 .format(name, sorted_collection_keys))
        if name not in self._collection_cache:
            item_filters = self._filters.copy()
            cls_and_or_filter = self._availiable_collections[name]
            if isinstance(cls_and_or_filter, tuple):
                item_filters.update(cls_and_or_filter[1])
                cls_or_verpick = cls_and_or_filter[0]
            else:
                cls_or_verpick = cls_and_or_filter
            # Now check whether we verpick the collection or not
            if isinstance(cls_or_verpick, VersionPick):
                cls = cls_or_verpick.pick(self._parent.appliance.version)
                try:
                    logger.info(
                        '[COLLECTIONS] Version picked collection %s as %s.%s',
                        name, cls.__module__, cls.__name__)
                except (AttributeError, TypeError, ValueError):
                    logger.exception('[COLLECTIONS] Is the collection %s truly a collection?', name)
            else:
                cls = cls_or_verpick
            self._collection_cache[name] = cls(self._parent, filters=item_filters)
        return self._collection_cache[name]
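# Purely illustrative shape of the registry that __getattr__ above expects; the collection
# class names and the version split below are made up for the example, not real entries.
#
# _availiable_collections = {
#     'hosts': HostsCollection,                            # plain collection class
#     'volumes': (VolumesCollection, {'provider': None}),  # class plus extra filters
#     'networks': VersionPick({
#         Version.lowest(): OldNetworkCollection,
#         '5.10': NewNetworkCollection,
#     }),
# }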
def add_disk_to_vm(api, sdomain, disk_size, disk_format, disk_interface, temp_vm_name,
                   provider):
    """Adds second disk to a temporary VM.

    Args:
        api: API to chosen RHEVM provider.
        sdomain: Storage domain to save new disk onto.
        disk_size: Size of the new disk (in B).
        disk_format: Format of the new disk.
        disk_interface: Interface of the new disk.
    """
    try:
        if len(api.vms.get(temp_vm_name).disks.list()) > 1:
            logger.info("RHEVM:%r Warning: found more than one disk in existing VM (%r).",
                    provider, temp_vm_name)
            logger.info("RHEVM:%r Skipping this step, attempting to continue...", provider)
            return
        actual_sdomain = api.storagedomains.get(sdomain)
        temp_vm = api.vms.get(temp_vm_name)
        storage_id = params.StorageDomains(
            storage_domain=[params.StorageDomain(id=actual_sdomain.get_id())])
        params_disk = params.Disk(storage_domains=storage_id, size=disk_size,
                                  interface=disk_interface, format=disk_format)
        temp_vm.disks.add(params_disk)

        wait_for(check_disks, [api, temp_vm_name], fail_condition=False, delay=5, num_sec=900)

        # check, if there are two disks
        if len(api.vms.get(temp_vm_name).disks.list()) < 2:
            logger.error("RHEVM:%r Disk failed to add", provider)
            sys.exit(127)
        logger.info("RHEVM:%r Successfully added disk", provider)
    except Exception:
        logger.exception("RHEVM:%r add_disk_to_temp_vm failed:", provider)
def cleanup_templates(api, edomain, days, max_templates):
    try:
        templates = api.storagedomains.get(edomain).templates.list()
        thread_queue = []
        delete_templates = []
        for template in templates:
            delta = datetime.timedelta(days=days)
            now = datetime.datetime.now(pytz.utc)
            template_creation_time = template.get_creation_time().astimezone(pytz.utc)

            if template.get_name().startswith('auto-tmp'):
                if now > (template_creation_time + delta):
                    delete_templates.append(template)

        if not delete_templates:
            print("RHEVM: No old templates to delete in {}".format(edomain))

        for delete_template in delete_templates[:max_templates]:
            thread = Thread(target=delete_edomain_templates,
                            args=(api, delete_template, edomain))
            thread.daemon = True
            thread_queue.append(thread)
            thread.start()

        for thread in thread_queue:
            thread.join()
    except Exception as e:
        logger.exception(e)
        return False
def appliance_preupdate(old_version, appliance):
    """Requests appliance from sprout based on old_versions, edits partitions and adds
    repo file for update"""

    series = appliance.version.series()
    update_url = "update_url_{}".format(series.replace('.', ''))

    usable = []
    sp = SproutClient.from_config()
    available_versions = set(sp.call_method('available_cfme_versions'))
    for a in available_versions:
        if a.startswith(old_version):
            usable.append(Version(a))
    usable.sort(reverse=True)
    try:
        apps, pool_id = sp.provision_appliances(count=1, preconfigured=True,
                                                lease_time=180, version=str(usable[0]))
    except Exception as e:
        logger.exception("Couldn't provision appliance with following error:{}".format(e))
        raise SproutException('No provision available')

    apps[0].db.extend_partition()
    urls = cfme_data["basic_info"][update_url]
    apps[0].ssh_client.run_command(
        "curl {} -o /etc/yum.repos.d/update.repo".format(urls)
    )
    logger.info('Appliance update.repo file: \n%s',
                apps[0].ssh_client.run_command('cat /etc/yum.repos.d/update.repo').output)
    yield apps[0]
    apps[0].ssh_client.close()
    sp.destroy_pool(pool_id)
def templatize_vm(api, template_name, cluster, temp_vm_name, provider):
    """Templatizes temporary VM. Result is template with two disks.

    Args:
        api: API to chosen RHEVM provider.
        template_name: Name of the final template.
        cluster: Cluster to save the final template onto.
    """
    try:
        if api.templates.get(template_name) is not None:
            logger.info("RHEVM:%r Warning: found finished template with this name (%r).",
                    provider, template_name)
            logger.info("RHEVM:%r Skipping this step, attempting to continue", provider)
            return
        temporary_vm = api.vms.get(temp_vm_name)
        actual_cluster = api.clusters.get(cluster)
        new_template = params.Template(name=template_name, vm=temporary_vm, cluster=actual_cluster)
        api.templates.add(new_template)

        wait_for(check_disks, [api, temp_vm_name], fail_condition=False, delay=5, num_sec=900)

        # check, if template is really there
        if not api.templates.get(template_name):
            logger.error("RHEVM:%r templatizing temporary VM failed", provider)
            sys.exit(127)
        logger.info("RHEVM:%r successfully templatized the temporary VM", provider)
    except Exception:
        logger.exception("RHEVM:%r templatizing temporary VM failed", provider)
def upload_ova(hostname, username, password, name, datastore,
               cluster, datacenter, url, provider, proxy,
               ovf_tool_client, default_user, default_pass):

    cmd_args = []
    cmd_args.append('ovftool --noSSLVerify')
    cmd_args.append("--datastore={}".format(datastore))
    cmd_args.append("--name={}".format(name))
    cmd_args.append("--vCloudTemplate=True")
    cmd_args.append("--overwrite")  # require when failures happen and it retries
    if proxy:
        cmd_args.append("--proxy={}".format(proxy))
    cmd_args.append(url)
    cmd_args.append(
        "'vi://{}:{}@{}/{}/host/{}'"
        .format(username, password, hostname, datacenter, cluster)
    )
    logger.info("VSPHERE:%r Running OVFTool", provider)

    command = ' '.join(cmd_args)
    with make_ssh_client(ovf_tool_client, default_user, default_pass) as ssh_client:
        try:
            result = ssh_client.run_command(command)
        except Exception:
            logger.exception("VSPHERE:%r Exception during upload", provider)
            return False

    if "successfully" in result.output:
        logger.info(" VSPHERE:%r Upload completed", provider)
        return True
    else:
        logger.error("VSPHERE:%r Upload failed: %r", provider, result.output)
        return False
def pytest_runtest_teardown(item, nextitem):
    name, location = get_test_idents(item)
    app = find_appliance(item)
    ip = app.hostname
    fire_art_test_hook(
        item, 'finish_test',
        slaveid=store.slaveid, ip=ip, wait_for_task=True)
    fire_art_test_hook(item, 'sanitize', words=words)
    jenkins_data = {
        'build_url': os.environ.get('BUILD_URL'),
        'build_number': os.environ.get('BUILD_NUMBER'),
        'git_commit': os.environ.get('GIT_COMMIT'),
        'job_name': os.environ.get('JOB_NAME')
    }
    param_dict = None
    try:
        caps = app.browser.widgetastic.selenium.capabilities
        param_dict = {
            'browserName': caps['browserName'],
            'browserPlatform': caps['platform'],
            'browserVersion': caps['version']
        }
    except Exception:
        logger.exception("Couldn't grab browser env_vars")
        pass  # param_dict stays None if browser capabilities couldn't be read

    fire_art_test_hook(
        item, 'ostriz_send', env_params=param_dict,
        slaveid=store.slaveid, polarion_ids=extract_polarion_ids(item), jenkins=jenkins_data)
def add_disk_to_vm(mgmt, sdomain, disk_size, disk_format, disk_interface, temp_vm_name,
                   provider):
    """Adds second disk to a temporary VM.

    Args:
        mgmt: A ``RHEVMSystem`` instance from wrapanapi.
        sdomain: Storage domain to save new disk onto.
        disk_size: Size of the new disk (in B).
        disk_format: Format of the new disk.
        disk_interface: Interface of the new disk.
    """
    try:
        vm = mgmt.get_vm(temp_vm_name)
        if vm.get_disks_count() > 1:
            logger.info("RHEVM:%r Warning: found more than one disk in existing VM (%r).",
                    provider, temp_vm_name)
            logger.info("RHEVM:%r Skipping this step, attempting to continue...", provider)
            return
        vm.add_disk(
            storage_domain=sdomain,
            size=disk_size,
            interface=disk_interface,
            format=disk_format
        )
        # check, if there are two disks
        if vm.get_disks_count() < 2:
            logger.error("RHEVM:%r Disk failed to add", provider)
            sys.exit(127)
        logger.info("RHEVM:%r Successfully added disk", provider)
    except Exception:
        logger.exception("RHEVM:%r add_disk_to_temp_vm failed:", provider)
        raise
def templatize_vm(mgmt, template_name, cluster, temp_vm_name, provider):
    """Templatizes temporary VM. Result is template with two disks.

    Args:
        mgmt: A ``RHEVMSystem`` instance from wrapanapi.
        template_name: Name of the final template.
        cluster: Cluster to save the final template onto.
    """
    try:
        if mgmt.does_template_exist(template_name):
            logger.info("RHEVM:%r Warning: found finished template with this name (%r).",
                    provider, template_name)
            logger.info("RHEVM:%r Skipping this step, attempting to continue", provider)
            return
        vm = mgmt.get_vm(temp_vm_name)
        template = vm.mark_as_template(
            temporary_name=template_name,
            cluster=cluster,
            delete=False
        )
        # check, if template is really there
        if not template.exists:
            logger.error("RHEVM:%r templatizing temporary VM failed", provider)
            sys.exit(127)
        logger.info("RHEVM:%r successfully templatized the temporary VM", provider)
    except Exception:
        logger.exception("RHEVM:%r templatizing temporary VM failed", provider)
        raise
def import_template(mgmt, cfme_data, edomain, sdomain, cluster, temp_template_name, provider):
    """Imports template from export domain to storage domain.

    Args:
        mgmt: A ``RHEVMSystem`` instance from wrapanapi.
        edomain: Export domain of selected RHEVM provider.
        sdomain: Storage domain of selected RHEVM provider.
        cluster: Cluster to save imported template on.
    """
    try:
        if mgmt.find_templates(temp_template_name):
            logger.info("RHEVM:%r Warning: found another template with this name.", provider)
            logger.info("RHEVM:%r Skipping this step, attempting to continue...", provider)
            return
        mgmt.import_template(edomain, sdomain, cluster, temp_template_name)
        if not mgmt.does_template_exist(temp_template_name):
            logger.info("RHEVM:%r The template failed to import on data domain", provider)
            sys.exit(127)
        logger.info("RHEVM:%r successfully imported template on data domain", provider)
        logger.info('RHEVM:%r updating nic on template', provider)
        network_name = cfme_data.management_systems[provider].template_upload.management_network
        mgmt.get_template(temp_template_name).update_nic(
            network_name=network_name, nic_name='eth0')
    except Exception:
        logger.exception("RHEVM:%r import_template to data domain failed:", provider)
        raise
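# Hedged sketch of the overall flow the wrapanapi-based helpers above implement, in the
# order their docstrings suggest (the wrapper function, its `args` namespace, and the
# omitted temp-VM deployment step are illustrative only; error handling is left out).
def build_rhevm_template(mgmt, cfme_data, args):
    import_template(mgmt, cfme_data, args.edomain, args.sdomain, args.cluster,
                    args.temp_template_name, args.provider)
    # ... deploy a temporary VM (args.temp_vm_name) from the imported template here ...
    add_disk_to_vm(mgmt, args.sdomain, args.disk_size, args.disk_format,
                   args.disk_interface, args.temp_vm_name, args.provider)
    templatize_vm(mgmt, args.template_name, args.cluster, args.temp_vm_name, args.provider)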
def change_edomain_state(mgmt, state, edomain, provider):
    try:
        return mgmt.change_storage_domain_state(state, edomain)
    except Exception:
        logger.exception("RHEVM:%s Exception occurred while changing %s state to %s",
                         provider, edomain, state)
        return False
def get_registered_vm_files(provider_key):
    try:
        print("{} processing all the registered files..".format(provider_key))
        vm_registered_files = defaultdict(set)
        provider = get_mgmt(provider_key)
        for vm_name in provider.list_vm():
            try:
                vm_file_path = provider.get_vm_config_files_path(vm_name)
                vm_directory_name = re.findall(r'\s(.*)/\w*', vm_file_path)
                vm_registered_files[vm_directory_name[0]] = vm_name
            except Exception as e:
                logger.error(e)
                logger.error('Failed to get config file path for {} on {}'.format(
                    vm_name, provider_key))
                continue
        print("\n**************************REGISTERED FILES ON {}***********************\n".format(
            provider_key))
        for k, v in vm_registered_files.items():
            print('FILE_NAME: {}\nVM_NAME: {}\n'.format(k, v))
        return vm_registered_files
    except Exception as ex:
        # Print out the error message too because logs in the job get deleted
        print('{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex)))
        logger.error('failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
def scan_provider(provider_key, matchers, match_queue, scan_failure_queue):
    """
    Process the VMs on a given provider, comparing name and creation time.
    Append vms meeting criteria to vms_to_delete

    Args:
        provider_key (string): the provider key from yaml
        matchers (list): A list of regex objects with match() method
        match_queue (Queue.Queue): MP queue to hold VMs matching age requirement
        scan_failure_queue (Queue.Queue): MP queue to hold vms that we could not compare age
    Returns:
        None: Uses the Queues to 'return' data
    """
    logger.info('%r: Start scan for vm text matches', provider_key)
    try:
        vm_list = get_mgmt(provider_key).list_vm()
    except Exception:  # noqa
        scan_failure_queue.put(VmReport(provider_key, FAIL, NULL, NULL, NULL))
        logger.exception('%r: Exception listing vms', provider_key)
        return

    text_matched_vms = [name for name in vm_list if match(matchers, name)]
    for name in text_matched_vms:
        match_queue.put(VmProvider(provider_key, name))

    non_text_matching = set(vm_list) - set(text_matched_vms)
    logger.info('%r: NOT matching text filters: %r', provider_key, non_text_matching)
    logger.info('%r: MATCHED text filters: %r', provider_key, text_matched_vms)
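# A minimal sketch, assuming the match/failure queue protocol used by scan_provider above;
# it fans out one thread per provider key and returns the shared queues. The function
# name and the plain queue.Queue/threading.Thread choice are assumptions for illustration.
def scan_all_providers(provider_keys, matchers):
    from queue import Queue
    from threading import Thread
    match_queue, scan_failure_queue = Queue(), Queue()
    threads = [
        Thread(target=scan_provider, args=(key, matchers, match_queue, scan_failure_queue))
        for key in provider_keys
    ]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    return match_queue, scan_failure_queue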
def test_provider_type_support(appliance, soft_assert):
    """Test availability of GCE provider in downstream CFME builds

    Polarion:
        assignee: anikifor
        initialEstimate: 1/10h
        casecomponent: WebUI
    """
    classes_to_test = provider_classes(appliance)
    for category, providers in classes_to_test.items():
        try:
            collection = getattr(appliance.collections, providers[0].collection_name)
        except AttributeError:
            msg = 'Missing collection name for a provider class, cannot test UI field'
            logger.exception(msg)
            pytest.fail(msg)
        view = navigate_to(collection, 'Add')
        options = [o.text for o in view.prov_type.all_options]
        for provider_class in providers:
            type_text = provider_class.ems_pretty_name
            if type_text is not None:
                soft_assert(
                    type_text in options,
                    'Provider type [{}] not in Add provider form options [{}]'
                    .format(type_text, options)
                )
def cleanup_empty_dir_on_edomain(path, edomainip, sshname, sshpass, provider_ip, provider):
    """Cleanup all the empty directories on the edomain/edomain_id/master/vms
    else api calls will result in 400 Error with ovf not found,
    Args:
        path: path for vms directory on edomain.
        edomain: Export domain of chosen RHEVM provider.
        edomainip: edomainip to connect through ssh.
        sshname: edomain ssh credentials.
        sshpass: edomain ssh credentials.
        provider: provider under execution
        provider_ip: provider ip address
    """
    try:
        edomain_path = edomainip + ':' + path
        temp_path = '~/tmp_filemount'
        command = 'mkdir -p {} &&'.format(temp_path)
        command += 'mount -O tcp {} {} &&'.format(edomain_path, temp_path)
        command += 'cd {}/master/vms &&'.format(temp_path)
        command += 'find . -maxdepth 1 -type d -empty -delete &&'
        command += 'cd ~ && umount {} &&'.format(temp_path)
        command += 'rmdir {}'.format(temp_path)
        logger.info("RHEVM:%r Deleting the empty directories on edomain/vms file...", provider)

        with make_ssh_client(provider_ip, sshname, sshpass) as ssh_client:
            result = ssh_client.run_command(command)
        if result.failed:
            logger.error("RHEVM:%r Error while deleting the empty directories on path: \n %r",
                provider, str(result))
        else:
            logger.info("RHEVM:%r successfully deleted the empty directories on path..", provider)
    except Exception:
        logger.exception('RHEVM:%r Exception cleaning up empty dir on edomain', provider)
        return False
    def delete_from_provider(self):
        """
        Delete VM/instance from provider.

        You cannot expect additional cleanup of attached resources by calling this method. You
        should use cleanup_on_provider() to guarantee that.
        """
        logger.info("Begin delete_from_provider for VM '{}'".format(self.name))
        if not self.provider.mgmt.does_vm_exist(self.name):
            logger.info("VM does '{}' not exist on provider, nothing to delete".format(self.name))
            return True

        # Some providers require VM to be stopped before removal
        logger.info(
            "delete: ensuring VM '{}' on provider '{}' is powered off".format(
                self.name, self.provider.key)
        )
        self.ensure_state_on_provider(self.STATE_OFF)

        logger.info("delete: removing VM '{}'".format(self.name))
        try:
            return self.provider.mgmt.delete_vm(self.name)
        except Exception:
            logger.exception("delete for vm '{}' failed".format(self.name))

        return True
def change_edomain_state(api, state, edomain, provider):
    try:
        dcs = api.datacenters.list()
        for dc in dcs:
            export_domain = dc.storagedomains.get(edomain)
            if export_domain:
                if state == 'maintenance' and export_domain.get_status().state == 'active':
                    # may be tasks on the storage, try multiple times
                    logger.info('RHEVM:%s %s in active, waiting for deactivate...',
                                provider, edomain)
                    wait_for(lambda: dc.storagedomains.get(edomain).deactivate(), delay=5,
                             num_sec=600, handle_exception=True)
                elif state == 'active' and export_domain.get_status().state != 'active':
                    logger.info('RHEVM:%s %s not active, waiting for active...',
                                provider, edomain)
                    wait_for(lambda: dc.storagedomains.get(edomain).activate(), delay=5,
                             num_sec=600, handle_exception=True)

                wait_for(is_edomain_in_state, [api, state, edomain],
                         fail_condition=False, delay=5, num_sec=240)
                logger.info('RHEVM:%s %s successfully set to %s state', provider, edomain, state)
                return True
        return False
    except Exception:
        logger.exception("RHEVM:%s Exception occurred while changing %s state to %s",
                         provider, edomain, state)
        return False
def add_provider_template(stream, provider, template_name, custom_data=None, mark_kwargs=None):
    """Checking existing providertemplates first, call mark_provider_template to add records

    Args:
        stream (str): build stream, like upstream or downstream-510z
        provider (str): provider key
        template_name (str): name of the template to track on provider
        custom_data (dict): JSON serializable custom data dict
        mark_kwargs (dict): Passed to mark_provider_template to allow for additional kwargs
    Returns:
        None on no action (already tracked)
        True on adding
        False on error
    """
    tb_api = api()
    try:
        existing_provider_templates = [
            pt['id']
            for pt in depaginate(
                tb_api,
                tb_api.providertemplate.get(provider=provider, template=template_name))['objects']]
        if '{}_{}'.format(template_name, provider) in existing_provider_templates:
            return None
        else:
            mark_provider_template(tb_api, provider, template_name, stream=stream,
                                   custom_data=custom_data, **(mark_kwargs or {}))
            return True
    except Exception:
        logger.exception('{}: Error occurred while template sync to trackerbot'.format(provider))
        return False
def get_datastores_per_host(provider_key):
    print('{} processing to get datastores per host'.format(provider_key))
    try:
        provider = get_mgmt(provider_key)

        vm_registered_files = get_registered_vm_files(provider_key)
        hosts = provider.list_host()
        host_datastore_url = {host: provider.list_host_datastore_url(host) for host in hosts}
        unregistered_files = []

        print("\n*********************UNREGISTERED FILES ON: {}**********************\n".format(
            provider_key))
        print('HOST_NAME\t\tFILE_PATH\t\tTEMPLATE_VM_ISO\t\tNUMBER_OF_FILES\n')
        for host in host_datastore_url:
            try:
                list_orphaned_files_per_host(host, host_datastore_url[host],
                                             provider_key, vm_registered_files,
                                             unregistered_files)
            except Exception as e:
                logger.error(e)
                continue

    except Exception as ex:
        # Print out the error message too because logs in the job get deleted
        print('{} failed ({}: {})'.format(provider_key, type(ex).__name__, str(ex)))
        logger.error('failed to process vms from provider {}'.format(provider_key))
        logger.exception(ex)
def import_template(api, edomain, sdomain, cluster, temp_template_name, provider):
    """Imports template from export domain to storage domain.

    Args:
        api: API to RHEVM instance.
        edomain: Export domain of selected RHEVM provider.
        sdomain: Storage domain of selected RHEVM provider.
        cluster: Cluster to save imported template on.
    """
    try:
        if api.templates.get(temp_template_name) is not None:
            logger.info("RHEVM:%r Warning: found another template with this name.", provider)
            logger.info("RHEVM:%r Skipping this step, attempting to continue...", provider)
            return
        actual_template = api.storagedomains.get(edomain).templates.get(temp_template_name)
        actual_storage_domain = api.storagedomains.get(sdomain)
        actual_cluster = api.clusters.get(cluster)
        import_action = params.Action(async=False, cluster=actual_cluster,
                                      storage_domain=actual_storage_domain)
        actual_template.import_template(action=import_action)
        # Check if the template is really there
        if not api.templates.get(temp_template_name):
            logger.info("RHEVM:%r The template failed to import on data domain", provider)
            sys.exit(127)
        logger.info("RHEVM:%r successfully imported template on data domain", provider)
    except Exception:
        logger.exception("RHEVM:%r import_template to data domain failed:", provider)
    def check_for_badness(self, fn, _tries, nav_args, *args, **kwargs):
        if getattr(fn, '_can_skip_badness_test', False):
            # self.log_message('Op is a Nop! ({})'.format(fn.__name__))
            return

        # TODO: Uncomment after resolving the issue in widgetastic. Shouldn't be needed though :)
        # if self.VIEW:
        #     self.view.flush_widget_cache()
        go_kwargs = kwargs.copy()
        go_kwargs.update(nav_args)
        self.appliance.browser.open_browser(
            url_key=self.obj.appliance.server.address())

        # check for MiqQE javascript patch on first try and patch the appliance if necessary
        if self.appliance.is_miqqe_patch_candidate and not self.appliance.miqqe_patch_applied:
            self.appliance.patch_with_miqqe()
            self.appliance.browser.quit_browser()
            _tries -= 1
            self.go(_tries, *args, **go_kwargs)

        br = self.appliance.browser

        try:
            br.widgetastic.execute_script('miqSparkleOff();', silent=True)
        except:  # noqa
            # miqSparkleOff undefined, so it's definitely off.
            # Or maybe it is alerts? Let's only do this when we get an exception.
            self.appliance.browser.widgetastic.dismiss_any_alerts()
            # If we went so far, let's put diapers on one more miqSparkleOff just to be sure
            # It can be spinning in the back
            try:
                br.widgetastic.execute_script('miqSparkleOff();', silent=True)
            except:  # noqa
                pass

        # Check if the page is blocked with blocker_div. If yes, let's headshot the browser right
        # here
        if (br.widgetastic.is_displayed(
                "//div[@id='blocker_div' or @id='notification']")
                or br.widgetastic.is_displayed(".modal-backdrop.fade.in")):
            logger.warning(
                "Page was blocked with blocker div on start of navigation, recycling."
            )
            self.appliance.browser.quit_browser()
            self.go(_tries, *args, **go_kwargs)

        # Check if modal window is displayed
        if (br.widgetastic.is_displayed(
                "//div[contains(@class, 'modal-dialog') and contains(@class, 'modal-lg')]"
        )):
            logger.warning("Modal window was open; closing the window")
            br.widgetastic.click(
                "//button[contains(@class, 'close') and contains(@data-dismiss, 'modal')]"
            )

        # Check if jQuery present
        try:
            br.widgetastic.execute_script("jQuery", silent=True)
        except Exception as e:
            if "jQuery" not in str(e):
                logger.error("Checked for jQuery but got something different.")
                logger.exception(e)
            # Restart some workers
            logger.warning("Restarting UI and VimBroker workers!")
            with self.appliance.ssh_client as ssh:
                # Blow off the Vim brokers and UI workers
                ssh.run_rails_command(
                    "\"(MiqVimBrokerWorker.all + MiqUiWorker.all).each &:kill\""
                )
            logger.info("Waiting for web UI to come back alive.")
            sleep(10)  # Give it some rest
            self.appliance.wait_for_web_ui()
            self.appliance.browser.quit_browser()
            self.appliance.browser.open_browser(
                url_key=self.obj.appliance.server.address())
            self.go(_tries, *args, **go_kwargs)

        # Same with rails errors
        view = br.widgetastic.create_view(ErrorView)
        rails_e = view.get_rails_error()

        if rails_e is not None:
            logger.warning("Page was blocked by rails error, renavigating.")
            logger.error(rails_e)
            # RHEL7 top does not know -M and -a
            logger.debug('Top CPU consumers:')
            logger.debug(
                store.current_appliance.ssh_client.run_command(
                    'top -c -b -n1 | head -30').output)
            logger.debug('Top Memory consumers:')
            logger.debug(
                store.current_appliance.ssh_client.run_command(
                    'top -c -b -n1 -o "%MEM" | head -30').output)  # noqa
            logger.debug('Managed known Providers:')
            logger.debug('%r', [
                prov.key
                for prov in store.current_appliance.managed_known_providers
            ])
            self.appliance.browser.quit_browser()
            self.appliance.browser.open_browser()
            self.go(_tries, *args, **go_kwargs)
            # If there is a rails error past this point, something is really awful

        # Set this to True in the handlers below to trigger a browser restart
        recycle = False

        # Set this to True in handlers to restart evmserverd on the appliance
        # Includes recycling so you don't need to specify recycle = False
        restart_evmserverd = False

        try:
            self.log_message("Invoking {}, with {} and {}".format(
                fn.func_name, args, kwargs),
                             level="debug")
            return fn(*args, **kwargs)
        except (KeyboardInterrupt, ValueError):
            # KeyboardInterrupt: Don't block this while navigating
            raise
        except UnexpectedAlertPresentException:
            if _tries == 1:
                # There was an alert, accept it and try again
                br.widgetastic.handle_alert(wait=0)
                self.go(_tries, *args, **go_kwargs)
            else:
                # There was still an alert when we tried again, shoot the browser in the head
                logger.debug('Unexpected alert, recycling browser')
                recycle = True
        except (ErrorInResponseException, InvalidSwitchToTargetException):
            # Unable to switch to the browser at all, need to recycle
            logger.info('Invalid browser state, recycling browser')
            recycle = True
        except exceptions.CFMEExceptionOccured as e:
            # We hit a Rails exception
            logger.info('CFME Exception occurred')
            logger.exception(e)
            recycle = True
        except exceptions.CannotContinueWithNavigation as e:
            # Some of the navigation steps cannot succeed
            logger.info('Cannot continue with navigation due to: {}; '
                        'Recycling browser'.format(str(e)))
            recycle = True
        except (NoSuchElementException, InvalidElementStateException,
                WebDriverException, StaleElementReferenceException) as e:
            # First check - if jquery is not found, there can be also another
            # reason why this happened so do not put the next branches in elif
            if isinstance(e, WebDriverException) and "jQuery" in str(e):
                # UI failed in some way, try recycling the browser
                logger.exception(
                    "UI failed in some way, jQuery not found, (probably) recycling the browser."
                )
                recycle = True
            # If the page is blocked, then recycle...
            # TODO .modal-backdrop.fade.in catches the 'About' modal resulting in nav loop
            if (br.widgetastic.is_displayed(
                    "//div[@id='blocker_div' or @id='notification']")
                    or br.widgetastic.is_displayed(".modal-backdrop.fade.in")):
                logger.warning("Page was blocked with blocker div, recycling.")
                recycle = True
            elif br.widgetastic.is_displayed("//div[@id='exception_div']"):
                logger.exception(
                    "CFME Exception before force navigate started!: {}".format(
                        br.widgetastic.text(
                            "//div[@id='exception_div']//td[@id='maincol']/div[2]/h3[2]"
                        )))
                recycle = True
            elif br.widgetastic.is_displayed(
                    "//body/h1[normalize-space(.)='Proxy Error']"):
                # 502
                logger.exception(
                    "Proxy error detected. Killing browser and restarting evmserverd."
                )
                req = br.widgetastic.elements("/html/body/p[1]//a")
                req = br.widgetastic.text(
                    req[0]) if req else "No request stated"
                reason = br.widgetastic.elements("/html/body/p[2]/strong")
                reason = br.widgetastic.text(
                    reason[0]) if reason else "No reason stated"
                logger.info("Proxy error: {} / {}".format(req, reason))
                restart_evmserverd = True
            elif br.widgetastic.is_displayed(
                    "//body[./h1 and ./p and ./hr and ./address]"):
                # 503 and similar sort of errors
                title = br.widgetastic.text("//body/h1")
                body = br.widgetastic.text("//body/p")
                logger.exception("Application error {}: {}".format(
                    title, body))
                sleep(5)  # Give it a little bit of rest
                recycle = True
            elif br.widgetastic.is_displayed(
                    "//body/div[@class='dialog' and ./h1 and ./p]"):
                # Rails exception detection
                logger.exception(
                    "Rails exception before force navigate started!: %r:%r at %r",
                    br.widgetastic.text("//body/div[@class='dialog']/h1"),
                    br.widgetastic.text("//body/div[@class='dialog']/p"),
                    getattr(manager.browser, 'current_url',
                            "error://dead-browser"))
                recycle = True
            elif br.widgetastic.elements("//ul[@id='maintab']/li[@class='inactive']") and not\
                    br.widgetastic.elements("//ul[@id='maintab']/li[@class='active']/ul/li"):
                # If upstream and is the bottom part of menu is not displayed
                logger.exception("Detected glitch from BZ#1112574. HEADSHOT!")
                recycle = True
            elif not self.obj.appliance.server.logged_in():
                # Session timeout or whatever like that, login screen appears.
                logger.exception("Looks like we are logged out. Try again.")
                recycle = True
            else:
                logger.error(
                    "Could not determine the reason for failing the navigation. "
                    "Reraising. Exception: {}".format(str(e)))
                logger.debug(
                    store.current_appliance.ssh_client.run_command(
                        'systemctl status evmserverd').output)
                raise

        if restart_evmserverd:
            logger.info("evmserverd restart requested")
            self.appliance.restart_evm_service()
            self.appliance.wait_for_web_ui()
            self.go(_tries, *args, **go_kwargs)

        if recycle or restart_evmserverd:
            self.appliance.browser.quit_browser()
            logger.debug('browser killed on try {}'.format(_tries))
            # If given a "start" nav destination, it won't be valid after quitting the browser
            self.go(_tries, *args, **go_kwargs)
def cleanup_provider(provider_key, matchers, scan_failure_queue, max_hours, dryrun):
    """
    Process the VMs on a given provider, comparing name and creation time.
    Use thread pools to scan vms, then to delete vms in batches

    Args:
        provider_key (string): the provider key from yaml
        matchers (list): A list of regex objects with match() method
        scan_failure_queue (Queue.Queue): MP queue to hold vms that we could not compare age
    Returns:
        None: if there aren't any old vms to delete
        List of VMReport tuples
    """
    logger.info('%r: Start scan for vm text matches', provider_key)
    try:
        vm_list = get_mgmt(provider_key).list_vms()
    except Exception:  # noqa
        scan_failure_queue.put(VmReport(provider_key, FAIL, NULL, NULL, NULL))
        logger.exception('%r: Exception listing vms', provider_key)
        return

    text_matched_vms = [vm for vm in vm_list if match(matchers, vm.name)]

    logger.info('%r: NOT matching text filters: %r',
                provider_key,
                set([v.name for v in vm_list]) - set([v.name for v in text_matched_vms]))
    logger.info('%r: MATCHED text filters: %r', provider_key, [vm.name for vm in text_matched_vms])

    if not text_matched_vms:
        return

    with ThreadPool(4) as tp:
        scan_args = (
            (provider_key,
             vm,
             timedelta(hours=int(max_hours)),
             scan_failure_queue)
            for vm in text_matched_vms
        )
        old_vms = [
            vm
            for vm in tp.starmap(scan_vm, scan_args)
            if vm is not None
        ]

    if old_vms and dryrun:
        logger.warning('DRY RUN: Would have deleted the following VMs on provider %s: \n %s',
                       provider_key,
                       [(vm[0].name, vm[1], vm[2]) for vm in old_vms])
        # for tabulate consistency on dry runs. 0=vm, 1=age, 2=status
        return [VmReport(provider_key, vm[0].name, vm[1], vm[2], NULL) for vm in old_vms]

    elif old_vms:
        with ThreadPool(4) as tp:
            delete_args = (
                (provider_key,
                 old_tuple[0],  # vm
                 old_tuple[1])  # age
                for old_tuple in old_vms
            )
            delete_results = tp.starmap(delete_vm, delete_args)

            return delete_results
def cleanup():
    click.echo("Removing the BZ report file, bz-report.yaml")
    try:
        os.remove("bz-report.yaml")
    except OSError:
        logger.exception("bz-report.yaml not found")
    def configure_auth(self, reset=False, **kwargs):
        """ Set up authentication mode

        Defaults to Database if no auth_mode is passed in kwargs, ignoring other kwargs

        Args:
            reset: Set True, to reset all changes for the page. Default value: False
            kwargs: A dict of keyword arguments used to initialize the auth mode.
                If you don't want to use the yaml settings,
                auth_mode='your_mode_type_here' is mandatory in your kwargs,
                e.g. auth_settings.configure_auth(
                reset=True, auth_mode='amazon', access_key=key, secret_key=secret_key)
        """
        fill_data = {'auth_mode': kwargs.get('auth_mode', 'database')}
        settings = {}  # for auth_settings
        if fill_data['auth_mode'].lower() == 'database':
            logger.warning('auth_mode is Database, ignoring kwargs')
        else:
            for key, value in kwargs.items():
                if key not in ['default_groups']:
                    if key == 'hosts':
                        # TODO why kill a test if more than 3 ldap hosts passed for config
                        assert len(
                            value) <= 3, "You can specify only 3 LDAP hosts"
                        for enum, host in enumerate(value):
                            settings["ldap_host_{}".format(enum + 1)] = host
                    elif key == 'user_type':
                        settings[key] = self.user_type_dict[value]
                    else:
                        settings[key] = value
                else:
                    settings[key] = value
        fill_data['auth_settings'] = settings

        view = navigate_to(self.appliance.server,
                           'Authentication',
                           wait_for_view=True)
        changed = view.form.fill(fill_data)
        if reset:
            view.reset.click()
            view.flash.assert_message('All changes have been reset')
            # Can't save the form if nothing was changed
            logger.info('Authentication form reset, returning')
            return
        elif changed:
            if (fill_data['auth_mode'] in ['amazon', 'ldap', 'ldaps']
                    and view.form.auth_settings.validate.is_displayed):
                view.form.auth_settings.validate.click()
                view.flash.assert_no_error()
            # FIXME BZ 1527239 This button goes disabled if a password field is 'changed' to same
            # on exception, log and continue
            # no exception - assert flash messages
            # all cases - assert no flash error
            try:
                view.save.click()
            except NoSuchElementException:
                logger.exception(
                    'NoSuchElementException when trying to save auth settings. BZ '
                    '1527239 prevents consistent form saving. Assuming auth settings '
                    'unchanged')
                pass
            else:
                # TODO move this flash message assert into test and only assert no error
                flash_message = (
                    'Authentication settings saved for {} Server "{} [{}]" in Zone "{}"'
                    .format(self.appliance.product_name,
                            self.appliance.server.name,
                            self.appliance.server.sid,
                            self.appliance.server.zone.name))
                view.flash.assert_success_message(flash_message)
            finally:
                view.flash.assert_no_error()
        else:
            logger.info('No authentication settings changed, not saving form.')
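# Illustrative call only; the ldap host name, user_type key, and dn value below are
# placeholders, and in the real tests these kwargs come from yaml:
#
# auth_settings.configure_auth(
#     auth_mode='ldap',
#     hosts=['ldap01.example.com'],
#     user_type='uid',
#     base_dn='dc=example,dc=com')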
def main(**kwargs):
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy'):
        kwargs['configure'] = True
        kwargs['outfile'] = 'appliance_ip_address_1'
        provider_data = cfme.utils.conf.provider_data
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials =\
            {'username': provider_dict['username'],
             'password': provider_dict['password'],
             'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
             'auth_url': provider_dict.get('auth_url'),
             }
        provider = get_mgmt(kwargs['provider'],
                            providers=providers,
                            credentials=credentials)
        flavors = provider_dict['template_upload'].get('flavors',
                                                       ['m1.medium'])
        provider_type = provider_data['management_systems'][
            kwargs['provider']]['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        flavors = cfme_data['appliance_provisioning']['default_flavors'].get(
            provider_type, [])
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    logger.info('Connecting to {}'.format(kwargs['provider']))

    if kwargs.get('destroy'):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # Invert the result so the caller gets a POSIX-style exit code (False/0 means success, True means failure)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster'))
        if cluster is None:
            raise Exception(
                '--cluster is required for rhev instances and default is not set'
            )
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host') and kwargs.get('place_policy_aff'):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs[
                'place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so the first flavor is the default
        try:
            flavor = kwargs.get('flavor') or flavors[0]
        except IndexError:
            raise Exception(
                '--flavor is required for EC2 instances and default is not set'
            )
        deploy_args['instance_type'] = flavor
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        flavors = [f for f in flavors if f in available_flavors]
        try:
            flavor = kwargs.get('flavor') or flavors[0]
        except IndexError:
            raise Exception('--flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        # flavour? Thanks, psav...
        deploy_args['flavour_name'] = flavor

        if 'network' in provider_dict:
            # support rhos4 network names
            deploy_args['network_name'] = provider_dict['network']

        provider_pools = [
            p.name for p in provider.api.floating_ip_pools.list()
        ]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get(
                'floating_ip_pool') or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict[
                "allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']
    elif provider_type == 'gce':
        deploy_args['ssh_key'] = '{user_name}:{public_key}'.format(
            user_name=cred['ssh']['ssh-user'],
            public_key=cred['ssh']['public_key'])
    # Do it!
    try:
        logger.info('Cloning {} to {} on {}'.format(deploy_args['template'],
                                                    deploy_args['vm_name'],
                                                    kwargs['provider']))
        provider.deploy_template(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error('provider.deploy_template failed')
        if kwargs.get('cleanup'):
            logger.info('attempting to destroy {}'.format(
                deploy_args['vm_name']))
            destroy_vm(provider, deploy_args['vm_name'])
        return 12

    if not provider.does_vm_exist(deploy_args['vm_name']):
        logger.error('provider.deploy_template failed without exception')
        return 12

    if provider.is_vm_running(deploy_args['vm_name']):
        logger.info("VM {} is running".format(deploy_args['vm_name']))
    else:
        logger.error("VM is not running")
        return 10

    try:
        ip, time_taken = wait_for(provider.get_ip_address,
                                  [deploy_args['vm_name']],
                                  num_sec=1200,
                                  fail_condition=None)
        logger.info('IP Address returned is {}'.format(ip))
    except Exception as e:
        logger.exception(e)
        logger.error('IP address not returned')
        return 10

    try:
        if kwargs.get('configure'):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy'):
                app = IPAppliance(address=ip)
            else:
                app = Appliance(kwargs['provider'], deploy_args['vm_name'])
            if provider_type == 'gce':
                with app as ipapp:
                    ipapp.configure_gce()
            else:
                app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy'):
            app = Appliance(kwargs['provider'], deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            status, output = ssh_client.run_command(
                'find /root/anaconda-post.log')
            if status == 0:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
        return 10

    if kwargs.get('outfile') or kwargs.get('deploy'):
        with open(kwargs['outfile'], 'w') as outfile:
            outfile.write("appliance_ip_address={}\n".format(ip))

    # In addition to the outfile, drop the ip address on stdout for easy parsing
    print(ip)
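# Minimal sketch of CLI glue (assumed, not the project's actual entry point): main() above
# returns POSIX-style codes (None/0 on success, 10 or 12 on failure), so a thin wrapper can
# hand them straight to sys.exit. Provider, VM and template names are placeholders.
if __name__ == '__main__':
    import sys
    sys.exit(main(provider='example-provider',
                  vm_name='example-appliance-vm',
                  template='example-cfme-template'))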
Example #36
    def check_for_badness(self, fn, _tries, nav_args, *args, **kwargs):
        if getattr(fn, '_can_skip_badness_test', False):
            # self.log_message('Op is a Nop! ({})'.format(fn.__name__))
            return

        # TODO: Uncomment after resolving the issue in widgetastic. Shouldn't be needed though :)
        # if self.VIEW:
        #     self.view.flush_widget_cache()
        go_kwargs = kwargs.copy()
        go_kwargs.update(nav_args)
        self.appliance.browser.open_browser(
            url_key=self.obj.appliance.server.address())

        br = self.appliance.browser

        # Set this to True in the handlers below to trigger a browser restart
        recycle = False

        # Set this to True in handlers to restart evmserverd on the appliance
        # Includes recycling so you don't need to specify recycle = False
        restart_evmserverd = False

        try:
            self.pre_badness_check(_tries, *args, **go_kwargs)
            self.log_message("Invoking {}, with {} and {}".format(
                fn.__name__, args, kwargs),
                             level="debug")
            return fn(*args, **kwargs)
        except (KeyboardInterrupt, ValueError):
            # KeyboardInterrupt: Don't block this while navigating
            raise
        except UnexpectedAlertPresentException:
            if _tries == 1:
                # There was an alert, accept it and try again
                br.widgetastic.handle_alert(wait=0)
                self.go(_tries, *args, **go_kwargs)
            else:
                # There was still an alert when we tried again, shoot the browser in the head
                logger.debug('Unexpected alert, recycling browser')
                recycle = True
        except (ErrorInResponseException, InvalidSwitchToTargetException):
            # Unable to switch to the browser at all, need to recycle
            logger.info('Invalid browser state, recycling browser')
            recycle = True
        except exceptions.CFMEExceptionOccured as e:
            # We hit a Rails exception
            logger.info('CFME Exception occurred')
            logger.exception(e)
            recycle = True
        except exceptions.CannotContinueWithNavigation as e:
            # Some of the navigation steps cannot succeed
            logger.info('Cannot continue with navigation due to: {}; '
                        'Recycling browser'.format(str(e)))
            recycle = True
        except (NoSuchElementException, InvalidElementStateException,
                WebDriverException, StaleElementReferenceException) as e:
            # First check - if jQuery is not found, there can also be another
            # reason why this happened, so do not put the next branches in elif
            if isinstance(e, WebDriverException) and "jQuery" in str(e):
                # UI failed in some way, try recycling the browser
                logger.exception(
                    "UI failed in some way, jQuery not found, (probably) recycling the browser."
                )
                recycle = True
            # If the page is blocked, then recycle...
            # TODO .modal-backdrop.fade.in catches the 'About' modal resulting in nav loop
            if (br.widgetastic.is_displayed(
                    "//div[@id='blocker_div' or @id='notification']")
                    or br.widgetastic.is_displayed(".modal-backdrop.fade.in")):
                logger.warning("Page was blocked with blocker div, recycling.")
                recycle = True
            elif br.widgetastic.is_displayed("//div[@id='exception_div']"):
                logger.exception(
                    "CFME Exception before force navigate started!: {}".format(
                        br.widgetastic.text(
                            "//div[@id='exception_div']//td[@id='maincol']/div[2]/h3[2]"
                        )))
                recycle = True
            elif br.widgetastic.is_displayed(
                    "//body/h1[normalize-space(.)='Proxy Error']"):
                # 502
                logger.exception(
                    "Proxy error detected. Killing browser and restarting evmserverd."
                )
                req = br.widgetastic.elements("/html/body/p[1]//a")
                req = br.widgetastic.text(
                    req[0]) if req else "No request stated"
                reason = br.widgetastic.elements("/html/body/p[2]/strong")
                reason = br.widgetastic.text(
                    reason[0]) if reason else "No reason stated"
                logger.info("Proxy error: {} / {}".format(req, reason))
                restart_evmserverd = True
            elif br.widgetastic.is_displayed(
                    "//body[./h1 and ./p and ./hr and ./address]"):
                # 503 and similar sort of errors
                title = br.widgetastic.text("//body/h1")
                body = br.widgetastic.text("//body/p")
                logger.exception("Application error {}: {}".format(
                    title, body))
                sleep(5)  # Give it a little bit of rest
                recycle = True
            elif br.widgetastic.is_displayed(
                    "//body/div[@class='dialog' and ./h1 and ./p]"):
                # Rails exception detection
                logger.exception(
                    "Rails exception before force navigate started!: %r:%r at %r",
                    br.widgetastic.text("//body/div[@class='dialog']/h1"),
                    br.widgetastic.text("//body/div[@class='dialog']/p"),
                    getattr(manager.browser, 'current_url',
                            "error://dead-browser"))
                recycle = True
            elif br.widgetastic.elements("//ul[@id='maintab']/li[@class='inactive']") and not\
                    br.widgetastic.elements("//ul[@id='maintab']/li[@class='active']/ul/li"):
                # If upstream and the bottom part of the menu is not displayed
                logger.exception("Detected glitch from BZ#1112574. HEADSHOT!")
                recycle = True
            elif not self.obj.appliance.server.logged_in():
                # Session timeout or whatever like that, login screen appears.
                logger.exception("Looks like we are logged out. Try again.")
                recycle = True
            else:
                logger.error(
                    "Could not determine the reason for failing the navigation. "
                    "Reraising. Exception: {}".format(str(e)))
                logger.debug(
                    store.current_appliance.ssh_client.run_command(
                        'systemctl status evmserverd').output)
                raise

        if restart_evmserverd:
            logger.info("evmserverd restart requested")
            self.appliance.restart_evm_service()
            self.appliance.wait_for_web_ui()
            self.go(_tries, *args, **go_kwargs)

        if recycle or restart_evmserverd:
            self.appliance.browser.quit_browser()
            logger.debug('browser killed on try {}'.format(_tries))
            # If given a "start" nav destination, it won't be valid after quitting the browser
            self.go(_tries, *args, **go_kwargs)
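# Conceptual sketch (placeholder names, not the real navmazing/widgetastic hooks) of the
# recovery pattern check_for_badness uses above: exception handlers only set flags, and the
# browser recycle / service restart happens once, after the try block.
class RecoverableUIError(Exception):
    """Placeholder for UI errors that a browser restart usually clears."""


class FatalBackendError(Exception):
    """Placeholder for server-side errors that need an evmserverd-style restart."""


def run_with_recovery(action, recycle_browser, restart_service, retry):
    recycle = False
    restart_backend = False
    try:
        return action()
    except RecoverableUIError:
        recycle = True
    except FatalBackendError:
        restart_backend = True

    if restart_backend:
        restart_service()
        retry()
    if recycle or restart_backend:
        recycle_browser()
        retry()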
Example #37
def upload_template(client, hostname, username, password, provider, url, name,
                    provider_data, stream, results):

    try:
        if provider_data:
            kwargs = make_kwargs_vsphere(provider_data, provider)
        else:
            kwargs = make_kwargs_vsphere(cfme_data, provider)
        kwargs['ovf_tool_username'] = credentials['host_default']['username']
        kwargs['ovf_tool_password'] = credentials['host_default']['password']

        if name is None:
            name = cfme_data['basic_info']['appliance_template']

        logger.info("VSPHERE:%r Start uploading Template: %r", provider, name)
        if not check_kwargs(**kwargs):
            results[provider] = False
            return

        if name in client.list_template():
            logger.info("VSPHERE:%r template %r already exists", provider,
                        name)
        else:
            if kwargs.get('upload'):
                # Wrapper for ovftool - sometimes it just won't work
                for i in range(0, NUM_OF_TRIES_OVFTOOL):
                    logger.info("VSPHERE:%r ovftool try #%r", provider, i)
                    upload_result = upload_ova(hostname, username, password,
                                               name, kwargs.get('datastore'),
                                               kwargs.get('cluster'),
                                               kwargs.get('datacenter'), url,
                                               provider, kwargs.get('proxy'),
                                               kwargs.get('ovf_tool_client'),
                                               kwargs['ovf_tool_username'],
                                               kwargs['ovf_tool_password'])
                    if upload_result:
                        break
                else:
                    logger.error(
                        "VSPHERE:%r Ovftool failed upload after multiple tries",
                        provider)
                    results[provider] = False
                    return

            if kwargs.get('disk'):
                if not add_disk(client, name, provider):
                    logger.error(
                        'VSPHERE:%r FAILED adding disk to VM, exiting',
                        provider)
                    results[provider] = False
                    return

            if kwargs.get('template'):
                try:
                    client.mark_as_template(vm_name=name)
                    logger.info("VSPHERE:%r Successfully templatized machine",
                                provider)
                except Exception:
                    logger.exception("VSPHERE:%r FAILED to templatize machine",
                                     provider)
                    results[provider] = False
                    return

            if not provider_data:
                logger.info("VSPHERE:%r Adding template %r to trackerbot",
                            provider, name)
                trackerbot.trackerbot_add_provider_template(
                    stream, provider, name)

        if provider_data and name in client.list_template():
            logger.info(
                "VSPHERE:%r Template and provider_data exist, Deploy %r",
                provider, name)
            vm_name = 'test_{}_{}'.format(name,
                                          fauxfactory.gen_alphanumeric(8))
            deploy_args = {
                'provider': provider,
                'vm_name': vm_name,
                'template': name,
                'deploy': True
            }
            getattr(__import__('clone_template'), "main")(**deploy_args)

        # If we get here without hitting an exception, we passed...
        results[provider] = True
    except Exception:
        logger.exception('VSPHERE:%r Exception during upload_template',
                         provider)
        results[provider] = False
        return
    finally:
        logger.info("VSPHERE:%r End uploading Template: %r", provider, name)
    def create(self,
               cancel=False,
               validate_credentials=True,
               validate=True,
               force=False):
        """Creates the manager through UI

        Args:
            cancel (bool): Whether to cancel out of the creation.  The cancel is done
                after all the information present in the manager has been filled in the UI.
            validate_credentials (bool): Whether to validate credentials - if True and the
                credentials are invalid, an error will be raised.
            validate (bool): Whether we want to wait for the manager's data to load
                and show up in its detail page. True will also wait, False will only set it up.
            force (bool): Whether to force the creation even if the manager already exists.
                True will try anyway; False will check for its existence and leave, if present.
        """
        def config_profiles_loaded():
            # Workaround - without this, validation of provider failed
            config_profiles_names = [
                prof.name for prof in self.config_profiles
            ]
            logger.info("UI: %s\nYAML: %s", set(config_profiles_names),
                        set(self.yaml_data['config_profiles']))
            return all([
                cp in config_profiles_names
                for cp in self.yaml_data['config_profiles']
            ])

        if not force and self.exists:
            return
        form_dict = self.__dict__
        form_dict.update(self.credentials.view_value_mapping)
        if self.appliance.version < '5.8':
            form_dict['provider_type'] = self.type
        view = navigate_to(self, 'Add')
        view.entities.form.fill(form_dict)
        if validate_credentials:
            view.entities.form.validate.click()
            view.flash.assert_success_message(
                'Credential validation was successful')
        if cancel:
            view.entities.cancel.click()
            view.flash.assert_success_message(
                'Add of Provider was cancelled by the user')
        else:
            view.entities.add.click()
            success_message = '{} Provider "{}" was added'.format(
                self.type, self.name)
            view.flash.assert_success_message(success_message)
            view.flash.assert_success_message(self.refresh_flash_msg)
            if validate:
                try:
                    self.yaml_data['config_profiles']
                except KeyError as e:
                    logger.exception(e)
                    raise

                wait_for(config_profiles_loaded,
                         fail_func=self.refresh_relationships,
                         handle_exception=True,
                         num_sec=180,
                         delay=30)
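# Rough, illustration-only equivalent of the wait_for(...) call above: poll a predicate,
# run a refresh function between attempts, swallow transient exceptions
# (handle_exception=True) and give up after num_sec. The real cfme.utils.wait.wait_for
# does more; this only shows the shape of the loop create() relies on.
import time


def poll_until(predicate, refresh, num_sec=180, delay=30):
    deadline = time.time() + num_sec
    while time.time() < deadline:
        try:
            if predicate():
                return True
        except Exception:
            pass  # tolerate transient errors, mirroring handle_exception=True
        refresh()
        time.sleep(delay)
    raise TimeoutError('condition was not met within {} seconds'.format(num_sec))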
Example #39
def main():

    urls = cfme_data['basic_info']['cfme_images_url']
    stream = args.stream or cfme_data['template_upload']['stream']
    upload_url = args.image_url
    provider_type = args.provider_type or cfme_data['template_upload'][
        'provider_type']

    if args.provider_data is not None:
        with open(args.provider_data, 'r') as provider_data_file:
            local_datafile = provider_data_file.read()
        with open(path.conf_path.strpath + '/provider_data.yaml', 'w') as create_datafile:
            create_datafile.write(local_datafile)
        provider_data = cfme.utils.conf.provider_data
        stream = provider_data['stream']

    if stream:
        urls = {}
        image_url = cfme_data['basic_info']['cfme_images_url']
        urls[stream] = image_url.get(stream)
        if not urls[stream]:
            image_url = cfme_data['basic_info']['cfme_old_images_url']
            urls[stream] = image_url.get(stream)
        if not urls[stream]:
            base_url = cfme_data['basic_info']['cfme_old_images_url'][
                'base_url']
            version = ''.join(re.findall(r'(\d+)', stream))
            urls[stream] = \
                base_url + '.'.join(version[:2]) + '/' + '.'.join(version) + '/'

    for key, url in urls.items():
        if stream is not None:
            if key != stream:
                continue
        if upload_url:
            # strip trailing slashes just in case
            if url.rstrip('/') != upload_url.rstrip('/'):
                continue
        dir_files = browse_directory(url)
        if not dir_files:
            continue
        checksum_url = url + "SHA256SUM"
        try:
            urlopen(checksum_url)
        except Exception:
            logger.exception("No valid checksum file for %r, Skipping", key)
            continue

        kwargs = {}
        module = None
        if not provider_type:
            sys.exit('specify the provider_type')

        module_map = {
            'openstack': 'template_upload_rhos',
            'rhevm': 'template_upload_rhevm',
            'virtualcenter': 'template_upload_vsphere',
            'scvmm': 'template_upload_scvmm',
            'gce': 'template_upload_gce',
            'ec2': 'template_upload_ec2',
            'openshift': 'template_upload_openshift',
        }
        module = module_map.get(provider_type)
        if module and module not in dir_files:
            continue

        if not module:
            logger.error('Could not match module to given provider type')
            return 1
        kwargs['stream'] = stream
        kwargs['image_url'] = dir_files[module]
        if args.provider_data is not None:
            kwargs['provider_data'] = provider_data
        else:
            kwargs['provider_data'] = None

        if cfme_data['template_upload']['automatic_name_strategy']:
            kwargs['template_name'] = template_name(
                dir_files[module], dir_files[module + "_date"], checksum_url,
                get_version(url))
            if not stream:
                # Stream is none, using automatic naming strategy, parse stream from template name
                template_parser = trackerbot.parse_template(
                    kwargs['template_name'])
                if template_parser.stream:
                    kwargs['stream'] = template_parser.group_name

        logger.info(
            "TEMPLATE_UPLOAD_ALL:-----Start of %r upload on: %r--------",
            kwargs['template_name'], provider_type)

        logger.info("Executing %r with the following kwargs: %r", module,
                    kwargs)
        getattr(__import__(module), "run")(**kwargs)

        logger.info(
            "TEMPLATE_UPLOAD_ALL:------End of %r upload on: %r--------",
            kwargs['template_name'], provider_type)
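# Worked example (hypothetical stream name and base URL) of the fallback URL construction
# in main() above: the digits are pulled out of the stream name, the first two form the
# series directory and the full set forms the release directory.
import re

stream = 'downstream-510z'                          # hypothetical stream name
base_url = 'http://example.com/builds/cfme/'        # hypothetical base_url from cfme_data
version = ''.join(re.findall(r'(\d+)', stream))     # '510'
url = base_url + '.'.join(version[:2]) + '/' + '.'.join(version) + '/'
# url == 'http://example.com/builds/cfme/5.1/5.1.0/'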
Example #40
def upload_template(hostname, username, password, provider, url, name, provider_data,
                    stream, upload_folder, oc_username, oc_password):
    try:
        kwargs = {}

        if name is None:
            name = cfme_data['basic_info']['appliance_template']

        logger.info("OPENSHIFT:%r Start uploading Template: %r", provider, name)
        if not check_kwargs(**kwargs):
            return False

        logger.info("checking whether this template is already present in provider env")
        if name not in list_templates(hostname, username, password, upload_folder):
            with SSHClient(hostname=hostname, username=username, password=password) as ssh:
                # creating folder to store template files
                dest_dir = os.path.join(upload_folder, name)
                logger.info("creating folder for templates: {f}".format(f=dest_dir))
                result = ssh.run_command('mkdir {dir}'.format(dir=dest_dir))
                if result.failed:
                    err_text = "OPENSHIFT: can't create folder {}".format(str(result))
                    logger.exception(err_text)
                    raise RuntimeError(err_text)
                download_cmd = ('wget -q --no-parent --no-directories --reject "index.html*" '
                                '--directory-prefix={dir} -r {url}')
                logger.info("downloading templates to destination dir {f}".format(f=dest_dir))
                result = ssh.run_command(download_cmd.format(dir=dest_dir, url=url))
                if result.failed:
                    err_text = "OPENSHIFT: cannot download template {}".format(str(result))
                    logger.exception(err_text)
                    raise RuntimeError(err_text)

                # updating image streams in openshift
                logger.info("logging in to openshift")
                login_cmd = 'oc login --username={u} --password={p}'
                result = ssh.run_command(login_cmd.format(u=oc_username, p=oc_password))
                if result.failed:
                    err_text = "OPENSHIFT: couldn't login to openshift {}".format(str(result))
                    logger.exception(err_text)
                    raise RuntimeError(err_text)

                logger.info("looking for templates in destination dir {f}".format(f=dest_dir))
                get_urls_cmd = r'find {d} -type f -name "cfme-openshift-*" -exec tail -1 {{}} \;'
                result = ssh.run_command(get_urls_cmd.format(d=dest_dir))
                if result.failed:
                    err_text = "OPENSHIFT: couldn't get img stream urls {}".format(str(result))
                    logger.exception(err_text)
                    raise RuntimeError(err_text)

                tags = {}
                for img_url in str(result).split():
                    update_img_cmd = 'docker pull {url}'
                    logger.info("updating image stream to tag {t}".format(t=img_url))
                    result = ssh.run_command(update_img_cmd.format(url=img_url))
                    # url ex:
                    # brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd:2.4.6-14
                    tag_name, tag_value = img_url.split('/')[-1].split(':')
                    tags[tag_name] = tag_value
                    if result.failed:
                        err_text = ("OPENSHIFT: couldn't update image stream using url "
                                    "{}, {}".format(img_url, str(result)))
                        logger.exception(err_text)
                        raise RuntimeError(err_text)

                logger.info('updating templates before upload to openshift')
                # updating main template file, adding essential patches
                main_template_file = 'cfme-template.yaml'
                main_template = os.path.join(dest_dir, main_template_file)

                default_template_name = 'cloudforms'
                new_template_name = name
                logger.info('removing old templates from ocp if those exist')
                for template in (default_template_name, new_template_name):
                    if ssh.run_command('oc get template {t} '
                                       '--namespace=openshift'.format(t=template)).success:
                        ssh.run_command('oc delete template {t} '
                                        '--namespace=openshift'.format(t=template))

                logger.info('changing template name to unique one')
                change_name_cmd = """python -c 'import yaml
data = yaml.safe_load(open("{file}"))
data["metadata"]["name"] = "{new_name}"
yaml.safe_dump(data, stream=open("{file}", "w"))'""".format(new_name=new_template_name,
                                                            file=main_template)
                # our templates always have the same name but we have to keep many templates
                # of the same stream. So we have to change template name before upload to ocp
                # in addition, openshift doesn't provide any convenient way to change template name
                logger.info(change_name_cmd)
                result = ssh.run_command(change_name_cmd)
                if result.failed:
                    err_text = "OPENSHIFT: couldn't change default template name"
                    logger.exception(err_text)
                    raise RuntimeError(err_text)

                logger.info("uploading main template to ocp")
                result = ssh.run_command('oc create -f {t} '
                                         '--namespace=openshift'.format(t=main_template))
                if result.failed:
                    err_text = "OPENSHIFT: couldn't upload template to openshift"
                    logger.exception(err_text)
                    raise RuntimeError(err_text)

            if not provider_data:
                logger.info("OPENSHIFT:%r Adding template %r to trackerbot", provider, name)
                trackerbot.trackerbot_add_provider_template(stream=stream,
                                                            provider=provider,
                                                            template_name=name,
                                                            custom_data={'TAGS': tags})

            logger.info("upload has been finished successfully")
        else:
            logger.info("OPENSHIFT:%r template %r already exists", provider, name)

    except Exception:
        logger.exception('OPENSHIFT:%r Exception during upload_template', provider)
        return False
    finally:
        logger.info("OPENSHIFT:%r End uploading Template: %r", provider, name)
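# Local sketch of what the remote `python -c` one-liner above does: load the template yaml,
# overwrite metadata.name with the unique name, and write it back. The file path and new
# name are placeholders.
import yaml


def rename_openshift_template(template_path, new_name):
    with open(template_path) as handle:
        data = yaml.safe_load(handle)
    data['metadata']['name'] = new_name
    with open(template_path, 'w') as handle:
        yaml.safe_dump(data, handle)

# rename_openshift_template('cfme-template.yaml', 'cfme-5100-unique-template')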
Example #41
def main(**kwargs):
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy'):
        kwargs['configure'] = True
        kwargs['outfile'] = 'appliance_ip_address_1'
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials =\
            {'username': provider_dict['username'],
             'password': provider_dict['password'],
             'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
             'auth_url': provider_dict.get('auth_url'),
             }
        provider = get_mgmt(kwargs['provider'],
                            providers=providers,
                            credentials=credentials)
        flavors = provider_dict['template_upload'].get('flavors',
                                                       ['m1.medium'])
        provider_type = provider_data['management_systems'][
            kwargs['provider']]['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        flavors = cfme_data['appliance_provisioning']['default_flavors'].get(
            provider_type, [])
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    logger.info('Connecting to %s', kwargs['provider'])

    if kwargs.get('destroy'):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # Invert the result so the caller gets a POSIX-style exit code (False/0 means success, True means failure)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster'))
        if cluster is None:
            raise Exception(
                '--cluster is required for rhev instances and default is not set'
            )
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host') and kwargs.get('place_policy_aff'):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs[
                'place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so fall back to a fixed default
        # c3.xlarge has 4 CPU cores and 7.5GB RAM - minimal requirements for CFME Appliance
        flavor = kwargs.get('flavor', 'c3.xlarge')
        deploy_args['instance_type'] = flavor
        deploy_args['key_name'] = "shared"
        # we want to override default cloud-init which disables root login and password login
        cloud_init_dict = {
            'chpasswd': {
                'expire': False,
                'list': '{}:{}\n'.format(cred['ssh']['username'],
                                         cred['ssh']['password'])
            },
            'disable_root': False,
            'ssh_pwauth': True
        }
        cloud_init = "#cloud-config\n{}".format(
            yaml.safe_dump(cloud_init_dict, default_flow_style=False))
        deploy_args['user_data'] = cloud_init
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        logger.info("Available flavors on provider: %s", available_flavors)
        flavors = [f for f in flavors if f in available_flavors]
        try:
            flavor = kwargs.get('flavor') or flavors[0]
        except IndexError:
            raise Exception('--flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        logger.info('Selected flavor: %s', flavor)

        # flavour? Thanks, psav...
        deploy_args['flavour_name'] = flavor

        if 'network' in provider_dict:
            # support rhos4 network names
            deploy_args['network_name'] = provider_dict['network']

        provider_pools = [
            p.name for p in provider.api.floating_ip_pools.list()
        ]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get(
                'floating_ip_pool') or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict[
                "allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']
    elif provider_type == 'gce':
        deploy_args['ssh_key'] = '{user_name}:{public_key}'.format(
            user_name=cred['ssh']['ssh-user'],
            public_key=cred['ssh']['public_key'])
    elif provider_type == 'openshift':
        trackerbot = api()
        raw_tags = trackerbot.providertemplate().get(
            provider=kwargs['provider'],
            template=deploy_args['template'])['objects']
        raw_tags = raw_tags[-1]['template'].get('custom_data', "{}")
        deploy_args["tags"] = yaml.safe_load(
            raw_tags.replace("u'", '"').replace("'", '"'))['TAGS']
    # Do it!
    try:
        logger.info('Cloning %s to %s on %s', deploy_args['template'],
                    deploy_args['vm_name'], kwargs['provider'])
        output = provider.deploy_template(**deploy_args)
    except Exception as e:
        logger.exception(e)
        logger.error('provider.deploy_template failed')
        if kwargs.get('cleanup'):
            logger.info('attempting to destroy %s', deploy_args['vm_name'])
            destroy_vm(provider, deploy_args['vm_name'])
        return 12

    if not provider.does_vm_exist(deploy_args['vm_name']):
        logger.error('provider.deploy_template failed without exception')
        return 12

    if provider.is_vm_running(deploy_args['vm_name']):
        logger.info('VM %s is running', deploy_args['vm_name'])
    else:
        logger.error('VM %s is not running', deploy_args['vm_name'])
        return 10

    if provider_type == 'gce':
        try:
            attach_gce_disk(provider, deploy_args['vm_name'])
        except Exception:
            logger.exception("Failed to attach db disk")
            destroy_vm(provider, deploy_args['vm_name'])
            return 10

    if provider_type == 'openshift':
        ip = output['url']
    else:
        try:
            ip, _ = wait_for(provider.get_ip_address, [deploy_args['vm_name']],
                             num_sec=1200,
                             fail_condition=None)
            logger.info('IP Address returned is %s', ip)
        except Exception as e:
            logger.exception(e)
            logger.error('IP address not returned')
            return 10

    try:
        if kwargs.get('configure'):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy'):
                app = IPAppliance(hostname=ip)
            else:
                app_args = (kwargs['provider'], deploy_args['vm_name'])
                app_kwargs = {}
                if provider_type == 'openshift':
                    ocp_creds = cred[provider_dict['credentials']]
                    ssh_creds = cred[provider_dict['ssh_creds']]
                    app_kwargs = {
                        'project': output['project'],
                        'db_host': output['external_ip'],
                        'container': 'cloudforms-0',
                        'hostname': ip,
                        'openshift_creds': {
                            'hostname': provider_dict['hostname'],
                            'username': ocp_creds['username'],
                            'password': ocp_creds['password'],
                            'ssh': {
                                'username': ssh_creds['username'],
                                'password': ssh_creds['password'],
                            },
                        }
                    }
                app = Appliance.from_provider(*app_args, **app_kwargs)

            if provider_type == 'ec2':
                wait_for(cloud_init_done,
                         func_args=[app],
                         num_sec=600,
                         handle_exception=True,
                         delay=5)
            if provider_type == 'gce':
                app.configure_gce()
            elif provider_type == 'openshift':
                # openshift appliances don't need any additional configuration
                pass
            else:
                app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy'):
            app = Appliance.from_provider(kwargs['provider'],
                                          deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            result = ssh_client.run_command('find /root/anaconda-post.log')
            if result.success:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
        destroy_vm(app.provider, deploy_args['vm_name'])
        return 10

    if kwargs.get('outfile') or kwargs.get('deploy'):
        # TODO: get rid of these scripts in jenkins or develop them from scratch
        with open(kwargs['outfile'], 'w') as outfile:
            if provider_type == 'openshift':
                output_data = {
                    'appliances': [
                        {
                            'project': output['project'],
                            'db_host': output['external_ip'],
                            'hostname': ip,
                            'container': 'cloudforms-0',
                            'openshift_creds': {
                                'hostname': provider_dict['hostname'],
                                'username': ocp_creds['username'],
                                'password': ocp_creds['password'],
                                'ssh': {
                                    'username': ssh_creds['username'],
                                    'password': ssh_creds['password'],
                                }
                            },
                        },
                    ],
                }
            else:
                output_data = {'appliances': [{'hostname': ip}]}
            yaml_data = yaml.safe_dump(output_data, default_flow_style=False)
            outfile.write(yaml_data)

        # In addition to the outfile, drop the ip address on stdout for easy parsing
        print(yaml_data)
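# Sketch of consuming the outfile written above (assumed downstream usage, e.g. a Jenkins
# step): the file is plain yaml with an 'appliances' list, so the first appliance's
# hostname can be read back like this. The file name matches the --deploy default above.
import yaml

with open('appliance_ip_address_1') as outfile:
    appliance_data = yaml.safe_load(outfile)
first_appliance_ip = appliance_data['appliances'][0]['hostname']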
Example #42
def upload_template(rhevip, sshname, sshpass, username, password,
                    provider, image_url, template_name, provider_data, stream, glance):
    try:
        logger.info("RHEVM:%r Template %r upload started", provider, template_name)
        if provider_data:
            kwargs = make_kwargs_rhevm(provider_data, provider)
            providers = provider_data['management_systems']
            mgmt = get_mgmt(kwargs.get('provider'), providers=providers)
        else:
            kwargs = make_kwargs_rhevm(cfme_data, provider)
            mgmt = get_mgmt(kwargs.get('provider'))
        if template_name is None:
            template_name = cfme_data['basic_info']['appliance_template']
        kwargs['image_url'] = image_url
        kwargs['template_name'] = template_name
        qcowname = get_qcow_name(image_url)
        temp_template_name = 'auto-tmp-{}-{}'.format(
            fauxfactory.gen_alphanumeric(8), template_name)
        temp_vm_name = 'auto-vm-{}-{}'.format(
            fauxfactory.gen_alphanumeric(8), template_name)

        kwargs = update_params_api(mgmt, **kwargs)
        check_kwargs(**kwargs)

        if mgmt.does_template_exist(template_name):
            logger.info("RHEVM:%r Found finished template with name %r.", provider, template_name)
            logger.info("RHEVM:%r The script will now end.", provider)
            return True

        logger.info("RHEVM:%r Downloading .qcow2 file...", provider)
        download_qcow(kwargs.get('image_url'))
        try:
            logger.info("RHEVM:%r Uploading template to Glance", provider)
            glance_args = {'image': qcowname, 'image_name_in_glance': template_name,
                'provider': glance, 'disk_format': 'qcow2'}
            getattr(__import__('image_upload_glance'), "run")(**glance_args)

            logger.info("RHEVM:%r Adding Glance", provider)
            add_glance(mgmt, provider, glance)

            logger.info("RHEVM:%r Importing new template to data domain", provider)
            import_template_from_glance(mgmt, kwargs.get('sdomain'), kwargs.get('cluster'),
                temp_template_name, glance, provider, template_name)

            logger.info("RHEVM:%r Making a temporary VM from new template", provider)
            make_vm_from_template(mgmt, stream, cfme_data, kwargs.get('cluster'),
                temp_template_name, temp_vm_name, provider, mgmt_network=kwargs.get('mgmt_network'))

            logger.info("RHEVM:%r Adding disk to created VM", provider)
            add_disk_to_vm(mgmt, kwargs.get('sdomain'), kwargs.get('disk_size'),
                kwargs.get('disk_format'), kwargs.get('disk_interface'),
                temp_vm_name, provider)

            logger.info("RHEVM:%r Templatizing VM", provider)
            templatize_vm(mgmt, template_name, kwargs.get('cluster'), temp_vm_name, provider)

            if not provider_data:
                logger.info("RHEVM:%r Add template %r to trackerbot", provider, template_name)
                trackerbot.trackerbot_add_provider_template(stream, provider, template_name)
        finally:
            cleanup(mgmt, qcowname, provider, temp_template_name, temp_vm_name)
            mgmt.disconnect()
            logger.info("RHEVM:%r Template %r upload Ended", provider, template_name)
        if provider_data and mgmt.does_template_exist(template_name):
            logger.info("RHEVM:%r Deploying Template %r", provider, template_name)
            vm_name = 'test_{}_{}'.format(template_name, fauxfactory.gen_alphanumeric(8))
            deploy_args = {'provider': provider, 'vm_name': vm_name,
                           'template': template_name, 'deploy': True}
            getattr(__import__('clone_template'), "main")(**deploy_args)
        logger.info("RHEVM:%r Template %r upload Ended", provider, template_name)
    except Exception:
        logger.exception("RHEVM:%r Template %r upload exception", provider, template_name)
        return False
Example #43
    def download_image(self):
        ARCHIVE_TYPES = ['zip']
        suffix = re.compile(r'^.*?[.](?P<ext>tar\.gz|tar\.bz2|\w+)$').match(
            self.image_name).group('ext')
        # Check if file exists already:
        if path.isfile(self.image_name):
            if self.checksum_verification():
                logger.info('Local image found, skipping download: %s',
                            self.local_file_path)
                if suffix not in ARCHIVE_TYPES:
                    return True
            else:
                os.remove(self.local_file_path)

        if not path.isfile(self.image_name):
            # Download file to cli-tool-client
            try:
                request.urlretrieve(self.raw_image_url, self.local_file_path)
            except URLError:
                logger.exception('Failed download of image using urllib')
                return False

        self.checksum_verification()

        # Unzips the image when the suffix is zip or tar.gz and then changes the image name
        # to the extracted one. Zip is used for EC2 and SCVMM images, tar.gz for GCE.

        archive_path = self.image_name
        if suffix not in ARCHIVE_TYPES:
            return True
        else:
            if suffix == 'zip':
                try:
                    archive = ZipFile(archive_path)
                    zipinfo = archive.infolist()
                    self._unzipped_file = zipinfo[0].filename
                except Exception:
                    logger.exception(
                        "Getting information of {} archive failed.".format(
                            self.image_name))
                    return False

                if path.isfile(self.image_name):
                    try:
                        os.remove(self.image_name)
                    except Exception:
                        logger.exception(
                            "Deleting previously unpacked file {} failed.".
                            format(self.image_name))
                        return False
                logger.info(
                    f"Image archived - unpacking as: {self._unzipped_file}")
                try:
                    archive.extractall()
                    archive.close()
                    # remove the archive
                    os.remove(archive_path)
                    return True
                except Exception:
                    logger.exception(f"Unpacking the {suffix} archive failed.")
                    return False
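# Quick illustration of the suffix regex used in download_image above, with made-up image
# names: the multi-part extensions are kept whole, everything else falls back to the last
# dot-suffix.
import re

suffix_re = re.compile(r'^.*?[.](?P<ext>tar\.gz|tar\.bz2|\w+)$')
assert suffix_re.match('cfme-gce-5.10.0.1.tar.gz').group('ext') == 'tar.gz'
assert suffix_re.match('cfme-ec2-5.10.0.1.zip').group('ext') == 'zip'
assert suffix_re.match('cfme-rhos-5.10.0.1.qcow2').group('ext') == 'qcow2'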
Example #44
def deploy_template(provider_key, vm_name, template_name=None, timeout=900, **deploy_args):
    """
    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
        timeout: the timeout for template deploy

    Returns:
        wrapanapi.entities.Vm or wrapanapi.entities.Instance object
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        skip_exceptions = list(allow_skip.keys())
        callable_mapping = allow_skip
    elif isinstance(allow_skip, str) and allow_skip.lower() == "default":
        skip_exceptions = DEFAULT_SKIP
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    deploy_args.update(vm_name=vm_name)

    if template_name is None:
        try:
            template_name = provider_crud.data['templates']['small_template']['name']
        except KeyError:
            raise KeyError('small_template not defined for Provider {} in cfme_data.yaml'
                           .format(provider_key))

    deploy_args.update(template=template_name)

    deploy_args.update(provider_crud.deployment_helper(deploy_args))

    logger.info("Getting ready to deploy VM/instance %s from template %s on provider %s",
                vm_name, deploy_args['template'], provider_crud.data['name'])
    try:
        try:
            logger.debug("Deploy args: %s", deploy_args)
            if isinstance(provider_crud.mgmt, AzureSystem):
                template = provider_crud.mgmt.get_template(
                    template_name, container=deploy_args['template_container'])
            else:
                template = provider_crud.mgmt.get_template(template_name)
            vm = template.deploy(timeout=timeout, **deploy_args)
            logger.info("Provisioned VM/instance %r", vm)
        except Exception:
            logger.exception('Could not provision VM/instance %s', vm_name)
            for vm_to_cleanup in provider_crud.mgmt.find_vms(vm_name):
                try:
                    vm_to_cleanup.cleanup()
                except Exception:
                    logger.exception("Unable to clean up vm: %r", vm_to_cleanup.name)
            raise
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line(
            "Skipping due to a provider error: {}: {}\n".format(e_c.__name__, str(e)), purple=True)
        logger.exception(e)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm
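# Hedged usage sketch for deploy_template above; the provider key, template name and
# exception class are hypothetical. Passing allow_skip as a dict maps an exception type to
# a predicate: if the predicate returns True for a caught provider error, the test is
# skipped instead of failed.
class ProviderQuotaError(Exception):
    """Placeholder for a provider-side error that is worth skipping on."""

# vm = deploy_template(
#     'example-rhos-provider',
#     'test-appliance-vm',
#     template_name='cfme-example-template',
#     timeout=1200,
#     allow_skip={ProviderQuotaError: lambda e: 'quota' in str(e).lower()},
# )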
Example #45
    def _run_command(self,
                     command,
                     timeout=RUNCMD_TIMEOUT,
                     reraise=False,
                     ensure_host=False,
                     ensure_user=False,
                     container=None):
        if isinstance(command, dict):
            command = VersionPicker(command).pick(self.vmdb_version)
        original_command = command
        uses_sudo = False
        logger.info("Running command %r", command)
        container = container or self._container
        if self.is_pod and not ensure_host:
            # This command will be executed in the context of the host provider
            command_to_run = '[[ -f /etc/default/evm ]] && source /etc/default/evm; ' + command
            oc_cmd = 'oc exec --namespace={proj} {pod} -- bash -c {cmd}'.format(
                proj=self._project, pod=container, cmd=quote(command_to_run))
            command = oc_cmd
            ensure_host = True
        elif self.is_container and not ensure_host:
            command = 'docker exec {} bash -c {}'.format(
                container, quote('source /etc/default/evm; ' + command))

        if self.username != 'root' and not ensure_user:
            # We need sudo
            command = 'sudo -i bash -c {command}'.format(
                command=quote(command))
            uses_sudo = True

        if command != original_command:
            logger.info("> Actually running command %r", command)
        command += '\n'

        output = []
        try:
            session = self.get_transport().open_session()
            if uses_sudo:
                # We need a pseudo-tty for sudo
                session.get_pty()
            if timeout:
                session.settimeout(float(timeout))

            session.exec_command(command)
            stdout = session.makefile()
            stderr = session.makefile_stderr()

            def write_output(line, file):
                output.append(line)
                if self._streaming:
                    file.write(line)

            while True:
                if session.exit_status_ready():
                    break
                no_data = 0
                # While the program is running, loop through collecting line by line so that we don't
                # fill the buffers up without a newline.
                # Also, note that for long running programs if we try to read output when there
                # is none (and in the case of stderr may never be any)
                # we risk blocking so long that the write buffer on the remote side will fill
                # and the remote program will block on a write.
                # The blocking on our side occurs in paramiko's buffered_pipe.py's read() call,
                # which will block if its internal buffer is empty.
                if session.recv_ready():
                    try:
                        line = next(stdout)
                        write_output(line, self.f_stdout)
                    except StopIteration:
                        pass
                else:
                    no_data += 1

                if session.recv_stderr_ready():
                    try:
                        line = next(stderr)
                        write_output(line, self.f_stderr)
                    except StopIteration:
                        pass
                else:
                    no_data += 1

                if no_data == 2:
                    gevent.sleep(0.01)

            # When the program finishes, we need to grab the rest of the output that is left.
            # Also, we don't have the issue of blocking reads because since the command is
            # finished, any pending reads of SSH encrypted data will finish shortly and put in
            # the buffer or for an empty file EOF will be reached as it will be closed.
            for line in stdout:
                write_output(line, self.f_stdout)
            for line in stderr:
                write_output(line, self.f_stderr)

            exit_status = session.recv_exit_status()
            if exit_status != 0:
                logger.warning('Exit code %d!', exit_status)
            return SSHResult(rc=exit_status,
                             output=''.join(output),
                             command=command)
        except paramiko.SSHException:
            if reraise:
                raise
            else:
                logger.exception('Exception happened during SSH call')
        except socket.timeout:
            logger.exception(
                "Command %r timed out. Output before it failed was:\n%r",
                command, ''.join(output))
            raise

        # Returning two things so tuple unpacking the return works even if the ssh client fails
        # Return whatever we have in the output
        return SSHResult(rc=1, output=''.join(output), command=command)
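The polling loop above deliberately avoids blocking reads on either channel so a chatty remote process can never deadlock against a full stdout or stderr buffer. A minimal usage sketch of run_command, assuming an ssh_client wired up like the client in this example (the command strings are illustrative only):

result = ssh_client.run_command('systemctl status evmserverd', timeout=60)
if result.rc != 0:
    logger.warning('Command failed (rc=%d), output was:\n%s', result.rc, result.output)

# ensure_host=True skips the docker/oc wrapping and runs directly on the appliance host
host_result = ssh_client.run_command('uptime', ensure_host=True)
assert host_result.rc == 0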
Exemple #46
0
    def configure(self,
                  auth_mode=None,
                  auth_provider=None,
                  user_type=None,
                  reset=False,
                  validate=True):
        """ Set up authentication mode

        Defaults to Database if auth_mode is None, otherwise uses auth_provider.as_fill_value()

        Args:
            auth_mode: key for AUTH_MODES, UI dropdown selection, defaults to Database if None
            auth_provider: authentication provider class from cfme.utils.auth
            user_type: key for USER_TYPES
            reset: reset all changes on the page after filling
            validate: validate ldap/ldaps/amazon provider config bind_dn+password

        """
        # Don't call lower() on None, just use 'database'
        mode = AUTH_MODES.get(auth_mode.lower() if auth_mode else 'database')
        settings = None  # determine correct settings for mode selection
        if mode == AUTH_MODES['database']:
            # no other auth config settings
            logger.warning('auth_mode is Database, ignoring auth_provider')
        elif mode == AUTH_MODES['external']:
            # limited config in external mode
            # possible to configure external with no auth provider object (default UI options)
            settings = (auth_provider.as_fill_external_value()
                        if auth_provider else None)
        elif auth_provider:
            # full provider config
            settings = auth_provider.as_fill_value(auth_mode=auth_mode,
                                                   user_type=user_type)
        else:
            raise ValueError(
                'You have tried to configure auth with unexpected settings: '
                '{!r} on mode {!r}'.format(auth_provider, auth_mode))

        view = navigate_to(self.appliance.server, 'Authentication')
        changed = view.form.fill({
            'auth_mode': mode,
            'auth_settings': settings
        })
        if reset:
            view.reset.click()
            view.flash.assert_message('All changes have been reset')
            # Can't save the form if nothing was changed
            logger.info('Authentication form reset, returning')
            return
        elif changed:
            if validate and mode not in [
                    AUTH_MODES['database'], AUTH_MODES['external']
            ]:
                if view.form.auth_settings.validate.is_displayed:
                    view.form.auth_settings.validate.click()
                    view.flash.assert_no_error()
            # FIXME BZ 1527239 This button goes disabled if a password field is 'changed' to same
            # on exception, log and continue
            # no exception - assert flash messages
            # all cases - assert no flash error
            try:
                view.save.click()
            except NoSuchElementException:
                logger.exception(
                    'NoSuchElementException when trying to save auth settings. BZ '
                    '1527239 prevents consistent form saving. Assuming auth settings '
                    'unchanged')
            else:
                # TODO move this flash message assert into test and only assert no error
                view.flash.assert_success_message(
                    'Authentication settings saved for {} Server "{} [{}]" in Zone "{}"'
                    .format(self.appliance.product_name,
                            self.appliance.server.name,
                            self.appliance.server.sid,
                            self.appliance.server.zone.name))
            finally:
                view.flash.assert_no_error()
        else:
            logger.info('No authentication settings changed, not saving form.')
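A minimal usage sketch of configure(); auth_settings stands in for an instance of this class and auth_prov for a provider object from cfme.utils.auth (both names are placeholders):

# full provider configuration with bind validation (default validate=True)
auth_settings.configure(auth_mode='ldap', auth_provider=auth_prov, user_type='uid')
# external mode only needs the limited external settings
auth_settings.configure(auth_mode='external', auth_provider=auth_prov)
# back to the default database authentication
auth_settings.configure(auth_mode='database')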
Exemple #47
0
    def pre_badness_check(self, _tries, *args, **go_kwargs):
        # check for MiqQE javascript patch on first try and patch the appliance if necessary
        if self.appliance.is_miqqe_patch_candidate and not self.appliance.miqqe_patch_applied:
            self.appliance.patch_with_miqqe()
            self.appliance.browser.quit_browser()
            _tries -= 1
            self.go(_tries, *args, **go_kwargs)

        br = self.appliance.browser

        try:
            br.widgetastic.execute_script('miqSparkleOff();', silent=True)
        except:  # noqa
            # miqSparkleOff undefined, so it's definitely off.
            # Or maybe it is alerts? Let's only do this when we get an exception.
            self.appliance.browser.widgetastic.dismiss_any_alerts()
            # If we went so far, let's put diapers on one more miqSparkleOff just to be sure
            # It can be spinning in the back
            try:
                br.widgetastic.execute_script('miqSparkleOff();', silent=True)
            except:  # noqa
                pass

        # Check if the page is blocked with blocker_div. If yes, let's headshot the browser right
        # here
        if (br.widgetastic.is_displayed(
                "//div[@id='blocker_div' or @id='notification']")
                or br.widgetastic.is_displayed(".modal-backdrop.fade.in")):
            logger.warning(
                "Page was blocked with blocker div on start of navigation, recycling."
            )
            self.appliance.browser.quit_browser()
            self.go(_tries, *args, **go_kwargs)

        # Check if modal window is displayed
        if (br.widgetastic.is_displayed(
                "//div[contains(@class, 'modal-dialog') and contains(@class, 'modal-lg')]"
        )):
            logger.warning("Modal window was open; closing the window")
            br.widgetastic.click(
                "//button[contains(@class, 'close') and contains(@data-dismiss, 'modal')]"
            )

        # Check if jQuery present
        try:
            br.widgetastic.execute_script("jQuery", silent=True)
        except Exception as e:
            if "jQuery" not in str(e):
                logger.error("Checked for jQuery but got something different.")
                logger.exception(e)
            # Restart some workers
            logger.warning("Restarting UI and VimBroker workers!")
            with self.appliance.ssh_client as ssh:
                # Blow off the Vim brokers and UI workers
                ssh.run_rails_command(
                    "\"(MiqVimBrokerWorker.all + MiqUiWorker.all).each &:kill\""
                )
            logger.info("Waiting for web UI to come back alive.")
            sleep(10)  # Give it some rest
            self.appliance.wait_for_web_ui()
            self.appliance.browser.quit_browser()
            self.appliance.browser.open_browser(
                url_key=self.obj.appliance.server.address())
            self.go(_tries, *args, **go_kwargs)

        # Same with rails errors
        view = br.widgetastic.create_view(ErrorView)
        rails_e = view.get_rails_error()

        if rails_e is not None:
            logger.warning("Page was blocked by rails error, renavigating.")
            logger.error(rails_e)
            # RHEL7 top does not know -M and -a
            logger.debug('Top CPU consumers:')
            logger.debug(
                store.current_appliance.ssh_client.run_command(
                    'top -c -b -n1 | head -30').output)
            logger.debug('Top Memory consumers:')
            logger.debug(
                store.current_appliance.ssh_client.run_command(
                    'top -c -b -n1 -o "%MEM" | head -30').output)  # noqa
            logger.debug('Managed known Providers:')
            logger.debug('%r', [
                prov.key
                for prov in store.current_appliance.managed_known_providers
            ])
            self.appliance.browser.quit_browser()
            self.appliance.browser.open_browser()
            self.go(_tries, *args, **go_kwargs)
Exemple #48
0
def verify_revert_snapshot(full_test_vm,
                           provider,
                           soft_assert,
                           register_event,
                           request,
                           active_snapshot=False):
    if provider.one_of(RHEVMProvider):
        # RHV snapshots have only description, no name
        snapshot1 = new_snapshot(full_test_vm, has_name=False)
    else:
        snapshot1 = new_snapshot(full_test_vm)
    full_template = getattr(provider.data.templates, 'full_template')
    # Define parameters of the ssh connection
    ssh_kwargs = {
        'hostname': snapshot1.parent_vm.mgmt.ip,
        'username': credentials[full_template.creds]['username'],
        'password': credentials[full_template.creds]['password']
    }
    ssh_client = SSHClient(**ssh_kwargs)
    # We need to wait for ssh to become available on the vm, it can take a while. Without
    # this wait, the ssh command would fail with 'port 22 not available' error.
    # Easiest way to solve this is just mask the exception with 'handle_exception = True'
    # and wait for successful completion of the ssh command.
    # The 'fail_func' ensures we close the connection that failed with exception.
    # Without this, the connection would hang there and wait_for would fail with timeout.
    wait_for(lambda: ssh_client.run_command('touch snapshot1.txt').success,
             num_sec=400,
             delay=20,
             handle_exception=True,
             fail_func=ssh_client.close,
             message="Waiting for successful SSH connection")
    # Create first snapshot
    snapshot1.create()
    ssh_client.run_command('touch snapshot2.txt')

    # If we are not testing 'revert to active snapshot' situation, we create another snapshot
    if not active_snapshot:
        if provider.one_of(RHEVMProvider):
            snapshot2 = new_snapshot(full_test_vm, has_name=False)
        else:
            snapshot2 = new_snapshot(full_test_vm)
        snapshot2.create()

    # VM on RHV provider must be powered off before snapshot revert
    if provider.one_of(RHEVMProvider):
        full_test_vm.power_control_from_cfme(option=full_test_vm.POWER_OFF,
                                             cancel=False)
        full_test_vm.wait_for_vm_state_change(
            desired_state=full_test_vm.STATE_OFF, timeout=900)

    snapshot1.revert_to()
    # Wait for the snapshot to become active
    logger.info('Waiting for vm %s to become active', snapshot1.name)
    wait_for(lambda: snapshot1.active,
             num_sec=300,
             delay=20,
             fail_func=provider.browser.refresh,
             message="Waiting for the first snapshot to become active")
    # VM state after revert should be OFF
    full_test_vm.wait_for_vm_state_change(desired_state=full_test_vm.STATE_OFF,
                                          timeout=720)
    # Let's power it ON again
    full_test_vm.power_control_from_cfme(option=full_test_vm.POWER_ON,
                                         cancel=False)
    full_test_vm.wait_for_vm_state_change(desired_state=full_test_vm.STATE_ON,
                                          timeout=900)
    soft_assert(full_test_vm.mgmt.is_running, "vm not running")
    # Wait for successful ssh connection
    wait_for(lambda: ssh_client.run_command('test -e snapshot1.txt').success,
             num_sec=400,
             delay=10,
             handle_exception=True,
             fail_func=ssh_client.close,
             message="Waiting for successful SSH connection after revert")
    try:
        result = ssh_client.run_command('test -e snapshot1.txt')
        assert result.success  # file found, RC=0
        result = ssh_client.run_command('test -e snapshot2.txt')
        assert result.failed  # file not found, RC=1
        logger.info('Revert to snapshot %s successful', snapshot1.name)
    except Exception:
        logger.exception('Revert to snapshot %s Failed', snapshot1.name)
    ssh_client.close()
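The wait_for calls above lean on handle_exception to mask connection errors while SSH inside the guest comes up, and on fail_func to close the broken connection between retries; note that fail_func takes the callable itself, not its result. A short sketch of that pattern, assuming an ssh_client like the one created above:

wait_for(lambda: ssh_client.run_command('true').success,
         num_sec=400,
         delay=20,
         handle_exception=True,       # swallow 'port 22 not available' until the guest is up
         fail_func=ssh_client.close,  # pass the bound method, do not call it here
         message='Waiting for SSH inside the guest')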
Exemple #49
0
def _get_vm(request, provider, template_name, vm_name):
    if provider.one_of(RHEVMProvider):
        kwargs = {"cluster": provider.data["default_cluster"]}
    elif provider.one_of(OpenStackProvider):
        kwargs = {}
        if 'small_template' in provider.data.templates:
            kwargs = {"flavour_name": provider.data.templates.get('small_template').name}
    elif provider.one_of(SCVMMProvider):
        kwargs = {
            "host_group": provider.data.get("provisioning", {}).get("host_group", "All Hosts")}
    else:
        kwargs = {}

    try:
        deploy_template(
            provider.key,
            vm_name,
            template_name=template_name,
            allow_skip="default",
            power_on=True,
            **kwargs
        )
    except TimedOutError as e:
        logger.exception(e)
        try:
            provider.mgmt.delete_vm(vm_name)
        except TimedOutError:
            logger.warning("Could not delete VM %s!", vm_name)
        finally:
            # If this happened, we should skip all tests from this provider in this module
            pytest.skip("{} is quite likely overloaded! Check its status!\n{}: {}".format(
                provider.key, type(e).__name__, str(e)))

    @request.addfinalizer
    def _finalize():
        """if getting REST object failed, we would not get the VM deleted! So explicit teardown."""
        logger.info("Shutting down VM with name %s", vm_name)
        if (provider.one_of(InfraProvider, OpenStackProvider, AzureProvider) and
                provider.mgmt.is_vm_suspended(vm_name)):
            logger.info("Powering up VM %s to shut it down correctly.", vm_name)
            provider.mgmt.start_vm(vm_name)
        if provider.mgmt.is_vm_running(vm_name):
            logger.info("Powering off VM %s", vm_name)
            provider.mgmt.stop_vm(vm_name)
        if provider.mgmt.does_vm_exist(vm_name):
            logger.info("Deleting VM %s in %s", vm_name, provider.mgmt.__class__.__name__)
            provider.mgmt.delete_vm(vm_name)

    # Make it appear in the provider
    provider.refresh_provider_relationships()

    # Get the REST API object
    api = wait_for(
        get_vm_object,
        func_args=[provider.appliance, vm_name],
        message="VM object {} appears in CFME".format(vm_name),
        fail_condition=None,
        num_sec=600,
        delay=15,
    )[0]

    return VMWrapper(provider, vm_name, api)
def test_verify_revert_snapshot(full_test_vm, provider, soft_assert,
                                register_event):
    """Tests revert snapshot

    Metadata:
        test_flag: snapshot, provision
    """
    if provider.one_of(RHEVMProvider):
        snapshot1 = new_snapshot(full_test_vm, has_name=False)
    else:
        snapshot1 = new_snapshot(full_test_vm)
    full_template = getattr(provider.data.templates, 'full_template')
    ssh_kwargs = {
        'hostname':
        snapshot1.vm.provider.mgmt.get_ip_address(snapshot1.vm.name),
        'username': credentials[full_template.creds]['username'],
        'password': credentials[full_template.creds]['password']
    }
    ssh_client = SSHClient(**ssh_kwargs)
    # We need to wait for ssh to become available on the vm, it can take a while. Without
    # this wait, the ssh command would fail with 'port 22 not available' error.
    # Easiest way to solve this is just mask the exception with 'handle_exception = True'
    # and wait for successful completion of the ssh command.
    # The 'fail_func' ensures we close the connection that failed with exception.
    # Without this, the connection would hang there and wait_for would fail with timeout.
    wait_for(lambda: ssh_client.run_command('touch snapshot1.txt').rc == 0,
             num_sec=300,
             delay=20,
             handle_exception=True,
             fail_func=ssh_client.close)
    snapshot1.create()
    register_event(target_type='VmOrTemplate',
                   target_name=full_test_vm.name,
                   event_type='vm_snapshot_complete')
    register_event(target_type='VmOrTemplate',
                   target_name=full_test_vm.name,
                   event_type='vm_snapshot')
    ssh_client.run_command('touch snapshot2.txt')
    if provider.one_of(RHEVMProvider):
        snapshot2 = new_snapshot(full_test_vm, has_name=False)
    else:
        snapshot2 = new_snapshot(full_test_vm)
    snapshot2.create()

    if provider.one_of(RHEVMProvider):
        full_test_vm.power_control_from_cfme(option=full_test_vm.POWER_OFF,
                                             cancel=False)
        full_test_vm.wait_for_vm_state_change(
            desired_state=full_test_vm.STATE_OFF, timeout=900)

    snapshot1.revert_to()
    # Wait for the snapshot to become active
    logger.info('Waiting for vm %s to become active', snapshot1.name)
    wait_for(lambda: snapshot1.active,
             num_sec=300,
             delay=20,
             fail_func=provider.browser.refresh)
    full_test_vm.wait_for_vm_state_change(desired_state=full_test_vm.STATE_OFF,
                                          timeout=720)
    full_test_vm.power_control_from_cfme(option=full_test_vm.POWER_ON,
                                         cancel=False)
    full_test_vm.wait_for_vm_state_change(desired_state=full_test_vm.STATE_ON,
                                          timeout=900)
    current_state = full_test_vm.find_quadicon().data['state']
    soft_assert(current_state.startswith('currentstate-on'),
                "Quadicon state is {}".format(current_state))
    soft_assert(full_test_vm.provider.mgmt.is_vm_running(full_test_vm.name),
                "vm not running")
    wait_for(lambda: ssh_client.run_command('test -e snapshot1.txt').rc == 0,
             num_sec=400,
             delay=20,
             handle_exception=True,
             fail_func=ssh_client.close)
    try:
        result = ssh_client.run_command('test -e snapshot1.txt')
        assert not result.rc
        result = ssh_client.run_command('test -e snapshot2.txt')
        assert result.rc
        logger.info('Revert to snapshot %s successful', snapshot1.name)
    except Exception:
        logger.exception('Revert to snapshot %s Failed', snapshot1.name)
    ssh_client.close()
def upload_template(rhevip, sshname, sshpass, username, password,
                    provider, image_url, template_name, provider_data, stream):
    try:
        logger.info("RHEVM:%r Template %r upload started", provider, template_name)
        if provider_data:
            kwargs = make_kwargs_rhevm(provider_data, provider)
            providers = provider_data['management_systems']
            api = get_mgmt(kwargs.get('provider'), providers=providers).api
        else:
            kwargs = make_kwargs_rhevm(cfme_data, provider)
            api = get_mgmt(kwargs.get('provider')).api
        kwargs['image_url'] = image_url
        kwargs['template_name'] = template_name
        ovaname = get_ova_name(image_url)
        temp_template_name = ('auto-tmp-{}-'.format(
            fauxfactory.gen_alphanumeric(8))) + template_name
        temp_vm_name = ('auto-vm-{}-'.format(
            fauxfactory.gen_alphanumeric(8))) + template_name
        if template_name is None:
            template_name = cfme_data['basic_info']['appliance_template']

        path, edomain_ip = get_edomain_path(api, kwargs.get('edomain'))

        kwargs = update_params_api(api, **kwargs)
        check_kwargs(**kwargs)

        if api.templates.get(template_name) is not None:
            logger.info("RHEVM:%r Found finished template with name %r.", provider, template_name)
            logger.info("RHEVM:%r The script will now end.", provider)
            return True
        logger.info("RHEVM:%r Downloading .ova file...", provider)
        with make_ssh_client(rhevip, sshname, sshpass) as ssh_client:
            download_ova(ssh_client, kwargs.get('image_url'))
            try:
                logger.info("RHEVM:%r Templatizing .ova file", provider)
                template_from_ova(api, username, password, rhevip, kwargs.get('edomain'),
                                  ovaname, ssh_client, temp_template_name, provider)

                logger.info("RHEVM:%r Importing new template to data domain", provider)
                import_template(api, kwargs.get('edomain'), kwargs.get('sdomain'),
                                kwargs.get('cluster'), temp_template_name, provider)

                logger.info("RHEVM:%r Making a temporary VM from new template", provider)
                make_vm_from_template(api, kwargs.get('cluster'), temp_template_name, temp_vm_name,
                                      provider, mgmt_network=kwargs.get('mgmt_network'))

                logger.info("RHEVM:%r Adding disk to created VM", provider)
                add_disk_to_vm(api, kwargs.get('sdomain'), kwargs.get('disk_size'),
                               kwargs.get('disk_format'), kwargs.get('disk_interface'),
                               temp_vm_name, provider)

                logger.info("RHEVM:%r Templatizing VM", provider)
                templatize_vm(api, template_name, kwargs.get('cluster'), temp_vm_name, provider)

                if not provider_data:
                    logger.info("RHEVM:%r Add template %r to trackerbot", provider, template_name)
                    trackerbot.trackerbot_add_provider_template(stream, provider, template_name)
            finally:
                cleanup(api, kwargs.get('edomain'), ssh_client, ovaname, provider,
                        temp_template_name, temp_vm_name)
                change_edomain_state(api, 'maintenance', kwargs.get('edomain'), provider)
                cleanup_empty_dir_on_edomain(path, edomain_ip,
                                             sshname, sshpass, rhevip, provider)
                change_edomain_state(api, 'active', kwargs.get('edomain'), provider)
                api.disconnect()
                logger.info("RHEVM:%r Template %r upload Ended", provider, template_name)
        if provider_data and api.templates.get(template_name):
            logger.info("RHEVM:%r Deploying Template %r", provider, template_name)
            vm_name = 'test_{}_{}'.format(template_name, fauxfactory.gen_alphanumeric(8))
            deploy_args = {'provider': provider, 'vm_name': vm_name,
                           'template': template_name, 'deploy': True}
            getattr(__import__('clone_template'), "main")(**deploy_args)
        logger.info("RHEVM:%r Template %r upload Ended", provider, template_name)
    except Exception:
        logger.exception("RHEVM:%r Template %r upload exception", provider, template_name)
        return False
Exemple #52
0
def run(**kwargs):

    for provider in list_provider_keys("scvmm"):

        kwargs = make_kwargs_scvmm(cfme_data, provider,
                                   kwargs.get('image_url'),
                                   kwargs.get('template_name'))
        check_kwargs(**kwargs)
        mgmt_sys = cfme_data['management_systems'][provider]
        host_fqdn = mgmt_sys['hostname_fqdn']
        creds = credentials[mgmt_sys['credentials']]

        # For powershell to work, we need to extract the User Name from the Domain
        user = creds['username'].split('\\')
        if len(user) == 2:
            username_powershell = user[1]
        else:
            username_powershell = user[0]

        username_scvmm = creds['domain'] + "\\" + creds['username']

        scvmm_args = {
            "hostname": mgmt_sys['ipaddress'],
            "username": username_powershell,
            "password": creds['password'],
            "domain": creds['domain'],
            "provisioning": mgmt_sys['provisioning']
        }
        client = SCVMMSystem(**scvmm_args)

        url = kwargs.get('image_url')

        # Template name is either user input or extracted from the url
        new_template_name = kwargs.get('template_name')
        if new_template_name is None:
            new_template_name = os.path.basename(url)[:-4]

        logger.info("SCVMM:%s Make Template out of the VHD %s", provider,
                    new_template_name)

        # use_library is either user input or we use the cfme_data value
        library = kwargs.get('library',
                             mgmt_sys['template_upload'].get('vhds', None))

        logger.info("SCVMM:%s Template Library: %s", provider, library)

        # The VHD name is changed to match the template_name.
        new_vhd_name = new_template_name + '.vhd'

        network = mgmt_sys['template_upload'].get('network', None)
        os_type = mgmt_sys['template_upload'].get('os_type', None)
        cores = mgmt_sys['template_upload'].get('cores', None)
        ram = mgmt_sys['template_upload'].get('ram', None)

        # Uses PowerShell Get-SCVMTemplate to return a list of templates and aborts if one already exists.
        if not client.does_template_exist(new_template_name):
            if kwargs.get('upload'):
                logger.info(
                    "SCVMM:%s Uploading VHD image to Library VHD folder.",
                    provider)
                upload_vhd(client, url, library, new_vhd_name)
            if kwargs.get('template'):
                logger.info("SCVMM:%s Make Template out of the VHD %s",
                            provider, new_template_name)

                make_template(client, host_fqdn, new_template_name, library,
                              network, os_type, username_scvmm, cores, ram)
            try:
                wait_for(lambda: client.does_template_exist(new_template_name),
                         fail_condition=False,
                         delay=5)
                logger.info("SCVMM:%s template %s uploaded success", provider,
                            new_template_name)
                logger.info("SCVMM:%s Add template %s to trackerbot", provider,
                            new_template_name)
                trackerbot.trackerbot_add_provider_template(
                    kwargs.get('stream'), provider, new_template_name)
            except Exception:
                logger.exception(
                    "SCVMM:%s Exception verifying the template %s", provider,
                    new_template_name)
        else:
            logger.info(
                "SCVMM: A Template with that name already exists in the SCVMMLibrary"
            )
Exemple #53
0
def _finalize():
    try:
        vm_obj.cleanup_on_provider()
        provider.refresh_provider_relationships()
    except Exception as e:
        logger.exception(e)
Exemple #54
0
    def run_command(
            self, command, timeout=RUNCMD_TIMEOUT, reraise=False, ensure_host=False,
            ensure_user=False):
        """Run a command over SSH.

        Args:
            command: The command. Supports taking dicts as version picking.
            timeout: Timeout after which the command execution fails.
            reraise: Does not muffle the paramiko exceptions in the log.
            ensure_host: Ensure that the command is run on the machine with the IP given, not any
                container or such that we might be using by default.
            ensure_user: Ensure that the command is run as the user we logged in, so in case we are
                not root, setting this to True will prevent from running sudo.

        Returns:
            A :py:class:`SSHResult` instance.
        """
        if isinstance(command, dict):
            command = version.pick(command, active_version=self.vmdb_version)
        original_command = command
        uses_sudo = False
        logger.info("Running command %r", command)
        if self.is_pod and not ensure_host:
            # This command will be executed in the context of the host provider
            command_to_run = 'source /etc/default/evm; ' + command
            oc_cmd = 'oc exec --namespace={proj} {pod} -- bash -c {cmd}'.format(
                proj=self._project, pod=self._container, cmd=quote(command_to_run))
            command = oc_cmd
            ensure_host = True
        elif self.is_container and not ensure_host:
            command = 'docker exec {} bash -c {}'.format(self._container, quote(
                'source /etc/default/evm; ' + command))

        if self.username != 'root' and not ensure_user:
            # We need sudo
            command = 'sudo -i bash -c {command}'.format(command=quote(command))
            uses_sudo = True

        if command != original_command:
            logger.info("> Actually running command %r", command)
        command += '\n'

        output = []
        try:
            session = self.get_transport().open_session()
            if uses_sudo:
                # We need a pseudo-tty for sudo
                session.get_pty()
            if timeout:
                session.settimeout(float(timeout))

            session.exec_command(command)
            stdout = session.makefile()
            stderr = session.makefile_stderr()
            while True:
                if session.recv_ready():
                    for line in stdout:
                        output.append(line)
                        if self._streaming:
                            self.f_stdout.write(line)

                if session.recv_stderr_ready():
                    for line in stderr:
                        output.append(line)
                        if self._streaming:
                            self.f_stderr.write(line)

                if session.exit_status_ready():
                    break
            exit_status = session.recv_exit_status()
            return SSHResult(exit_status, ''.join(output))
        except paramiko.SSHException:
            if reraise:
                raise
            else:
                logger.exception('Exception happened during SSH call')
        except socket.timeout:
            logger.exception(
                "Command %r timed out. Output before it failed was:\n%r",
                command,
                ''.join(output))
            raise

        # Returning two things so tuple unpacking the return works even if the ssh client fails
        # Return whatever we have in the output
        return SSHResult(1, ''.join(output))
Exemple #55
0
    def create(
        self,
        text,
        hover,
        type="Default",
        image="fa-user",
        icon_color="#000000",
        display=True,
        group=None,
        dialog=None,
        display_for=None,
        submit=None,
        playbook_cat_item=None,
        inventory=None,
        hosts=None,
        open_url=None,
        system=None,
        request=None,
        attributes=None,
        visibility=None,
        enablement=None,
        roles=None,
    ):
        self.group = group or self.parent

        view = navigate_to(self, "Add")
        view.options.fill({"type": type})
        view.fill({
            "options": {
                "text": text,
                "display": display,
                "hover": hover,
                "image": image,
                "icon_color": icon_color,
                "open_url": open_url,
                "display_for": display_for,
                "submit": submit,
                "form": {
                    "dialog": dialog,
                    "playbook_cat_item": playbook_cat_item,
                    "inventory": inventory,
                    "hosts": hosts,
                },
            }
        })

        if visibility:
            # TODO: extend visibility expression variations if needed.
            if self.group.type in EVM_TAG_OBJS:
                tag = "EVM {obj_type}.{tag}".format(obj_type=self.group.type,
                                                    tag=visibility["tag"])
            elif self.group.type in BUILD_TAG_OBJS:
                _type = "Switch" if self.group.type == "Virtual Infra Switch" else self.group.type
                tag = "{obj_type}.Build.{tag}".format(obj_type=_type,
                                                      tag=visibility["tag"])
            else:
                tag = "{obj_type}.{tag}".format(obj_type=self.group.type,
                                                tag=visibility["tag"])

            if view.advanced.visibility.define_exp.is_displayed:
                view.advanced.visibility.define_exp.click()
            view.advanced.visibility.expression.fill_tag(
                tag=tag, value=visibility["value"])

        if enablement:
            # TODO: extend enablement expression variations if needed.
            if self.group.type in EVM_TAG_OBJS:
                tag = "EVM {obj_type}.{tag}".format(obj_type=self.group.type,
                                                    tag=enablement["tag"])
            elif self.group.type in BUILD_TAG_OBJS:
                _type = "Switch" if self.group.type == "Virtual Infra Switch" else self.group.type
                tag = "{obj_type}.Build.{tag}".format(obj_type=_type,
                                                      tag=enablement["tag"])
            else:
                tag = "{obj_type}.{tag}".format(obj_type=self.group.type,
                                                tag=enablement["tag"])

            if view.advanced.enablement.define_exp.is_displayed:
                view.advanced.enablement.define_exp.click()

            view.advanced.enablement.expression.fill_tag(
                tag=tag, value=enablement["value"])
            view.advanced.enablement.disabled_text.fill("Tag - {} : {}".format(
                enablement["tag"], enablement["value"]))

        view.fill({"advanced": {"system": system, "request": request}})

        if attributes:
            view.advanced.attributes.fill(attributes)

        if roles:
            view.advanced.role_show.fill("<By Role>")
            view.advanced.roles.wait_displayed("20s")
            view.advanced.roles.fill(roles)
        else:
            view.advanced.role_show.fill("<To All>")

        try:
            # add button slow to enable?
            wait_for(lambda: not view.add_button.disabled,
                     timeout=5,
                     handle_exception=True)
        except TimedOutError:
            logger.exception(
                'Timed out waiting for add button on button group form')
            raise CFMEException(
                'Custom button group add form button did not activate')

        view.add_button.click()
        view.flash.assert_no_error()

        return self.instantiate(
            self.group,
            text=text,
            hover=hover,
            type=type,
            display=display,
            dialog=dialog,
            display_for=display_for,
            submit=submit,
            playbook_cat_item=playbook_cat_item,
            inventory=inventory,
            hosts=hosts,
            image=image,
            icon_color=icon_color,
            open_url=open_url,
            system=system,
            request=request,
            attributes=attributes,
            visibility=visibility,
            enablement=enablement,
            roles=roles,
        )
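A hedged usage sketch of create() above; buttons_collection and dialog are placeholders for whatever fixtures provide the button group collection and a service dialog, and the automate system/request values are illustrative:

button = buttons_collection.create(
    text='Inspect VM',
    hover='Run InspectMe against the VM',
    dialog=dialog,
    system='Request',
    request='InspectMe',
    visibility={'tag': 'Department', 'value': 'Engineering'},
    enablement={'tag': 'Environment', 'value': 'Production'},
    roles=['EvmRole-administrator'],
)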
def upload_template(rhosip, sshname, sshpass, username, password, auth_url,
                    provider, image_url, template_name, provider_data, stream):
    try:
        logger.info("RHOS:%r Starting template %r upload", provider,
                    template_name)

        if provider_data:
            kwargs = make_kwargs_rhos(provider_data, provider)
        else:
            kwargs = make_kwargs_rhos(cfme_data, provider)

        kwargs['image_url'] = image_url
        if template_name is None:
            template_name = cfme_data['basic_info']['appliance_template']

        export = make_export(username, password, kwargs.get('tenant_id'),
                             auth_url)

        with make_ssh_client(rhosip, sshname, sshpass) as ssh_client:
            if not check_image_exists(template_name, export, ssh_client):
                output = upload_qc2_file(ssh_client, kwargs.get('image_url'),
                                         template_name, export, provider)
                if not output:
                    logger.error("RHOS:%r upload_qc2_file returned None: %r",
                                 provider, template_name)
                else:
                    image_id = get_image_id(output)
                    wait_for(check_image_status,
                             [image_id, export, ssh_client],
                             fail_condition=False,
                             delay=5,
                             num_sec=300)
                    logger.info("RHOS:%r Successfully uploaded the template.",
                                provider)

                    if not provider_data:
                        logger.info("RHOS:%r Adding template %r to trackerbot",
                                    provider, template_name)
                        trackerbot.trackerbot_add_provider_template(
                            stream, provider, template_name)
            else:
                logger.info("RHOS:%r Found image with name %r. Exiting",
                            provider, template_name)
            if provider_data and check_image_exists(template_name, export,
                                                    ssh_client):
                logger.info("RHOS:%r Deploying Template %r....", provider,
                            template_name)
                vm_name = 'test_{}_{}'.format(template_name,
                                              fauxfactory.gen_alphanumeric(8))
                deploy_args = {
                    'provider': provider,
                    'vm_name': vm_name,
                    'template': template_name,
                    'deploy': True
                }
                getattr(__import__('clone_template'), "main")(**deploy_args)
    except Exception:
        logger.exception("RHOS:%r Exception while uploading template",
                         provider)
        return False
    finally:
        logger.info("RHOS:%r End template %r upload", provider, template_name)
Exemple #57
0
def main(trackerbot_url, mark_usable=None, selected_provider=None):
    api = trackerbot.api(trackerbot_url)

    thread_q = []
    thread_lock = Lock()
    template_providers = defaultdict(list)
    all_providers = (set(list_provider_keys())
                     if not selected_provider else set(selected_provider))
    unresponsive_providers = set()
    # Queue up list_template calls
    for provider_key in all_providers:
        ipaddress = cfme_data.management_systems[provider_key].get('ipaddress')
        if ipaddress and not net.is_pingable(ipaddress):
            continue
        thread = Thread(target=get_provider_templates,
                        args=(provider_key, template_providers,
                              unresponsive_providers, thread_lock))
        thread_q.append(thread)
        thread.start()

    # Join the queued calls
    for thread in thread_q:
        thread.join()

    seen_templates = set()

    if mark_usable is None:
        usable = {}
    else:
        usable = {'usable': mark_usable}

    existing_provider_templates = [
        pt['id'] for pt in trackerbot.depaginate(
            api, api.providertemplate.get())['objects']
    ]

    # Find some templates and update the API
    for template_name, providers in template_providers.items():
        template_name = str(template_name)
        template_info = TemplateName.parse_template(template_name)

        # it turned out that some providers like ec2 may have templates w/o names.
        # this is easy protection against such issue.
        if not template_name.strip():
            logger.warning('Ignoring template w/o name on provider %s',
                           provider_key)
            continue

        # Don't want sprout templates
        if template_info.group_name in ('sprout', 'rhevm-internal'):
            logger.info('Ignoring %s from group %s', template_name,
                        template_info.group_name)
            continue

        seen_templates.add(template_name)
        group = trackerbot.Group(template_info.group_name,
                                 stream=template_info.stream)
        try:
            template = trackerbot.Template(template_name, group,
                                           template_info.datestamp)
        except ValueError:
            logger.exception('Failure parsing provider %s template: %s',
                             provider_key, template_name)
            continue

        for provider_key in providers:
            provider = trackerbot.Provider(provider_key)

            if '{}_{}'.format(template_name,
                              provider_key) in existing_provider_templates:
                logger.info('Template %s already tracked for provider %s',
                            template_name, provider_key)
                continue

            try:
                trackerbot.mark_provider_template(api, provider, template,
                                                  **usable)
                logger.info(
                    'Added %s template %s on provider %s (datestamp: %s)',
                    template_info.group_name, template_name, provider_key,
                    template_info.datestamp)
            except SlumberHttpBaseException:
                logger.exception('%s: exception marking template %s', provider,
                                 template)

    # Remove provider relationships where they no longer exist, skipping unresponsive providers,
    # and providers not known to this environment
    for pt in trackerbot.depaginate(api,
                                    api.providertemplate.get())['objects']:
        key, template_name = pt['provider']['key'], pt['template']['name']
        if key not in template_providers[
                template_name] and key not in unresponsive_providers:
            if key in all_providers:
                logger.info("Cleaning up template %s on %s", template_name,
                            key)
                trackerbot.delete_provider_template(api, key, template_name)
            else:
                logger.info(
                    "Skipping template cleanup %s on unknown provider %s",
                    template_name, key)

    # Remove templates that aren't on any providers anymore
    for template in trackerbot.depaginate(api, api.template.get())['objects']:
        if not template['providers'] and template['name'].strip():
            logger.info("Deleting template %s (no providers)",
                        template['name'])
            api.template(template['name']).delete()
def upload_template(provider,
                    template_name,
                    stream,
                    file_name,
                    file_path,
                    ssh_client,
                    bucket_name=None):
    bucket = bucket_name or cfme_data['template_upload'][
        'template_upload_gce']['bucket_name']
    try:
        # IMAGE CHECK
        logger.info('GCE: %r: Checking if template %r present', provider,
                    template_name)
        result = ssh_client.run_command(
            'gcloud compute images list {}'.format(template_name))
        if 'Listed 0 items' not in result.output:
            logger.info(
                'GCE: %r: Image %r already present in GCE, stopping upload',
                provider, template_name)
            return True
        logger.info('GCE: %r: Image %r NOT present, continuing upload',
                    provider, template_name)

        # MAKE BUCKET
        logger.info('GCE: %r: Creating bucket %r...', provider, bucket)
        # gsutil returns RC 1 and an API 409 message in stdout if the bucket already exists
        result = ssh_client.run_command('gsutil mb gs://{}'.format(bucket))
        assert result.success or 'already exists' in result.output

        # BUCKET CHECK
        logger.info('GCE: %r: Checking if file on bucket already', provider)
        result = ssh_client.run_command('gsutil ls gs://{}'.format(
            join(bucket, file_name)))
        if result.failed:
            # FILE UPLOAD
            logger.info('GCE: %r: Uploading to bucket...', provider)
            result = ssh_client.run_command('gsutil cp {} gs://{}'.format(
                join(file_path, file_name), bucket))
            assert result.success
            logger.info('GCE: %r: File uploading done ...', provider)
        else:
            logger.info('GCE: %r: File already on bucket...', provider)

        # IMAGE CREATION
        logger.info('GCE: %r: Creating template %r', provider, template_name)
        template_name = check_template_name(template_name)
        result = ssh_client.run_command(
            'gcloud compute images create {} --source-uri gs://{}'.format(
                template_name, join(bucket, file_name)))
        assert result.success
        logger.info('GCE: %r: Successfully added template %r from bucket %r',
                    provider, template_name, bucket)

        logger.info('GCE: %r: Adding template %r to trackerbot for stream %r',
                    provider, template_name, stream)
        trackerbot.trackerbot_add_provider_template(stream, provider,
                                                    template_name)

        # DELETE FILE FROM BUCKET
        logger.info('GCE: %r: Cleaning up, removing %r from bucket %r',
                    provider, file_name, bucket)
        result = ssh_client.run_command('gsutil rm gs://{}'.format(
            join(bucket, file_name)))
        assert result.success
    except Exception:
        # Exception often empty, include last code's stdout
        logger.exception(
            'GCE: %r: Exception occurred in upload_template, last ssh stdout: \n %r',
            provider, str(result))
        return False
    finally:
        logger.info('GCE: %r: End template %r upload...', provider,
                    template_name)
    return True
Exemple #59
0
def deploy_template(provider_key,
                    vm_name,
                    template_name=None,
                    timeout=900,
                    **deploy_args):
    """
    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
        timeout: the timeout for template deploy
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        # the except clause below needs a tuple, not a dict view
        skip_exceptions = tuple(allow_skip.keys())
        callable_mapping = allow_skip
    elif isinstance(allow_skip,
                    six.string_types) and allow_skip.lower() == "default":
        skip_exceptions = DEFAULT_SKIP
        callable_mapping = {}
    else:
        skip_exceptions = allow_skip
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    deploy_args.update(vm_name=vm_name)

    if template_name is None:
        try:
            deploy_args.update(template=provider_crud.data['templates']
                               ['small_template']['name'])
        except KeyError:
            raise KeyError(
                'small_template not defined for Provider {} in cfme_data.yaml'.
                format(provider_key))
    else:
        deploy_args.update(template=template_name)

    deploy_args.update(provider_crud.deployment_helper(deploy_args))

    logger.info(
        "Getting ready to deploy VM/instance %s from template %s on provider %s",
        vm_name, deploy_args['template'], provider_crud.data['name'])
    try:
        try:
            logger.debug("Deploy args: %s", deploy_args)
            vm_name = provider_crud.mgmt.deploy_template(timeout=timeout,
                                                         **deploy_args)
            logger.info("Provisioned VM/instance %s",
                        vm_name)  # instance ID in case of EC2
        except Exception as e:
            logger.exception('Could not provision VM/instance %s (%s: %s)',
                             vm_name,
                             type(e).__name__, str(e))
            try:
                provider_crud.mgmt.delete_vm(vm_name)
            except Exception:
                logger.exception("Unable to clean up vm:", vm_name)
            raise
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line("Skipping due to a provider error: {}: {}\n".format(
            e_c.__name__, str(e)),
                         purple=True)
        logger.exception(e)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name
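A short sketch of the allow_skip handling above: passing a dict maps each skippable exception class to a predicate that decides whether the failure should turn into a pytest skip (the provider key, template name and predicate below are illustrative):

def _provider_overloaded(exc):
    # only convert the failure into a skip when the provider looks overloaded
    return 'timed out' in str(exc).lower()

new_vm_name = deploy_template(
    'vsphere-nested-1',
    'test-deploy-vm-01',
    template_name='cfme-template',
    power_on=True,
    allow_skip={TimedOutError: _provider_overloaded},
)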
def list_orphaned_files_per_host(host_name, host_datastore_urls, provider_key,
                                 vm_registered_files, unregistered_files):
    try:
        providers_data = cfme_data.get("management_systems", {})
        hosts = providers_data[provider_key]['hosts']
        hostname = [
            host['name'] for host in hosts if host_name in host['name']
        ]
        # check if hostname returned is ipaddress
        if not hostname:
            hostname = re.findall(r'[0-9]+(?:\.[0-9]+){3}', host_name)
        connect_kwargs = {
            'username': credentials['host_default']['username'],
            'password': credentials['host_default']['password'],
            'hostname': hostname[0]
        }
        with SSHClient(**connect_kwargs) as ssh_client:
            for datastore_url in host_datastore_urls:
                datastore_path = re.findall(r'([^ds:`/*].*)',
                                            str(datastore_url))

                command = 'ls ~/{}'.format(datastore_path[0])
                result = ssh_client.run_command(command)
                files_in_datastore = (result.output.splitlines()
                                      if result.success else [])
                for fil in files_in_datastore:
                    if fil not in vm_registered_files:
                        file_type = 'UNKNOWN'
                        number_of_files = 0
                        command = 'test -d ~/{}/{}; echo $?'.format(
                            datastore_path[0], fil)
                        result = ssh_client.run_command(command)
                        file_extension = re.findall(r'.*\.(\w*)', fil)
                        if file_extension:
                            file_type = file_extension[0]
                            number_of_files = 1
                        if int(result.output.strip()) == 0:
                            command = 'ls ~/{}/{} | wc -l'.format(
                                datastore_path[0], fil)
                            result = ssh_client.run_command(command)
                            number_of_files = result.output.strip()
                            command = 'find ~/{}/{} -name "*.vmx" | wc -l'.format(
                                datastore_path[0], fil)
                            vmx_result = ssh_client.run_command(command)
                            command = 'find ~/{}/{} -name "*.vmtx" | wc -l'.format(
                                datastore_path[0], fil)
                            vmtx_result = ssh_client.run_command(command)
                            command = 'find ~/{}/{} -name "*.vmdk" | wc -l'.format(
                                datastore_path[0], fil)
                            vmdk_result = ssh_client.run_command(command)

                            if int(vmx_result.output.strip()) > 0:
                                file_type = 'VirtualMachine'
                            elif int(vmtx_result.output.strip()) > 0:
                                file_type = 'Template'
                            elif int(vmdk_result.output.strip()) > 0:
                                file_type = 'VMDK'
                                # delete_this = '~/' + datastore_path[0] + fil
                                # command = 'rm -rf {}'.format(delete_this)
                                # result = ssh_client.run_command(command)
                                # logger.info(result.output)

                        file_path = '~/' + datastore_path[0] + fil
                        if file_path not in unregistered_files:
                            unregistered_files.append(file_path)
                            logger.info(
                                '{host}\t\t{path}\t\t{ftype}\t\t{num}'.format(
                                    host=hostname[0],
                                    path=file_path,
                                    ftype=file_type,
                                    num=number_of_files))

    except Exception:
        logger.exception('Exception listing orphaned files per host')
        return False