def chargeback_report_custom(appliance, vm_ownership, assign_custom_rate, provider):
    """Create a Chargeback report based on a custom rate; queue the report."""
    owner = vm_ownership
    data = {
        'menu_name': 'cb_custom_' + provider.name,
        'title': 'cb_custom_' + provider.name,
        'base_report_on': 'Chargeback for Vms',
        'report_fields': ['Memory Used', 'Memory Used Cost', 'Owner',
        'CPU Used', 'CPU Used Cost',
        'Disk I/O Used', 'Disk I/O Used Cost',
        'Network I/O Used', 'Network I/O Used Cost',
        'Storage Used', 'Storage Used Cost'],
        'filter': {
            'filter_show_costs': 'Owner',
            'filter_owner': owner,
            'interval_end': 'Today (partial)'
        }
    }
    report = appliance.collections.reports.create(is_candu=True, **data)

    logger.info('Queuing chargeback report with custom rate for {} provider'.format(provider.name))
    report.queue(wait_for_finish=True)

    yield list(report.saved_reports.all()[0].data.rows)
    report.delete()
def test_order_tower_catalog_item(appliance, config_manager, catalog_item, request, job_type):
    """Tests ordering of catalog items for Ansible Template and Workflow jobs
    Metadata:
        test_flag: provision

    Polarion:
        assignee: nachandr
        initialEstimate: 1/4h
        casecomponent: Services
        caseimportance: high
    """
    if job_type == 'template_limit':
        host = config_manager.yaml_data['provisioning_data']['inventory_host']
        dialog_values = {'limit': host}
        service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name,
            dialog_values=dialog_values)
    else:
        service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name)

    service_catalogs.order()
    logger.info('Waiting for cfme provision request for service %s', catalog_item.name)
    cells = {'Description': catalog_item.name}
    order_request = appliance.collections.requests.instantiate(cells=cells, partial_check=True)
    order_request.wait_for_request(method='ui')
    msg = 'Request failed with the message {}'.format(order_request.row.last_message.text)
    assert order_request.is_succeeded(method='ui'), msg
    appliance.user.my_settings.default_views.set_default_view('Configuration Management Providers',
                                                              'List View')
Example #3
File: base.py Project: apagac/cfme_tests
    def main(self):
        track = False
        teardown = False
        try:
            if self.stream in self.blocked_streams:
                logger.info('This stream (%s) is blocked for the given provider type, %s',
                            self.stream, self.provider_type)
                return True
            if self.provider_type != 'openshift' and self.mgmt.does_template_exist(
                    self.template_name):
                logger.info("(template-upload) [%s:%s:%s] Template already exists",
                            self.log_name, self.provider_key, self.template_name)
                track = True
            else:
                teardown = True
                if self.decorated_run():
                    track = True
            if track and self.provider_type != 'openshift':
                # openshift run will call track_template since it needs custom_data kwarg
                self.track_template()

            return True

        except TemplateUploadException:
            logger.exception('TemplateUploadException, failed upload')
            return False
        except Exception:
            logger.exception('non-TemplateUploadException, failed interaction with provider')
            return False

        finally:
            if teardown:
                self.teardown()
def test_ssa_vm(ssa_vm, soft_assert, vm_system_type):
    """ Tests SSA can be performed and returns sane results

    Metadata:
        test_flag: vm_analysis

    Polarion:
        assignee: sbulage
        casecomponent: SmartState
        caseimportance: high
        initialEstimate: 1/2h
        tags: smartstate
    """
    ssa_vm.smartstate_scan(wait_for_task_result=True)
    # Check release and quadicon
    quadicon_os_icon = ssa_vm.find_quadicon().data['os']
    view = navigate_to(ssa_vm, 'Details')
    details_os_icon = view.entities.summary('Properties').get_text_of('Operating System')
    logger.info("Icons: %s, %s", details_os_icon, quadicon_os_icon)
    c_lastanalyzed = ssa_vm.last_analysed

    soft_assert(c_lastanalyzed != 'Never', "Last Analyzed is set to Never")
    # RHEL has 'Red Hat' in details_os_icon, but 'redhat' in quadicon_os_icon
    os_type = vm_system_type if vm_system_type != 'redhat' else 'red hat'
    soft_assert(os_type in details_os_icon.lower(),
                "details icon: '{}' not in '{}'".format(os_type, details_os_icon))
    soft_assert(vm_system_type in quadicon_os_icon.lower(),
                "quad icon: '{}' not in '{}'".format(vm_system_type, quadicon_os_icon))

    if ssa_vm.system_type != WINDOWS:
        compare_linux_vm_data(ssa_vm)
    else:
        # Make sure windows-specific data is not empty
        compare_windows_vm_data(ssa_vm)
def upload_ova(hostname, username, password, name, datastore,
               cluster, datacenter, url, provider, proxy,
               ovf_tool_client, default_user, default_pass):

    cmd_args = []
    cmd_args.append('ovftool --noSSLVerify')
    cmd_args.append("--datastore={}".format(datastore))
    cmd_args.append("--name={}".format(name))
    cmd_args.append("--vCloudTemplate=True")
    cmd_args.append("--overwrite")  # require when failures happen and it retries
    if proxy:
        cmd_args.append("--proxy={}".format(proxy))
    cmd_args.append(url)
    cmd_args.append(
        "'vi://{}:{}@{}/{}/host/{}'"
        .format(username, password, hostname, datacenter, cluster)
    )
    logger.info("VSPHERE:%r Running OVFTool", provider)

    command = ' '.join(cmd_args)
    with make_ssh_client(ovf_tool_client, default_user, default_pass) as ssh_client:
        try:
            result = ssh_client.run_command(command)
        except Exception:
            logger.exception("VSPHERE:%r Exception during upload", provider)
            return False

    if "successfully" in result.output:
        logger.info(" VSPHERE:%r Upload completed", provider)
        return True
    else:
        logger.error("VSPHERE:%r Upload failed: %r", provider, result.output)
        return False
Example #6
def create_catalog_item(provider, provisioning, vm_name, dialog, catalog, console_template=None):

    template, host, datastore, iso_file, catalog_item_type, vlan = map(provisioning.get,
        ('template', 'host', 'datastore', 'iso_file', 'catalog_item_type', 'vlan'))
    if console_template:
        logger.info("Console template name : {}".format(console_template.name))
        template = console_template.name
    item_name = dialog.label

    provisioning_data = {
        'catalog': {'vm_name': vm_name,
                    },
        'environment': {'host_name': {'name': host},
                        'datastore_name': {'name': datastore},
                        },
        'network': {'vlan': partial_match(vlan),
                    },
    }

    if provider.type == 'rhevm':
        provisioning_data['catalog']['provision_type'] = 'Native Clone'
    elif provider.type == 'virtualcenter':
        provisioning_data['catalog']['provision_type'] = 'VMware'
    catalog_item = CatalogItem(item_type=catalog_item_type, name=item_name,
        description="my catalog", display_in=True, catalog=catalog,
        dialog=dialog, catalog_name=template,
        provider=provider, prov_data=provisioning_data)
    return catalog_item
Example #7
File: base.py Project: apagac/cfme_tests
    def glance_upload(self):
        """Push template to glance server
        if session is true, use keystone auth session from self.mgmt

        if session is false, use endpoint directly:
        1. download template to NFS mounted share on glance server via ssh+wget
        2. create image record in glance's db
        3. update image record with the infra-truenas webdav URL
        """
        if self.provider_type == 'openstack':
            # This means its a full openstack provider, and we should use its mgmt session
            client_kwargs = dict(session=self.mgmt.session)
        else:
            # standalone glance server indirectly hosting image to be templatized
            client_kwargs = dict(endpoint=self.from_template_upload(self.glance_key).get('url'))
        client = Client(version='2', **client_kwargs)
        if self.image_name in [i.name for i in client.images.list()]:
            logger.info('Image "%s" already exists on %s, skipping glance_upload',
                        self.image_name, self.glance_key)
            return True

        glance_image = client.images.create(
            name=self.image_name,
            container_format='bare',
            disk_format='qcow2',
            visibility='public')
        if self.template_upload_data.get('remote_location'):
            # add location for image on standalone glance
            client.images.add_location(glance_image.id, self.raw_image_url, {})
        else:
            if self.download_image():
                client.images.upload(glance_image.id, open(self.local_file_path, 'rb'))
            else:
                return False
        return True
Example #8
def wait_for_alert(smtp, alert, delay=None, additional_checks=None):
    """DRY waiting function

    Args:
        smtp: smtp_test funcarg
        alert: Alert name
        delay: Optional delay to pass to wait_for
        additional_checks: Additional checks to perform on the mails. Keys are names of the mail
            sections, values the values to look for.
    """
    logger.info("Waiting for informative e-mail of alert %s to come", alert.description)
    additional_checks = additional_checks or {}

    def _mail_arrived():
        for mail in smtp.get_emails():
            if "Alert Triggered: {}".format(alert.description) in mail["subject"]:
                if not additional_checks:
                    return True
                else:
                    for key, value in additional_checks.iteritems():
                        if value in mail.get(key, ""):
                            return True
        return False
    wait_for(
        _mail_arrived,
        num_sec=delay,
        delay=5,
        message="wait for e-mail to come!"
    )
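
A minimal usage sketch of the helper above inside a test; the smtp_test, alert and vm_name fixtures
and the 'text' mail-section key are assumptions for illustration, not taken from the original suite.

def test_alert_email_example(smtp_test, alert, vm_name):
    # Hypothetical usage: wait up to 30 minutes for the alert e-mail, additionally
    # requiring the VM name to appear in the mail body (the 'text' key is assumed).
    wait_for_alert(smtp_test, alert, delay=30 * 60, additional_checks={'text': vm_name})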
 def _check_skip_logs(self, line):
     for pattern in self.skip_patterns:
         if re.match(pattern, line):
             logger.info('Skip pattern {} was matched on line {}, '
                         'so skipping this line'.format(pattern, line))
             return True
     return False
def test_soft_reboot(appliance, provider, testing_instance, ensure_vm_running, soft_assert):
    """ Tests instance soft reboot

    Metadata:
        test_flag: power_control, provision

    Polarion:
        assignee: ghubale
        casecomponent: Cloud
        initialEstimate: 1/4h
    """
    testing_instance.wait_for_instance_state_change(desired_state=testing_instance.STATE_ON)
    view = navigate_to(testing_instance, 'Details')
    pwr_mgmt = view.entities.summary('Power Management')
    state_change_time = pwr_mgmt.get_text_of('State Changed On')

    testing_instance.power_control_from_cfme(option=testing_instance.SOFT_REBOOT)
    view.flash.assert_success_message(text='Restart Guest initiated', partial=True)
    wait_for_ui_state_refresh(testing_instance, provider, state_change_time, timeout=720)
    pwr_state = pwr_mgmt.get_text_of('Power State')

    if provider.one_of(GCEProvider) and pwr_state == testing_instance.STATE_UNKNOWN:
        """Wait for one more state change as transitional state also
        changes "State Changed On" time on GCE provider
        """
        logger.info("Instance is still in \"{}\" state. please wait before CFME will show correct "
                    "state".format(pwr_state))
        state_change_time = pwr_mgmt.get_text_of('State Changed On')
        wait_for_ui_state_refresh(testing_instance, provider, state_change_time, timeout=720)

    wait_for_instance_state(soft_assert, testing_instance, state="started")
Example #11
    def _filter_required_flags(self, provider):
        """ Filters by required yaml flags """
        if self.required_flags is None:
            return None
        if self.required_flags:
            test_flags = [flag.strip() for flag in self.required_flags]

            defined_flags = conf.cfme_data.get('test_flags', '')
            if isinstance(defined_flags, six.string_types):
                defined_flags = defined_flags.split(',')
            defined_flags = [flag.strip() for flag in defined_flags]

            excluded_flags = provider.data.get('excluded_test_flags', '')
            if isinstance(excluded_flags, six.string_types):
                excluded_flags = excluded_flags.split(',')
            excluded_flags = [flag.strip() for flag in excluded_flags]

            allowed_flags = set(defined_flags) - set(excluded_flags)

            if set(test_flags) - allowed_flags:
                logger.info("Filtering Provider %s out because it does not have the right flags, "
                            "%s does not contain %s",
                            provider.name, list(allowed_flags),
                            list(set(test_flags) - allowed_flags))
                return False
        return True
def wait_for_termination(provider, instance):
    """ Waits for VM/instance termination and refreshes power states and relationships
    """
    view = navigate_to(instance, 'Details')
    pwr_mgmt = view.entities.summary('Power Management')
    state_change_time = pwr_mgmt.get_text_of('State Changed On')
    provider.refresh_provider_relationships()
    logger.info("Refreshing provider relationships and power states")
    refresh_timer = RefreshTimer(time_for_refresh=300)
    wait_for(provider.is_refreshed,
             [refresh_timer],
             message="Waiting for provider.is_refreshed",
             num_sec=1000,
             delay=60,
             handle_exception=True)
    wait_for_ui_state_refresh(instance, provider, state_change_time, timeout=720)
    term_states = {instance.STATE_TERMINATED, instance.STATE_ARCHIVED, instance.STATE_UNKNOWN}
    if pwr_mgmt.get_text_of('Power State') not in term_states:
        """Wait for one more state change as transitional state also changes "State Changed On" time
        """
        logger.info("Instance is still powering down. please wait before termination")
        state_change_time = pwr_mgmt.get_text_of('State Changed On')
        wait_for_ui_state_refresh(instance, provider, state_change_time, timeout=720)

    return (instance.mgmt.state == VmState.DELETED
            if provider.one_of(EC2Provider)
            else pwr_mgmt.get_text_of('Power State') in term_states)
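
A short usage sketch showing how the helper above can close out a terminate test; the fixture
names mirror the surrounding examples and are assumptions here.

def test_terminate_example(provider, testing_instance):
    # Hypothetical usage: terminate the instance via CFME, then confirm it is really gone
    # (archived/terminated in the UI, or deleted on EC2).
    testing_instance.power_control_from_cfme(option=testing_instance.TERMINATE)
    assert wait_for_termination(provider, testing_instance), "Instance did not terminate"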
def test_quadicon_terminate(appliance, provider, testing_instance, ensure_vm_running, soft_assert):
    """ Tests terminate instance

    Polarion:
        assignee: ghubale
        initialEstimate: 1/4h
        casecomponent: Cloud
        caseimportance: high
        tags: power
    """
    testing_instance.wait_for_instance_state_change(desired_state=testing_instance.STATE_ON)
    testing_instance.power_control_from_cfme(option=testing_instance.TERMINATE, from_details=False)
    logger.info("Terminate initiated")
    appliance.browser.create_view(BaseLoggedInPage).flash.assert_success_message(
        "Terminate initiated for 1 VM and Instance from the {} Database"
        .format(appliance.product_name)
    )

    soft_assert(
        testing_instance.wait_for_instance_state_change(
            desired_state=(
                testing_instance.STATE_TERMINATED,
                testing_instance.STATE_ARCHIVED,
                testing_instance.STATE_UNKNOWN
            ),
            timeout=1200
        )
    )
def vm_ownership(enable_candu, provider, appliance):
    """In these tests, chargeback reports are filtered on VM owner.So,VMs have to be
    assigned ownership.
    """
    vm_name = provider.data['cap_and_util']['chargeback_vm']
    vm = appliance.provider_based_collection(provider, coll_type='vms').instantiate(vm_name,
                                                                                    provider)
    if not vm.exists_on_provider:
        pytest.skip('Skipping test, {} VM does not exist'.format(vm_name))
    vm.mgmt.ensure_state(VmState.RUNNING)

    group_collection = appliance.collections.groups
    cb_group = group_collection.instantiate(description='EvmGroup-user')
    user = appliance.collections.users.create(
        name="{}_{}".format(provider.name, fauxfactory.gen_alphanumeric()),
        credential=Credential(principal='uid{}'.format(fauxfactory.gen_alphanumeric()),
                              secret='secret'),
        email='*****@*****.**',
        groups=cb_group,
        cost_center='Workload',
        value_assign='Database')
    vm.set_ownership(user=user)
    logger.info('Assigned VM OWNERSHIP for {} running on {}'.format(vm_name, provider.name))
    yield user.name

    vm.unset_ownership()
    if user:
        user.delete()
Example #15
 def __getattr__(self, name):
     if name not in self._availiable_collections:
         sorted_collection_keys = sorted(self._availiable_collections)
         raise AttributeError('Collection [{}] not known to object, available collections: {}'
                              .format(name, sorted_collection_keys))
     if name not in self._collection_cache:
         item_filters = self._filters.copy()
         cls_and_or_filter = self._availiable_collections[name]
         if isinstance(cls_and_or_filter, tuple):
             item_filters.update(cls_and_or_filter[1])
             cls_or_verpick = cls_and_or_filter[0]
         else:
             cls_or_verpick = cls_and_or_filter
         # Now check whether we verpick the collection or not
         if isinstance(cls_or_verpick, VersionPick):
             cls = cls_or_verpick.pick(self._parent.appliance.version)
             try:
                 logger.info(
                     '[COLLECTIONS] Version picked collection %s as %s.%s',
                     name, cls.__module__, cls.__name__)
             except (AttributeError, TypeError, ValueError):
                 logger.exception('[COLLECTIONS] Is the collection %s truly a collection?', name)
         else:
             cls = cls_or_verpick
         self._collection_cache[name] = cls(self._parent, filters=item_filters)
     return self._collection_cache[name]
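
A sketch of how the lazy lookup above is typically exercised; the appliance object and the 'vms'
collection name are assumptions for illustration.

# First access resolves (and possibly version-picks) the collection class, instantiates it
# with the parent's filters and caches it; unknown names raise AttributeError listing the
# available collections.
vms = appliance.collections.vms
assert appliance.collections.vms is vms  # second access is served from _collection_cache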
Example #16
        def _get_timeline_events(target, policy_events):
            """Navigate to the timeline of the target and select the management timeline or the
            policy timeline. Returns an array of the found events.
            """

            timelines_view = navigate_to(target, 'Timelines')

            if isinstance(timelines_view, ServerDiagnosticsView):
                timelines_view = timelines_view.timelines
            timeline_filter = timelines_view.filter

            if policy_events:
                logger.info('Will search in Policy event timelines')
                timelines_view.filter.event_type.select_by_visible_text('Policy Events')
                timeline_filter.policy_event_category.select_by_visible_text(self.tl_category)
                timeline_filter.policy_event_status.fill('Both')
            else:
                timeline_filter.detailed_events.fill(True)
                for selected_option in timeline_filter.event_category.all_selected_options:
                    timeline_filter.event_category.select_by_visible_text(selected_option)
                timeline_filter.event_category.select_by_visible_text(self.tl_category)

            timeline_filter.time_position.select_by_visible_text('centered')
            timeline_filter.apply.click()
            logger.info('Searching for event type: %r in timeline category: %r', self.event,
                        self.tl_category)
            return timelines_view.chart.get_events(self.tl_category)
def chargeback_report_custom(appliance, vm_ownership, assign_custom_rate, provider):
    """Create a Chargeback report based on a custom rate; Queue the report"""
    owner = vm_ownership
    data = {
        'menu_name': '{}_{}'.format(provider.name, fauxfactory.gen_alphanumeric()),
        'title': '{}_{}'.format(provider.name, fauxfactory.gen_alphanumeric()),
        'base_report_on': 'Chargeback for Vms',
        'report_fields': ['Memory Allocated Cost', 'Memory Allocated over Time Period', 'Owner',
        'vCPUs Allocated over Time Period', 'vCPUs Allocated Cost',
        'Storage Allocated', 'Storage Allocated Cost'],
        'filter': {
            'filter_show_costs': 'Owner',
            'filter_owner': owner,
            'interval_end': 'Today (partial)'
        }
    }
    report = appliance.collections.reports.create(is_candu=True, **data)

    logger.info('Queuing chargeback report with custom rate for {} provider'.format(provider.name))
    report.queue(wait_for_finish=True)

    if not list(report.saved_reports.all()[0].data.rows):
        pytest.skip('Empty report')
    else:
        yield list(report.saved_reports.all()[0].data.rows)

    if report.exists:
        report.delete()
def vm_ownership(enable_candu, clean_setup_provider, provider, appliance):
    # In these tests, the metering report is filtered on VM owner, so VMs have to be
    # assigned ownership.

    vm_name = provider.data['cap_and_util']['chargeback_vm']

    if not provider.mgmt.does_vm_exist(vm_name):
        pytest.skip("Skipping test, {} VM does not exist".format(vm_name))
    provider.mgmt.start_vm(vm_name)
    provider.mgmt.wait_vm_running(vm_name)

    group_collection = appliance.collections.groups
    cb_group = group_collection.instantiate(description='EvmGroup-user')
    user = appliance.collections.users.create(
        name=fauxfactory.gen_alphanumeric(),
        credential=Credential(principal='uid' + '{}'.format(fauxfactory.gen_alphanumeric()),
            secret='secret'),
        email='*****@*****.**',
        groups=cb_group,
        cost_center='Workload',
        value_assign='Database')

    vm = VM.factory(vm_name, provider)

    try:
        vm.set_ownership(user=user.name)
        logger.info('Assigned VM OWNERSHIP for {} running on {}'.format(vm_name, provider.name))

        yield user.name
    finally:
        vm.unset_ownership()
        user.delete()
Example #19
 def _delete_vm(self):
     try:
         logger.info("attempting to delete vm %s", self.inst.name)
         self.inst.mgmt.cleanup()
     except NotFoundError:
         logger.info("can't delete vm %r, does not exist", self.inst.name)
         pass
Example #20
def verify_retirement_date(retire_vm, expected_date='Never'):
    """Verify the retirement date for a variety of situations

    Args:
        expected_date: a string, datetime, or a dict datetime dates with 'start' and 'end' keys.
    """
    if isinstance(expected_date, dict):
        # convert to a parsetime object for comparison, function depends on version
        if 'UTC' in VM.RETIRE_DATE_FMT.pick(retire_vm.appliance.version):
            convert_func = parsetime.from_american_minutes_with_utc
        elif VM.RETIRE_DATE_FMT.pick(retire_vm.appliance.version).endswith('+0000'):
            convert_func = parsetime.from_saved_report_title_format
        else:
            convert_func = parsetime.from_american_date_only
        expected_date.update({'retire': convert_func(retire_vm.retirement_date)})
        logger.info('Asserting retire date "%s" is between "%s" and "%s"',  # noqa
                    expected_date['retire'],
                    expected_date['start'],
                    expected_date['end'])

        assert expected_date['start'] <= expected_date['retire'] <= expected_date['end']

    elif isinstance(expected_date, (parsetime, datetime, date)):
        assert retire_vm.retirement_date == expected_date.strftime(
            VM.RETIRE_DATE_FMT.pick(retire_vm.appliance.version))
    else:
        assert retire_vm.retirement_date == expected_date
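
A usage sketch for the dict form described in the docstring above; the retire_vm fixture and the
retire() call that sets the date are assumptions for illustration.

def test_retirement_window_example(retire_vm):
    # Hypothetical usage: capture a window around the action that sets the retirement date,
    # then assert the date shown by CFME falls inside it ('start'/'end' keys per the docstring).
    start = parsetime.now()
    retire_vm.retire()  # assumed action that sets the retirement date
    end = parsetime.now()
    verify_retirement_date(retire_vm, expected_date={'start': start, 'end': end})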
def import_template_from_glance(api, sdomain, cluster, temp_template_name,
        glance_server, provider, template_name):
    try:
        if api.templates.get(temp_template_name) is not None:
            logger.info("RHEVM:%r Warning: found another template with this name.", provider)
            logger.info("RHEVM:%r Skipping this step, attempting to continue...", provider)
            return

        # Find the storage domain:
        sd = api.storagedomains.get(name=glance_server)

        # Find the image:
        image = sd.images.get(name=template_name)

        # Import the image:
        image.import_image(params.Action(
            async=True,
            import_as_template=True,
            template=params.Template(
                name=temp_template_name
            ),
            cluster=params.Cluster(
                name=cluster
            ),
            storage_domain=params.StorageDomain(
                name=sdomain
            )
        )
        )
Example #22
 def close_console_window(self):
     """Attempt to close Console window at the end of test."""
     if self.console_handle is not None:
         self.switch_to_console()
         self.selenium.close()
         logger.info("Browser window/tab containing Console was closed.")
         self.switch_to_appliance()
Example #23
    def get_screen_text(self):
        """
        Return the text from a text console.

        Uses OCR to scrape the text from the console image taken at the time of the call.
        """
        image_str = self.get_screen()

        # Write the image string to a file as pytesseract requires
        # a file, and doesn't take a string.
        tmp_file = tempfile.NamedTemporaryFile(suffix='.jpeg')
        tmp_file.write(image_str)
        tmp_file.flush()
        tmp_file_name = tmp_file.name
        # Open Image file, resize it to high resolution, sharpen it for clearer text
        # and then run image_to_string operation which returns unicode that needs to
        # be converted to utf-8 which gives us text [type(text) == 'str']
        # higher resolution allows tesseract to recognize text correctly
        text = (image_to_string(((Image.open(tmp_file_name)).resize((7680, 4320),
         Image.ANTIALIAS)).filter(ImageFilter.SHARPEN), lang='eng',
         config='--user-words eng.user-words')).encode('utf-8')
        tmp_file.close()

        logger.info('screen text:{}'.format(text))
        return text
Example #24
 def clear_providers(cls):
     """ Clear all providers of given class on the appliance """
     from cfme.utils.appliance import current_appliance as app
     app.rest_api.collections.providers.reload()
     # cfme 5.9 doesn't allow removing providers through the API
     bz_blocked = BZ(1501941, forced_streams=['5.9']).blocks
     if app.version < '5.9' or (app.version >= '5.9' and not bz_blocked):
         for prov in app.rest_api.collections.providers.all:
             try:
                 if any(db_type in prov.type for db_type in cls.db_types):
                     logger.info('Deleting provider: %s', prov.name)
                     prov.action.delete()
                     prov.wait_not_exists()
             except APIException as ex:
                 # Provider is already gone (usually caused by NetworkManager objs)
                 if 'RecordNotFound' not in str(ex):
                     raise ex
     else:
         # Delete all matching
         for prov in app.managed_known_providers:
             if prov.one_of(cls):
                 logger.info('Deleting provider: %s', prov.name)
                 prov.delete(cancel=False)
         # Wait for all matching to be deleted
         for prov in app.managed_known_providers:
             if prov.one_of(cls):
                 prov.wait_for_delete()
     app.rest_api.collections.providers.reload()
def add_delete_custom_attributes(provider):
    provider.add_custom_attributes(*ATTRIBUTES_DATASET)
    yield
    try:
        provider.delete_custom_attributes(*ATTRIBUTES_DATASET)
    except Exception:
        logger.info("No custom attributes to delete")
def pytest_generate_tests(metafunc):
    """
    Build a list of parameter tuples (group_name, role_access, context) and parametrize
    the test with them, where group_name is a string, role_access is the version-picked
    access dict, and context is ViaUI/SSUI.
    """
    appliance = find_appliance(metafunc)
    parameter_list = []
    id_list = []
    # TODO: Include SSUI role_access dict and VIASSUI context
    role_access_ui = VersionPick({
        Version.lowest(): role_access_ui_58z,
        '5.9': role_access_ui_59z,
        '5.10': role_access_ui_510z
    }).pick(appliance.version)
    logger.info('Using the role access dict: %s', role_access_ui)
    roles_and_context = [(
        role_access_ui, ViaUI)
    ]
    for role_access, context in roles_and_context:
        for group in role_access.keys():
            parameter_list.append((group, role_access, context))
            id_list.append('{}-{}'.format(group, context))
    metafunc.parametrize('group_name, role_access, context', parameter_list)
Example #27
def cleanup_vm(vm_name, provider):
    try:
        logger.info('Cleaning up VM %s on provider %s', vm_name, provider.key)
        provider.mgmt.delete_vm(vm_name)
    except Exception:
        # The mgmt_sys classes raise Exception :\
        logger.warning('Failed to clean up VM %s on provider %s', vm_name, provider.key)
Example #28
 def send_ctrl_alt_delete(self):
     """Press the ctrl-alt-delete button in the console tab."""
     self.switch_to_console()
     ctrl_alt_del_btn = self.provider.get_console_ctrl_alt_del_btn()
     logger.info("Sending following Keys to Console CTRL+ALT+DEL")
     ctrl_alt_del_btn.click()
     self.switch_to_appliance()
Example #29
 def create_bucket(self):
     if not self.mgmt.bucket_exists(self.bucket_name):
         self.mgmt.create_bucket(self.bucket_name)
     else:
         logger.info('(template-upload) [%s:%s:%s] Bucket %s already exists.',
                     self.log_name, self.provider, self.template_name, self.bucket_name)
     return True
def download_image_file(image_url):
    """
    Download the image to the local working directory
    :param image_url: URL of the file to download
    :return: tuple, file name and file path strings
    """
    file_name = image_url.split('/')[-1]
    u = urllib2.urlopen(image_url)
    meta = u.info()
    file_size = int(meta.getheaders("Content-Length")[0])
    file_path = os.path.abspath(file_name)
    if os.path.isfile(file_name):
        if file_size == os.path.getsize(file_name):
            return file_name, file_path
        os.remove(file_name)
    logger.info("Downloading: %r Bytes: %r", file_name, file_size)
    with open(file_name, 'wb') as image_file:
        # os.system('cls')
        file_size_dl = 0
        block_sz = 8192
        while True:
            buffer_f = u.read(block_sz)
            if not buffer_f:
                break

            file_size_dl += len(buffer_f)
            image_file.write(buffer_f)
    return file_name, file_path
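
A minimal usage sketch for the download helper above; the image URL is a placeholder, not one
used by the original scripts.

# Hypothetical usage: fetch the qcow2 image into the working directory (the download is skipped
# when a complete local copy already exists) and reuse the absolute path afterwards.
file_name, file_path = download_image_file('http://example.com/images/cfme-template.qcow2')
logger.info("Image %r available at %r", file_name, file_path)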
def gen_report_base(appliance, obj_type, provider, rate_desc, rate_interval):
    """Base function for report generation
    Args:
        :py:type:`str` obj_type: Object being tested; only 'Project' and 'Image' are supported
        :py:class:`ContainersProvider` provider: The Containers Provider
        :py:type:`str` rate_desc: The rate description as it appears in the report
        :py:type:`str` rate_interval: The rate interval, (Hourly/Daily/Weekly/Monthly)
    """
    title = 'report_{}_{}'.format(obj_type.lower(), rate_desc)
    if obj_type == 'Project':
        data = {
            'menu_name':
            title,
            'title':
            title,
            'base_report_on':
            'Chargeback for Projects',
            'report_fields': [
                'Archived', 'Chargeback Rates', 'Fixed Compute Metric',
                'Cpu Cores Used Cost', 'Cpu Cores Used Metric',
                'Network I/O Used', 'Network I/O Used Cost',
                'Fixed Compute Cost 1', 'Fixed Compute Cost 2', 'Memory Used',
                'Memory Used Cost', 'Provider Name', 'Fixed Total Cost',
                'Total Cost'
            ],
            'filter': {
                'filter_show_costs': 'Container Project',
                'filter_provider': provider.name,
                'filter_project': 'All Container Projects'
            }
        }
    elif obj_type == 'Image':
        data = {
            'base_report_on':
            'Chargeback for Images',
            'report_fields': [
                'Archived', 'Chargeback Rates', 'Fixed Compute Metric',
                'Cpu Cores Used Cost', 'Cpu Cores Used Metric',
                'Network I/O Used', 'Network I/O Used Cost',
                'Fixed Compute Cost 1', 'Fixed Compute Cost 2', 'Memory Used',
                'Memory Used Cost', 'Provider Name', 'Fixed Total Cost',
                'Total Cost'
            ],
            'filter': {
                'filter_show_costs': 'Container Image',
                'filter_provider': provider.name,
            }
        }
    else:
        raise Exception("Unknown object type: {}".format(obj_type))

    data['menu_name'] = title
    data['title'] = title
    if rate_interval == 'Hourly':
        data['filter']['interval'] = 'Day'
        data['filter']['interval_end'] = 'Yesterday'
        data['filter']['interval_size'] = '1 Day'
    elif rate_interval == 'Daily':
        data['filter']['interval'] = 'Week'
        data['filter']['interval_end'] = 'Last Week'
        data['filter']['interval_size'] = '1 Week'
    elif rate_interval in ('Weekly', 'Monthly'):
        data['filter']['interval'] = 'Month'
        data['filter']['interval_end'] = 'Last Month'
        data['filter']['interval_size'] = '1 Month'
    else:
        raise Exception('Unsupported rate interval: "{}"; available options: '
                        '(Hourly/Daily/Weekly/Monthly)'.format(rate_interval))
    report = appliance.collections.reports.create(is_candu=True, **data)

    logger.info('QUEUING CUSTOM CHARGEBACK REPORT FOR CONTAINER {}'.format(
        obj_type.upper()))
    report.queue(wait_for_finish=True)

    return report
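
A usage sketch for the report helper above; the 'custom' rate description and the Daily interval
are assumptions for illustration.

def test_project_chargeback_report_example(appliance, provider):
    # Hypothetical usage: queue a daily chargeback report for container projects and read
    # back the generated rows, mirroring the other report fixtures on this page.
    report = gen_report_base(appliance, 'Project', provider, 'custom', 'Daily')
    rows = list(report.saved_reports.all()[0].data.rows)
    assert rows, 'Empty chargeback report'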
Example #32
def create_catalog_item(appliance,
                        provider,
                        provisioning,
                        dialog,
                        catalog,
                        vm_count='1',
                        console_test=False):
    provision_type, template, host, datastore, iso_file, vlan = map(
        provisioning.get, ('provision_type', 'template', 'host', 'datastore',
                           'iso_file', 'vlan'))
    if console_test:
        template = _get_template(provider, 'console_template').name
        logger.info("Console template name : {}".format(template))
    item_name = dialog.label
    if provider.one_of(InfraProvider):
        catalog_name = template
        provisioning_data = {
            'catalog': {
                'catalog_name': {
                    'name': catalog_name,
                    'provider': provider.name
                },
                'vm_name': random_vm_name('serv'),
                'provision_type': provision_type,
                'num_vms': vm_count
            },
            'environment': {
                'host_name': {
                    'name': host
                },
                'datastore_name': {
                    'name': datastore
                }
            },
            'network': {
                'vlan': partial_match(vlan)
            },
        }
    elif provider.one_of(CloudProvider):
        catalog_name = provisioning['image']['name']
        provisioning_data = {
            'catalog': {
                'catalog_name': {
                    'name': catalog_name,
                    'provider': provider.name
                },
                'vm_name': random_vm_name('serv')
            },
            'properties': {
                'instance_type':
                partial_match(provisioning.get('instance_type', None)),
                'guest_keypair':
                provisioning.get('guest_keypair', None)
            },
        }
        # Azure specific
        if provider.one_of(AzureProvider):
            recursive_update(
                provisioning_data, {
                    'customize': {
                        'admin_username': provisioning['customize_username'],
                        'root_password': provisioning['customize_password']
                    },
                    'environment': {
                        'security_groups': provisioning['security_group'],
                        'cloud_network': provisioning['cloud_network'],
                        'cloud_subnet': provisioning['cloud_subnet'],
                        'resource_groups': provisioning['resource_group']
                    },
                })
        # GCE specific
        if provider.one_of(GCEProvider):
            recursive_update(
                provisioning_data, {
                    'properties': {
                        'boot_disk_size': provisioning['boot_disk_size'],
                        'is_preemptible': True
                    },
                    'environment': {
                        'availability_zone': provisioning['availability_zone'],
                        'cloud_network': provisioning['cloud_network']
                    },
                })
        # EC2 specific
        if provider.one_of(EC2Provider):
            recursive_update(
                provisioning_data, {
                    'environment': {
                        'availability_zone': provisioning['availability_zone'],
                        'cloud_network': provisioning['cloud_network'],
                        'cloud_subnet': provisioning['cloud_subnet'],
                        'security_groups': provisioning['security_group'],
                    },
                })
        # OpenStack specific
        if provider.one_of(OpenStackProvider):
            recursive_update(
                provisioning_data, {
                    'environment': {
                        'availability_zone': provisioning['availability_zone'],
                        'cloud_network': provisioning['cloud_network'],
                        'cloud_tenant': provisioning['cloud_tenant'],
                        'security_groups': provisioning['security_group'],
                    },
                })

    catalog_item = appliance.collections.catalog_items.create(
        provider.catalog_item_type,
        name=item_name,
        description="my catalog",
        display_in=True,
        catalog=catalog,
        dialog=dialog,
        prov_data=provisioning_data,
        provider=provider)
    return catalog_item
Example #33
 def db_url(self):
     """The connection URL for this database, including credentials"""
     template = "postgresql://{username}:{password}@{host}:{port}/vmdb_production"
     result = template.format(host=self.hostname, port=self.port, **self.credentials)
     logger.info("[DB] db_url is %s", result)
     return result
Example #34
        streams = cmd_args.stream
    else:
        streams = ALL_STREAMS

    thread_queue = []
    for stream in streams:
        stream_url = ALL_STREAMS.get(stream)
        image_url = cmd_args.image_url

        if not cmd_args.template_name:
            template_name = trackerbot.TemplateName(stream_url).template_name
        else:
            template_name = "{}-{}".format(cmd_args.template_name, stream)

        if cmd_args.print_name_only:
            logger.info("%s Template name: %s", stream, template_name)
            continue

        for provider_type in provider_types:
            providers = list_provider_keys(provider_type)

            if cmd_args.provider:
                providers = filter(lambda x: x in providers, cmd_args.provider)

            for provider in providers:
                if provider not in list_provider_keys(provider_type):
                    continue

                template_kwargs = {
                    'stream': stream,
                    'stream_url': stream_url,
Example #35
 def start(self, url_key=None):
     log.info('starting browser')
     url_key = self.coerce_url_key(url_key)
     if self.browser is not None:
         self.quit()
     return self.open_fresh(url_key=url_key)
def test_provision_with_additional_volume(request, testing_instance, provider, small_template,
                                          soft_assert, modified_request_class, appliance,
                                          copy_domains):
    """ Tests provisioning with setting specific image from AE and then also making it create and
    attach an additional 3G volume.

    Metadata:
        test_flag: provision, volumes
    """
    instance, inst_args, image = testing_instance

    # Set up automate
    method = modified_request_class.methods.instantiate(name="openstack_CustomizeRequest")
    try:
        image_id = provider.mgmt.get_template_id(small_template.name)
    except KeyError:
        pytest.skip("No small_template in provider adta!")
    with update(method):
        method.script = dedent('''\
            $evm.root["miq_provision"].set_option(
              :clone_options, {{
                :image_ref => nil,
                :block_device_mapping_v2 => [{{
                  :boot_index => 0,
                  :uuid => "{}",
                  :device_name => "vda",
                  :source_type => "image",
                  :destination_type => "volume",
                  :volume_size => 3,
                  :delete_on_termination => false
                }}]
              }}
        )
        '''.format(image_id))

    def _finish_method():
        with update(method):
            method.script = """prov = $evm.root["miq_provision"]"""
    request.addfinalizer(_finish_method)

    instance.create(**inst_args)

    request_description = 'Provision from [{}] to [{}]'.format(small_template.name, instance.name)
    provision_request = appliance.collections.requests.instantiate(request_description)
    try:
        provision_request.wait_for_request(method='ui')
    except Exception as e:
        logger.info(
            "Provision failed {}: {}".format(e, provision_request.request_state))
        raise e
    assert provision_request.is_succeeded(method='ui'), (
        "Provisioning failed with the message {}".format(
            provision_request.row.last_message.text))

    prov_instance = provider.mgmt._find_instance_by_name(instance.name)
    try:
        assert hasattr(prov_instance, 'os-extended-volumes:volumes_attached')
        volumes_attached = getattr(prov_instance, 'os-extended-volumes:volumes_attached')
        assert len(volumes_attached) == 1
        volume_id = volumes_attached[0]["id"]
        assert provider.mgmt.volume_exists(volume_id)
        volume = provider.mgmt.get_volume(volume_id)
        assert volume.size == 3
    finally:
        instance.delete_from_provider()
        wait_for(lambda: not instance.does_vm_exist_on_provider(), num_sec=180, delay=5)
        if "volume_id" in locals():  # To handle the case of 1st or 2nd assert
            if provider.mgmt.volume_exists(volume_id):
                provider.mgmt.delete_volume(volume_id)
def testing_instance(request, setup_provider, provider, provisioning, vm_name, tag):
    """ Fixture to prepare instance parameters for provisioning
    """
    image = provisioning['image']['name']
    note = ('Testing provisioning from image {} to vm {} on provider {}'.format(
        image, vm_name, provider.key))

    instance = Instance.factory(vm_name, provider, image)

    inst_args = dict()

    # Base instance info
    inst_args['request'] = {
        'email': '*****@*****.**',
        'first_name': 'Image',
        'last_name': 'Provisioner',
        'notes': note,
    }
    # TODO Move this into helpers on the provider classes
    recursive_update(inst_args, {'catalog': {'vm_name': vm_name}})

    # Check whether auto-selection of environment is passed
    auto = False  # By default provisioning will be manual
    try:
        parameter = request.param
        if parameter == 'tag':
            inst_args['purpose'] = {
                'apply_tags': Check_tree.CheckNode(
                    ['{} *'.format(tag.category.display_name), tag.display_name])
            }
        else:
            auto = parameter
    except AttributeError:
        # in case nothing was passed just skip
        pass

    recursive_update(inst_args, {
        'environment': {
            'availability_zone': provisioning.get('availability_zone', None),
            'security_groups': [provisioning.get('security_group', None)],
            'cloud_network': provisioning.get('cloud_network', None),
            'cloud_subnet': provisioning.get('cloud_subnet', None),
            'resource_groups': provisioning.get('resource_group', None)
        },
        'properties': {
            'instance_type': partial_match(provisioning.get('instance_type', None)),
            'guest_keypair': provisioning.get('guest_keypair', None)}
    })
    # GCE specific
    if provider.one_of(GCEProvider):
        recursive_update(inst_args, {
            'properties': {
                'boot_disk_size': provisioning['boot_disk_size'],
                'is_preemptible': True}
        })

    # Azure specific
    if provider.one_of(AzureProvider):
        # Azure uses different provisioning keys for some reason
        try:
            template = provider.data.templates.small_template
            vm_user = credentials[template.creds].username
            vm_password = credentials[template.creds].password
        except AttributeError:
            pytest.skip('Could not find small_template or credentials for {}'.format(provider.name))
        recursive_update(inst_args, {
            'customize': {
                'admin_username': vm_user,
                'root_password': vm_password}})
    if auto:
        inst_args.update({'environment': {'automatic_placement': auto}})
    yield instance, inst_args, image

    logger.info('Fixture cleanup, deleting test instance: %s', instance.name)
    try:
        instance.delete_from_provider()
    except Exception as ex:
        logger.warning('Exception while deleting instance fixture, continuing: {}'
                       .format(ex.message))
Example #38
def test_provision_approval(appliance, provider, vm_name, smtp_test, request,
                            edit):
    """ Tests provisioning approval. Tests couple of things.

    * Approve manually
    * Approve by editing the request to conform

    Prerequisites:
        * A provider that can provision.
        * Automate role enabled
        * User with e-mail set so you can receive and view them

    Steps:
        * Create a provisioning request that does not get automatically approved (e.g. ``num_vms``
            bigger than 1)
        * Wait for an e-mail to come, informing you that the auto-approval was unsuccessful.
        * Depending on whether you want to do manual approval or edit approval, do:
            * MANUAL: manually approve the request in UI
            * EDIT: Edit the request in UI so it conforms to the rules for auto-approval.
        * Wait for an e-mail with approval
        * Wait until the request finishes
        * Wait until an e-mail arrives informing that provisioning has finished.

    Metadata:
        test_flag: provision
        suite: infra_provisioning

    Polarion:
        assignee: jhenner
        caseimportance: high
        initialEstimate: 1/8h
    """
    # generate_tests makes sure these have values
    # template, host, datastore = map(provisioning.get, ('template', 'host', 'datastore'))

    # It will provision two of them
    vm_names = [vm_name + "001", vm_name + "002"]
    requester = "" if BZ(1628240, forced_streams=['5.10']).blocks else "[email protected] "
    collection = appliance.provider_based_collection(provider)
    inst_args = {'catalog': {
        'vm_name': vm_name,
        'num_vms': '2'
    }}

    vm = collection.create(vm_name, provider, form_values=inst_args, wait=False)
    subject = VersionPicker({
        LOWEST: "your request for a new vms was not autoapproved",
        "5.10": "your virtual machine request is pending"
    }).pick()
    wait_for(
        lambda:
        len(filter(
            lambda mail:
            subject in normalize_text(mail["subject"]),
            smtp_test.get_emails())) == 1,
        num_sec=90, delay=5)
    subject = VersionPicker({
        LOWEST: "virtual machine request was not approved",
        "5.10": "virtual machine request from {}pending approval".format(requester)
    }).pick()
    wait_for(
        lambda:
        len(filter(
            lambda mail:
            subject in normalize_text(mail["subject"]),
            smtp_test.get_emails())) == 1,
        num_sec=90, delay=5)
    smtp_test.clear_database()

    cells = {'Description': 'Provision from [{}] to [{}###]'.format(vm.template_name, vm.name)}
    provision_request = appliance.collections.requests.instantiate(cells=cells)
    navigate_to(provision_request, 'Details')
    if edit:
        # Automatic approval after editing the request to conform
        new_vm_name = '{}-xx'.format(vm_name)
        modifications = {
            'catalog': {'num_vms': "1", 'vm_name': new_vm_name},
            'Description': 'Provision from [{}] to [{}]'.format(vm.template_name, new_vm_name)}
        provision_request.edit_request(values=modifications)
        vm_names = [new_vm_name]  # Will be just one now
        request.addfinalizer(
            lambda: collection.instantiate(new_vm_name, provider).cleanup_on_provider()
        )
    else:
        # Manual approval
        provision_request.approve_request(method='ui', reason="Approved")
        vm_names = [vm_name + "001", vm_name + "002"]  # There will be two VMs
        request.addfinalizer(
            lambda: [appliance.collections.infra_vms.instantiate(name,
                                                                 provider).cleanup_on_provider()
                     for name in vm_names]
        )
    subject = VersionPicker({
        LOWEST: "your virtual machine configuration was approved",
        "5.10": "your virtual machine request was approved"
    }).pick()
    wait_for(
        lambda:
        len(filter(
            lambda mail:
            subject in normalize_text(mail["subject"]),
            smtp_test.get_emails())) == 1,
        num_sec=120, delay=5)
    smtp_test.clear_database()

    # Wait for the VM to appear on the provider backend before proceeding to ensure proper cleanup
    logger.info('Waiting for vms %s to appear on provider %s', ", ".join(vm_names), provider.key)
    wait_for(
        lambda: all(map(provider.mgmt.does_vm_exist, vm_names)),
        handle_exception=True, num_sec=600)

    provision_request.wait_for_request(method='ui')
    msg = "Provisioning failed with the message {}".format(provision_request.row.last_message.text)
    assert provision_request.is_succeeded(method='ui'), msg

    # Wait for e-mails to appear
    def verify():
        subject = VersionPicker({
            LOWEST: "your virtual machine request has completed vm {}".format(
                normalize_text(vm_name)),
            "5.10": "your virtual machine request has completed vm name {}".format(
                normalize_text(vm_name))
        }).pick()
        return (
            len(filter(
                lambda mail: subject in normalize_text(mail["subject"]),
                smtp_test.get_emails())) == len(vm_names)
        )
    wait_for(verify, message="email receive check", delay=5)
Example #39
def test_workload_smartstate_analysis(appliance, request, scenario):
    """Runs through provider based scenarios initiating smart state analysis against VMs, Hosts,
    and Datastores

    Polarion:
        assignee: rhcf3_machine
        casecomponent: SmartState
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))
    appliance.install_vddk()

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {
        'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-ssa',
        'test_name': 'SmartState Analysis',
        'appliance_roles': ', '.join(roles_smartstate),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(f'Finished cleaning up monitoring thread in {timediff}')

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_smartstate})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    # Add host credentials and set CFME relationship for RHEVM SSA
    for provider in scenario['providers']:
        for api_host in appliance.rest_api.collections.hosts.all:
            host_collection = appliance.collections.hosts
            test_host = host_collection.instantiate(name=api_host.name,
                                                    provider=provider)
            host_data = get_host_data_by_name(get_crud(provider),
                                              api_host.name)
            credentials = host.get_credentials_from_config(
                host_data['credentials'])
            test_host.update_credentials_rest(credentials)
        appliance.set_cfme_server_relationship(
            cfme_performance['appliance']['appliance_name'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']
    total_scanned_vms = 0

    while ((time.time() - starttime) < total_time):
        start_ssa_time = time.time()
        for vm in list(scenario['vms_to_scan'].values())[0]:
            vm_api = appliance.rest_api.collections.vms.get(name=vm)
            vm_api.action.scan()
            total_scanned_vms += 1
        iteration_time = time.time()

        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug(f'Time to Queue SmartState Analyses: {ssa_time}')
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warning(
                'Time to Queue SmartState Analyses ({}) exceeded time between '
                'analyses ({})'.format(ssa_time, time_between_analyses))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Scans'] = total_scanned_vms
    logger.info('Test Ending...')
Example #40
def test_provision_with_boot_volume(request, instance_args, provider, soft_assert,
                                    modified_request_class, appliance, copy_domains):
    """ Tests provisioning from a template and attaching one booting volume.

    Metadata:
        test_flag: provision, volumes

    Polarion:
        assignee: jhenner
        caseimportance: high
        initialEstimate: None
    """
    vm_name, inst_args = instance_args

    image = inst_args.get('template_name')

    with provider.mgmt.with_volume(1, imageRef=provider.mgmt.get_template_id(image)) as volume:
        # Set up automate
        method = modified_request_class.methods.instantiate(name="openstack_CustomizeRequest")
        with update(method):
            method.script = dedent('''\
                $evm.root["miq_provision"].set_option(
                    :clone_options, {{
                        :image_ref => nil,
                        :block_device_mapping_v2 => [{{
                            :boot_index => 0,
                            :uuid => "{}",
                            :device_name => "vda",
                            :source_type => "volume",
                            :destination_type => "volume",
                            :volume_size => 1,
                            :delete_on_termination => false
                        }}]
                    }}
                )
            '''.format(volume))

        @request.addfinalizer
        def _finish_method():
            with update(method):
                method.script = """prov = $evm.root["miq_provision"]"""

        instance = appliance.collections.cloud_instances.create(vm_name,
                                                                provider,
                                                                form_values=inst_args)

        request_description = 'Provision from [{}] to [{}]'.format(image,
                                                                   instance.name)
        provision_request = appliance.collections.requests.instantiate(request_description)
        try:
            provision_request.wait_for_request(method='ui')
        except Exception as e:
            logger.info(
                "Provision failed {}: {}".format(e, provision_request.request_state))
            raise e
        msg = "Provisioning failed with the message {}".format(
            provision_request.row.last_message.text)
        assert provision_request.is_succeeded(method='ui'), msg
        soft_assert(instance.name in provider.mgmt.volume_attachments(volume))
        soft_assert(provider.mgmt.volume_attachments(volume)[instance.name] == "/dev/vda")
        instance.mgmt.delete()  # To make it possible to delete the volume
        wait_for(lambda: not instance.exists_on_provider, num_sec=180, delay=5)
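
# The automate script above is built with str.format(), so the literal Ruby braces are doubled
# ({{ }}) and only the volume id is substituted. A minimal sketch of that escaping; the UUID is
# a made-up value.
from textwrap import dedent

volume_id = "11111111-2222-3333-4444-555555555555"  # hypothetical volume id
ruby = dedent('''\
    $evm.root["miq_provision"].set_option(
        :clone_options, {{:block_device_mapping_v2 => [{{:uuid => "{}"}}]}}
    )
    '''.format(volume_id))
print(ruby)  # the doubled braces render as single braces in the generated Ruby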
Example #41
def ec2cleanup(exclude_volumes, exclude_eips, exclude_elbs, exclude_enis,
               exclude_stacks, exclude_snapshots, exclude_queues,
               stack_template, output):
    with open(output, 'w') as report:
        report.write(
            'ec2cleanup.py, Address, Volume, LoadBalancer, Snapshot and '
            'Network Interface Cleanup')
        report.write("\nDate: {}\n".format(datetime.now()))
    for provider_key in list_provider_keys('ec2'):
        provider_mgmt = get_mgmt(provider_key)
        logger.info("----- Provider: %r -----", provider_key)
        logger.info("Deleting volumes...")
        delete_unattached_volumes(provider_mgmt=provider_mgmt,
                                  excluded_volumes=exclude_volumes,
                                  output=output)
        logger.info("Deleting Elastic LoadBalancers...")
        delete_unused_loadbalancers(provider_mgmt=provider_mgmt,
                                    excluded_elbs=exclude_elbs,
                                    output=output)
        logger.info("Deleting Elastic Network Interfaces...")
        delete_unused_network_interfaces(provider_mgmt=provider_mgmt,
                                         excluded_enis=exclude_enis,
                                         output=output)
        logger.info("Deleting old stacks...")
        delete_stacks(provider_mgmt=provider_mgmt,
                      excluded_stacks=exclude_stacks,
                      stack_template=stack_template,
                      output=output)
        logger.info("Deleting old queues...")
        delete_queues(provider_mgmt=provider_mgmt,
                      excluded_queues=exclude_queues,
                      output=output)
        logger.info("Deleting snapshots...")
        delete_snapshots(provider_mgmt=provider_mgmt,
                         excluded_snapshots=exclude_snapshots,
                         output=output)
        logger.info("Releasing addresses...")
        delete_disassociated_addresses(provider_mgmt=provider_mgmt,
                                       excluded_eips=exclude_eips,
                                       output=output)
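
# Hypothetical invocation sketch for ec2cleanup() above; the exclusion lists, stack template name
# and report path are made-up values, and running it needs valid 'ec2' provider keys in cfme_data.
# ec2cleanup(exclude_volumes=['vol-keepme'], exclude_eips=[], exclude_elbs=[], exclude_enis=[],
#            exclude_stacks=['prod-stack'], exclude_snapshots=[], exclude_queues=[],
#            stack_template='test-stack', output='/tmp/ec2_cleanup_report.txt')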
def deploy_template(provider_key,
                    vm_name,
                    template_name=None,
                    timeout=900,
                    **deploy_args):
    """
    Args:
        provider_key: Provider key on which the VM is to be created
        vm_name: Name of the VM to be deployed
        template_name: Name of the template that the VM is deployed from
        timeout: the timeout for template deploy
    """
    allow_skip = deploy_args.pop("allow_skip", ())
    if isinstance(allow_skip, dict):
        # 'except' needs a class or a tuple of classes, so normalize the dict keys
        skip_exceptions = tuple(allow_skip.keys())
        callable_mapping = allow_skip
    elif isinstance(allow_skip, str) and allow_skip.lower() == "default":
        skip_exceptions = (OSOverLimit, RHEVRequestError,
                           exceptions.VMInstanceNotCloned, SSLError)
        callable_mapping = {}
    else:
        skip_exceptions = tuple(allow_skip)
        callable_mapping = {}
    provider_crud = get_crud(provider_key)

    deploy_args.update(vm_name=vm_name)

    if template_name is None:
        try:
            deploy_args.update(template=provider_crud.data['small_template'])
        except KeyError:
            raise ValueError(
                'small_template not defined for Provider {} in cfme_data.yaml'.
                format(provider_key))
    else:
        deploy_args.update(template=template_name)

    deploy_args.update(provider_crud.deployment_helper(deploy_args))

    logger.info(
        "Getting ready to deploy VM/instance %s from template %s on provider %s",
        vm_name, deploy_args['template'], provider_crud.data['name'])
    try:
        try:
            logger.debug("Deploy args: %s", deploy_args)
            vm_name = provider_crud.mgmt.deploy_template(timeout=timeout,
                                                         **deploy_args)
            logger.info("Provisioned VM/instance %s",
                        vm_name)  # instance ID in case of EC2
        except Exception as e:
            logger.error('Could not provision VM/instance %s (%s: %s)',
                         vm_name,
                         type(e).__name__, str(e))
            _vm_cleanup(provider_crud.mgmt, vm_name)
            raise
    except skip_exceptions as e:
        e_c = type(e)
        if e_c in callable_mapping and not callable_mapping[e_c](e):
            raise
        # Make it visible also in the log.
        store.write_line("Skipping due to a provider error: {}: {}\n".format(
            e_c.__name__, str(e)),
                         purple=True)
        logger.exception(e)
        pytest.skip("{}: {}".format(e_c.__name__, str(e)))
    return vm_name
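
# Sketches of the allow_skip contract implemented above; the provider keys and VM names are
# made-up values:
#   - allow_skip="default" skips on the common provider errors (OSOverLimit, RHEVRequestError,
#     VMInstanceNotCloned, SSLError)
#   - a dict maps exception classes to callables; the caught exception is re-raised when its
#     callable returns a falsey value, otherwise the run is skipped
# deploy_template('vsphere-nested', 'test-vm-01', allow_skip="default")
# deploy_template('rhv-upstream', 'test-vm-02',
#                 allow_skip={RHEVRequestError: lambda e: 'quota' in str(e)})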
def test_pod_appliance_basic_ipa_auth(temp_pod_appliance, provider,
                                      setup_provider, template_folder,
                                      ipa_auth_provider,
                                      setup_ipa_auth_provider, ipa_user):
    """ Test basic ipa authentication in appliance

    Polarion:
        assignee: izapolsk
        initialEstimate: 1/2h
        casecomponent: Containers
        testSteps:
          - enable external httpd authentication in appliance
          - deploy latest configmap generator
          - generate new configuration
          - deploy new httpd configmap configuration
          - restart httpd pod
          - log in to the appliance using external credentials
    """
    appliance = temp_pod_appliance
    auth_prov = ipa_auth_provider

    logger.info(
        "retrieving necessary configmap-generator version in order to pull it beforehand"
    )
    image_data = read_host_file(
        appliance,
        os.path.join(template_folder, 'cfme-httpd-configmap-generator'))
    image_url = image_data.strip().split()[-1]
    generator_url, generator_version = image_url.rsplit(':', 1)
    logger.info("generator image url: %s, version %s", generator_url,
                generator_version)
    try:
        logger.info("check that httpd-scc-sysadmin is present")
        provider.mgmt.get_scc('httpd-scc-sysadmin')
    except ApiException as e:
        logger.info("scc 'httpd-scc-sysadmin' isn't present. adding it")
        if e.status == 404:
            sysadmin_template = read_host_file(
                appliance,
                os.path.join(template_folder, 'httpd-scc-sysadmin.yaml'))
            provider.mgmt.create_scc(body=yaml.safe_load(sysadmin_template))
        else:
            pytest.fail("Couldn't create required scc")

    logger.info("making configmap generator to be run under appropriate scc")
    provider.mgmt.append_sa_to_scc(scc_name='httpd-scc-sysadmin',
                                   namespace=appliance.project,
                                   sa='httpd-configmap-generator')

    # oc create -f templates/httpd-configmap-generator-template.yaml
    logger.info("reading and parsing configmap generator template")
    generator_data = yaml.safe_load(
        read_host_file(
            appliance,
            os.path.join(template_folder,
                         'httpd-configmap-generator-template.yaml')))

    generator_dc_name = generator_data['metadata']['name']
    processing_params = {
        'HTTPD_CONFIGMAP_GENERATOR_IMG_NAME': generator_url,
        'HTTPD_CONFIGMAP_GENERATOR_IMG_TAG': generator_version
    }

    template_entities = provider.mgmt.process_raw_template(
        body=generator_data,
        namespace=appliance.project,
        parameters=processing_params)
    # oc new-app --template=httpd-configmap-generator
    logger.info("deploying configmap generator app")
    provider.mgmt.create_template_entities(namespace=appliance.project,
                                           entities=template_entities)
    provider.mgmt.wait_pod_running(namespace=appliance.project,
                                   name=generator_dc_name)

    logger.info("running configmap generation command inside generator app")
    output_file = '/tmp/ipa_configmap'
    generator_cmd = [
        '/usr/bin/bash -c',
        '"httpd_configmap_generator',
        'ipa',
        '--host={}'.format(appliance.hostname),
        '--ipa-server={}'.format(auth_prov.host1),
        '--ipa-domain={}'.format(
            auth_prov.iparealm),  # looks like yaml value is wrong
        '--ipa-realm={}'.format(auth_prov.iparealm),
        '--ipa-principal={}'.format(auth_prov.ipaprincipal),
        '--ipa-password={}'.format(auth_prov.bind_password),
        '--output={}'.format(output_file),
        '-d',
        '-f"'
    ]

    # todo: implement this in wrapanapi by resolving chain dc->rc->po/st
    def get_pod_name(pattern):
        def func(name):
            try:
                all_pods = provider.mgmt.list_pods(namespace=appliance.project)
                return next(p.metadata.name for p in all_pods
                            if p.metadata.name.startswith(name)
                            and not p.metadata.name.endswith('-deploy'))
            except StopIteration:
                return None

        return wait_for(func=func,
                        func_args=[pattern],
                        timeout='5m',
                        delay=5,
                        fail_condition=None)[0]

    logger.info("generator cmd: %s", generator_cmd)
    generator_pod_name = get_pod_name(generator_dc_name)
    logger.info("generator pod name: {}", generator_pod_name)
    # workaround generator pod becomes ready but cannot property run commands for some time
    sleep(60)
    logger.info(
        appliance.ssh_client.run_command('oc get pods -n {}'.format(
            appliance.project),
                                         ensure_host=True))
    generator_output = str(
        appliance.ssh_client.run_command(
            'oc exec {pod} -n {ns} -- {cmd}'.format(
                pod=generator_pod_name,
                ns=appliance.project,
                cmd=" ".join(generator_cmd)),
            ensure_host=True))

    assert_output = "config map generation failed because of {}".format(
        generator_output)
    assert 'Saving Auth Config-Map to' in generator_output, assert_output

    httpd_config = provider.mgmt.run_command(namespace=appliance.project,
                                             name=generator_pod_name,
                                             cmd=["/usr/bin/cat", output_file])

    # oc scale dc httpd-configmap-generator --replicas=0
    logger.info("stopping configmap generator since it is no longer needed")
    provider.mgmt.scale_entity(name=generator_dc_name,
                               namespace=appliance.project,
                               replicas=0)

    # oc replace configmaps httpd-auth-configs --filename ./ipa_configmap
    logger.info("replacing auth configmap")
    new_httpd_config = provider.mgmt.rename_structure(
        yaml.safe_load(httpd_config))
    provider.mgmt.replace_config_map(namespace=appliance.project,
                                     **new_httpd_config)

    # oc scale dc/httpd --replicas=0
    # oc scale dc/httpd --replicas=1
    logger.info(
        "stopping & starting httpd pod in order to re-read current auth configmap"
    )
    httpd_name = 'httpd'
    provider.mgmt.scale_entity(name=httpd_name,
                               namespace=appliance.project,
                               replicas=0)
    provider.mgmt.wait_pod_stopped(namespace=appliance.project,
                                   name=httpd_name)
    provider.mgmt.scale_entity(name=httpd_name,
                               namespace=appliance.project,
                               replicas=1)
    provider.mgmt.wait_pod_running(namespace=appliance.project,
                                   name=httpd_name)

    # workaround, httpd pod becomes running but cannot handle requests properly for some short time
    sleep(60)
    # connect to appliance and try to login
    logger.info("trying to login with user from ext auth system")
    appliance.server.login(user=ipa_user)

    # check that appliance is running and provider is available
    collection = appliance.collections.container_projects
    proj = collection.instantiate(name=appliance.project, provider=provider)
    assert navigate_to(proj, 'Dashboard')
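
# A stdlib-only sketch of the polling behind get_pod_name() above: retry a lookup until it
# returns something other than the fail condition or the timeout expires. list_names() and the
# sample pod name are hypothetical stand-ins for provider.mgmt.list_pods().
import time

def poll(lookup, timeout=300, delay=5, fail_condition=None):
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = lookup()
        if result != fail_condition:
            return result
        time.sleep(delay)
    raise TimeoutError('lookup did not return a result within {}s'.format(timeout))

def find_pod(prefix, list_names):
    return next((n for n in list_names()
                 if n.startswith(prefix) and not n.endswith('-deploy')), None)

# poll(lambda: find_pod('httpd-configmap-generator',
#                       lambda: ['httpd-configmap-generator-1-abcde']), timeout=300)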
Example #44
def test_vmware_vimapi_hotadd_disk(appliance, request, testing_group,
                                   testing_vm, domain, cls):
    """Tests hot adding a disk to vmware vm. This test exercises the `VMware_HotAdd_Disk` method,
       located in `/Integration/VMware/VimApi`

    Polarion:
        assignee: ghubale
        initialEstimate: 1/8h
        casecomponent: Automate
        caseimportance: critical
        tags: automate
        testSteps:
            1. It creates an instance in ``System/Request`` that can be accessible from eg. button
            2. Then it creates a button, that refers to the ``VMware_HotAdd_Disk`` in ``Request``.
               The button shall belong in the VM and instance button group.
            3. After the button is created, it goes to a VM's summary page, clicks the button.
            4. The test waits until the capacity of disks is raised.

    Bugzillas:
        1211627, 1311221
    """
    meth = cls.methods.create(name='load_value_{}'.format(
        fauxfactory.gen_alpha()),
                              script=dedent('''\
            # Sets the capacity of the new disk.

            $evm.root['size'] = 1  # GB
            exit MIQ_OK
            '''))

    request.addfinalizer(meth.delete_if_exists)

    # Instance that calls the method and is accessible from the button
    instance = cls.instances.create(
        name="VMware_HotAdd_Disk_{}".format(fauxfactory.gen_alpha()),
        fields={
            "meth4": {
                'value': meth.name
            },  # To get the value
            "rel5": {
                'value': "/Integration/VMware/VimApi/VMware_HotAdd_Disk"
            },
        },
    )

    request.addfinalizer(instance.delete_if_exists)

    # Button that will invoke the dialog and action
    button_name = fauxfactory.gen_alphanumeric()
    button = testing_group.buttons.create(text=button_name,
                                          hover=button_name,
                                          system="Request",
                                          request=instance.name)
    request.addfinalizer(button.delete_if_exists)

    def _get_disk_capacity():
        view = testing_vm.load_details(refresh=True)
        return view.entities.summary(
            'Datastore Allocation Summary').get_text_of('Total Allocation')

    original_disk_capacity = _get_disk_capacity()
    logger.info('Initial disk allocation: %s', original_disk_capacity)

    class CustomButtonView(View):
        custom_button = Dropdown(testing_group.text)

    view = appliance.browser.create_view(CustomButtonView)
    view.custom_button.item_select(button.text)

    view = appliance.browser.create_view(BaseLoggedInPage)
    view.flash.assert_no_error()
    try:
        wait_for(lambda: _get_disk_capacity() > original_disk_capacity,
                 num_sec=180,
                 delay=5)
    finally:
        logger.info('End disk capacity: %s', _get_disk_capacity())
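
# The wait_for() above compares the 'Total Allocation' strings directly; a more robust check
# would parse the value and unit first. A minimal sketch of that parsing (the unit table and the
# sample strings are assumptions, not values read from the UI):
UNITS = {'KB': 1e3, 'MB': 1e6, 'GB': 1e9, 'TB': 1e12}

def parse_allocation(text):
    value, unit = text.split()
    return float(value) * UNITS[unit.upper()]

assert parse_allocation('41 GB') > parse_allocation('40.5 GB')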
Example #45
def test_ssa_vm(provider, instance, soft_assert):
    """ Tests SSA can be performed and returns sane results

    Metadata:
        test_flag: vm_analysis
    """

    # TODO: check if previously scanned?
    #       delete the vm itself if it did have a scan already
    #       delete all previous scan tasks

    e_users = None
    e_groups = None
    e_packages = None
    e_services = None
    e_icon_part = instance.system_type['icon']

    if instance.system_type != WINDOWS:
        e_users = instance.ssh.run_command(
            "cat /etc/passwd | wc -l").output.strip('\n')
        e_groups = instance.ssh.run_command(
            "cat /etc/group | wc -l").output.strip('\n')
        e_packages = instance.ssh.run_command(
            instance.system_type['package-number']).output.strip('\n')
        e_services = instance.ssh.run_command(
            instance.system_type['services-number']).output.strip('\n')

    logger.info(
        "Expecting to have {} users, {} groups, {} packages and {} services".
        format(e_users, e_groups, e_packages, e_services))

    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15,
             timeout="35m",
             fail_func=lambda: toolbar.select('Reload'))

    # Check release and quadicon
    quadicon_os_icon = instance.find_quadicon().data['os']
    details_os_icon = instance.get_detail(properties=('Properties',
                                                      'Operating System'),
                                          icon_href=True)
    logger.info("Icons: %s, %s", details_os_icon, quadicon_os_icon)

    # We shouldn't use get_detail anymore - it takes too much time
    c_lastanalyzed = InfoBlock.text('Lifecycle', 'Last Analyzed')
    c_users = InfoBlock.text('Security', 'Users')
    c_groups = InfoBlock.text('Security', 'Groups')
    c_packages = 0
    c_services = 0
    if instance.system_type != WINDOWS:
        c_packages = InfoBlock.text('Configuration', 'Packages')
        c_services = InfoBlock.text('Configuration', 'Init Processes')

    logger.info(
        "SSA shows {} users, {} groups {} packages and {} services".format(
            c_users, c_groups, c_packages, c_services))

    soft_assert(c_lastanalyzed != 'Never', "Last Analyzed is set to Never")
    soft_assert(
        e_icon_part in details_os_icon,
        "details icon: '{}' not in '{}'".format(e_icon_part, details_os_icon))
    soft_assert(
        e_icon_part in quadicon_os_icon,
        "quad icon: '{}' not in '{}'".format(e_icon_part, quadicon_os_icon))

    if instance.system_type != WINDOWS:
        soft_assert(c_users == e_users,
                    "users: '{}' != '{}'".format(c_users, e_users))
        soft_assert(c_groups == e_groups,
                    "groups: '{}' != '{}'".format(c_groups, e_groups))
        soft_assert(c_packages == e_packages,
                    "packages: '{}' != '{}'".format(c_packages, e_packages))
        soft_assert(c_services == e_services,
                    "services: '{}' != '{}'".format(c_services, e_services))
    else:
        # Make sure windows-specific data is not empty
        c_patches = InfoBlock.text('Security', 'Patches')
        c_applications = InfoBlock.text('Configuration', 'Applications')
        c_win32_services = InfoBlock.text('Configuration', 'Win32 Services')
        c_kernel_drivers = InfoBlock.text('Configuration', 'Kernel Drivers')
        c_fs_drivers = InfoBlock.text('Configuration', 'File System Drivers')

        soft_assert(c_patches != '0', "patches: '{}' != '0'".format(c_patches))
        soft_assert(c_applications != '0',
                    "applications: '{}' != '0'".format(c_applications))
        soft_assert(c_win32_services != '0',
                    "win32 services: '{}' != '0'".format(c_win32_services))
        soft_assert(c_kernel_drivers != '0',
                    "kernel drivers: '{}' != '0'".format(c_kernel_drivers))
        soft_assert(c_fs_drivers != '0',
                    "fs drivers: '{}' != '0'".format(c_fs_drivers))
Example #46
def is_collected():
    metrics_count = self.get_metrics(table=table_name).count()
    logger.info("Current metrics found count is {count}".format(
        count=metrics_count))
    return metrics_count > 0
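
# is_collected() above is meant as a predicate for a polling helper. A self-contained stand-in
# for that usage, with a fake metrics source instead of self.get_metrics(table=table_name):
import itertools
import time

fake_counts = itertools.chain([0, 0, 3], itertools.repeat(3))  # hypothetical readings

def wait_until(predicate, delay=0.01, attempts=10):
    for _ in range(attempts):
        if predicate():
            return True
        time.sleep(delay)
    return False

assert wait_until(lambda: next(fake_counts) > 0)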
Example #47
def instance(request, local_setup_provider, provider, vm_name,
             vm_analysis_data, appliance):
    """ Fixture to provision instance on the provider """

    vm = VM.factory(vm_name, provider, template_name=vm_analysis_data['image'])
    request.addfinalizer(lambda: cleanup_vm(vm_name, provider))

    provision_data = vm_analysis_data.copy()
    del provision_data['image']
    vm.create_on_provider(find_in_cfme=True, **provision_data)

    if provider.type == "openstack":
        vm.provider.mgmt.assign_floating_ip(vm.name, 'public')

    logger.info("VM %s provisioned, waiting for IP address to be assigned",
                vm_name)

    mgmt_system = provider.get_mgmt_system()

    @wait_for_decorator(timeout="20m", delay=5)
    def get_ip_address():
        logger.info("Power state for {} vm: {}, is_vm_stopped: {}".format(
            vm_name, mgmt_system.vm_status(vm_name),
            mgmt_system.is_vm_stopped(vm_name)))
        if mgmt_system.is_vm_stopped(vm_name):
            mgmt_system.start_vm(vm_name)

        ip = mgmt_system.current_ip_address(vm_name)
        logger.info("Fetched IP for %s: %s", vm_name, ip)
        return ip is not None

    connect_ip = mgmt_system.get_ip_address(vm_name)
    assert connect_ip is not None

    # Check that we can at least get the uptime via ssh this should only be possible
    # if the username and password have been set via the cloud-init script so
    # is a valid check
    if vm_analysis_data['fs-type'] not in ['ntfs', 'fat32']:
        logger.info("Waiting for %s to be available via SSH", connect_ip)
        ssh_client = ssh.SSHClient(hostname=connect_ip,
                                   username=vm_analysis_data['username'],
                                   password=vm_analysis_data['password'],
                                   port=22)
        wait_for(ssh_client.uptime, num_sec=3600, handle_exception=True)
        vm.ssh = ssh_client
    vm.system_type = detect_system_type(vm)
    logger.info("Detected system type: %s", vm.system_type)
    vm.image = vm_analysis_data['image']
    vm.connect_ip = connect_ip

    # TODO:  This is completely wrong and needs to be fixed
    #   CFME relationship is suppose to be set to the appliance, which is required
    #   to be placed within the same datastore that the VM resides
    #
    #   Also, if rhev and iscsi, it need direct_lun
    if provider.type == 'rhevm':
        logger.info("Setting a relationship between VM and appliance")
        from cfme.infrastructure.virtual_machines import Vm
        cfme_rel = Vm.CfmeRelationship(vm)
        server_name = appliance.server_name()
        cfme_rel.set_relationship(str(server_name), configuration.server_id())

    yield vm

    # Close the SSH client if we have one
    if getattr(vm, 'ssh', None):
        vm.ssh.close()
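
# Before attaching the SSH client, the fixture above waits for an IP and for sshd to answer.
# A stdlib-only sketch of one way to check reachability during that wait; the host value is a
# made-up example:
import socket

def ssh_port_open(host, port=22, timeout=5):
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

# ssh_port_open('10.0.0.42')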
def temp_pod_ansible_appliance(provider, appliance_data, template_tags):
    tags = template_tags
    params = appliance_data.copy()
    project = 'test-pod-ansible-{t}'.format(
        t=fauxfactory.gen_alphanumeric().lower())
    try:
        with ssh.SSHClient(
                hostname=params['openshift_creds']['hostname'],
                username=params['openshift_creds']['ssh']['username'],
                password=params['openshift_creds']['ssh']['password'],
                oc_username=params['openshift_creds']['username'],
                oc_password=params['openshift_creds']['password'],
                project=project,
                is_pod=True) as ssh_client:

            # copying ansible configuration file to openshift server
            fulfilled_config = ansible_config.format(
                host=provider.provider_data['hostname'],
                subdomain=provider.provider_data['base_url'],
                proj=project,
                app_ui_url=tags['cfme-openshift-app-ui']['url'],
                app_ui_tag=tags['cfme-openshift-app-ui']['tag'],
                app_url=tags['cfme-openshift-app']['url'],
                app_tag=tags['cfme-openshift-app']['tag'],
                ansible_url=tags['cfme-openshift-embedded-ansible']['url'],
                ansible_tag=tags['cfme-openshift-embedded-ansible']['tag'],
                httpd_url=tags['cfme-openshift-httpd']['url'],
                httpd_tag=tags['cfme-openshift-httpd']['tag'],
                memcached_url=tags['cfme-openshift-memcached']['url'],
                memcached_tag=tags['cfme-openshift-memcached']['tag'],
                db_url=tags['cfme-openshift-postgresql']['url'],
                db_tag=tags['cfme-openshift-postgresql']['tag'])
            logger.info(
                "ansible config file:\n {conf}".format(conf=fulfilled_config))
            with tempfile.NamedTemporaryFile('w') as f:
                f.write(fulfilled_config)
                f.flush()
                os.fsync(f.fileno())
                remote_file = os.path.join('/tmp', f.name)
                ssh_client.put_file(f.name, remote_file, ensure_host=True)

            # run ansible deployment
            ansible_cmd = ('/usr/bin/ansible-playbook -v -i {inventory_file} '
                           '/usr/share/ansible/openshift-ansible/playbooks/'
                           'openshift-management/config.yml').format(
                               inventory_file=remote_file)
            cmd_result = ssh_client.run_command(ansible_cmd, ensure_host=True)
            logger.info(u"deployment result: {result}".format(
                result=cmd_result.output))
            ssh_client.run_command('rm -f {f}'.format(f=remote_file))

            assert cmd_result.success
            # retrieve data of created appliance
            assert provider.mgmt.is_vm_running(
                project), "Appliance was not deployed correctly"
            params['db_host'] = provider.mgmt.expose_db_ip(project)
            params['project'] = project
            params['hostname'] = provider.mgmt.get_appliance_url(project)
            # create instance of appliance
            with IPAppliance(**params) as appliance:
                # framework will try work with default appliance if browser restarts w/o this
                # workaround
                holder = config.pluginmanager.get_plugin(PLUGIN_KEY)
                holder.held_appliance = appliance
                yield appliance
    finally:
        if provider.mgmt.does_vm_exist(project):
            provider.mgmt.delete_vm(project)
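
# The fixture above writes the rendered ansible inventory to a NamedTemporaryFile and calls
# flush()/fsync() before copying it, so the remote side never sees a partially written file.
# A minimal sketch of that pattern; the inventory content is a placeholder:
import os
import tempfile

content = "[masters]\nopenshift-master.example.com\n"  # hypothetical inventory
with tempfile.NamedTemporaryFile('w') as f:
    f.write(content)
    f.flush()
    os.fsync(f.fileno())
    local_path = f.name  # bytes are on disk now, safe to put_file/scp from this path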
Example #49
    def create(self,
               vm_name,
               provider,
               form_values=None,
               cancel=False,
               check_existing=False,
               find_in_cfme=False,
               wait=True):
        """Provisions an vm/instance with the given properties through CFME

        Args:
            vm_name: the vm/instance's name
            provider: provider object
            form_values: dictionary of form values for provisioning, structured into tabs
            cancel: boolean, whether or not to cancel form filling
            check_existing: verify if such vm_name exists
            find_in_cfme: verify that vm was created and appeared in CFME
            wait: wait for vm provision request end

        Note:
            Calling create on a sub-class of instance will generate the properly formatted
            dictionary when the correct fields are supplied.
        """
        vm = self.instantiate(vm_name, provider)
        if check_existing and vm.exists:
            return vm
        if not provider.is_refreshed():
            provider.refresh_provider_relationships()
            wait_for(provider.is_refreshed,
                     func_kwargs={'refresh_delta': 10},
                     timeout=600)
        if not form_values:
            form_values = vm.vm_default_args
        else:
            inst_args = vm.vm_default_args
            form_values = recursive_update(inst_args, form_values)
        env = form_values.get('environment') or {}
        if env.get('automatic_placement'):
            form_values['environment'] = {'automatic_placement': True}
        form_values.update({'provider_name': provider.name})
        if not form_values.get('template_name'):
            template_name = (
                provider.data.get('provisioning').get('image', {}).get('name')
                or provider.data.get('provisioning').get('template'))
            vm.template_name = template_name
            form_values.update({'template_name': template_name})
        view = navigate_to(self, 'Provision')
        view.form.fill(form_values)

        if cancel:
            view.form.cancel_button.click()
            view = self.browser.create_view(BaseLoggedInPage)
            view.flash.assert_success_message(self.ENTITY.PROVISION_CANCEL)
            view.flash.assert_no_error()
        else:
            view.form.submit_button.click()

            view = vm.appliance.browser.create_view(RequestsView)
            wait_for(lambda: view.flash.messages,
                     fail_condition=[],
                     timeout=10,
                     delay=2,
                     message='wait for Flash Success')
            view.flash.assert_no_error()

            if wait:
                request_description = 'Provision from [{}] to [{}]'.format(
                    form_values.get('template_name'), vm.name)
                provision_request = vm.appliance.collections.requests.instantiate(
                    request_description)
                logger.info('Waiting for cfme provision request for vm %s',
                            vm.name)
                provision_request.wait_for_request(method='ui', num_sec=900)
                if provision_request.is_succeeded(method='ui'):
                    logger.info('Waiting for vm %s to appear on provider %s',
                                vm.name, provider.key)
                    wait_for(provider.mgmt.does_vm_exist, [vm.name],
                             handle_exception=True,
                             num_sec=600)
                else:
                    raise Exception(
                        "Provisioning vm {} failed with: {}".format(
                            vm.name, provision_request.row.last_message.text))
        if find_in_cfme:
            vm.wait_to_appear(timeout=800)

        return vm
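
# A hedged usage sketch for create() above; the collection, provider object and form values are
# assumptions about a typical caller rather than a verified invocation:
# vm = appliance.collections.cloud_instances.create(
#     'test-instance-01', provider,
#     form_values={'environment': {'automatic_placement': True}},
#     find_in_cfme=True)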
Example #50
def test_ssa_template(request, local_setup_provider, provider, soft_assert,
                      vm_analysis_data, appliance):
    """ Tests SSA can be performed on a template

    Metadata:
        test_flag: vm_analysis
    """

    template_name = vm_analysis_data['image']
    template = Template.factory(template_name, provider, template=True)

    # Set credentials to all hosts set for this datastore
    if provider.type in ['virtualcenter', 'rhevm']:
        datastore_name = vm_analysis_data['datastore']
        datastore_collection = datastore.DatastoreCollection(
            appliance=appliance)
        test_datastore = datastore_collection.instantiate(name=datastore_name,
                                                          provider=provider)
        host_list = cfme_data.get('management_systems',
                                  {})[provider.key].get('hosts', [])
        host_names = [h.name for h in test_datastore.get_hosts()]
        for host_name in host_names:
            test_host = host.Host(name=host_name, provider=provider)
            hosts_data = [x for x in host_list if x.name == host_name]
            if len(hosts_data) > 0:
                host_data = hosts_data[0]

                if not test_host.has_valid_credentials:
                    creds = host.get_credentials_from_config(
                        host_data['credentials'])
                    test_host.update(updates={'credentials': creds},
                                     validate_credentials=True)

    template.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(template_name),
             delay=15,
             timeout="35m",
             fail_func=lambda: toolbar.select('Reload'))

    # Check release and quadicon
    quadicon_os_icon = template.find_quadicon().data['os']
    details_os_icon = template.get_detail(properties=('Properties',
                                                      'Operating System'),
                                          icon_href=True)
    logger.info("Icons: {}, {}".format(details_os_icon, quadicon_os_icon))

    # We shouldn't use get_detail anymore - it takes too much time
    c_users = InfoBlock.text('Security', 'Users')
    c_groups = InfoBlock.text('Security', 'Groups')
    c_packages = 0
    if vm_analysis_data['fs-type'] not in ['ntfs', 'fat32']:
        c_packages = InfoBlock.text('Configuration', 'Packages')

    logger.info("SSA shows {} users, {} groups and {} packages".format(
        c_users, c_groups, c_packages))

    if vm_analysis_data['fs-type'] not in ['ntfs', 'fat32']:
        soft_assert(c_users != '0', "users: '{}' != '0'".format(c_users))
        soft_assert(c_groups != '0', "groups: '{}' != '0'".format(c_groups))
        soft_assert(c_packages != '0',
                    "packages: '{}' != '0'".format(c_packages))
    else:
        # Make sure windows-specific data is not empty
        c_patches = InfoBlock.text('Security', 'Patches')
        c_applications = InfoBlock.text('Configuration', 'Applications')
        c_win32_services = InfoBlock.text('Configuration', 'Win32 Services')
        c_kernel_drivers = InfoBlock.text('Configuration', 'Kernel Drivers')
        c_fs_drivers = InfoBlock.text('Configuration', 'File System Drivers')

        soft_assert(c_patches != '0', "patches: '{}' != '0'".format(c_patches))
        soft_assert(c_applications != '0',
                    "applications: '{}' != '0'".format(c_applications))
        soft_assert(c_win32_services != '0',
                    "win32 services: '{}' != '0'".format(c_win32_services))
        soft_assert(c_kernel_drivers != '0',
                    "kernel drivers: '{}' != '0'".format(c_kernel_drivers))
        soft_assert(c_fs_drivers != '0',
                    "fs drivers: '{}' != '0'".format(c_fs_drivers))
def test_workload_capacity_and_utilization_rep(appliance, request, scenario,
                                               setup_perf_provider):
    """Runs through provider based scenarios enabling C&U and replication, run for a set period of
    time. Memory Monitor creates graphs and summary at the end of each scenario.

    Polarion:
        assignee: rhcf3_machine
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    ssh_client = appliance.ssh_client()

    ssh_master_args = {
        'hostname': scenario['replication_master']['ip_address'],
        'username': scenario['replication_master']['ssh']['username'],
        'password': scenario['replication_master']['ssh']['password']
    }
    master_appliance = IPAppliance(
        hostname=scenario['replication_master']['ip_address'],
        openshift_creds=ssh_master_args)

    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = scenario['replication'] == 'pglogical'

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical scenario
    appliance.set_pglogical_replication(replication_type=':none')
    # Spawn tail beforehand to prevent unnecessary waiting on MiqServer starting since the appliance
    # under test is cleaned first, followed by master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()
    logger.info('Clean appliance under test ({})'.format(ssh_client))
    appliance.clean_appliance()
    logger.info('Clean master appliance ({})'.format(ssh_client_master))
    master_appliance.clean_appliance()  # Clean Replication master appliance

    if is_pglogical:
        scenario_data = {
            'appliance_ip': appliance.hostname,
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario
        }
    else:
        scenario_data = {
            'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario
        }
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(evm_tail=sshtail_evm,
                                                  poll_interval=2)
    appliance.update_server_roles(
        {role: True
         for role in roles_cap_and_util_rep})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        appliance.set_pglogical_replication(replication_type=':remote')
        # Setup master appliance to :global
        master_appliance.set_pglogical_replication(replication_type=':global')
        # Setup master to subscribe:
        master_appliance.add_pglogical_replication_subscription(
            ssh_client_master, appliance.hostname)
    else:
        # Setup local towards Master
        appliance.set_rubyrep_replication(
            scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (Unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        appliance.update_server_roles(
            {role: True
             for role in roles_cap_and_util_rep})

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        appliance.set_pglogical_replication(replication_type=':none')
    else:
        appliance.update_server_roles(
            {role: True
             for role in roles_cap_and_util_rep})

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
Example #52
    def set_retirement_date(self, when=None, offset=None, warn=None):
        """Overriding common method to use widgetastic views/widgets properly

        Args:
            when: :py:class:`datetime.datetime` object, when to retire (date in future)
            offset: :py:class:`dict` with months, weeks, days, hours keys. other keys ignored
            warn: When to warn, fills the select in the form in case the ``when`` is specified.

        Note: this should be moved up to the common VM class when infra+cloud+common are all WT

        If when and offset are both None, this removes retirement date

        Examples:
            # To set a specific retirement date 2 days from today
            two_days_later = datetime.date.today() + datetime.timedelta(days=2)
            vm.set_retirement_date(when=two_days_later)

            # To set a retirement offset 2 weeks from now
            vm.set_retirement_date(offset={'weeks': 2})

        Offset is dict to remove ambiguity between timedelta/datetime and months/weeks/days/hours
        timedelta supports creation with weeks, but not months
        timedelta supports days attr, but not weeks or months
        timedelta days attr will report a total summary, not the component that was passed to it
        For these reasons timedelta isn't appropriate for offset
        An enhancement to cfme.utils.timeutil extending timedelta would be great for making this a
        bit cleaner
        """
        new_retire = self.appliance.version >= "5.9"
        view = navigate_to(self, 'SetRetirement')
        fill_date = None
        fill_offset = None

        # explicit is/not None use here because of empty strings and dicts

        if when is not None and offset is not None:
            raise ValueError(
                'set_retirement_date takes when or offset, but not both')
        if not new_retire and offset is not None:
            raise ValueError(
                'Offset retirement only available in CFME 59z+ or miq-gaprindashvili'
            )
        if when is not None and not isinstance(when, (datetime, date)):
            raise ValueError('when argument must be a datetime object')

        # due to major differences between the forms and their interaction, I'm splitting this
        # method into two major blocks, one for each version. As a result some patterns will be
        # repeated in both blocks
        # This will allow for making changes to one version or the other without strange
        # interaction in the logic

        # format the date
        # needs 4 digit year for fill
        # displayed 2 digit year for flash message
        if new_retire:
            # 59z/G-release retirement
            if when is not None and offset is None:
                # Specific datetime retire, H+M are 00:00 by default if just date passed
                fill_date = when.strftime('%m/%d/%Y %H:%M')  # 4 digit year
                msg_date = when.strftime(
                    '%m/%d/%y %H:%M UTC')  # two digit year and timestamp
                msg = 'Retirement date set to {}'.format(msg_date)
            elif when is None and offset is None:
                # clearing retirement date with space in textinput,
                # using space here as with empty string calendar input is not cleared correctly
                fill_date = ' '
                msg = 'Retirement date removed'
            elif offset is not None:
                # retirement by offset
                fill_date = None
                fill_offset = {
                    k: v
                    for k, v in offset.items()
                    if k in ['months', 'weeks', 'days', 'hours']
                }
                # hack together an offset
                # timedelta can take weeks, but not months
                # copy and pop, only used to generate message, not used for form fill
                offset_copy = fill_offset.copy()
                if 'months' in offset_copy:
                    new_weeks = offset_copy.get(
                        'weeks', 0) + int(offset_copy.pop('months', 0)) * 4
                    offset_copy.update({'weeks': new_weeks})

                msg_date = datetime.utcnow() + timedelta(**offset_copy)
                msg = 'Retirement date set to {}'.format(
                    msg_date.strftime('%m/%d/%y %H:%M UTC'))
            # TODO move into before_fill when no need to click away from datetime picker
            view.form.fill({
                'retirement_mode':
                'Time Delay from Now'
                if fill_offset else 'Specific Date and Time'
            })
            view.flush_widget_cache()  # since retirement_date is a conditional widget
            if fill_date is not None:  # specific check because of empty string
                # two part fill, widget seems to block warn selection when open
                changed_date = view.form.fill(
                    {'retirement_date': {
                        'datetime_select': fill_date
                    }})
                view.title.click()  # close datetime widget
                changed_warn = view.form.fill({'retirement_warning': warn})
                changed = changed_date or changed_warn
            elif fill_offset:
                changed = view.form.fill({
                    'retirement_date': fill_offset,
                    'retirement_warning': warn
                })

        else:
            # 58z/euwe retirement
            if when:
                fill_date = when.strftime('%m/%d/%Y')  # 4 digit year
                msg_date = when.strftime(
                    '%m/%d/%y 00:00 UTC')  # two digit year and default 0 UTC
                msg = 'Retirement date set to {}'.format(msg_date)
            else:
                fill_date = None
                msg = 'Retirement date removed'
            if fill_date:
                changed = view.form.fill({
                    'retirement_date': fill_date,
                    'retirement_warning': warn
                })
            else:
                if view.form.remove_date.is_displayed:
                    view.form.remove_date.click()
                    changed = True
                else:
                    # no date set, nothing to change
                    logger.info(
                        'Retirement date not set, cannot clear, canceling form'
                    )
                    changed = False

        # Form save and flash messages are the same between versions
        if changed:
            view.form.save.click()
        else:
            logger.info(
                'No form changes for setting retirement, clicking cancel')
            view.form.cancel.click()
            msg = 'Set/remove retirement date was cancelled by the user'
        if self.DETAILS_VIEW_CLASS is not None:
            view = self.create_view(self.DETAILS_VIEW_CLASS)
            assert view.is_displayed
        view.flash.assert_success_message(msg)
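
# The offset branch above approximates months as four weeks when building the expected flash
# message. A self-contained sketch of that conversion; the offset values are arbitrary examples:
from datetime import datetime, timedelta

offset = {'months': 1, 'weeks': 2, 'days': 1, 'hours': 3}
normalized = offset.copy()
normalized['weeks'] = normalized.get('weeks', 0) + int(normalized.pop('months', 0)) * 4
expected = datetime.utcnow() + timedelta(**normalized)
print(expected.strftime('%m/%d/%y %H:%M UTC'))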
def cleanup_vms(texts, max_hours=24, providers=None, tags=None, dryrun=True):
    """
    Main method for the cleanup process
    Generates regex match objects
    Checks providers for cleanup boolean in yaml
    Checks provider connectivity (using ping)
    Process Pool for provider scanning
    Each provider process will thread vm scanning and deletion

    Args:
        texts (list): List of regex strings to match with
        max_hours (int): age limit for deletion
        providers (list): List of provider keys to scan and cleanup
        tags (list): List of tags to filter providers by
        dryrun (bool): Whether or not to actually delete VMs or just report
    Returns:
        int: return code, 0 on success, otherwise raises exception
    """
    logger.info(
        'Matching VM names against the following case-insensitive strings: %r',
        texts)
    # Compile regex, strip leading/trailing single quotes from cli arg
    matchers = [re.compile(text.strip("'"), re.IGNORECASE) for text in texts]

    # setup provider filter with cleanup (default), tags, and providers (from cli opts)
    filters = [ProviderFilter(required_fields=[('cleanup', True)])]
    if tags:
        logger.info('Adding required_tags ProviderFilter for: %s', tags)
        filters.append(ProviderFilter(required_tags=tags))
    if providers:
        logger.info('Adding keys ProviderFilter for: %s', providers)
        filters.append(ProviderFilter(keys=providers))

    # Just want keys, use list_providers with no global filters to include disabled.
    with DummyAppliance():
        providers_to_scan = [
            prov.key
            for prov in list_providers(filters, use_global_filters=False)
        ]
    logger.info(
        'Potential providers for cleanup, filtered with given tags and provider keys: \n%s',
        '\n'.join(providers_to_scan))

    # scan providers for vms with name matches
    scan_fail_queue = manager.Queue()
    with Pool(4) as pool:
        deleted_vms = pool.starmap(
            cleanup_provider,
            ((provider_key, matchers, scan_fail_queue, max_hours, dryrun)
             for provider_key in providers_to_scan))

    # flatten deleted_vms list, as its top level is by provider process
    # at same time remove None responses
    deleted_vms = [
        report for prov_list in deleted_vms if prov_list is not None
        for report in prov_list
    ]

    scan_fail_vms = []
    # add the scan failures into deleted vms for reporting sake
    while not scan_fail_queue.empty():
        scan_fail_vms.append(scan_fail_queue.get())

    with open(args.outfile, 'a') as report:
        report.write('## VM/Instances deleted via:\n'
                     '##   text matches: {}\n'
                     '##   age matches: {}\n'.format(texts, max_hours))
        message = tabulate(
            sorted(scan_fail_vms + deleted_vms, key=attrgetter('result')),
            headers=['Provider', 'Name', 'Age', 'Status Before', 'Delete RC'],
            tablefmt='orgtbl')
        report.write(message + '\n')
    logger.info(message)
    return 0
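
# A minimal sketch of the name-matching step above: compile the CLI text arguments
# case-insensitively and keep only VM names matching any of them; the names are made up.
import re

texts = ["'test_'", 'provision-']
matchers = [re.compile(text.strip("'"), re.IGNORECASE) for text in texts]
vm_names = ['Test_ssa_vm_1', 'prod-db', 'provision-abc123']
to_delete = [name for name in vm_names if any(m.search(name) for m in matchers)]
assert to_delete == ['Test_ssa_vm_1', 'provision-abc123']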
Example #54
def main(**kwargs):
    # get_mgmt validates, since it will explode without an existing key or type
    if kwargs.get('deploy'):
        kwargs['configure'] = True
        kwargs['outfile'] = 'appliance_ip_address_1'
        providers = provider_data['management_systems']
        provider_dict = provider_data['management_systems'][kwargs['provider']]
        credentials =\
            {'username': provider_dict['username'],
             'password': provider_dict['password'],
             'tenant': provider_dict['template_upload'].get('tenant_admin', 'admin'),
             'auth_url': provider_dict.get('auth_url'),
             }
        provider = get_mgmt(kwargs['provider'],
                            providers=providers,
                            credentials=credentials)
        flavors = provider_dict['template_upload'].get('flavors',
                                                       ['m1.medium'])
        provider_type = provider_data['management_systems'][
            kwargs['provider']]['type']
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }
    else:
        provider = get_mgmt(kwargs['provider'])
        provider_dict = cfme_data['management_systems'][kwargs['provider']]
        provider_type = provider_dict['type']
        flavors = cfme_data['appliance_provisioning']['default_flavors'].get(
            provider_type, [])
        deploy_args = {
            'vm_name': kwargs['vm_name'],
            'template': kwargs['template'],
        }

    logger.info('Connecting to %s', kwargs['provider'])

    if kwargs.get('destroy'):
        # TODO: destroy should be its own script
        # but it's easy enough to just hijack the parser here
        # This returns True if destroy fails to give POSIXy exit codes (0 is good, False is 0, etc)
        return not destroy_vm(provider, deploy_args['vm_name'])

    # Try to snag defaults from cfme_data here for each provider type
    if provider_type == 'rhevm':
        cluster = provider_dict.get('default_cluster', kwargs.get('cluster'))
        if cluster is None:
            raise Exception(
                '--cluster is required for rhev instances and default is not set'
            )
        deploy_args['cluster'] = cluster

        if kwargs.get('place_policy_host') and kwargs.get('place_policy_aff'):
            deploy_args['placement_policy_host'] = kwargs['place_policy_host']
            deploy_args['placement_policy_affinity'] = kwargs[
                'place_policy_aff']
    elif provider_type == 'ec2':
        # ec2 doesn't have an api to list available flavors, so fall back to a default flavor
        # c3.xlarge has 4 CPU cores and 7.5GB RAM - minimal requirements for CFME Appliance
        flavor = kwargs.get('flavor', 'c3.xlarge')
        deploy_args['instance_type'] = flavor
        deploy_args['key_name'] = "shared"
        # we want to override default cloud-init which disables root login and password login
        cloud_init_dict = {
            'chpasswd': {
                'expire':
                False,
                'list':
                '{}:{}\n'.format(cred['ssh']['username'],
                                 cred['ssh']['password'])
            },
            'disable_root': False,
            'ssh_pwauth': True
        }
        cloud_init = "#cloud-config\n{}".format(
            yaml.safe_dump(cloud_init_dict, default_flow_style=False))
        deploy_args['user_data'] = cloud_init
    elif provider_type == 'openstack':
        # filter openstack flavors based on what's available
        available_flavors = provider.list_flavor()
        logger.info("Available flavors on provider: %s", available_flavors)
        # filter() returns an iterator on Python 3; materialize it so generic_flavors[0] works below
        generic_flavors = [f for f in flavors if f in available_flavors]

        try:
            flavor = (kwargs.get('flavor')
                      or provider_dict.get('sprout', {}).get('flavor_name')
                      or generic_flavors[0])
        except IndexError:
            raise Exception('--flavor is required for RHOS instances and '
                            'default is not set or unavailable on provider')
        logger.info('Selected flavor: %s', flavor)

        deploy_args['flavor_name'] = flavor

        if 'network' in provider_dict:
            # support rhos4 network names
            deploy_args['network_name'] = provider_dict['network']

        provider_pools = [
            p.name for p in provider.api.floating_ip_pools.list()
        ]
        try:
            # TODO: If there are multiple pools, have a provider default in cfme_data
            floating_ip_pool = kwargs.get(
                'floating_ip_pool') or provider_pools[0]
        except IndexError:
            raise Exception('No floating IP pools available on provider')

        if floating_ip_pool is not None:
            deploy_args['floating_ip_pool'] = floating_ip_pool
    elif provider_type == "virtualcenter":
        if "allowed_datastores" in provider_dict:
            deploy_args["allowed_datastores"] = provider_dict[
                "allowed_datastores"]
    elif provider_type == 'scvmm':
        deploy_args["host_group"] = provider_dict["provisioning"]['host_group']
    elif provider_type == 'gce':
        deploy_args['ssh_key'] = '{user_name}:{public_key}'.format(
            user_name=cred['ssh']['ssh-user'],
            public_key=cred['ssh']['public_key'])
    elif provider_type == 'openshift':
        trackerbot = api()
        raw_tags = trackerbot.providertemplate().get(
            provider=kwargs['provider'],
            template=deploy_args['template'])['objects']
        raw_tags = raw_tags[-1]['template'].get('custom_data', "{}")
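        # custom_data is assumed to be a YAML string with a top-level TAGS mapping
        # (illustrative shape only, e.g. "TAGS: {stream: downstream}"); safe_load
        # turns it into a dict below.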
        deploy_args["tags"] = yaml.safe_load(raw_tags)['TAGS']
    # Do it!
    try:
        logger.info('Cloning %s to %s on %s', deploy_args['template'],
                    deploy_args['vm_name'], kwargs['provider'])
        # TODO: change after openshift wrapanapi refactor
        output = None  # 'output' is only used for openshift providers
        if isinstance(provider, Openshift):
            output = provider.deploy_template(**deploy_args)
        else:
            template = provider.get_template(deploy_args['template'])
            template.deploy(**deploy_args)

    except Exception as e:
        logger.exception(e)
        logger.error('template deploy failed')
        if kwargs.get('cleanup'):
            logger.info('attempting to destroy %s', deploy_args['vm_name'])
            destroy_vm(provider, deploy_args['vm_name'])
        return 12

    if not provider.does_vm_exist(deploy_args['vm_name']):
        logger.error('provider.deploy_template failed without exception')
        return 12

    # TODO: change after openshift wrapanapi refactor
    if isinstance(provider, Openshift):
        if provider.is_vm_running(deploy_args['vm_name']):
            logger.info('VM %s is running', deploy_args['vm_name'])
        else:
            logger.error('VM %s is not running', deploy_args['vm_name'])
            return 10
    else:
        vm = provider.get_vm(deploy_args['vm_name'])
        vm.ensure_state(VmState.RUNNING, timeout='5m')
        if provider_type == 'gce':
            try:
                attach_gce_disk(vm)
            except Exception:
                logger.exception("Failed to attach db disk")
                destroy_vm(provider, deploy_args['vm_name'])
                return 10

    if provider_type == 'openshift':
        ip = output['url']
    else:
        try:
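            # fail_condition=None -> keep polling while vm.ip is still None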
            ip, _ = wait_for(lambda: vm.ip, num_sec=1200, fail_condition=None)
            logger.info('IP Address returned is %s', ip)
        except Exception as e:
            logger.exception(e)
            logger.error('IP address not returned')
            return 10

    try:
        if kwargs.get('configure'):
            logger.info('Configuring appliance, this can take a while.')
            if kwargs.get('deploy'):
                app = IPAppliance(hostname=ip)
            else:
                app_args = (kwargs['provider'], deploy_args['vm_name'])
                app_kwargs = {}
                if provider_type == 'openshift':
                    ocp_creds = cred[provider_dict['credentials']]
                    ssh_creds = cred[provider_dict['ssh_creds']]
                    app_kwargs = {
                        'project': output['project'],
                        'db_host': output['external_ip'],
                        'container': 'cloudforms-0',
                        'hostname': ip,
                        'openshift_creds': {
                            'hostname': provider_dict['hostname'],
                            'username': ocp_creds['username'],
                            'password': ocp_creds['password'],
                            'ssh': {
                                'username': ssh_creds['username'],
                                'password': ssh_creds['password'],
                            },
                        }
                    }
                app = Appliance.from_provider(*app_args, **app_kwargs)

            if provider_type == 'ec2':
                wait_for(cloud_init_done,
                         func_args=[app],
                         num_sec=600,
                         handle_exception=True,
                         delay=5)
            if provider_type == 'gce':
                app.configure_gce()
            elif provider_type == 'openshift':
                # openshift appliances don't need any additional configuration
                pass
            else:
                app.configure()
            logger.info('Successfully Configured the appliance.')
    except Exception as e:
        logger.exception(e)
        logger.error('Appliance Configuration Failed')
        if not kwargs.get('deploy'):
            app = Appliance.from_provider(kwargs['provider'],
                                          deploy_args['vm_name'])
            ssh_client = app.ssh_client()
            result = ssh_client.run_command('find /root/anaconda-post.log')
            if result.success:
                ssh_client.get_file('/root/anaconda-post.log',
                                    log_path.join('anaconda-post.log').strpath)
            ssh_client.close()
        destroy_vm(app.provider, deploy_args['vm_name'])
        return 10

    if kwargs.get('outfile') or kwargs.get('deploy'):
        # TODO: get rid of these scripts in Jenkins or develop them from scratch
        with open(kwargs['outfile'], 'w') as outfile:
            if provider_type == 'openshift':
                output_data = {
                    'appliances': [
                        {
                            'project': output['project'],
                            'db_host': output['external_ip'],
                            'hostname': ip,
                            'container': 'cloudforms-0',
                            'openshift_creds': {
                                'hostname': provider_dict['hostname'],
                                'username': ocp_creds['username'],
                                'password': ocp_creds['password'],
                                'ssh': {
                                    'username': ssh_creds['username'],
                                    'password': ssh_creds['password'],
                                }
                            },
                        },
                    ],
                }
            else:
                output_data = {'appliances': [{'hostname': ip}]}
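                # Illustrative outfile content for non-openshift providers (hypothetical IP):
                #   appliances:
                #   - hostname: 10.1.2.3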
            yaml_data = yaml.safe_dump(output_data, default_flow_style=False)
            outfile.write(yaml_data)

        # In addition to the outfile, drop the ip address on stdout for easy parsing
        print(yaml_data)
Example #55
def get_tabel_data(widget):
    ret = [row.name.text for row in widget.contents]
    logger.info("Widget text data: %s", ret)
    return ret
def cleanup_provider(provider_key, matchers, scan_failure_queue, max_hours,
                     dryrun):
    """
    Process the VMs on a given provider, comparing name and creation time.
    Use thread pools to scan vms, then to delete vms in batches

    Args:
        provider_key (string): the provider key from yaml
        matchers (list): A list of regex objects with match() method
        scan_failure_queue (Queue.Queue): MP queue to hold vms that we could not compare age
    Returns:
        None: if there aren't any old vms to delete
        List of VMReport tuples
    """
    logger.info('%r: Start scan for vm text matches', provider_key)
    try:
        vm_list = get_mgmt(provider_key).list_vms()
    except Exception:  # noqa
        scan_failure_queue.put(VmReport(provider_key, FAIL, NULL, NULL, NULL))
        logger.exception('%r: Exception listing vms', provider_key)
        return

    text_matched_vms = [vm for vm in vm_list if match(matchers, vm.name)]

    logger.info(
        '%r: NOT matching text filters: %r', provider_key,
        {v.name for v in vm_list} - {v.name for v in text_matched_vms})
    logger.info('%r: MATCHED text filters: %r', provider_key,
                [vm.name for vm in text_matched_vms])

    if not text_matched_vms:
        return

    with ThreadPool(4) as tp:
        scan_args = ((provider_key, vm, timedelta(hours=int(max_hours)),
                      scan_failure_queue) for vm in text_matched_vms)
        old_vms = [
            vm for vm in tp.starmap(scan_vm, scan_args) if vm is not None
        ]

    if old_vms and dryrun:
        logger.warning(
            'DRY RUN: Would have deleted the following VMs on provider %s: \n %s',
            provider_key, [(vm[0].name, vm[1], vm[2]) for vm in old_vms])
        # for tabulate consistency on dry runs. 0=vm, 1=age, 2=status
        return [
            VmReport(provider_key, vm[0].name, vm[1], vm[2], NULL)
            for vm in old_vms
        ]

    elif old_vms:
        with ThreadPool(4) as tp:
            delete_args = (
                (
                    provider_key,
                    old_tuple[0],  # vm
                    old_tuple[1])  # age
                for old_tuple in old_vms)
            delete_results = tp.starmap(delete_vm, delete_args)

            return delete_results
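
A minimal usage sketch for cleanup_provider (illustrative only; the provider key, regex list and
queue below are assumptions, not values taken from this repository):

import re
from multiprocessing import Queue

matchers = [re.compile(r'^test-', re.IGNORECASE)]  # hypothetical VM-name patterns to match
failure_queue = Queue()  # receives VmReport entries for VMs whose age could not be read
reports = cleanup_provider('example-provider-key', matchers, failure_queue,
                           max_hours=24, dryrun=True)  # dry run: report, don't delete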
def _test_vm_removal():
    logger.info("Testing for VM removal permission")
    vm_name = vms.get_first_vm()
    logger.debug("VM {} selected".format(vm_name))
    vms.remove(vm_name, cancel=True)
Example #58
def resource_usage(vm_ownership, appliance, provider):
    # Retrieve resource usage values from metric_rollups table.
    cpu_used_in_mhz = 0
    memory_used_in_mb = 0
    network_io = 0
    disk_io = 0
    storage_used = 0

    vm_name = provider.data['cap_and_util']['chargeback_vm']
    metrics = appliance.db.client['metrics']
    rollups = appliance.db.client['metric_rollups']
    ems = appliance.db.client['ext_management_systems']
    logger.info('Deleting METRICS DATA from metrics and metric_rollups tables')

    appliance.db.client.session.query(metrics).delete()
    appliance.db.client.session.query(rollups).delete()

    # Metering reports are done on hourly and daily rollup values and not real-time values. So, we
    # are capturing C&U data and forcing hourly rollups by running these commands through
    # the Rails console.
    #
    # Metering reports differ from Chargeback reports in that Metering reports 1) report only
    # resource usage and not costs and 2) report the sum total of resource usage instead of
    # the average usage. For example, if we have 24 hourly rollups, resource usage in a Metering
    # report is the sum of these 24 rollups, whereas resource usage in a Chargeback report is the
    # average of these 24 rollups. So, we need data from at least 2 hours in order to validate that
    # the resource usage is actually being summed up.
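    # Illustration with hypothetical numbers: given three hourly CPU rollups of 100, 200 and
    # 300 MHz, a Metering report shows 600 MHz (the sum), whereas a Chargeback report would
    # show 200 MHz (the average).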

    def verify_records_metrics_table(appliance, provider, vm_name):
        # Verify that rollups are present in the metric_rollups table.

        ems = appliance.db.client['ext_management_systems']
        metrics = appliance.db.client['metrics']

        # Capture real-time C&U data
        ret = appliance.ssh_client.run_rails_command(
            "\"vm = Vm.where(:ems_id => {}).where(:name => {})[0];\
            vm.perf_capture('realtime', 2.hour.ago.utc, Time.now.utc)\""
            .format(provider.id, repr(vm_name)))
        assert ret.success, f"Failed to capture VM C&U data:"

        with appliance.db.client.transaction:
            result = (
                appliance.db.client.session.query(metrics.id)
                .join(ems, metrics.parent_ems_id == ems.id)
                .filter(metrics.capture_interval_name == 'realtime',
                        metrics.resource_name == vm_name,
                        ems.name == provider.name, metrics.timestamp >= date.today())
            )

        for record in appliance.db.client.session.query(metrics).filter(
                metrics.id.in_(result.subquery())):
            if (record.cpu_usagemhz_rate_average or
               record.cpu_usage_rate_average or
               record.derived_memory_used or
               record.net_usage_rate_average or
               record.disk_usage_rate_average):
                return True
        return False

    wait_for(verify_records_metrics_table, [appliance, provider, vm_name], timeout=600,
        fail_condition=False, message='Waiting for VM real-time data')

    # New C&U data may sneak in since 1) C&U server roles are running and 2) collection for
    # clusters and hosts is on. This would mess up our calculations, so we disable C&U
    # collection after data has been fetched for the last two hours.

    appliance.server.settings.disable_server_roles(
        'ems_metrics_coordinator', 'ems_metrics_collector')
    # Perform rollup of C&U data.
    ret = appliance.ssh_client.run_rails_command(
        "\"vm = Vm.where(:ems_id => {}).where(:name => {})[0];\
        vm.perf_rollup_range(2.hour.ago.utc, Time.now.utc,'realtime')\"".
        format(provider.id, repr(vm_name)))
    assert ret.success, f"Failed to rollup VM C&U data:"

    wait_for(verify_records_rollups_table, [appliance, provider, vm_name], timeout=600,
        fail_condition=False, message='Waiting for hourly rollups')

    # Since we are collecting C&U data for > 1 hour, there will be multiple hourly records per VM
    # in the metric_rollups DB table. The values from these hourly records are summed up.

    with appliance.db.client.transaction:
        result = (
            appliance.db.client.session.query(rollups.id)
            .join(ems, rollups.parent_ems_id == ems.id)
            .filter(rollups.capture_interval_name == 'hourly',
                    rollups.resource_name == vm_name,
                    ems.name == provider.name, rollups.timestamp >= date.today())
        )

    for record in appliance.db.client.session.query(rollups).filter(
            rollups.id.in_(result.subquery())):
        cpu_used_in_mhz = cpu_used_in_mhz + record.cpu_usagemhz_rate_average
        memory_used_in_mb = memory_used_in_mb + record.derived_memory_used
        network_io = network_io + record.net_usage_rate_average
        disk_io = disk_io + record.disk_usage_rate_average
        storage_used = storage_used + record.derived_vm_used_disk_storage

    # Convert storage used in Bytes to GB
    storage_used = storage_used * math.pow(2, -30)
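    # e.g. 2147483648 bytes * 2**-30 == 2.0 GB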

    return {"cpu_used": cpu_used_in_mhz,
            "memory_used": memory_used_in_mb,
            "network_io": network_io,
            "disk_io_used": disk_io,
            "storage_used": storage_used}
def _test_vm_provision(appliance):
    logger.info("Checking for provision access")
    navigate_to(vms.Vm, 'VMsOnly')
    vms.lcl_btn("Provision VMs")
Example #60
def test_replication_global_region_dashboard(request, setup_replication):
    """
    Global dashboard show remote data

    Polarion:
        assignee: dgaikwad
        casecomponent: Replication
        initialEstimate: 1/4h
        testSteps:
            1. Have a VM created in the provider in the Remote region which is
               subscribed to Global.
            2. Check the dashboard on the Global shows data from the Remote region.
        expectedResults:
            1.
            2. Dashboard on the Global displays data from the Remote region
    """
    remote_app, global_app = setup_replication
    remote_provider = provider_app_crud(InfraProvider, remote_app)
    remote_provider.setup()
    assert remote_provider.name in remote_app.managed_provider_names, "Provider is not available."

    new_vm_name = fauxfactory.gen_alphanumeric(start="test_rep_dashboard",
                                               length=25).lower()
    vm = create_vm(provider=remote_provider, vm_name=new_vm_name)
    request.addfinalizer(vm.cleanup_on_provider)
    data_items = ('EVM: Recently Discovered Hosts',
                  'EVM: Recently Discovered VMs', 'Top Storage Consumers')
    remote_app_data, global_app_data = {}, {}

    def get_tabel_data(widget):
        ret = [row.name.text for row in widget.contents]
        logger.info("Widget text data:{%s}" % ret)
        return ret

    def data_check(view, table):
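        # True once the named widget's table on the Default Dashboard has at least one row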
        return bool(
            get_tabel_data(
                view.dashboards("Default Dashboard").widgets(table)))

    view = navigate_to(remote_app.server, "Dashboard")
    for table_name in data_items:
        logger.info("Table name:{%s}" % table_name)
        wait_for(
            data_check,
            func_args=[view, table_name],
            delay=20,
            num_sec=600,
            fail_func=view.dashboards("Default Dashboard").browser.refresh,
            message=f"Waiting for table data item: {table_name} ")
        remote_app_data[table_name] = get_tabel_data(
            view.dashboards("Default Dashboard").widgets(table_name))

    view = navigate_to(global_app.server, "Dashboard")
    for table_name in data_items:
        logger.info("Table name:{%s}" % table_name)
        wait_for(
            data_check,
            func_args=[view, table_name],
            delay=20,
            num_sec=600,
            fail_func=view.dashboards("Default Dashboard").browser.refresh,
            message=f"Waiting for table data item: {table_name}")

        global_app_data[table_name] = get_tabel_data(
            view.dashboards("Default Dashboard").widgets(table_name))

    # TODO(ndhandre): Some widgets are not implemented yet, so this test does not check them:
    #  'Vendor and Guest OS Chart', 'Top Memory Consumers (weekly)', 'Top CPU Consumers (weekly)',
    #  'Virtual Infrastructure Platforms', 'Guest OS Information'

    assert are_dicts_same(
        remote_app_data, global_app_data), "Dashboard data is not the same on both appliances."