Example 1
 def is_successfully_finished(self, silent_failure=False, *tasks):
     view = navigate_to(self, self.tab)
     tab_view = getattr(view.tabs, self.tab.lower())
     rows = []
     # expected_status also supports a regular expression pattern
     expected_status = re.compile('finished', re.IGNORECASE)
     for task in tasks:
         try:
             rows.append(list(tab_view.table.rows(task_name=task, state=expected_status)).pop())
         except IndexError:
             logger.warn('IndexError exception suppressed when searching for task row,'
                         ' no match found.')
             return False
     for row in rows:
         message = row.message.text.lower()
         if row[1].browser.is_displayed('i[@class="pficon pficon-error-circle-o"]',
                                        parent=row[1]):
             if silent_failure:
                 logger.warning("Task {} error: {}".format(row.task_name.text, message))
                 return False
             elif 'timed out' in message:
                 raise TimedOutError("Task {} timed out: {}".format(row.task_name.text, message))
             else:
                 Exception("Task {} error: {}".format(row.task_name.text, message))
     return True
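
All of the examples on this page call logger.warn, which in the standard logging module survives only as a deprecated alias of logger.warning. A minimal stand-alone sketch, assuming the project's `logger` wraps an ordinary logging.Logger (the CFME logging setup itself is not shown here):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# preferred spelling; lazy %-style arguments avoid formatting when the level is disabled
logger.warning("Couldn't find VM or Instance '%s' in CFME", 'test-vm-01')
# deprecated alias used throughout these examples; it simply forwards to warning()
logger.warn("no task row found for '%s'", 'test-task')

Both calls produce the same log record; on Python 3 the alias additionally emits a DeprecationWarning.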
Example 2
 def hostname(self, value):
     if self.default_endpoint:
         if value:
             self.default_endpoint.hostname = value
     else:
         logger.warn(
             "can't set hostname because default endpoint is absent")
Example 3
 def add_disk_to_vm(self):
     """Add a disk with specs from cfme_data.template_upload
         Generally for database disk
     """
     temp_vm = self.mgmt.get_vm(self.temp_vm_name)
     if temp_vm.get_disks_count() > 1:
         logger.warn(
             '%s Warning: found more than one disk in existing VM (%s).',
             self.provider_key, self.temp_vm_name)
         return
     rhevm_specs = cfme_data.template_upload.template_upload_rhevm
     disk_kwargs = dict(
         storage_domain=self.provider_data.template_upload.sdomain,
         size=rhevm_specs.disk_size,
         interface=rhevm_specs.disk_interface,
         format=rhevm_specs.disk_format)
     temp_vm.add_disk(**disk_kwargs)
     # check if there are two disks
     if temp_vm.get_disks_count() < 2:
         raise TemplateUploadException(
             '%s disk failed to add with specs: %r', self.provider_key,
             disk_kwargs)
     logger.info('%s:%s Successfully added disk', self.provider_key,
                 self.temp_vm_name)
     return True
Example 4
    def create_on_provider(self,
                           timeout=900,
                           find_in_cfme=False,
                           delete_on_failure=True,
                           **kwargs):
        """Create the VM on the provider via MgmtSystem. `deploy_template` handles errors during
        VM provision on the MgmtSystem side and deletes the VM if provisioned incorrectly.

        Args:
            timeout: Number of seconds to wait for the VM to appear in CFME
                     Will not wait at all, if set to 0 (Defaults to ``900``)
            find_in_cfme: Verifies that VM exists in CFME UI
            delete_on_failure: Attempts to remove VM on UI navigation failure
        """
        vm = deploy_template(self.provider.key, self.name, self.template_name,
                             **kwargs)
        try:
            if find_in_cfme:
                self.wait_to_appear(timeout=timeout, load_details=False)
        except Exception as e:
            logger.warn("Couldn't find VM or Instance '%s' in CFME", self.name)
            if delete_on_failure:
                logger.info("Removing VM or Instance from mgmt system")
                self.cleanup_on_provider()
            raise e
        return vm
Example 5
    def from_config(cls):
        bz_conf = env.get('bugzilla', {})  # default empty so we can call .get() later
        url = bz_conf.get('url')
        if url is None:
            url = 'https://bugzilla.redhat.com/xmlrpc.cgi'
            logger.warning("No Bugzilla URL specified in conf, using default: %s", url)
        cred_key = bz_conf.get("credentials")
        bz_kwargs = dict(
            url=url,
            cookiefile=None,
            tokenfile=None,
            product=bz_conf.get("bugzilla", {}).get("product"),
            config_options=bz_conf)
        if cred_key:
            bz_creds = credentials.get(cred_key, {})
            if bz_creds.get('username'):
                logger.info('Using username/password for Bugzilla authentication')
                bz_kwargs.update(dict(
                    user=bz_creds.get("username"),
                    password=bz_creds.get("password")
                ))
            elif bz_creds.get('api_key'):
                logger.info('Using api key for Bugzilla authentication')
                bz_kwargs.update(dict(api_key=bz_creds.get('api_key')))
            else:
                logger.error('Credentials key for bugzilla does not have username or api key')
        else:
            logger.warn('No credentials found for bugzilla')

        return cls(**bz_kwargs)
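
For orientation, this is a hypothetical shape of the `bugzilla` section that `from_config` reads from `env`; the keys are the ones the `.get()` calls above look up, while the values are placeholders rather than anything taken from a real configuration:

# hypothetical config dict; only the key names are implied by from_config()
bz_conf_example = {
    'url': 'https://bugzilla.redhat.com/xmlrpc.cgi',  # omitted -> the default above is used
    'credentials': 'bugzilla-creds',                  # name of an entry in the credentials store
    'bugzilla': {'product': 'SomeProduct'},           # nested product lookup for bz_kwargs
}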
Example 6
def is_task_finished(destination, task_name, expected_status, clear_tasks_after_success=True):
    view = navigate_to(Tasks, destination)
    tab_view = getattr(view.tabs, destination.lower())

    # compile task_name so that regular expression patterns are also supported
    task_name = re.compile(task_name)
    # compile expected_status so that regular expression patterns are also supported
    expected_status = re.compile(expected_status, re.IGNORECASE)

    try:
        row = tab_view.table.row(task_name=task_name, state=expected_status)
    except IndexError:
        logger.warn('IndexError exception suppressed when searching for task row, no match found.')
        return False

    # throw exception if error in message
    message = row.message.text.lower()
    if 'error' in message:
        raise Exception("Task {} error: {}".format(task_name, message))
    elif 'timed out' in message:
        raise TimedOutError("Task {} timed out: {}".format(task_name, message))
    elif 'failed' in message:
        raise Exception("Task {} has a failure: {}".format(task_name, message))

    if clear_tasks_after_success:
        # Remove all finished tasks so they wouldn't poison other tests
        delete_all_tasks(destination)

    return True
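
A hedged usage sketch for the helper above: callers typically poll it with `wait_for` until the task row reaches the expected state (the function raises on error/timed out/failed messages, so the loop only retries while no matching row exists yet). The import path, destination and task name are illustrative assumptions; the keyword arguments mirror the `wait_for` calls that appear later on this page:

from wait_for import wait_for  # assumed import path for the wait_for helper

# hypothetical destination and task name; is_task_finished is the function shown above
wait_for(
    lambda: is_task_finished(destination='AllTasks',
                             task_name='Scan from Vm test-vm-01',
                             expected_status='finished',
                             clear_tasks_after_success=False),
    delay=5,
    timeout=300,
    message='task to reach the finished state',
)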
Example 7
def is_task_finished(destination,
                     task_name,
                     expected_status,
                     clear_tasks_after_success=True):
    view = navigate_to(Tasks, destination)
    tab_view = getattr(view.tabs, destination.lower())
    try:
        row = tab_view.table.row(task_name=task_name, state=expected_status)
    except IndexError:
        logger.warn(
            'IndexError exception suppressed when searching for task row, no match found.'
        )
        return False

    # throw exception if error in message
    message = row.message.text.lower()
    if 'error' in message:
        raise Exception("Task {} error: {}".format(task_name, message))
    elif 'timed out' in message:
        raise TimedOutError("Task {} timed out: {}".format(task_name, message))
    elif 'failed' in message:
        raise Exception("Task {} has a failure: {}".format(task_name, message))

    if clear_tasks_after_success:
        # Remove all finished tasks so they wouldn't poison other tests
        delete_all_tasks(destination)

    return True
Example 8
 def ip_address(self, value):
     if self.default_endpoint:
         if value:
             self.default_endpoint.ipaddress = value
     else:
         logger.warn(
             "can't set ipaddress because default endpoint is absent")
Example 9
 def is_successfully_finished(self, silent_failure=False, *tasks):
     view = navigate_to(self, self.tab)
     tab_view = getattr(view.tabs, self.tab.lower())
     rows = []
     # expected_status also supports a regular expression pattern
     expected_status = re.compile('finished', re.IGNORECASE)
     for task in tasks:
         try:
             rows.append(
                 list(
                     tab_view.table.rows(task_name=task,
                                         state=expected_status)).pop())
         except IndexError:
             logger.warn(
                 'IndexError exception suppressed when searching for task row,'
                 ' no match found.')
             return False
     for row in rows:
         message = row.message.text.lower()
         if row[1].browser.is_displayed(
                 'i[@class="pficon pficon-error-circle-o"]', parent=row[1]):
             if silent_failure:
                 logger.warning("Task {} error: {}".format(
                     row.task_name.text, message))
                 return False
             elif 'timed out' in message:
                 raise TimedOutError("Task {} timed out: {}".format(
                     row.task_name.text, message))
             else:
                 Exception("Task {} error: {}".format(
                     row.task_name.text, message))
     return True
Example 10
    def _delete_vm(self):
        if self.inst.provider.mgmt.does_vm_exist(self.inst.name):
            navigate_to(self.inst, 'Details')
            logger.info('%r will be deleted', self.inst.name)
            return self.inst.provider.mgmt.delete_vm(self.inst.name)

        else:
            logger.warn('%r does not exist anymore!', self.inst.name)
Example 12
    def _check_timelines(self, target):
        """Navigate to the TL of the given target, select the category of the event and verify
        that the tl_event of the VMEvent is present. It will return the length of the array
        containing the events found in that timeline.


        Args:
            target: An entity where a Timeline is present (VM, Host, Cluster...)

        Returns:
             The length of the array containing the event found on the Timeline of the target.
        """
        timelines_view = navigate_to(target, 'Timelines')

        if isinstance(timelines_view, ServerDiagnosticsView):
            timelines_view = timelines_view.timelines
        timeline_filter = timelines_view.filter

        for selected_option in timeline_filter.event_category.all_selected_options:
            timeline_filter.event_category.select_by_visible_text(
                selected_option)

        timeline_filter.event_category.select_by_visible_text(self.tl_category)
        timeline_filter.time_position.select_by_visible_text('centered')
        timeline_filter.apply.click()
        events_list = timelines_view.chart.get_events(self.tl_category)
        logger.debug('events_list: %s', events_list)
        logger.info('Searching for event type: %r in timeline category: %r',
                    self.event, self.tl_category)

        if not len(events_list):
            self.vm.provider.refresh_provider_relationships()
            logger.warn('Event list of %r is empty!', target)

        found_events = []

        for evt in events_list:
            if hasattr(evt, 'destination_vm'):
                # Specifically for the VmDeployedEvent, where the source_vm differs from
                # self.vm.name
                if evt.destination_vm == self.vm.name and evt.event_type in self.tl_event:
                    found_events.append(evt)
                    break
            elif not hasattr(evt, 'source_vm') or not hasattr(
                    evt, 'source_host'):
                logger.warn(
                    'Event %r does not have source_vm, source_host. Probably an issue',
                    evt)
            elif evt.source_vm == self.vm.name and evt.event_type in self.tl_event:
                found_events.append(evt)
                break

        logger.info('found events on {tgt}: {evt}'.format(
            tgt=target, evt="\n".join([repr(e) for e in found_events])))
        return len(found_events)
Example 13
def check_tasks_have_no_errors(task_name,
                               task_type,
                               expected_num_of_tasks,
                               silent_failure=False,
                               clear_tasks_after_success=False):
    """ Check if all tasks analysis match state with no errors"""

    tabs_data = TABS_DATA_PER_PROVIDER[task_type]
    destination = tabs_data['tab']
    task_name = tabs_data['task'].format(task_name)
    expected_status = tabs_data['state']

    view = navigate_to(Tasks, destination)
    tab_view = getattr(view.tabs, destination.lower())

    # compile task_name so that regular expression patterns are also supported
    task_name = re.compile(task_name)
    # compile expected_status so that regular expression patterns are also supported
    expected_status = re.compile(expected_status, re.IGNORECASE)

    try:
        rows = list(
            tab_view.table.rows(task_name=task_name, state=expected_status))
    except IndexError:
        logger.warn(
            'IndexError exception suppressed when searching for task row, no match found.'
        )
        return False

    # check state for all tasks
    if expected_num_of_tasks != len(rows):
        logger.warn('There is no match between the expected number of tasks "{}"'
                    ' and the number of tasks in state "{}".'.format(
                        expected_num_of_tasks, expected_status))
        return False

    # throw exception if error in message
    for row in rows:
        message = row.message.text.lower()
        for term in ('error', 'timed out', 'failed', 'unable to run openscap'):
            if term in message:
                if silent_failure:
                    logger.warning("Task {} error: {}".format(
                        row.task_name.text, message))
                    return False
                elif term == 'timed out':
                    raise TimedOutError("Task {} timed out: {}".format(
                        row.task_name.text, message))
                else:
                    raise TaskFailedException(task_name=row.task_name.text,
                                              message=message)

    if clear_tasks_after_success:
        # Remove all finished tasks so they wouldn't poison other tests
        delete_all_tasks(destination)

    return True
Example 14
def check_tasks_have_no_errors(task_name, task_type, expected_num_of_tasks, silent_failure=False,
                               clear_tasks_after_success=False):
    """ Check if all tasks analysis match state with no errors"""

    tabs_data = TABS_DATA_PER_PROVIDER[task_type]
    destination = tabs_data['tab']
    task_name = tabs_data['task'].format(task_name)
    expected_status = tabs_data['state']

    view = navigate_to(Tasks, destination)
    tab_view = getattr(view.tabs, destination.lower())

    # compile task_name so that regular expression patterns are also supported
    task_name = re.compile(task_name)
    # compile expected_status so that regular expression patterns are also supported
    expected_status = re.compile(expected_status, re.IGNORECASE)

    wait_for(
        lambda: tab_view.table.is_displayed,
        timeout=10,
        delay=2,
        fail_func=view.reload.click
    )
    try:
        rows = list(tab_view.table.rows(task_name=task_name, state=expected_status))
    except IndexError:
        logger.warn('IndexError exception suppressed when searching for task row, no match found.')
        return False

    # check state for all tasks
    if expected_num_of_tasks != len(rows):
        logger.warn('There is no match between the expected number of tasks "{}"'
                    ' and the number of tasks in state "{}".'.format(
                        expected_num_of_tasks, expected_status))
        return False

    # throw exception if error in message
    for row in rows:
        message = row.message.text.lower()
        for term in ('error', 'timed out', 'failed', 'unable to run openscap'):
            if term in message:
                if silent_failure:
                    logger.warning("Task {} error: {}".format(row.task_name.text, message))
                    return False
                elif term == 'timed out':
                    raise TimedOutError("Task {} timed out: {}".format(row.task_name.text, message))
                else:
                    raise TaskFailedException(task_name=row.task_name.text, message=message)

    if clear_tasks_after_success:
        # Remove all finished tasks so they wouldn't poison other tests
        delete_all_tasks(destination)

    return True
Example 15
    def _check_timelines(self, target):
        """Navigate to the TL of the given target, select the category of the event and verify
        that the tl_event of the VMEvent is present. It will return the length of the array
        containing the events found in that timeline.


        Args:
            target: An entity where a Timeline is present (VM, Host, Cluster...)

        Returns:
             The length of the array containing the event found on the Timeline of the target.
        """
        timelines_view = navigate_to(target, 'Timelines')

        if isinstance(timelines_view, ServerDiagnosticsView):
            timelines_view = timelines_view.timelines
        timeline_filter = timelines_view.filter

        for selected_option in timeline_filter.event_category.all_selected_options:
            timeline_filter.event_category.select_by_visible_text(selected_option)

        timeline_filter.event_category.select_by_visible_text(self.tl_category)
        timeline_filter.time_position.select_by_visible_text('centered')
        timeline_filter.apply.click()
        events_list = timelines_view.chart.get_events(self.tl_category)
        logger.debug('events_list: %s', events_list)
        logger.info('Searching for event type: %r in timeline category: %r', self.event,
                    self.tl_category)

        if not len(events_list):
            self.vm.provider.refresh_provider_relationships()
            logger.warn('Event list of %r is empty!', target)

        found_events = []

        for evt in events_list:
            if hasattr(evt, 'destination_vm'):
                # Specifically for the VmDeployedEvent, where the source_vm differs from
                # self.vm.name
                if evt.destination_vm == self.vm.name and evt.event_type in self.tl_event:
                    found_events.append(evt)
                    break
            elif not hasattr(evt, 'source_vm') or not hasattr(evt, 'source_host'):
                logger.warn('Event %r does not have source_vm, source_host. Probably an issue', evt)
            elif evt.source_vm == self.vm.name and evt.event_type in self.tl_event:
                found_events.append(evt)
                break

        logger.info('found events on {tgt}: {evt}'.format(
            tgt=target, evt="\n".join([repr(e) for e in found_events])))
        return len(found_events)
Example 16
def count_events(target, vm):
    timelines_view = navigate_to(target, 'Timelines')
    if isinstance(target, Server):
        timelines_view = timelines_view.timelines
    timelines_view.filter.time_position.select_by_visible_text('centered')
    timelines_view.filter.apply.click()
    found_events = []
    for evt in timelines_view.chart.get_events():
        if not hasattr(evt, 'source_vm'):
            # BZ(1428797)
            logger.warn("event {evt} doesn't have source_vm field. Probably issue".format(evt=evt))
            continue
        elif evt.source_vm == vm.name:
            found_events.append(evt)

    logger.info("found events: {evt}".format(evt="\n".join([repr(e) for e in found_events])))
    return len(found_events)
Example 17
def all_tasks_match_status(destination, task_name, expected_status, expected_num_of_tasks):
    """ Check if all tasks with same task name states are finished - if not, reload page"""
    view = navigate_to(Tasks, destination)
    tab_view = getattr(view.tabs, destination.lower())

    # compile task_name so that regular expression patterns are also supported
    task_name = re.compile(task_name)
    # compile expected_status so that regular expression patterns are also supported
    expected_status = re.compile(expected_status, re.IGNORECASE)

    try:
        rows = list(tab_view.table.rows(task_name=task_name, state=expected_status))
    except IndexError:
        logger.warn('IndexError exception suppressed when searching for task row, no match found.')
        return False

    # check state = finished for all tasks
    return expected_num_of_tasks == len(rows), len(rows)
Example 18
def test_aws_smartstate_pod(temp_ssa_pod_appliance, ssa_vm, provider,
                            aws_provider):
    """
    deploy the aws smartstate pod and check that it works

    Polarion:
        assignee: izapolsk
        casecomponent: Containers
        caseimportance: medium
        initialEstimate: 1h
        testSteps:
          1. pull smartstate image from registry
          2. tag it accordingly and push to externally available registry
          3. setup appliance to use that image for smartstate in aws
          4. add aws provider
          5. find 24/7 vm in aws and perform smartstate analysis
    """
    appliance = temp_ssa_pod_appliance

    if BZ(1684203, forced_streams=['5.10']).blocks:
        logger.info(
            "stopping & starting appliance in order to re-read new AMI name")
        provider.mgmt.stop_vm(appliance.project)
        provider.mgmt.start_vm(appliance.project)
        provider.mgmt.wait_vm_running(appliance.project)
        for _ in range(3):
            try:
                # there is an issue caused by unexpected logout etc.; this is a workaround
                # it will be removed along with above BZ when it is fixed
                navigate_to(aws_provider, 'Details')
                break
            except Exception as e:
                logger.warn(
                    "attempt to go to aws_provider failed with '{e}'".format(
                        e=e.message))

    # run SSA against cu24x7 vm
    ssa_vm.smartstate_scan(wait_for_task_result=True)

    # check SSA has been run and there are some results
    c_lastanalyzed = ssa_vm.last_analysed

    assert c_lastanalyzed != 'Never', "Last Analyzed is set to Never"
Example 19
    def create_on_provider(self, timeout=900, find_in_cfme=False, delete_on_failure=True, **kwargs):
        """Create the VM on the provider via MgmtSystem. `deploy_template` handles errors during
        VM provision on the MgmtSystem side and deletes the VM if provisioned incorrectly.

        Args:
            timeout: Number of seconds to wait for the VM to appear in CFME
                     Will not wait at all, if set to 0 (Defaults to ``900``)
            find_in_cfme: Verifies that VM exists in CFME UI
            delete_on_failure: Attempts to remove VM on UI navigation failure
        """
        deploy_template(self.provider.key, self.name, self.template_name, **kwargs)
        try:
            if find_in_cfme:
                self.wait_to_appear(timeout=timeout, load_details=False)
        except Exception as e:
            logger.warn("Couldn't find VM or Instance in CMFE")
            if delete_on_failure:
                logger.info("Removing VM or Instance from mgmt system")
                self.provider.mgmt.delete_vm(self.name)
            raise e
Example 20
 def add_disk_to_vm(self):
     """Add a disk with specs from cfme_data.template_upload
         Generally for database disk
     """
     temp_vm = self.mgmt.get_vm(self.temp_vm_name)
     if temp_vm.get_disks_count() > 1:
         logger.warn('%s Warning: found more than one disk in existing VM (%s).',
                     self.provider_key, self.temp_vm_name)
         return
     rhevm_specs = cfme_data.template_upload.template_upload_rhevm
     disk_kwargs = dict(storage_domain=self.provider_data.template_upload.sdomain,
                        size=rhevm_specs.disk_size,
                        interface=rhevm_specs.disk_interface,
                        format=rhevm_specs.disk_format)
     temp_vm.add_disk(**disk_kwargs)
     # check if there are two disks
     if temp_vm.get_disks_count() < 2:
         raise TemplateUploadException('%s disk failed to add with specs: %r',
                                       self.provider_key, disk_kwargs)
     logger.info('%s:%s Successfully added disk', self.provider_key, self.temp_vm_name)
     return True
Example 21
 def checksum_verification(self):
     # Download checksum from url
     checksum = None
     try:
         response = request.urlopen('{}/SHA256SUM'.format(self.image_url))
         checksums = response.read().decode('utf-8')
         for line in checksums.split("\n"):
             if self.image_name in line:
                 checksum = line.strip().split()[0]
     except URLError:
         logger.warn('Failed download of checksum using urllib')
     if not checksum:
         logger.warn('Failed to get checksum of image from url')
     else:
         # Get checksum of downloaded file
         sha256 = hashlib.sha256()
         image_sha256 = None
         try:
             with open(self.image_name, 'rb') as f:
                 for block in iter(lambda: f.read(65536), b''):
                     sha256.update(block)
             image_sha256 = sha256.hexdigest()
         except Exception:
             logger.warn('Failed to get checksum of image')
         if image_sha256 and not checksum == image_sha256:
             logger.exception(
                 'Local image checksum does not match checksum from url')
             return False
         else:
             logger.info('Local image checksum matches checksum from url')
             return True
Example 22
def test_aws_smartstate_pod(temp_ssa_pod_appliance, ssa_vm, provider, aws_provider):
    """
    deploy the aws smartstate pod and check that it works

    Polarion:
        assignee: izapolsk
        casecomponent: Containers
        caseimportance: medium
        initialEstimate: 1h
        testSteps:
          1. pull smartstate image from registry
          2. tag it accordingly and push to externally available registry
          3. setup appliance to use that image for smartstate in aws
          4. add aws provider
          5. find 24/7 vm in aws and perform smartstate analysis
    """
    appliance = temp_ssa_pod_appliance

    if BZ(1684203, forced_streams=['5.10']).blocks:
        logger.info("stopping & starting appliance in order to re-read new AMI name")
        provider.mgmt.stop_vm(appliance.project)
        provider.mgmt.start_vm(appliance.project)
        provider.mgmt.wait_vm_running(appliance.project)
        for _ in range(3):
            try:
                # there is an issue caused by unexpected logout etc.; this is a workaround
                # it will be removed along with above BZ when it is fixed
                navigate_to(aws_provider, 'Details')
                break
            except Exception as e:
                logger.warn("attempt to go to aws_provider failed with '{e}'".format(e=e.message))

    # run SSA against cu24x7 vm
    ssa_vm.smartstate_scan(wait_for_task_result=True)

    # check SSA has been run and there are some results
    c_lastanalyzed = ssa_vm.last_analysed

    assert c_lastanalyzed != 'Never', "Last Analyzed is set to Never"
Example 23
def all_tasks_match_status(destination, task_name, expected_status,
                           expected_num_of_tasks):
    """ Check if all tasks with same task name states are finished - if not, reload page"""
    view = navigate_to(Tasks, destination)
    tab_view = getattr(view.tabs, destination.lower())

    # compile task_name so that regular expression patterns are also supported
    task_name = re.compile(task_name)
    # compile expected_status so that regular expression patterns are also supported
    expected_status = re.compile(expected_status, re.IGNORECASE)

    try:
        rows = list(
            tab_view.table.rows(task_name=task_name, state=expected_status))
    except IndexError:
        logger.warn(
            'IndexError exception suppressed when searching for task row, no match found.'
        )
        return False

    # check state = finished for all tasks
    return expected_num_of_tasks == len(rows), len(rows)
Example 24
def test_workload_smartstate_analysis(appliance, request, scenario):
    """Runs through provider based scenarios initiating smart state analysis against VMs, Hosts,
    and Datastores"""
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))
    appliance.install_vddk()

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-ssa',
        'test_name': 'SmartState Analysis',
        'appliance_roles': ', '.join(roles_smartstate),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_smartstate})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    # Add host credentials and set CFME relationship for RHEVM SSA
    for provider in scenario['providers']:
        for api_host in appliance.rest_api.collections.hosts.all:
            host_collection = appliance.collections.hosts
            test_host = host_collection.instantiate(name=api_host.name, provider=provider)
            host_data = get_host_data_by_name(get_crud(provider), api_host.name)
            credentials = host.get_credentials_from_config(host_data['credentials'])
            test_host.update_credentials_rest(credentials)
        appliance.set_cfme_server_relationship(cfme_performance['appliance']['appliance_name'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']
    total_scanned_vms = 0

    while ((time.time() - starttime) < total_time):
        start_ssa_time = time.time()
        for vm in scenario['vms_to_scan'].values()[0]:
            vm_api = appliance.rest_api.collections.vms.get(name=vm)
            vm_api.action.scan()
            total_scanned_vms += 1
        iteration_time = time.time()

        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue SmartState Analyses: {}'.format(ssa_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue SmartState Analyses ({}) exceeded time between '
                '({})'.format(ssa_time, time_between_analyses))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Scans'] = total_scanned_vms
    logger.info('Test Ending...')
Example 25
    def _check_timelines(self, target, policy_events):
        """Verify that the event is present in the timeline

        Args:
            target: An entity where a Timeline is present (Instance, Availability zone, Provider...)
            policy_events: switch between the management event timeline and the policy timeline.
        Returns:
             The length of the array containing the event found on the Timeline of the target.
        """

        def _get_timeline_events(target, policy_events):
            """Navigate to the timeline of the target and select the management timeline or the
            policy timeline. Returns an array of the found events.
            """

            timelines_view = navigate_to(target, 'Timelines')

            if isinstance(timelines_view, ServerDiagnosticsView):
                timelines_view = timelines_view.timelines
            timeline_filter = timelines_view.filter

            if policy_events:
                logger.info('Will search in Policy event timelines')
                timelines_view.filter.event_type.select_by_visible_text('Policy Events')
                timeline_filter.policy_event_category.select_by_visible_text(self.tl_category)
                timeline_filter.policy_event_status.fill('Both')
            else:
                timeline_filter.detailed_events.fill(True)
                for selected_option in timeline_filter.event_category.all_selected_options:
                    timeline_filter.event_category.select_by_visible_text(selected_option)
                timeline_filter.event_category.select_by_visible_text(self.tl_category)

            timeline_filter.time_position.select_by_visible_text('centered')
            timeline_filter.apply.click()
            logger.info('Searching for event type: %r in timeline category: %r', self.event,
                        self.tl_category)
            return timelines_view.chart.get_events(self.tl_category)

        events_list = _get_timeline_events(target, policy_events)
        logger.debug('events_list: %r', str(events_list))

        if not events_list:
            self.inst.provider.refresh_provider_relationships()
            logger.warn('Event list of %r is empty!', target)

        found_events = []

        for evt in events_list:
            try:
                if not policy_events:
                    if evt.source_instance in self.inst.name and evt.event_type in self.tl_event:
                        found_events.append(evt)
                        break
                else:
                    if evt.event_type in self.tl_event and evt.target in self.inst.name:
                        found_events.append(evt)
                        break
            except AttributeError as err:
                logger.warn('Issue with TimelinesEvent: %r .Faulty event: %r', str(err), str(evt))
                continue

        logger.info('found events on %r: %s', target, "\n".join([repr(e) for e in found_events]))

        return len(found_events)
Example 26
 def _finalize():
     try:
         vm_obj.cleanup_on_provider()
     except Exception:
         logger.warn('Failed deleting VM from provider: %s', vm_name)
Example 27
def test_provisioning(appliance, request, scenario):
    """Runs through provisioning scenarios using the REST API to
    continuously provision a VM for a specified period of time.
    Memory Monitor creates graphs and summary at the end of each scenario."""

    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-provisioning',
        'test_name': 'Provisioning',
        'appliance_roles': ', '.join(roles_provisioning),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    provision_order = []

    def cleanup_workload(scenario, from_ts, vms_to_cleanup, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        appliance.update_server_roles({role: True for role in roles_provisioning_cleanup})
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        final_vm_size = len(vms_to_cleanup)
        appliance.rest_api.collections.vms.action.delete(vms_to_cleanup)
        monitor_thread.join()
        logger.info('{} VMs were left over, and {} VMs were deleted in the finalizer.'
            .format(final_vm_size, final_vm_size - len(vms_to_cleanup)))
        logger.info('The following VMs were left over after the test: {}'
            .format(vms_to_cleanup))
        quantifiers['VMs_To_Delete_In_Finalizer'] = final_vm_size
        quantifiers['VMs_Deleted_In_Finalizer'] = final_vm_size - len(vms_to_cleanup)
        quantifiers['Leftover_VMs'] = vms_to_cleanup
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, vm_name, quantifiers,
            scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_provisioning})
    prov = get_crud(scenario['providers'][0])
    prov.create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    guid_list = prov.get_template_guids(scenario['templates'])
    guid_cycle = cycle(guid_list)
    cleanup_size = scenario['cleanup_size']
    number_of_vms = scenario['number_of_vms']
    total_time = scenario['total_time']
    time_between_provision = scenario['time_between_provision']
    total_provisioned_vms = 0
    total_deleted_vms = 0
    provisioned_vms = 0
    starttime = time.time()

    while ((time.time() - starttime) < total_time):
        start_iteration_time = time.time()
        provision_list = []
        for i in range(number_of_vms):
            total_provisioned_vms += 1
            provisioned_vms += 1
            vm_to_provision = '{}-provision-{}'.format(
                test_ts, str(total_provisioned_vms).zfill(4))
            guid_to_provision, provider_name = next(guid_cycle)
            provision_order.append((vm_to_provision, provider_name))
            provision_list.append((vm_to_provision, guid_to_provision,
                prov.data['provisioning']['vlan']))

        template = prov.data.templates.get('small_template')
        provision_data = get_provision_data(appliance.rest_api, prov, template.name)
        vm_name = provision_data["vm_fields"]["vm_name"]
        response = appliance.rest_api.collections.provision_requests.action.create(**provision_data)
        assert appliance.rest_api.response.status_code == 200
        provision_request = response[0]

        def _finished():
            provision_request.reload()
            if "error" in provision_request.status.lower():
                pytest.fail("Error when provisioning: `{}`".format(provision_request.message))
            return provision_request.request_state.lower() in ("finished", "provisioned")

        wait_for(_finished, num_sec=800, delay=5, message="REST provisioning finishes")

        vm = appliance.rest_api.collections.vms.get(name=vm_name)
        creation_time = time.time()
        provision_time = round(creation_time - start_iteration_time, 2)
        logger.debug('Time to initiate provisioning: {}'.format(provision_time))
        logger.info('{} VMs provisioned so far'.format(total_provisioned_vms))

        if provisioned_vms > cleanup_size * len(scenario['providers']):
            start_remove_time = time.time()
            if appliance.rest_api.collections.vms.action.delete(vm):
                provision_order.pop(0)
                provisioned_vms -= 1
                total_deleted_vms += 1
            deletion_time = round(time.time() - start_remove_time, 2)
            logger.debug('Time to initiate deleting: {}'.format(deletion_time))
            logger.info('{} VMs deleted so far'.format(total_deleted_vms))

        end_iteration_time = time.time()
        iteration_time = round(end_iteration_time - start_iteration_time, 2)
        elapsed_time = end_iteration_time - starttime
        logger.debug('Time to initiate provisioning and deletion: {}'.format(iteration_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if iteration_time < time_between_provision:
            wait_diff = time_between_provision - iteration_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_provision):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to initiate provisioning ({}) exceeded time between '
                '({})'.format(iteration_time, time_between_provision))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Provisionings'] = total_provisioned_vms
    quantifiers['Deleted_VMs'] = total_deleted_vms
    logger.info('Provisioned {} VMs and deleted {} VMs during the scenario.'
                .format(total_provisioned_vms, total_deleted_vms))
    logger.info('Test Ending...')
Example 28
    def _check_timelines(self, target, policy_events):
        """Verify that the event is present in the timeline

        Args:
            target: An entity where a Timeline is present (VM, host, cluster, Provider...)
            policy_events: switch between the management event timeline and the policy timeline.
        Returns:
             The length of the array containing the event found on the Timeline of the target.
        """
        def _get_timeline_events(target, policy_events):
            """Navigate to the timeline of the target and select the management timeline or the
            policy timeline. Returns an array of the found events.
            """

            timelines_view = navigate_to(target, 'Timelines')

            if isinstance(timelines_view, ServerDiagnosticsView):
                timelines_view = timelines_view.timelines
            timeline_filter = timelines_view.filter

            if policy_events:
                logger.info('Will search in Policy event timelines')
                timelines_view.filter.event_type.select_by_visible_text(
                    'Policy Events')
                timeline_filter.policy_event_category.select_by_visible_text(
                    self.tl_category)
                timeline_filter.policy_event_status.fill('Both')
            else:
                timeline_filter.detailed_events.fill(True)
                for selected_option in timeline_filter.event_category.all_selected_options:
                    timeline_filter.event_category.select_by_visible_text(
                        selected_option)
                timeline_filter.event_category.select_by_visible_text(
                    self.tl_category)

            timeline_filter.time_position.select_by_visible_text('centered')
            timeline_filter.apply.click()
            logger.info(
                'Searching for event type: %r in timeline category: %r',
                self.event, self.tl_category)
            return timelines_view.chart.get_events(self.tl_category)

        events_list = _get_timeline_events(target, policy_events)
        logger.debug('events_list: %r', str(events_list))

        if not len(events_list):
            self.vm.provider.refresh_provider_relationships()
            logger.warn('Event list of %r is empty!', str(target))

        found_events = []

        for evt in events_list:
            try:
                if not policy_events:
                    # Special case for create event
                    if hasattr(evt, 'destination_vm'
                               ) and evt.destination_vm in self.vm.name:
                        found_events.append(evt)
                        break
                    # Other events
                    elif evt.source_vm in self.vm.name and evt.event_type in self.tl_event:
                        found_events.append(evt)
                        break
                else:
                    if evt.event_type in self.tl_event and evt.target in self.vm.name:
                        found_events.append(evt)
                        break
            except AttributeError as err:
                logger.warn('Issue with TimelinesEvent: %r .Faulty event: %r',
                            str(err), str(evt))
                continue

        logger.info('found events on %r :\n %s', target,
                    '\n'.join([repr(e) for e in found_events]))

        return len(found_events)
Example 29
 def hostname(self, value):
     if self.default_endpoint:
         if value:
             self.default_endpoint.hostname = value
     else:
         logger.warn("can't set hostname because default endpoint is absent")
Example 30
 def ip_address(self, value):
     if self.default_endpoint:
         if value:
             self.default_endpoint.ipaddress = value
     else:
         logger.warn("can't set ipaddress because default endpoint is absent")
def test_workload_smartstate_analysis(appliance, request, scenario):
    """Runs through provider based scenarios initiating smart state analysis against VMs, Hosts,
    and Datastores

    Polarion:
        assignee: rhcf3_machine
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))
    appliance.install_vddk()

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {
        'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-ssa',
        'test_name': 'SmartState Analysis',
        'appliance_roles': ', '.join(roles_smartstate),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_smartstate})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    # Add host credentials and set CFME relationship for RHEVM SSA
    for provider in scenario['providers']:
        for api_host in appliance.rest_api.collections.hosts.all:
            host_collection = appliance.collections.hosts
            test_host = host_collection.instantiate(name=api_host.name,
                                                    provider=provider)
            host_data = get_host_data_by_name(get_crud(provider),
                                              api_host.name)
            credentials = host.get_credentials_from_config(
                host_data['credentials'])
            test_host.update_credentials_rest(credentials)
        appliance.set_cfme_server_relationship(
            cfme_performance['appliance']['appliance_name'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']
    total_scanned_vms = 0

    while ((time.time() - starttime) < total_time):
        start_ssa_time = time.time()
        for vm in scenario['vms_to_scan'].values()[0]:
            vm_api = appliance.rest_api.collections.vms.get(name=vm)
            vm_api.action.scan()
            total_scanned_vms += 1
        iteration_time = time.time()

        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue SmartState Analyses: {}'.format(ssa_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn(
                'Time to Queue SmartState Analyses ({}) exceeded time between '
                '({})'.format(ssa_time, time_between_analyses))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Scans'] = total_scanned_vms
    logger.info('Test Ending...')
Example 32
    def _check_timelines(self, target, policy_events):
        """Verify that the event is present in the timeline

        Args:
            target: An entity where a Timeline is present (VM, host, cluster, Provider...)
            policy_events: switch between the management event timeline and the policy timeline.
        Returns:
             The length of the array containing the event found on the Timeline of the target.
        """

        def _get_timeline_events(target, policy_events):
            """Navigate to the timeline of the target and select the management timeline or the
            policy timeline. Returns an array of the found events.
            """

            timelines_view = navigate_to(target, 'Timelines')

            if isinstance(timelines_view, ServerDiagnosticsView):
                timelines_view = timelines_view.timelines
            timeline_filter = timelines_view.filter

            if policy_events:
                logger.info('Will search in Policy event timelines')
                timelines_view.filter.event_type.select_by_visible_text('Policy Events')
                timeline_filter.policy_event_category.select_by_visible_text(self.tl_category)
                timeline_filter.policy_event_status.fill('Both')
            else:
                if timelines_view.browser.product_version < "5.10":
                    timeline_filter.detailed_events.fill(True)
                for selected_option in timeline_filter.event_category.all_selected_options:
                    timeline_filter.event_category.select_by_visible_text(selected_option)
                timeline_filter.event_category.select_by_visible_text(self.tl_category)

            timeline_filter.time_position.select_by_visible_text('centered')
            timeline_filter.apply.click()
            logger.info('Searching for event type: %r in timeline category: %r', self.event,
                        self.tl_category)
            return timelines_view.chart.get_events(self.tl_category)

        events_list = _get_timeline_events(target, policy_events)
        logger.debug('events_list: %r', str(events_list))

        if not len(events_list):
            self.vm.provider.refresh_provider_relationships()
            logger.warn('Event list of %r is empty!', str(target))

        found_events = []

        for evt in events_list:
            try:
                if not policy_events:
                    # Special case for create event
                    if hasattr(evt, 'destination_vm') and evt.destination_vm in self.vm.name:
                        found_events.append(evt)
                        break
                    # Other events
                    elif evt.source_vm in self.vm.name and evt.event_type in self.tl_event:
                        found_events.append(evt)
                        break
                    elif (
                        self.event == 'create' and
                        BZ(1687493,
                           unblock=lambda provider: not provider.one_of(RHEVMProvider)).blocks and
                        self.vm.name in evt.message and evt.event_type in self.tl_event
                    ):
                        found_events.append(evt)
                        break
                else:
                    if evt.event_type in self.tl_event and evt.target in self.vm.name:
                        found_events.append(evt)
                        break
            except AttributeError as err:
                logger.warn('Issue with TimelinesEvent: %r .Faulty event: %r', str(err), str(evt))
                continue

        logger.info('found events on %r :\n %s', target, '\n'.join([repr(e) for e in found_events]))

        return len(found_events)
Example 33
 def _finalize():
     try:
         vm_obj.delete_from_provider()
     except Exception:
         logger.warn('Failed deleting VM from provider: %s', vm_name)
Example 34
def test_refresh_vms(appliance, request, scenario):
    """Refreshes all vm's then waits for a specific amount of time. Memory Monitor creates
    graphs and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {
        'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-vm',
        'test_name': 'Refresh VMs',
        'appliance_roles': ', '.join(roles_refresh_vms),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_refresh_vms})
    for prov in scenario['providers']:
        get_crud(prov).create_rest()
    logger.info('Sleeping for refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    full_refresh_threshold_set = False
    if 'full_refresh_threshold' in scenario:
        if scenario['full_refresh_threshold'] != FULL_REFRESH_THRESHOLD_DEFAULT:
            appliance.set_full_refresh_threshold(
                scenario['full_refresh_threshold'])
            full_refresh_threshold_set = True
    if not full_refresh_threshold_set:
        logger.debug('Keeping full_refresh_threshold at default ({}).'.format(
            FULL_REFRESH_THRESHOLD_DEFAULT))

    refresh_size = scenario['refresh_size']

    vms = appliance.rest_api.collections.vms.all
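    # cycle() wraps around the VM list so every loop iteration below can pull a
    # fixed-size batch of refresh_size VMs, even when refresh_size does not
    # divide the number of VMs evenly.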
    vms_iter = cycle(vms)
    logger.debug('Number of VM IDs: {}'.format(len(vms)))

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_vms = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_list = [next(vms_iter) for x in range(refresh_size)]
        for vm in refresh_list:
            vm.action.reload()
        total_refreshed_vms += len(refresh_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue VM Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

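        # Pace the workload: sleep out the remainder of the refresh interval,
        # but never sleep past the end of the scenario's total_time window.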
        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn(
                'Time to Queue VM Refreshes ({}) exceeded time between '
                '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Refreshes'] = total_refreshed_vms
    logger.info('Test Ending...')
Example n. 35
0
def test_refresh_providers(appliance, request, scenario):
    """
    Refreshes providers then waits for a specific amount of time.
    Memory Monitor creates graphs and summary at the end of the scenario.

    Polarion:
        assignee: rhcf3_machine
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {
        'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-providers',
        'test_name': 'Refresh Providers',
        'appliance_roles': ', '.join(roles_refresh_providers),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))
    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_refresh_providers})
    for prov in scenario['providers']:
        get_crud(prov).create_rest()

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_providers = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
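        # Re-fetch the providers collection and fire the reload action on each
        # provider; the timing below only measures how long queuing these
        # refreshes takes, not the refreshes themselves.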
        appliance.rest_api.collections.providers.reload()
        for prov in appliance.rest_api.collections.providers.all:
            prov.action.reload()
            total_refreshed_providers += 1
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue Refreshes ({}) exceeded time between '
                        '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_Provider_Refreshes'] = total_refreshed_providers
    logger.info('Test Ending...')
Example n. 36
0
def test_provisioning(appliance, request, scenario):
    """Runs through provisioning scenarios using the REST API to
    continuously provision a VM for a specified period of time.
    Memory Monitor creates graphs and summary at the end of each scenario.

    Polarion:
        assignee: None
        initialEstimate: None
    """

    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {
        'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-provisioning',
        'test_name': 'Provisioning',
        'appliance_roles': ', '.join(roles_provisioning),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    provision_order = []

    def cleanup_workload(scenario, from_ts, vms_to_cleanup, quantifiers,
                         scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        appliance.update_server_roles(
            {role: True
             for role in roles_provisioning_cleanup})
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        final_vm_size = len(vms_to_cleanup)
        appliance.rest_api.collections.vms.action.delete(vms_to_cleanup)
        monitor_thread.join()
        logger.info(
            '{} VMs were left over, and {} VMs were deleted in the finalizer.'.
            format(final_vm_size, final_vm_size - len(vms_to_cleanup)))
        logger.info(
            'The following VMs were left over after the test: {}'.format(
                vms_to_cleanup))
        quantifiers['VMs_To_Delete_In_Finalizer'] = final_vm_size
        quantifiers['VMs_Deleted_In_Finalizer'] = final_vm_size - len(
            vms_to_cleanup)
        quantifiers['Leftover_VMs'] = vms_to_cleanup
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, vm_name,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_provisioning})
    prov = get_crud(scenario['providers'][0])
    prov.create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    guid_list = prov.get_template_guids(scenario['templates'])
    guid_cycle = cycle(guid_list)
    cleanup_size = scenario['cleanup_size']
    number_of_vms = scenario['number_of_vms']
    total_time = scenario['total_time']
    time_between_provision = scenario['time_between_provision']
    total_provisioned_vms = 0
    total_deleted_vms = 0
    provisioned_vms = 0
    starttime = time.time()

    while ((time.time() - starttime) < total_time):
        start_iteration_time = time.time()
        provision_list = []
        for i in range(number_of_vms):
            total_provisioned_vms += 1
            provisioned_vms += 1
            vm_to_provision = 'test-{}-prov-{}'.format(
                test_ts,
                str(total_provisioned_vms).zfill(4))
            guid_to_provision, provider_name = next(guid_cycle)
            provision_order.append((vm_to_provision, provider_name))
            provision_list.append((vm_to_provision, guid_to_provision,
                                   prov.data['provisioning']['vlan']))

        template = prov.data.templates.get('small_template')
        provision_data = get_provision_data(appliance.rest_api, prov,
                                            template.name)
        vm_name = provision_data["vm_fields"]["vm_name"]
        response = appliance.rest_api.collections.provision_requests.action.create(
            **provision_data)
        assert_response(appliance)
        provision_request = response[0]

        def _finished():
            provision_request.reload()
            if "error" in provision_request.status.lower():
                pytest.fail("Error when provisioning: `{}`".format(
                    provision_request.message))
            return provision_request.request_state.lower() in ("finished",
                                                               "provisioned")

        wait_for(_finished,
                 num_sec=800,
                 delay=5,
                 message="REST provisioning finishes")

        vm = appliance.rest_api.collections.vms.get(name=vm_name)
        creation_time = time.time()
        provision_time = round(creation_time - start_iteration_time, 2)
        logger.debug(
            'Time to initiate provisioning: {}'.format(provision_time))
        logger.info('{} VMs provisioned so far'.format(total_provisioned_vms))

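        # Once more than cleanup_size VMs per provider are alive, delete a VM
        # through the REST API and drop the oldest entry from provision_order
        # so the pool size stays roughly constant.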
        if provisioned_vms > cleanup_size * len(scenario['providers']):
            start_remove_time = time.time()
            if appliance.rest_api.collections.vms.action.delete(vm):
                provision_order.pop(0)
                provisioned_vms -= 1
                total_deleted_vms += 1
            deletion_time = round(time.time() - start_remove_time, 2)
            logger.debug('Time to initiate deleting: {}'.format(deletion_time))
            logger.info('{} VMs deleted so far'.format(total_deleted_vms))

        end_iteration_time = time.time()
        iteration_time = round(end_iteration_time - start_iteration_time, 2)
        elapsed_time = end_iteration_time - starttime
        logger.debug('Time to initiate provisioning and deletion: {}'.format(
            iteration_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if iteration_time < time_between_provision:
            wait_diff = time_between_provision - iteration_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0
                    and time_remaining < time_between_provision):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn(
                'Time to initiate provisioning ({}) exceeded time between '
                '({})'.format(iteration_time, time_between_provision))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Provisionings'] = total_provisioned_vms
    quantifiers['Deleted_VMs'] = total_deleted_vms
    logger.info(
        'Provisioned {} VMs and deleted {} VMs during the scenario.'.format(
            total_provisioned_vms, total_deleted_vms))
    logger.info('Test Ending...')
Example n. 37
0
def test_refresh_providers(appliance, request, scenario):
    """
    Refreshes providers then waits for a specific amount of time.
    Memory Monitor creates graphs and summary at the end of the scenario.
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {
        'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-providers',
        'test_name': 'Refresh Providers',
        'appliance_roles': ', '.join(roles_refresh_providers),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))
    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles(
        {role: True
         for role in roles_refresh_providers})
    for prov in scenario['providers']:
        get_crud(prov).create_rest()

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_providers = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        appliance.rest_api.collections.providers.reload()
        for prov in appliance.rest_api.collections.providers.all:
            prov.action.reload()
            total_refreshed_providers += 1
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue Refreshes ({}) exceeded time between '
                        '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_Provider_Refreshes'] = total_refreshed_providers
    logger.info('Test Ending...')
Example n. 38
0
def main(trackerbot_url, mark_usable=None, selected_provider=None):
    api = trackerbot.api(trackerbot_url)

    thread_q = []
    thread_lock = Lock()
    template_providers = defaultdict(list)
    all_providers = (set(list_provider_keys())
                     if not selected_provider
                     else set(selected_provider))
    unresponsive_providers = set()
    # Queue up list_template calls
    for provider_key in all_providers:
        ipaddress = cfme_data.management_systems[provider_key].get('ipaddress')
        if ipaddress and not net.is_pingable(ipaddress):
            continue
        thread = Thread(target=get_provider_templates,
            args=(provider_key, template_providers, unresponsive_providers, thread_lock))
        thread_q.append(thread)
        thread.start()

    # Join the queued calls
    for thread in thread_q:
        thread.join()

    seen_templates = set()

    if mark_usable is None:
        usable = {}
    else:
        usable = {'usable': mark_usable}

    existing_provider_templates = [
        pt['id']
        for pt
        in trackerbot.depaginate(api, api.providertemplate.get())['objects']]

    # Find some templates and update the API
    for template_name, providers in template_providers.items():
        template_name = str(template_name)
        template_info = TemplateName.parse_template(template_name)

        # it turned out that some providers like ec2 may have templates w/o names.
        # this is easy protection against such issue.
        if not template_name.strip():
            logger.warn('Ignoring template w/o name on provider %s', provider_key)
            continue

        # Don't want sprout templates
        if template_info.group_name in ('sprout', 'rhevm-internal'):
            logger.info('Ignoring %s from group %s', template_name, template_info.group_name)
            continue

        seen_templates.add(template_name)
        group = trackerbot.Group(template_info.group_name, stream=template_info.stream)
        try:
            template = trackerbot.Template(template_name, group, template_info.datestamp)
        except ValueError:
            logger.exception('Failure parsing provider %s template: %s',
                             provider_key, template_name)
            continue

        for provider_key in providers:
            provider = trackerbot.Provider(provider_key)

            if '{}_{}'.format(template_name, provider_key) in existing_provider_templates:
                logger.info('Template %s already tracked for provider %s',
                            template_name, provider_key)
                continue

            try:
                trackerbot.mark_provider_template(api, provider, template, **usable)
                logger.info('Added %s template %s on provider %s (datestamp: %s)',
                            template_info.group_name,
                            template_name,
                            provider_key,
                            template_info.datestamp)
            except SlumberHttpBaseException:
                logger.exception('%s: exception marking template %s', provider, template)

    # Remove provider relationships where they no longer exist, skipping unresponsive providers,
    # and providers not known to this environment
    for pt in trackerbot.depaginate(api, api.providertemplate.get())['objects']:
        key, template_name = pt['provider']['key'], pt['template']['name']
        if key not in template_providers[template_name] and key not in unresponsive_providers:
            if key in all_providers:
                logger.info("Cleaning up template %s on %s", template_name, key)
                trackerbot.delete_provider_template(api, key, template_name)
            else:
                logger.info("Skipping template cleanup %s on unknown provider %s",
                            template_name, key)

    # Remove templates that aren't on any providers anymore
    for template in trackerbot.depaginate(api, api.template.get())['objects']:
        if not template['providers'] and template['name'].strip():
            logger.info("Deleting template %s (no providers)", template['name'])
            api.template(template['name']).delete()
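For reference, a direct call to this entry point could look like the sketch below; the URL and provider key are placeholders, since in practice these arguments normally come from the script's command-line parsing.

# Placeholder values only; the real script is normally driven by its CLI.
main(
    'https://trackerbot.example.com/api',    # trackerbot_url (placeholder)
    mark_usable=True,                        # mark newly found provider templates usable
    selected_provider=['example-provider'],  # limit the scan to one provider key
)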
Example n. 39
0
def main(trackerbot_url, mark_usable=None, selected_provider=None):
    api = trackerbot.api(trackerbot_url)

    thread_q = []
    thread_lock = Lock()
    template_providers = defaultdict(list)
    all_providers = (set(list_provider_keys())
                     if not selected_provider else set(selected_provider))
    unresponsive_providers = set()
    # Queue up list_template calls
    for provider_key in all_providers:
        ipaddress = cfme_data.management_systems[provider_key].get('ipaddress')
        if ipaddress and not net.is_pingable(ipaddress):
            continue
        thread = Thread(target=get_provider_templates,
                        args=(provider_key, template_providers,
                              unresponsive_providers, thread_lock))
        thread_q.append(thread)
        thread.start()

    # Join the queued calls
    for thread in thread_q:
        thread.join()

    seen_templates = set()

    if mark_usable is None:
        usable = {}
    else:
        usable = {'usable': mark_usable}

    existing_provider_templates = [
        pt['id'] for pt in trackerbot.depaginate(
            api, api.providertemplate.get())['objects']
    ]

    # Find some templates and update the API
    for template_name, providers in template_providers.items():
        template_name = str(template_name)
        template_info = TemplateName.parse_template(template_name)

        # it turned out that some providers like ec2 may have templates w/o names.
        # this is easy protection against such issue.
        if not template_name.strip():
            logger.warn('Ignoring template w/o name on provider %s',
                        provider_key)
            continue

        # Don't want sprout templates
        if template_info.group_name in ('sprout', 'rhevm-internal'):
            logger.info('Ignoring %s from group %s', template_name,
                        template_info.group_name)
            continue

        seen_templates.add(template_name)
        group = trackerbot.Group(template_info.group_name,
                                 stream=template_info.stream)
        try:
            template = trackerbot.Template(template_name, group,
                                           template_info.datestamp)
        except ValueError:
            logger.exception('Failure parsing provider %s template: %s',
                             provider_key, template_name)
            continue

        for provider_key in providers:
            provider = trackerbot.Provider(provider_key)

            if '{}_{}'.format(template_name,
                              provider_key) in existing_provider_templates:
                logger.info('Template %s already tracked for provider %s',
                            template_name, provider_key)
                continue

            try:
                trackerbot.mark_provider_template(api, provider, template,
                                                  **usable)
                logger.info(
                    'Added %s template %s on provider %s (datestamp: %s)',
                    template_info.group_name, template_name, provider_key,
                    template_info.datestamp)
            except SlumberHttpBaseException:
                logger.exception('%s: exception marking template %s', provider,
                                 template)

    # Remove provider relationships where they no longer exist, skipping unresponsive providers,
    # and providers not known to this environment
    for pt in trackerbot.depaginate(api,
                                    api.providertemplate.get())['objects']:
        key, template_name = pt['provider']['key'], pt['template']['name']
        if key not in template_providers[
                template_name] and key not in unresponsive_providers:
            if key in all_providers:
                logger.info("Cleaning up template %s on %s", template_name,
                            key)
                trackerbot.delete_provider_template(api, key, template_name)
            else:
                logger.info(
                    "Skipping template cleanup %s on unknown provider %s",
                    template_name, key)

    # Remove templates that aren't on any providers anymore
    for template in trackerbot.depaginate(api, api.template.get())['objects']:
        if not template['providers'] and template['name'].strip():
            logger.info("Deleting template %s (no providers)",
                        template['name'])
            api.template(template['name']).delete()
Example n. 40
0
def test_refresh_vms(appliance, request, scenario):
    """Refreshes all vm's then waits for a specific amount of time. Memory Monitor creates
    graphs and summary at the end of the scenario.

    Polarion:
        assignee: rhcf3_machine
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {
        'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-vm',
        'test_name': 'Refresh VMs',
        'appliance_roles': ', '.join(roles_refresh_vms),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_refresh_vms})
    for prov in scenario['providers']:
        get_crud(prov).create_rest()
    logger.info('Sleeping for refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    full_refresh_threshold_set = False
    if 'full_refresh_threshold' in scenario:
        if scenario['full_refresh_threshold'] != FULL_REFRESH_THRESHOLD_DEFAULT:
            appliance.set_full_refresh_threshold(scenario['full_refresh_threshold'])
            full_refresh_threshold_set = True
    if not full_refresh_threshold_set:
        logger.debug('Keeping full_refresh_threshold at default ({}).'.format(
            FULL_REFRESH_THRESHOLD_DEFAULT))

    refresh_size = scenario['refresh_size']

    vms = appliance.rest_api.collections.vms.all
    vms_iter = cycle(vms)
    logger.debug('Number of VM IDs: {}'.format(len(vms)))

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_vms = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_list = [next(vms_iter) for x in range(refresh_size)]
        for vm in refresh_list:
            vm.action.reload()
        total_refreshed_vms += len(refresh_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue VM Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue VM Refreshes ({}) exceeded time between '
                        '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Refreshes'] = total_refreshed_vms
    logger.info('Test Ending...')
Example n. 41
0
    def create(self,
               vm_name,
               provider,
               form_values=None,
               cancel=False,
               check_existing=False,
               find_in_cfme=False,
               wait=True):
        """Provisions an vm/instance with the given properties through CFME

        Args:
            vm_name: the vm/instance's name
            provider: provider object
            form_values: dictionary of form values for provisioning, structured into tabs
            cancel: boolean, whether or not to cancel form filling
            check_existing: verify if such vm_name exists
            find_in_cfme: verify that vm was created and appeared in CFME
            wait: wait for vm provision request end

        Note:
            Calling create on a sub-class of instance will generate the properly formatted
            dictionary when the correct fields are supplied.
        """
        vm = self.instantiate(vm_name, provider)
        if check_existing and vm.exists:
            return vm
        if not provider.is_refreshed():
            provider.refresh_provider_relationships()
            wait_for(provider.is_refreshed,
                     func_kwargs={'refresh_delta': 10},
                     timeout=600)
        if not form_values:
            form_values = vm.vm_default_args
        else:
            inst_args = vm.vm_default_args
            form_values = recursive_update(inst_args, form_values)
        env = form_values.get('environment') or {}
        if env.get('automatic_placement'):
            form_values['environment'] = {'automatic_placement': True}
        form_values.update({'provider_name': provider.name})
        if not form_values.get('template_name'):
            template_name = (
                provider.data.get('provisioning').get('image', {}).get('name')
                or provider.data.get('provisioning').get('template'))
            vm.template_name = template_name
            form_values.update({'template_name': template_name})
        view = navigate_to(self, 'Provision')
        view.form.fill(form_values)

        if cancel:
            view.form.cancel_button.click()
            view = self.browser.create_view(BaseLoggedInPage)
            view.flash.assert_success_message(self.ENTITY.PROVISION_CANCEL)
            view.flash.assert_no_error()
        else:
            view.form.submit_button.click()

            view = vm.appliance.browser.create_view(RequestsView)
            wait_for(lambda: view.flash.messages,
                     fail_condition=[],
                     timeout=10,
                     delay=2,
                     message='wait for Flash Success')
            view.flash.assert_no_error()

            if wait:
                request_description = 'Provision from [{}] to [{}]'.format(
                    form_values.get('template_name'), vm.name)
                provision_request = vm.appliance.collections.requests.instantiate(
                    request_description)
                logger.info('Waiting for cfme provision request for vm %s',
                            vm.name)
                provision_request.wait_for_request(method='ui', num_sec=900)
                if provision_request.is_succeeded(method='ui'):
                    logger.info('Waiting for vm %s to appear on provider %s',
                                vm.name, provider.key)
                    wait_for(provider.mgmt.does_vm_exist, [vm.name],
                             handle_exception=True,
                             num_sec=600)
                else:
                    logger.warn(
                        "Provisioning failed with the message {}".format(
                            provision_request.row.last_message.text))
                    return None
        if find_in_cfme:
            vm.wait_to_appear(timeout=800)

        return vm
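For reference, a call through this method could look like the sketch below; the `infra_vms` collection attribute and the `provider` object are assumptions, and only the keyword arguments come from the signature above.

# Sketch only: the collection attribute and provider fixture are assumptions.
vm = appliance.collections.infra_vms.create(
    'test-provision-vm',
    provider,
    form_values={'environment': {'automatic_placement': True}},
    check_existing=True,
    find_in_cfme=True,
)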