Example 1
def wait_for_request(cells, partial_check=False):
    """helper function checks if a request is complete

    After finding the request's row using the ``cells`` argument, this will wait for a request to
    reach the 'Finished' state and return it. In the event of an 'Error' state, it will raise an
    AssertionError, for use with ``pytest.raises``, if desired.

    Args:
        cells: A dict of cells used to identify the request row to inspect in the
            :py:attr:`request_list` Table. See :py:meth:`cfme.web_ui.Table.find_rows_by_cells`
            for more.
        partial_check: Whether to match cell values as substrings (the ``in`` operator)
            rather than exactly.

    Usage:

        # Filter on the "Description" column
        description = 'Provision from [%s] to [%s]' % (template_name, vm_name)
        cells = {'Description': description}

        # Filter on the "Request ID" column
        # Text must match exactly, you can use "{:,}".format(request_id) to add commas if needed.
        request_id = '{:,}'.format(1000000000001)  # Becomes '1,000,000,000,001', as in the table
        cells = {'Request ID': request_id}

        # However you construct the cells dict, pass it to wait_for_request
        # Provisioning requests often take more than 5 minutes but less than 10.
        wait_for(wait_for_request, [cells], num_sec=600)

    Raises:
        AssertionError: if the matched request has status 'Error'
        RequestException: if multiple matching requests were found

    Returns:
         The matching :py:class:`cfme.web_ui.Table.Row` if found, ``False`` otherwise.
    """
    for page in paginator.pages():
        if sel.elements(request_list._header_loc) and not sel.is_displayed(request_list):
            # The table exists but it is hidden - no cells
            return False
        results = request_list.find_rows_by_cells(cells, partial_check)
        if len(results) == 0:
            # row not on this page, assume it has yet to appear
            continue
        elif len(results) > 1:
            raise RequestException(
                'Multiple requests with matching content found - be more specific!'
            )
        else:
            # found the row!
            row = results[0]
            logger.debug(' Request Message: %s' % row.last_message.text)
            break
    else:
        # Request not found at all, can't continue
        return False

    assert row.status.text != 'Error'
    if row.request_state.text == 'Finished':
        return row
    else:
        return False
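
A hedged usage sketch (not from the original listing): because wait_for_request raises AssertionError when the matched request reaches the 'Error' state, a test that expects the request to fail can wrap the call in pytest.raises; the cells dict and timeout mirror the docstring's usage notes above.

import pytest

# Hypothetical: the provisioning request is expected to end in the 'Error' state.
cells = {'Description': description}
with pytest.raises(AssertionError):
    wait_for(wait_for_request, [cells], num_sec=600)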
Example 2
 def delete_vm(self, vm_name):
     logger.debug(' Deleting VM....({})'.format(vm_name))
     vm = self._get_vm(vm_name)
     wait_for(self.stop_vm, [vm_name], fail_condition=False, num_sec=300, delay=10)
     vm.delete()
     wait_for(self.does_vm_exist, [vm_name], fail_condition=True)
     return True
Example 3
def login(username, password, submit_method=_click_on_login):
    """
    Login to CFME with the given username and password.
    Optionally, submit_method can be press_enter_after_password
    to use the enter key to login, rather than clicking the button.

    Args:
        username: The username to fill in the username field.
        password: The password to fill in the password field.
        submit_method: A function to call after the username and password have been input.

    Raises:
        RuntimeError: If the login fails, i.e. if a flash message appears
    """
    if not logged_in() or username != current_username():
        if logged_in():
            logout()
        # workaround for strange bug where we are logged out
        # as soon as we click something on the dashboard
        sel.sleep(1.0)

        logger.debug('Logging in as user %s' % username)
        fill(form, {'username': username, 'password': password})
        submit_method()
        flash.assert_no_errors()
        thread_locals.current_user = User(username, password, _full_name())
Example 4
def wait_for_miq_server_workers_started(evm_tail=None, poll_interval=5):
    """Waits for the CFME's workers to be started by tailing evm.log for:
    'INFO -- : MIQ(MiqServer#wait_for_started_workers) All workers have been started'
    Verified works with 5.5 and 5.6 appliances.
    """
    if evm_tail is None:
        logger.info('Opening /var/www/miq/vmdb/log/evm.log for tail')
        evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
        evm_tail.set_initial_file_end()

    attempts = 0
    detected = False
    max_attempts = 60
    while (not detected and attempts < max_attempts):
        logger.debug('Attempting to detect MIQ Server workers started: {}'.format(attempts))
        for line in evm_tail:
            if 'MiqServer#wait_for_started_workers' in line:
                if ('All workers have been started' in line):
                    logger.info('Detected MIQ Server is ready.')
                    detected = True
                    break
        time.sleep(poll_interval)  # Allow more log lines to accumulate
        attempts += 1
    if not (attempts < max_attempts):
        logger.error('Could not detect MIQ Server workers started in {}s.'.format(
            poll_interval * max_attempts))
    evm_tail.close()
Example 5
def ensure_advanced_search_closed():
    """Checks if the advanced search box is open and if it does, closes it."""
    if is_advanced_search_opened():
        logger.debug('search.ensure_advanced_search_closed: search was open, closing')
        sel.click(search_box.close_button)
        wait_for(is_advanced_search_opened, fail_condition=True, num_sec=10, delay=2,
                 fail_func=check_and_click_close, message='Waiting for advanced search to close')
Example 6
def refresh_provider_vms_bulk(vm_ids):
    """Refresh the specified VMs with one request via the REST API"""
    starttime = time.time()

    appliance = cfme_performance['appliance']['ip_address']
    resources = []
    for vm_id in vm_ids:
        resources.append({
            "href": "https://" + appliance + "/api/vms/" + str(vm_id)
        })

    data_dict = {
        "action": "refresh",
        "resources": resources
    }
    data_json = json.dumps(data_dict)
    response = requests.post("https://" + appliance + "/api/vms/",
                             data=data_json,
                             auth=(cfme_performance['appliance']['rest_api']['username'],
                                   cfme_performance['appliance']['rest_api']['password']),
                             verify=False,
                             headers={"content-type": "application/json"},
                             allow_redirects=False)

    if response.status_code != 200:
        logger.debug(response.text)

    logger.debug('Queued Refresh {} VMs in: {}s'.format(len(vm_ids),
        round(time.time() - starttime, 2)))
Example 7
def login(username, password, submit_method=_click_on_login):
    """
    Login to CFME with the given username and password.
    Optionally, submit_method can be press_enter_after_password
    to use the enter key to login, rather than clicking the button.

    Args:
        username: The username to fill in the username field.
        password: The password to fill in the password field.
        submit_method: A function to call after the username and password have been input.

    Raises:
        RuntimeError: If the login fails, i.e. if a flash message appears
    """
    # TODO: Should probably do the username check here, but there are pretty usernames to deal with
    # e.g. 'admin' shows up in the UI as 'Administrator'
    if not logged_in():
        # workaround for strange bug where we are logged out
        # as soon as we click something on the dashboard
        sel.sleep(1.0)

        logger.debug('Logging in as user %s' % username)
        fill(form, {'username': username, 'password': password})
        submit_method()
        flash.assert_no_errors()
Example 8
def refresh_provider_vms(vm_ids):
    """Refresh the specified VMs with one request per VM via the REST API"""
    starttime = time.time()
    for vm in vm_ids:
        refresh_provider_vm(vm)
    logger.debug('Queued Refresh {} VMs in: {}s'.format(len(vm_ids),
        round(time.time() - starttime, 2)))
Example 9
def beans_admin(cur_user, user, form, issue):
    global G_applicationmgr
    if issue == 'applymsgs':
        result = G_applicationmgr.queryUnRead(cur_user=cur_user)
        logger.debug("applymsg success")
        return json.dumps(result)
    elif issue == 'agree':
        msgid = form.get("msgid", None)
        username = form.get("username", None)
        if msgid is None or username is None:
            return json.dumps({'success': 'false', 'message': "msgid and username can't be null."})
        G_lockmgr.acquire("__beans_" + str(username))
        G_lockmgr.acquire("__applymsg_" + str(msgid))
        result = G_applicationmgr.agree(msgid, cur_user=cur_user)
        G_lockmgr.release("__applymsg_" + str(msgid))
        G_lockmgr.release("__beans_" + str(username))
        return json.dumps(result)
    elif issue == 'reject':
        msgid = form.get("msgid", None)
        if msgid is None:
            return json.dumps({'success': 'false', 'message': "msgid can't be null."})
        G_lockmgr.acquire("__applymsg_" + str(msgid))
        result = G_applicationmgr.reject(msgid, cur_user=cur_user)
        G_lockmgr.release("__applymsg_" + str(msgid))
        return json.dumps(result)
    else:
        return json.dumps({'success': 'false', 'message': 'Unsupported URL!'})
Example 10
def fill_snmp_trap_field_tuple(field, val):
    assert 2 <= len(val) <= 3, "The tuple must have 2 or 3 items!"
    if len(val) == 2:
        val = val + (None,)
    field.oid, field.type, field.value = val
    logger.debug(' Filling in SNMPTrapField #%d with values %s, %s, %s',
        field.seq_id, field.oid, field.type, field.value)
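
A hedged usage sketch: a 2-tuple is padded so the value field becomes None, while a 3-tuple fills all three fields; the field object and OID below are hypothetical.

fill_snmp_trap_field_tuple(field, ('1.3.6.1.4.1.123', 'Integer'))      # value padded to None
fill_snmp_trap_field_tuple(field, ('1.3.6.1.4.1.123', 'Integer', 42))  # explicit value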
Example 11
def _fill_multibox_dict(multi, d):
    """ Filler function for MultiBoxSelect

    Designed for `dict` styled items. It expects a dictionary in format:
    >>> {"Some item": True, "Another item": False}
    Where key stands for the item name and value its selection status.
    Any items that need to be unselected are unselected before the new selection happens.

    Args:
        multi: :py:class:`MultiBoxSelect` to fill
        d: :py:class:`dict` with values.

    Returns: :py:class:`bool` with success.
    """
    enable_list, disable_list = [], []
    for key, value in d.iteritems():
        if value:
            enable_list.append(key)
        else:
            disable_list.append(key)
    logger.debug('  Disabling values %s in %s', str(disable_list), str(multi))
    logger.debug('  Enabling values %s in %s', str(enable_list), str(multi))
    multi.remove(*disable_list)
    if multi._async:
        sync, async, dont_care = [], [], []
        MultiBoxSelect.categorize(enable_list, sync, async, dont_care)
        multi.add(*dont_care)
        multi.set_async(*async)
        multi.set_sync(*sync)
    else:
        multi.add(*map(str, enable_list))
Example 12
def checkbox(loc, set_to=False):
    """
    Checks or unchecks a given checkbox

    Finds an element given by loc and checks it

    Args:
        loc: The locator of the element
        set_to: The value the checkbox should represent as a bool (or None to do nothing)

    Returns: Previous state of the checkbox
    """
    if set_to is not None:
        el = move_to_element(loc)
        if el.tag_name == 'img':
            # Yeah, CFME sometimes uses images for check boxen. *sigh*
            # item_chk0 = unchecked, item_chk1 = checked
            selected = 'item_chk1' in el.get_attribute('src')
        else:
            selected = el.is_selected()

        if selected is not set_to:
            logger.debug("Setting checkbox %s to %s" % (str(loc), str(set_to)))
            click(el)
        return selected
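
A hedged usage sketch: since checkbox returns the previous state, a caller can toggle a box and restore it afterwards; the locator is hypothetical.

loc = '//input[@id="advanced_options"]'
previous = checkbox(loc, set_to=True)   # make sure it is checked
# ... exercise the page ...
checkbox(loc, set_to=previous)          # restore the original state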
Example 13
def map_vms_to_ids(provider_names_to_vm_names):
    """Takes a dictionary of providers with a list of vms and generates a list of vm_ids for each
    vm in the data structure.  We need this because more than one provider can lead to a """
    starttime = time.time()
    expected_num_ids = sum(len(x) for x in provider_names_to_vm_names.itervalues())
    expected_num_providers = len(provider_names_to_vm_names.keys())
    # Intended output here (list of vm ids):
    vm_ids = []
    # Intermediate data structure holding provider_id to list of vm names
    provider_ids_to_vm_names = {}

    # First get all providers details
    all_providers_details = []
    for pro_id in get_all_provider_ids():
        details = get_provider_details(pro_id)
        all_providers_details.append(details)

    providers_to_vms_copy = dict(provider_names_to_vm_names)
    # Next map provider_name to the provider_id
    for provider_name in provider_names_to_vm_names:
        for provider_detail in all_providers_details:
            if provider_name == provider_detail['name']:
                # Copy VMs from that provider to the Intermediate data structure
                provider_ids_to_vm_names[provider_detail['id']] = list(
                    provider_names_to_vm_names[provider_name])
                del providers_to_vms_copy[provider_name]
                break

    if len(providers_to_vms_copy) > 0:
        # Error, we did not find all providers, likely there is an issue with the scenario data
        # inside of cfme_performance.yml or cfme_performance.local.yml
        logger.error('Provider(s) + vm(s) not found in CFME Inventory: {}'.format(
            providers_to_vms_copy))

    provider_ids_to_vm_names_copy = copy.deepcopy(provider_ids_to_vm_names)
    # Now map each vm_name+ems_id to the actual vm_id and append to our list
    for vm_id in get_all_vm_ids():
        vm_details = get_vm_details(vm_id)
        for provider_id in provider_ids_to_vm_names:
            if ('ems_id' in vm_details and provider_id == vm_details['ems_id']):
                # Match provider_id, now check vm_name
                for vm_name in provider_ids_to_vm_names[provider_id]:
                    if vm_name == vm_details['name']:
                        logger.debug('Matching {} to vm id: {}'.format(vm_name, vm_id))
                        vm_ids.append(vm_id)
                        del (provider_ids_to_vm_names_copy[provider_id]
                            [provider_ids_to_vm_names_copy[provider_id].index(vm_name)])
                        break
        if (sum(len(x) for x in provider_ids_to_vm_names_copy.itervalues()) == 0):
            break

    # Now check for left over vms that we did not match:
    leftover_num_ids = sum(len(x) for x in provider_ids_to_vm_names_copy.itervalues())
    if leftover_num_ids > 0:
        logger.error('(Provider_id(s)) + VM(s) not found in CFME inventory: {}'.format(
            provider_ids_to_vm_names_copy))
    logger.debug('Mapped {}/{} vm ids/names over {}/{} provider ids/names in {}s'.format(
        len(vm_ids), expected_num_ids, len(provider_ids_to_vm_names.keys()), expected_num_providers,
        round(time.time() - starttime, 2)))
    return vm_ids
Example 14
 def _wait_for_vm_running():
     state = provider_mgmt.vm_status(vm_name)
     if ON_REGEX.match(state):
         return True
     elif DOWN_REGEX.match(state) or SUSPEND_REGEX.match(state):
         provider_mgmt.start_vm(vm_name)
     logger.debug("Sleeping 15secs...(current state: " + state + ", needed state: running)")
Example 15
def refresh_providers(provider_ids):
    """Refresh the specified providers with one request per provider via the REST API"""
    starttime = time.time()
    for provider in provider_ids:
        refresh_provider(provider)
    logger.debug('Queued Refresh {} Provider(s) in: {}s'.format(len(provider_ids),
        round(time.time() - starttime, 2)))
Example 16
    def __init__(self, stream_output=False, keystate=_ssh_keystate.not_installed,
            **connect_kwargs):
        super(SSHClient, self).__init__()
        self._streaming = stream_output
        self._keystate = keystate
        logger.debug('client initialized with keystate {}'.format(_ssh_keystate[keystate]))

        # Load credentials and destination from confs, set up sane defaults
        parsed_url = urlparse(store.base_url)
        default_connect_kwargs = {
            'username': conf.credentials['ssh']['username'],
            'password': conf.credentials['ssh']['password'],
            'hostname': parsed_url.hostname,
            'timeout': 10,
            'allow_agent': False,
            'look_for_keys': False,
            'gss_auth': False
        }

        default_connect_kwargs["port"] = ports.SSH

        # Overlay defaults with any passed-in kwargs and store
        default_connect_kwargs.update(connect_kwargs)
        self._connect_kwargs = default_connect_kwargs
        self.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        _client_session.append(self)
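
A hedged usage sketch: any keyword argument overlays the conf-derived defaults, so pointing the client at a different host or timeout is a one-liner; run_command is assumed to exist on this SSHClient subclass.

client = SSHClient(stream_output=True, hostname='10.0.0.5', timeout=30)
# client.run_command('uname -a')  # assumed API of the surrounding SSHClient class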
Example 17
def _setup_providers(cloud_or_infra, validate, check_existing):
    """Helper to set up all cloud or infra providers, and then validate them

    Args:
        cloud_or_infra: Like the name says: 'cloud' or 'infra' (a string)
        validate: see description in :py:func:`setup_provider`
        check_existing: see description in :py:func:`setup_provider`

    Returns:
        A list of provider objects that have been created.

    """
    # Pivot behavior on cloud_or_infra
    options_map = {
        'cloud': {
            'navigate': 'clouds_providers',
            'quad': 'cloud_prov',
            'list': list_cloud_providers
        },
        'infra': {
            'navigate': 'infrastructure_providers',
            'quad': 'infra_prov',
            'list': list_infra_providers
        }
    }
    # Check for existing providers all at once, to prevent reloading
    # the providers page for every provider in cfme_data
    if not options_map[cloud_or_infra]['list']():
        return []
    if check_existing:
        sel.force_navigate(options_map[cloud_or_infra]['navigate'])
        add_providers = []
        for provider_key in options_map[cloud_or_infra]['list']():
            provider_name = conf.cfme_data.get('management_systems', {})[provider_key]['name']
            quad = Quadicon(provider_name, options_map[cloud_or_infra]['quad'])
            for page in paginator.pages():
                if sel.is_displayed(quad):
                    logger.debug('Provider "%s" exists, skipping' % provider_key)
                    break
            else:
                add_providers.append(provider_key)
    else:
        # Add all cloud or infra providers unconditionally
        add_providers = options_map[cloud_or_infra]['list']()

    if add_providers:
        logger.info('Providers to be added: %s' % ', '.join(add_providers))

    # Save the provider objects for validation and return
    added_providers = []

    for provider_name in add_providers:
        # Don't validate in this step; add all providers, then go back and validate in order
        provider = setup_provider(provider_name, validate=False, check_existing=False)
        added_providers.append(provider)

    if validate:
        map(methodcaller('validate'), added_providers)

    return added_providers
Example 18
    def delete_vm(self, vm_name):
        self.wait_vm_steady(vm_name)
        if not self.is_vm_stopped(vm_name):
            self.stop_vm(vm_name)
        logger.debug(' Deleting RHEV VM %s' % vm_name)

        def _do_delete():
            """Returns True if you have to retry"""
            if not self.does_vm_exist(vm_name):
                return False
            try:
                vm = self._get_vm(vm_name)
                vm.delete()
            except RequestError as e:
                # Handle some states that can occur and can be circumvented
                if e.status == 409 and "Related operation" in e.detail:
                    logger.info("Waiting for RHEV: {}:{} ({})".format(e.status, e.reason, e.detail))
                    return True
                else:
                    raise  # Raise other so we can see them and eventually add them into handling
                # TODO: handle 400 - but I haven't seen the error message, it was empty.
            else:
                return False

        wait_for(_do_delete, fail_condition=True, num_sec=600, delay=15, message="execute delete")

        wait_for(
            lambda: self.does_vm_exist(vm_name),
            fail_condition=True,
            message="wait for RHEV VM %s deleted" % vm_name,
            num_sec=300
        )
        return True
Example 19
def set_rails_loglevel(level, validate_against_worker='MiqUiWorker'):
    """Sets the logging level for level_rails and detects when change occured."""
    ui_worker_pid = '#{}'.format(get_worker_pid(validate_against_worker))

    logger.info('Setting log level_rails on appliance to {}'.format(level))
    yaml = store.current_appliance.get_yaml_config()
    if not str(yaml['log']['level_rails']).lower() == level.lower():
        logger.info('Opening /var/www/miq/vmdb/log/evm.log for tail')
        evm_tail = SSHTail('/var/www/miq/vmdb/log/evm.log')
        evm_tail.set_initial_file_end()

        yaml['log']['level_rails'] = level
        store.current_appliance.set_yaml_config(yaml)

        attempts = 0
        detected = False
        while (not detected and attempts < 60):
            logger.debug('Attempting to detect log level_rails change: {}'.format(attempts))
            for line in evm_tail:
                if ui_worker_pid in line:
                    if 'Log level for production.log has been changed to' in line:
                        # Detects a log level change but does not validate the log level
                        logger.info('Detected change to log level for production.log')
                        detected = True
                        break
            time.sleep(1)  # Allow more log lines to accumulate
            attempts += 1
        if not (attempts < 60):
            # Note the error in the logger but continue as the appliance could be slow at logging
            # that the log level changed
            logger.error('Could not detect log level_rails change.')
        evm_tail.close()
    else:
        logger.info('Log level_rails already set to {}'.format(level))
Example 20
def messages_to_statistics_csv(messages, statistics_file_name):
    all_statistics = []
    for msg_id in messages:
        msg = messages[msg_id]

        added = False
        if len(all_statistics) > 0:
            for msg_statistics in all_statistics:
                if msg_statistics.cmd == msg.msg_cmd:

                    if msg.del_time > 0:
                        msg_statistics.delivertimes.append(float(msg.del_time))
                        msg_statistics.gets += 1
                    msg_statistics.dequeuetimes.append(float(msg.deq_time))
                    msg_statistics.totaltimes.append(float(msg.total_time))
                    msg_statistics.puts += 1
                    added = True
                    break

        if not added:
            msg_statistics = MiqMsgLists()
            msg_statistics.cmd = msg.msg_cmd
            if msg.del_time > 0:
                msg_statistics.delivertimes.append(float(msg.del_time))
                msg_statistics.gets = 1
            msg_statistics.dequeuetimes.append(float(msg.deq_time))
            msg_statistics.totaltimes.append(float(msg.total_time))
            msg_statistics.puts = 1
            all_statistics.append(msg_statistics)

    csvdata_path = log_path.join('csv_output', statistics_file_name)
    outputfile = csvdata_path.open('w', ensure=True)

    try:
        csvfile = csv.writer(outputfile)
        metrics = ['samples', 'min', 'avg', 'median', 'max', 'std', '90', '99']
        measurements = ['deq_time', 'del_time', 'total_time']
        headers = ['cmd', 'puts', 'gets']
        for measurement in measurements:
            for metric in metrics:
                headers.append('{}_{}'.format(measurement, metric))

        csvfile.writerow(headers)

        # Contents of CSV
        for msg_statistics in sorted(all_statistics, key=lambda x: x.cmd):
            if msg_statistics.gets > 1:
                logger.debug('Samples/Avg/90th/Std: {} : {} : {} : {},Cmd: {}'.format(
                    str(len(msg_statistics.totaltimes)).rjust(7),
                    str(round(numpy.average(msg_statistics.totaltimes), 3)).rjust(7),
                    str(round(numpy.percentile(msg_statistics.totaltimes, 90), 3)).rjust(7),
                    str(round(numpy.std(msg_statistics.totaltimes), 3)).rjust(7),
                    msg_statistics.cmd))
            stats = [msg_statistics.cmd, msg_statistics.puts, msg_statistics.gets]
            stats.extend(generate_statistics(msg_statistics.dequeuetimes, 3))
            stats.extend(generate_statistics(msg_statistics.delivertimes, 3))
            stats.extend(generate_statistics(msg_statistics.totaltimes, 3))
            csvfile.writerow(stats)
    finally:
        outputfile.close()
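
generate_statistics is not shown in this listing; a minimal sketch consistent with the metrics header order above (samples, min, avg, median, max, std, 90, 99) and the numpy calls already in use might look like this:

def generate_statistics(values, precision):
    # Assumed helper: one entry per metrics column, rounded to `precision` places.
    if not values:
        return [0, 0, 0, 0, 0, 0, 0, 0]
    return [len(values),
            round(numpy.amin(values), precision),
            round(numpy.average(values), precision),
            round(numpy.median(values), precision),
            round(numpy.amax(values), precision),
            round(numpy.std(values), precision),
            round(numpy.percentile(values, 90), precision),
            round(numpy.percentile(values, 99), precision)]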
Example 21
def find_request(cells, partial_check=False):
    """Finds the request and returns the row element

    Args:
        cells: Search data for the requests table.
        partial_check: Whether to use the ``in`` operator rather than ``==`` in find_rows_by_cells().
    Returns: row
    """
    navigate_to(Request, 'All')
    from cfme.web_ui import paginator
    for page in paginator.pages():
        results = fields.request_list.find_rows_by_cells(cells, partial_check)
        if len(results) == 0:
            # row not on this page, assume it has yet to appear
            # it might be nice to add an option to fail at this point
            continue
        elif len(results) > 1:
            raise RequestException(
                'Multiple requests with matching content found - be more specific!'
            )
        else:
            # found the row!
            row = results[0]
            logger.debug(' Request Message: %s', row.last_message.text)
            return row
    # Request not found at all, can't continue
    return False
Example 22
def add_host_credentials(provider):
    """"Adds host credentials to a provider via the REST API"""
    data_dict = {
        "action": "edit",
        "resource": {
            "credentials": {
                "userid": provider['host_credentials']['username'],
                "password": provider['host_credentials']['password']
            }
        }
    }

    json_data = json.dumps(data_dict)
    appliance = cfme_performance['appliance']['ip_address']
    for host in get_all_host_ids():
        response = requests.post("https://" + appliance + "/api/hosts/" + str(host),
                                 data=json_data,
                                 auth=(cfme_performance['appliance']['rest_api']['username'],
                                       cfme_performance['appliance']['rest_api']['password']),
                                 verify=False,
                                 headers={"content-type": "application/json"},
                                 allow_redirects=False)

        if response.status_code != 200:
            logger.debug(response.text)

        print response  # TODO: REMOVE

    logger.debug('Added host credentials, Response: {}'.format(response))
Example 23
def _test_vm_power_on():
    """Ensures power button is shown for a VM"""
    logger.info("Checking for power button")
    vm_name = virtual_machines.get_first_vm_title()
    logger.debug("VM " + vm_name + " selected")
    if not virtual_machines.is_pwr_option_visible(vm_name, option=virtual_machines.Vm.POWER_ON):
        raise OptionNotAvailable("Power button does not exist")
Example 24
def uncollectif(item):
    """ Evaluates if an item should be uncollected

    Tests markers against a supplied lambda from the marker object to determine
    if the item should be uncollected or not.
    """

    marker = item.get_marker('uncollectif')
    if marker:
        log_msg = 'Uncollecting {}: {}'.format(item.name,
            marker.kwargs.get('reason', 'No reason given'))

        try:
            arg_names = inspect.getargspec(marker._arglist[0][0][0]).args
        except TypeError:
            logger.debug(log_msg)
            return not bool(marker.args[0])
        try:
            args = [item.callspec.params[arg] for arg in arg_names]
        except KeyError:
            missing_argnames = list(set(arg_names) - set(item._request.funcargnames))
            func_name = item.name
            if missing_argnames:
                raise Exception("You asked for a fixture which wasn't in the function {} "
                                "prototype {}".format(func_name, missing_argnames))
            else:
                raise Exception("Failed to uncollect {}, best guess a fixture wasn't "
                                "ready".format(func_name))

        logger.debug(log_msg)
        return not marker.args[0](*args)
    else:
        return True
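
A hedged usage sketch: the marker's first positional argument is a lambda, and its parameter names are resolved against the test's fixture values via item.callspec.params; the provider fixture and reason text are hypothetical.

@pytest.mark.uncollectif(lambda provider: provider.type != 'rhevm',
                         reason='relevant only for RHEV providers')
def test_rhev_specific(provider):
    pass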
Example 25
def save_cluster(user, beans, form):
    global G_vclustermgr
    clustername = form.get('clustername', None)
    if clustername is None:
        return json.dumps({'success':'false', 'message':'clustername is null'})

    imagename = form.get("image", None)
    description = form.get("description", None)
    containername = form.get("containername", None)
    isforce = form.get("isforce", None)
    G_ulockmgr.acquire(user)
    try:
        if not isforce == "true":
            [status,message] = G_vclustermgr.image_check(user,imagename)
            if not status:
                return json.dumps({'success':'false','reason':'exists', 'message':message})

        user_info = post_to_user("/user/selfQuery/", {'token':form.get("token")})
        [status,message] = G_vclustermgr.create_image(user,clustername,containername,imagename,description,user_info["data"]["groupinfo"]["image"])
        if status:
            logger.info("image has been saved")
            return json.dumps({'success':'true', 'action':'save'})
        else:
            logger.debug(message)
            return json.dumps({'success':'false', 'reason':'exceed', 'message':message})
    except Exception as ex:
        logger.error(str(ex))
        return json.dumps({'success':'false', 'message': str(ex)})
    finally:
        G_ulockmgr.release(user)
Example 26
def browser_setup(get_appliance, provider, vm_to_analyze, fs_type, mgmt_sys_api_clients):
    '''Overrides env.conf and points a browser at the appliance IP passed to it.

    Once the test is finished, it checks whether any remaining tests still need the appliance
    and deletes it unless it is the appliance specified in conf/env.yaml.
    '''
    global appliance_vm_name
    global test_list

    test_list.remove(['', provider, vm_to_analyze, fs_type])
    with browser_session(base_url='https://' + get_appliance):
        yield nav.home_page_logged_in(testsetup)

        # cleanup provisioned appliance if not more tests for it
        if ('appliances_provider' not in cfme_data['basic_info'].keys() or
                provider != cfme_data['basic_info']['appliances_provider']):
            more_same_provider_tests = False
            for outstanding_test in test_list:
                if outstanding_test[1] == provider:
                    logger.debug("More provider tests found")
                    more_same_provider_tests = True
                    break
            if not more_same_provider_tests:
                # if rhev,  remove direct_lun disk before delete
                if cfme_data['management_systems'][provider]['type'] == 'rhevm':
                    logger.info('Removing RHEV direct_lun hook...')
                    run_command("./scripts/connect_directlun.py --remove --provider " +
                        provider + " --vm_name " + appliance_vm_name)
                # delete appliance
                logger.info("Delete provisioned appliance: " + appliance_list[provider])
                destroy_cmd = ('./scripts/clone_template.py --provider ' + provider + ' '
                    '--destroy --vm_name ' + appliance_vm_name + ' ')
                run_command(destroy_cmd)
Example 27
 def provision_appliances(
         self, count=1, preconfigured=False, version=None, stream=None, provider=None,
         lease_time=120, ram=None, cpu=None):
     # If we specify version, stream is ignored because we will get that specific version
     if version:
         stream = get_stream(version)
     # If we specify stream but not version, sprout will give us latest version of that stream
     elif stream:
         pass
     # If we don't specify either, we will get the same version as the current appliance
     else:
         stream = get_stream(current_appliance.version)
         version = current_appliance.version.vstring
     request_id = self.call_method(
         'request_appliances', preconfigured=preconfigured, version=version,
         group=stream, provider=provider, lease_time=lease_time, ram=ram, cpu=cpu, count=count
     )
     wait_for(
         lambda: self.call_method('request_check', str(request_id))['finished'], num_sec=300,
         message='provision {} appliance(s) from sprout'.format(count))
     data = self.call_method('request_check', str(request_id))
     logger.debug(data)
     appliances = []
     for appliance in data['appliances']:
         appliances.append(IPAppliance(appliance['ip_address']))
     return appliances, request_id
Example 28
def test_refresh_providers(request, scenario):
    """Refreshes providers then waits for a specific amount of time. Memory Monitor creates graphs
    and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    monitor_thread = SmemMemoryMonitor(SSHClient(), 'workload-refresh-providers', scenario['name'],
        'refresh-providers', get_server_roles_workload_refresh_providers(separator=','),
        ', '.join(scenario['providers']))

    def cleanup_workload(scenario, from_ts):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts))

    monitor_thread.start()

    wait_for_miq_server_ready(poll_interval=2)
    set_server_roles_workload_refresh_providers(ssh_client)
    add_providers(scenario['providers'])
    id_list = get_all_provider_ids()

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_providers(id_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue Refreshes ({}) exceeded time between Refreshes ({})'.format(
                refresh_time, time_between_refresh))

    logger.info('Test Ending...')
Example 29
def perf_click(uiworker_pid, tailer, measure_sel_time, clickable, *args):
    # Regular Expressions to find the ruby production completed time and select query time
    status_re = re.compile(r'Completed\s([0-9]*\s[a-zA-Z]*)\sin\s([0-9\.]*)ms')
    views_re = re.compile(r'Views:\s([0-9\.]*)ms')
    activerecord_re = re.compile(r'ActiveRecord:\s([0-9\.]*)ms')
    select_query_time_re = re.compile(r'\s\(([0-9\.]*)ms\)')
    worker_pid = '#' + uiworker_pid

    # Time the selenium transaction from "click"
    seleniumtime = 0
    if clickable:
        starttime = time()
        clickable(*args)
        seleniumtime = int((time() - starttime) * 1000)

    pgstats = []
    pgstat = PageStat()
    line_count = 0
    starttime = time()

    for line in tailer:
        line_count += 1
        if worker_pid in line:
            if 'SELECT' in line:
                pgstat.selectcount += 1
                selecttime = select_query_time_re.search(line)
                if selecttime:
                    if float(selecttime.group(1)) > perf_tests['ui']['threshold']['query_time']:
                        pgstat.slowselects.append(line)
            if 'CACHE' in line:
                pgstat.cachedcount += 1
            if 'INFO -- : Started' in line:
                # Obtain method and requested page
                started_idx = line.index('Started') + 8
                pgstat.request = line[started_idx:line.index('for', 72)]
            if 'INFO -- : Completed' in line:
                # Obtain status code and total render time
                status_result = status_re.search(line)
                if status_result:
                    pgstat.status = status_result.group(1)
                    pgstat.completedintime = float(status_result.group(2))

                pgstat.uncachedcount = pgstat.selectcount - pgstat.cachedcount

                # Redirects don't always have a view timing
                views_result = views_re.search(line)
                if views_result:
                    pgstat.viewstime = float(views_result.group(1))
                activerecord_result = activerecord_re.search(line)
                if activerecord_result:
                    pgstat.activerecordtime = float(activerecord_result.group(1))
                pgstats.append(pgstat)
                pgstat = PageStat()
    if pgstats:
        if measure_sel_time:
            pgstats[-1].seleniumtime = seleniumtime
    timediff = time() - starttime
    logger.debug('Parsed ({}) lines in {}'.format(line_count, timediff))
    return pgstats
Example 30
 def refresh_provider_relationships(self, from_list_view=False):
     # from_list_view is ignored as it is included here for sake of compatibility with UI call.
     logger.debug('Refreshing provider relationships')
     col = self.appliance.rest_api.collections.providers.find_by(name=self.name)
     try:
         col[0].action.refresh()
     except IndexError:
         raise Exception("Provider collection empty")
Example 31
 def query_video_id(video_conf):
     mysql = Sql()
     if video_conf['version'] == 'rtsp':
         sql_video_id = "SELECT F_ID FROM t_video_channel WHERE F_Name = '{}' AND " \
                        "F_Video_Type = 'rtsp' AND  F_Enabled = 1;".format(video_conf['name'])
     else:
         sql_video_id = "SELECT F_ID FROM t_video_server WHERE F_NAME = '{}' AND " \
                        "F_Video_Type = 'pvg' AND F_Enabled = 1;".format(video_conf['name'])
     req = mysql.query(sql_video_id)
     mysql.close()
     server_id = req[0][0] if req else None
     logger.debug('Fetched {} ID: {}'.format(video_conf['name'], server_id))
     return server_id
Example 32
def delete_provisioned_vms(provision_order):
    """Attempts to Deletes all VMs in provision_order. Expects provision order to be a 2D list
    where the inner list contains the VM name in index 0, and its provider's name in index 1
    Expects cleanup_size to be an integer"""
    starttime = time.time()
    startsize = len(provision_order)

    for vm_tuple in provision_order[:]:
        if delete_provisioned_vm(vm_tuple):
            provision_order.remove(vm_tuple)

    logger.debug('Deleted {} VMs in: {}s'.format(startsize - len(provision_order),
                 round(time.time() - starttime, 2)))
Example 33
def clear_infra_providers(validate=True):
    sel.force_navigate('infrastructure_providers')
    logger.debug('Checking for existing infrastructure providers...')
    if paginator.rec_total():
        logger.info(' Providers exist, so removing all infra providers')
        paginator.results_per_page('100')
        sel.click(paginator.check_all())
        toolbar.select('Configuration',
                       'Remove Infrastructure Providers from the VMDB',
                       invokes_alert=True)
        sel.handle_alert()
        if validate:
            wait_for_no_infra_providers()
Example 34
    def on_request(self, command: str, message: Message) -> None:
        """
        Message handler for all request commands except those that
        have personal handler.

        Personal request message handler name format:
            Capabilities-Exchange -> on_capabilities_exchange
            Registration-Termination -> on_registration_termination
            ...
        """
        logger.debug("Command={command}, Message={message}",
                     command=command,
                     message=message)
Example 35
    def _wait_for_vm_paused():
        if provider.mgmt.is_vm_paused(vm_name):
            return True
        elif provider.mgmt.is_vm_running(vm_name):
            provider.mgmt.pause_vm(vm_name)
        elif provider.mgmt.is_vm_stopped(vm_name) or \
                (provider.mgmt.can_suspend and provider.mgmt.is_vm_suspended(vm_name)):
            provider.mgmt.start_vm(vm_name)

        logger.debug(
            "Sleeping 15secs...(current state: {}, needed state: paused)".
            format(provider.mgmt.vm_status(vm_name)))
        return False
Example 36
def validate(data):
    validation_schema = ResourceSchema()

    try:
        validation_schema.load(data)
    except ValidationError as e:
        logger.debug('Error messages are: %s' % str(e))
        try:
            error_message = e.messages.values()[0][0]
        except Exception as e:
            logger.error(str(e))
            error_message = 'unknown failure'
        raise ValueError(error_message)
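
A hedged usage sketch, assuming ResourceSchema is a marshmallow-style schema: callers only ever see a ValueError carrying the first validation message.

try:
    validate({'name': 42})  # hypothetical invalid payload
except ValueError as err:
    logger.info('validation rejected input: %s', err)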
Example 37
 def work(self):
     n = -10
     while 1:
         try:
             n += 10
             for res in self.parse_old_link(n):
                 yield res[0], res[1]
         except Exception:
             logger.debug(
                 '%s%s%s' %
                 ('have no paging!!!', '\t', traceback.format_exc()))
             yield None
             break
Example 38
def un_zip(target_path):
    """
    解压缩目标压缩包
    实现新需求,解压缩后相应的js文件做代码格式化
    :return:
    """

    logger.info("[Pre][Unzip] Upzip file {}...".format(target_path))

    if not os.path.isfile(target_path):
        logger.warn("[Pre][Unzip] Target file {} is't exist...pass".format(
            target_path))
        return False

    zip_file = zipfile.ZipFile(target_path)
    target_file_path = target_path + "_files/"

    if os.path.isdir(target_file_path):
        logger.debug("[Pre][Unzip] Target files {} is exist...continue".format(
            target_file_path))
        return target_file_path
    else:
        os.mkdir(target_file_path)

    for names in zip_file.namelist():
        zip_file.extract(names, target_file_path)

        # When an extracted file is JavaScript, beautify the code for readability
        if names.endswith(".js"):
            file_path = os.path.join(target_file_path, names)
            file = codecs.open(file_path,
                               'r+',
                               encoding='utf-8',
                               errors='ignore')
            file_content = file.read()
            file.close()

            new_file = codecs.open(file_path,
                                   'w+',
                                   encoding='utf-8',
                                   errors='ignore')

            opts = jsbeautifier.default_options()
            opts.indent_size = 2

            new_file.write(jsbeautifier.beautify(file_content, opts))
            new_file.close()

    zip_file.close()

    return target_file_path
Example 39
def navigate_quadicons(q_names, q_type, page_name, nav_limit, ui_worker_pid, prod_tail, soft_assert,
        acc_topbars=[]):
    pages = []
    count = 0
    if nav_limit == 0:
        count = -1
    assert len(q_names) > 0
    while (count < nav_limit):
        for q in q_names:
            for page in paginator.pages():
                quadicon = Quadicon(str(q), q_type)
                if sel.is_displayed(quadicon):

                    pages.extend(analyze_page_stat(perf_click(ui_worker_pid, prod_tail, True,
                        sel.click, quadicon), soft_assert))

                    for topbar in acc_topbars:
                        try:
                            if not list_acc.is_active(topbar):
                                list_acc.click(topbar)
                            links = list_acc.get_active_links(topbar)
                            for link in range(len(links)):
                                # Every click makes the previous list of links invalid
                                links = list_acc.get_active_links(topbar)
                                if link < len(links):
                                    # Do not navigate to any link containing:
                                    dnn = ['parent', 'Capacity & Utilization', 'Timelines',
                                        'Show tree of all VMs by Resource Pool in this Cluster',
                                        'Show host drift history', 'Show VMs']
                                    if any_in(dnn, links[link].title):
                                        logger.debug('DNN Skipping: {}'.format(links[link].title))
                                    else:
                                        pages.extend(analyze_page_stat(perf_click(ui_worker_pid,
                                            prod_tail, True, links[link].click), soft_assert))

                        except NoSuchElementException:
                            logger.warning('NoSuchElementException - page_name:{}, Quadicon:{},'
                                ' topbar:{}'.format(page_name, q, topbar))
                            soft_assert(False, 'NoSuchElementException - page_name:{}, Quadicon:{},'
                                ' topbar:{}'.format(page_name, q, topbar))
                            break
                    count += 1
                    break

            pages.extend(analyze_page_stat(perf_click(ui_worker_pid, prod_tail, True,
                sel.force_navigate, page_name), soft_assert))
            # If nav_limit == 0, every item is navigated to
            if nav_limit != 0 and count == nav_limit:
                break

    return pages
Example 40
    def get_any_class_methodcall(self,
                                 method_name,
                                 call_params,
                                 unserchain=[],
                                 define_param=(),
                                 deepth=0):
        """
        可以调用任意类的任意个方法,跟踪分析
        :param method_name:
        :param call_params:
        :param unserchain:
        :param define_param:
        :param deepth:
        :return:
        """
        deepth += 1
        define_param = (*call_params, *define_param)
        call_nodes = self.dataflow_db.objects.filter(node_type='newMethod')

        logger.debug(
            "[PhpUnSerChain] trigger any class method. Trying to find all class methods with {}."
            .format(call_params))
        for node in call_nodes:

            # Build a fresh list so the shared data is not mutated
            newunserchain = [node]

            class_locate = node.node_locate

            new_locate = node.node_locate + '.' + node.source_node

            method_nodes = self.dataflow_db.objects.filter(
                node_locate__startswith=new_locate)
            params_count = self.dataflow_db.objects.filter(
                node_locate__startswith=new_locate,
                node_type='newMethodparams').count()

            if params_count != len(define_param):
                continue

            status = self.deep_search_chain(method_nodes,
                                            class_locate,
                                            newunserchain,
                                            define_param=define_param,
                                            deepth=deepth)

            if status:
                unserchain.extend(newunserchain)
                return True

        return False
Example 41
def detect_observed_field(loc):
    """Detect observed fields; sleep if needed

    Used after filling most form fields, this function will inspect the filled field for
    one of the known CFME observed field attributes, and if found, sleep long enough for the observed
    field's AJAX request to go out, and then block until no AJAX requests are in flight.

    Observed fields occasionally declare their own wait interval before firing their AJAX request.
    If found, that interval will be used instead of the default.

    """
    if is_displayed(loc):
        el = element(loc)
    else:
        # Element not visible, short out
        return

    # Default wait period, based on the default UI wait (700ms)
    # plus a little padding to let the AJAX fire before we wait_for_ajax
    default_wait = .8
    # Known observed field attributes
    observed_field_markers = (
        'data-miq_observe',
        'data-miq_observe_date',
        'data-miq_observe_checkbox',
    )
    for attr in observed_field_markers:
        try:
            observed_field_attr = el.get_attribute(attr)
            break
        except NoSuchAttributeException:
            pass
    else:
        # Failed to detect an observed text field, short out
        return

    try:
        attr_dict = json.loads(observed_field_attr)
        interval = float(attr_dict.get('interval', default_wait))
        # Pad the detected interval, as with default_wait
        interval += .1
    except (TypeError, ValueError):
        # ValueError and TypeError happens if the attribute value couldn't be decoded as JSON
        # ValueError also happens if interval couldn't be coerced to float
        # In either case, we've detected an observed text field and should wait
        interval = default_wait

    logger.debug('  Observed field detected, pausing %.1f seconds' % interval)
    sleep(interval)
    wait_for_ajax()
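
For reference, the attribute value being decoded is a small JSON document; a hypothetical example (the url key is illustrative):

# <input ... data-miq_observe='{"interval": 2.0, "url": "/some/ajax/endpoint"}'>
attr_dict = json.loads('{"interval": 2.0, "url": "/some/ajax/endpoint"}')
assert float(attr_dict.get('interval', 0.8)) == 2.0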
Example 42
def _fill_multibox_str(multi, string):
    """ Filler function for MultiBoxSelect

    Designed for `string`. Selects item with the name.

    Args:
        multi: :py:class:`MultiBoxSelect` to fill
        string: String to select

    Returns: :py:class:`bool` with success.
    """
    stype = type(multi)
    logger.debug('  Filling in %s with value %s' % (str(stype), string))
    return multi.add(string)
Example 43
def pytest_sessionfinish(session, exitstatus):
    """Loop through the appliance stack and close ssh connections"""

    for ssh_client in store.ssh_clients_to_close:
        logger.debug('Closing ssh connection on %r', ssh_client)
        try:
            ssh_client.close()
        except Exception:
            logger.debug('Closing ssh connection on %r failed, but ignoring',
                         ssh_client)
    for session in ssh._client_session:
        with diaper:
            session.close()
    yield
Example 44
    def wx_login(self, code):
        '''Do we need to fetch the user info on every login?'''
        ret = wx_oauth.GetUserInfo(code)
        if (ret['state'] != 0):
            self.write(str(ret))
            return False

        userInfo = json.loads(ret['userinfo'])
        tokenInfo = json.loads(ret['token_data'])
        userId = daos.userDao.QueryWeChat(userInfo['openid'])
        if not userId:
            parentUserId = self.get_argument('userid', default=None)
            logger.debug('new user login, super user id %s' % parentUserId)
            # No bound user: create one. The avatar should be stored on our own server,
            # but for now keep using the WeChat one and refresh it if it ever breaks.
            userId = daos.userDao.GenerateUserByWeChat(userInfo,
                                                       tokenInfo,
                                                       parentId=parentUserId)
        else:
            '''Update the user info .....'''
            logger.debug('update wechat info to user. %s' % str(userInfo))

        if not userId:
            logger.debug('failed to generate user: %s' % str(userInfo))
            return False

        expires = int(time.time()) + self.expiresTime
        logger.debug('%s logged in successfully, valid for %s seconds' %
                     (userInfo['nickname'].encode("UTF-8"), str(expires)))
        # The expiry time seen in the browser is always 7-8 hours earlier than expected(?), so the
        # cookie expires right after login and other pages become inaccessible.
        self.set_secure_cookie(self.secure_username, userId, expires=expires)
        #self.set_secure_cookie(self.secure_username, userId, expires_days=1)
        #self.set_secure_cookie(self.secure_username, userId)
        return True
Example 45
def test_filter_save_cancel(rails_delete_filter):
    # bind filter_name to the function for fixture cleanup
    test_filter_save_cancel.filter_name = fauxfactory.gen_alphanumeric()
    logger.debug('Set filter_name to: {}'.format(test_filter_save_cancel.filter_name))

    # Try save filter
    assert search.save_filter("fill_count(Infrastructure Provider.VMs, >)",
                              test_filter_save_cancel.filter_name, cancel=True)
    assert_no_cfme_exception()

    assert search.reset_filter()
    # Exception depends on system state - Load button will be disabled if there are no saved filters
    with pytest.raises((DisabledButtonException, NoSuchElementException)):
        search.load_filter(saved_filter=test_filter_save_cancel.filter_name)
Example 46
 def test_SanYaoSu_3(self):
     '''Name and ID number match, but the phone number does not'''
     yaosupage = SanYaoSuPage(self.driver, self.url, u'登录-知了背调')
     yaosupage.input_bei_diao_xing_ming(unicode(self.test_sanyaosu_datas[1][0]))
     yaosupage.input_id_number(str(int(self.test_sanyaosu_datas[1][1])))
     yaosupage.input_phone_number(str(int(self.test_sanyaosu_datas[1][2])))
     yaosupage.click_start_button()
     time.sleep(2)
     try:
         self.assertIn("该手机号与本人身份不匹配", yaosupage.get_ren_gong_text())
         logger.debug(yaosupage.get_ren_gong_text())
     except Exception:
         screenshot.Cut_img(self.driver, "test_SanYaoSu_3")
         logger.error("Did not reach step one of manual review")
Example 47
 def recover_jobs(self):
     logger.info("Rerun the unfailed and unfinished jobs...")
     try:
         rejobs = Batchjob.query.filter(
             ~Batchjob.status.in_(['done', 'failed', 'stopped']))
         rejobs = rejobs.order_by(Batchjob.create_time).all()
         for rejob in rejobs:
             logger.info("Rerun job: " + rejob.id)
             logger.debug(str(rejob))
             job = BatchJob(rejob.id, rejob.username, None, rejob)
             self.job_map[job.job_id] = job
             self.process_job(job)
     except Exception as err:
         logger.error(traceback.format_exc())
Example 48
def get_number_of_vms(do_not_navigate=False):
    """
    Returns the total number of VMs visible to the user,
    including those archived or orphaned
    """
    logger.info("Getting number of vms")
    if not do_not_navigate:
        navigate_to(Vm, 'VMsOnly')
    if not paginator.page_controls_exist():
        logger.debug("No page controls")
        return 0
    total = paginator.rec_total()
    logger.debug("Number of VMs: %s", total)
    return int(total)
Example 49
 def switch_to_window(self, partial_url='', partial_title=''):
     all_windows = self.driver.window_handles
     if len(all_windows) == 1:
          logger.warning('Only one window open; cannot switch!')
     elif len(all_windows) == 2:
         other_window = all_windows[1 -
                                    all_windows.index(self.current_window)]
         self.driver.switch_to.window(other_window)
     else:
         for window in all_windows:
             self.driver.switch_to.window(window)
             if partial_url in self.driver.current_url or partial_title in self.driver.title:
                 break
      logger.debug('%s %s', self.driver.current_url, self.driver.title)
Example 50
def clear_cloud_providers(validate=True):
    sel.force_navigate('clouds_providers')
    logger.debug('Checking for existing cloud providers...')
    total = paginator.rec_total()
    if total is not None and int(total) > 0:
        logger.info(' Providers exist, so removing all cloud providers')
        paginator.results_per_page('100')
        sel.click(paginator.check_all())
        toolbar.select('Configuration',
                       'Remove Cloud Providers from the VMDB',
                       invokes_alert=True)
        sel.handle_alert()
        if validate:
            wait_for_no_cloud_providers()
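wait_for_no_cloud_providers is referenced above but not shown. A minimal sketch using
the same wait_for helper, assuming a get_number_of_cloud_providers counter exists
analogous to the get_number_of_vms functions below:

def wait_for_no_cloud_providers():
    # poll until the cloud provider count drops to zero
    wait_for(lambda: get_number_of_cloud_providers() == 0,
             num_sec=300, delay=10,
             message='Waiting for all cloud providers to disappear')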
Esempio n. 51
0
def get_number_of_vms(do_not_navigate=False):
    """
    Returns the total number of VMs visible to the user,
    including those archived or orphaned
    """
    logger.info("Getting number of vms")
    if not do_not_navigate:
        sel.force_navigate('infra_vms')
    if not paginator.page_controls_exist():
        logger.debug("No page controls")
        return 0
    total = paginator.rec_total()
    logger.debug("Number of VMs: {}".format(total))
    return int(total)
Esempio n. 52
0
def test_can_delete_filter():
    filter_name = fauxfactory.gen_alphanumeric()
    logger.debug('Set filter_name to: {}'.format(filter_name))
    assert search.save_filter("fill_count(Infrastructure Provider.VMs, >, 0)",
                              filter_name)
    assert_no_cfme_exception()
    search.reset_filter()
    assert_no_cfme_exception()
    search.load_filter(filter_name)
    assert_no_cfme_exception()
    if not search.delete_filter():
        pytest.fail("Cannot delete filter! Probably the delete button is not present!")
    assert_no_cfme_exception()
Esempio n. 53
0
def check_kunlunignore(filename):
    """Return False if ``filename`` matches any pattern in IGNORE_LIST,
    True if the file should be scanned."""
    for ignore_reg in IGNORE_LIST:
        if re.search(ignore_reg, filename, re.I):
            logger.debug('[INIT][IGNORE] File {} filtered by {}'.format(
                filename, ignore_reg))
            return False

    return True
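The entries in IGNORE_LIST are plain regular expressions matched case-insensitively;
hypothetical values to illustrate the behaviour:

IGNORE_LIST = [r'\.min\.js$', r'node_modules', r'^tests?/']

check_kunlunignore('static/js/app.min.js')   # False: matched by \.min\.js$, skipped
check_kunlunignore('app/controllers.php')    # True: not ignored, will be scanned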
Esempio n. 54
0
def delete_provisioned_vm(vm_tuple):
    """Deletes the VM specified in ``vm_tuple``. Expects the tuple to contain
    the VM name at index 0 and its provider at index 1. Returns True if successful."""
    vm_name, provider_name = vm_tuple
    logger.debug('Cleaning up: {}'.format(vm_name))
    provider_details = cfme_providers[provider_name]
    provider = get_mgmt_provider_class(provider_details)
    try:
        provider.delete_vm(vm_name)
        return True
    except Exception as e:
        # VM potentially was not yet provisioned
        logger.error('Could not delete VM: {} Exception: {}'.format(vm_name, e))
        return False
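A usage sketch; the VM and provider names are placeholders:

vm_tuple = ('perf-vm-001', 'vsphere-provider-1')
if not delete_provisioned_vm(vm_tuple):
    logger.warning('Cleanup of {} failed; it may never have provisioned'.format(vm_tuple[0]))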
Esempio n. 55
0
def provision_vm(tuple_list):
    """Create a provision request for a VM
        This method expects a list of tuples, each of which have the format:
        (vm_name, template_guid, vlan)"""
    starttime = time.time()
    data_dict = {'action': 'create', 'resources': []}

    for vm_name, template_guid, vlan in tuple_list:
        data_dict['resources'].append({
            'template_fields': {
                'guid': template_guid
            },
            'vm_fields': {
                'number_of_sockets': 1,
                'cores_per_socket': 1,
                'vm_name': vm_name,
                'vm_memory': '1024',
                'vlan': vlan,
                'vm_auto_start': True,
                'provision_type': 'native_clone'
            },
            'requester': {
                'user_name': 'admin',
                'owner_first_name': 'FirstName',
                'owner_last_name': 'LastName',
                'owner_email': '*****@*****.**',
                'auto_approve': True
            },
            'additional_values': {
                'request_id': '1001'
            },
            'ems_custom_attributes': {},
            'miq_custom_attributes': {}
        })

    data_json = json.dumps(data_dict)
    appliance = cfme_appliance['ip_address']
    response = requests.post("https://" + appliance +
                             "/api/provision_requests/",
                             data=data_json,
                             auth=(cfme_appliance['rest_api']['username'],
                                   cfme_appliance['rest_api']['password']),
                             verify=False,
                             headers={"content-type": "application/json"},
                             allow_redirects=False)

    if response.status_code != 200:
        logger.error('Provision request failed ({}): {}'.format(
            response.status_code, response.text))
    logger.debug('Queued provision request for {} VM(s) in: {}s'.format(
        len(tuple_list), round(time.time() - starttime, 2)))
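A usage sketch; the template GUIDs and VLAN name are placeholders:

vms_to_provision = [
    ('perf-vm-001', '422f4b71-8e43-4c47-a1a4-000000000001', 'VM Network'),
    ('perf-vm-002', '422f4b71-8e43-4c47-a1a4-000000000002', 'VM Network'),
]
provision_vm(vms_to_provision)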
Esempio n. 56
0
 def forward(self, sock, remote, fix):
     content_encoding = None
     left_length = 0
     try:
         fdset = [sock, remote]
         while True:
             r, w, e = select.select(fdset, [], [])
             if sock in r:
                 data = sock.recv(32768)
                 if len(data) <= 0:
                     break
                 remote.sendall(data)
             if remote in r:
                 data = remote.recv(32768)
                 if len(data) <= 0:
                     break
                 if fix:
                     if content_encoding is None:
                         # first chunk of a response: split off and parse the headers
                         headers, body = data.split(b'\r\n\r\n', maxsplit=1)
                         headers = headers.decode('iso-8859-1')
                         match = re.search(r'Content-Encoding: (\S+)\r\n', headers)
                         content_encoding = match.group(1) if match else ''
                         match = re.search(r'Content-Length: (\d+)\r\n', headers)
                         content_length = int(match.group(1)) if match else 0
                         left_length = content_length - len(body)
                         # note: assumes a gzip body arrives in a single read
                         if content_encoding == 'gzip':
                             body = zlib.decompress(body, 15 + 32)
                         for old in setting.config['content_fix'][self.host]:
                             body = body.replace(
                                 old.encode('utf8'),
                                 setting.config['content_fix'][self.host][old].encode('utf8'))
                         headers = re.sub(r'Content-Encoding: (\S+)\r\n', '', headers)
                         headers = re.sub(r'Content-Length: (\d+)\r\n',
                                          'Content-Length: {}\r\n'.format(len(body)),
                                          headers)
                         data = headers.encode('iso-8859-1') + b'\r\n\r\n' + body
                         if left_length <= 0:
                             # whole body arrived at once; next read starts a new response
                             content_encoding = None
                     else:
                         # continuation chunk of the current body: pass through, count it down
                         left_length -= len(data)
                         if left_length <= 0:
                             content_encoding = None
                 sock.sendall(data)
     except socket.error as e:
         logger.debug('Forward: %s' % e)
     finally:
         sock.close()
         remote.close()
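The nested lookup setting.config['content_fix'][self.host][old] above implies a
per-host mapping of old substring to replacement; a hypothetical shape:

setting.config['content_fix'] = {
    'www.example.com': {
        # every occurrence of the key is replaced by the value in response bodies
        'http://www.example.com/': 'https://www.example.com/',
    },
}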
Esempio n. 57
0
    def receive_task(self):
        if self.cfg.m3u8_url == "":
            return
        content = requests.get(self.cfg.m3u8_url).text
        lines = content.split('\n')
        for line in lines:
            if line.endswith(".ts"):
                # strip the path prefix and extension to get the numeric segment index
                i = line.replace(self.cfg.suffix, "").replace(".ts", "")
                if int(i) > self.pre_index or int(i) == 0:
                    self.index_pool.put(int(i))
                    self.pre_index = int(i)

        # back off so frequent HTTP requests do not make the video stream server lose response
        if self.index_pool.empty():
            self.try_cnt += 1
            if self.try_cnt % 20 == 0:
                logger.info(
                    'Video [{}]: Empty index response from online video. Check '
                    'connectivity to the video server and the deployment server '
                    'network.'.format(self.cfg.index))
            time.sleep(1)
            return False

        current_index = self.index_pool.get()
        url = self.cfg.url + str(current_index) + ".ts"

        logger.debug('Send Request: [{}]'.format(url))
        response = requests.get(url, headers=self.cfg.headers)
        logger.debug('Response status code: [{}]'.format(response.status_code))

        if response.status_code == 404:
            logger.debug('Stream not found: [{}]'.format(response.status_code))
            return False

        format_index = str("%03d.ts" % current_index)
        with open(self.stream_save_path / format_index, "wb") as f:
            f.write(response.content)
        # cache the stream index that this HTTP request completed
        self.pass_index(format_index)
        logger.debug("%03d.ts downloaded" % current_index)
Esempio n. 58
0
    def get_any_methodcall(self,
                           method_name,
                           call_params,
                           unserchain=None,
                           define_param=(),
                           deepth=0):
        """
        Trace a call to the named method on any class and analyse it.
        :param method_name: name of the method being triggered
        :param call_params: parameters of the triggering call
        :param unserchain: accumulator list for the discovered chain nodes
        :param define_param: parameters already defined further up the chain
        :param deepth: current recursion depth
        :return: True if a chain was found, False otherwise
        """
        if unserchain is None:
            unserchain = []
        deepth += 1
        define_param = (*call_params, *define_param)
        method_node_name = 'Method-{}'.format(method_name)
        call_nodes = self.dataflow_db.objects.filter(
            node_type='newMethod', source_node__startswith=method_node_name)

        logger.debug("[PhpUnSerChain] trigger {}{}. try to find it.".format(
            method_node_name, call_params))

        for node in call_nodes:
            logger.debug("[PhpUnSerChain] Found {} in {}".format(
                method_node_name, node.node_locate))

            # build a fresh list so data already collected is not disturbed
            newunserchain = [node]

            class_locate = node.node_locate

            new_locate = node.node_locate + '.' + node.source_node

            method_nodes = self.dataflow_db.objects.filter(
                node_locate__startswith=new_locate)

            status = self.deep_search_chain(method_nodes,
                                            class_locate,
                                            newunserchain,
                                            define_param=define_param,
                                            deepth=deepth)

            if status:
                unserchain.extend(newunserchain)
                return True

        return False
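A usage sketch, assuming ``analyzer`` is an instance of the enclosing class;
__destruct with no parameters is the classic PHP deserialization trigger:

chain = []
if analyzer.get_any_methodcall('__destruct', (), unserchain=chain):
    for node in chain:
        logger.info('[PhpUnSerChain] chain node at {}'.format(node.node_locate))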
Esempio n. 59
0
def update_and_new_project_vendor(project_id,
                                  name,
                                  version,
                                  language,
                                  source=None,
                                  ext=None):
    hash = md5("{},{},{},{}".format(project_id, name, language, source))
    vendor = ProjectVendors.objects.filter(project_id=project_id,
                                           name=name,
                                           language=language).first()

    if vendor:
        # backward compatibility: if no source was recorded yet, store it now
        if not vendor.source:
            vendor.version = version
            vendor.source = source
            vendor.ext = ext
            vendor.hash = hash

            try:
                vendor.save()
            except IntegrityError:
                logger.warning("[Model Save] vendor model not changed")

    else:
        vendor = ProjectVendors.objects.filter(project_id=project_id,
                                               hash=hash).first()

    if vendor:
        if vendor.version != version and version != 'unknown':
            logger.debug("[Vendors] Component {} update to version {}".format(
                name, version))

            vendor.version = version
            try:
                vendor.save()
            except IntegrityError:
                logger.warn("[Model Save] vendor model not changed")

    else:
        v = ProjectVendors(project_id=project_id,
                           name=name,
                           version=version,
                           language=language,
                           ext=ext)
        v.save()

    return True
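A usage sketch with illustrative values:

update_and_new_project_vendor(project_id=1,
                              name='monolog/monolog',
                              version='2.3.5',
                              language='php',
                              source='composer.json')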
Esempio n. 60
0
def select(root, sub=None, invokes_alert=False):
    """ Clicks on a button by calling the :py:meth:`click_n_move` method.

    Args:
        root: The root button's name as a string.
        sub: The sub button's name as a string. (optional)
        invokes_alert: If ``True``, the behaviour is a little different: after the last
            click, no ajax wait and no move away are performed, so the alert that
            appears after the click can be handled. Defaults to ``False``.
    Returns: ``True`` if everything went smoothly
    Raises: :py:class:`cfme.exceptions.ToolbarOptionGreyed`,
        :py:class:`cfme.exceptions.ToolbarOptionUnavailable`
    """
    # wait for ajax on select to avoid picking up a toolbar button in the middle of a page change
    sel.wait_for_ajax()
    if isinstance(root, dict):
        root = version.pick(root)
    if sub is not None and isinstance(sub, dict):
        sub = version.pick(sub)
    if not is_greyed(root):
        try:
            if sub is None and invokes_alert:
                # We arrived into a place where alert will pop up so no moving and no ajax
                sel.click(root_loc(root), wait_ajax=False)
            else:
                select_n_move(root_loc(root))
        except sel.NoSuchElementException:
            raise ToolbarOptionUnavailable(
                "Toolbar button '{}' was not found.".format(root))
        except sel.StaleElementReferenceException:
            logger.debug('Stale toolbar button "{}", relocating'.format(root))
            return select(root, sub, invokes_alert)
    else:
        raise ToolbarOptionGreyed("Toolbar button {} is greyed!".format(root))
    if sub:
        sel.wait_for_ajax()
        if not is_greyed(root, sub):
            try:
                if invokes_alert:
                    # We arrived into a place where alert will pop up so no moving and no ajax
                    sel.click(sub_loc(sub), wait_ajax=False)
                else:
                    select_n_move(sub_loc(sub))
            except sel.NoSuchElementException:
                raise ToolbarOptionUnavailable(
                    "Toolbar button '{}/{}' was not found.".format(root, sub))
        else:
            raise ToolbarOptionGreyed("Toolbar option {}/{} is greyed!".format(
                root, sub))
    return True
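A typical invocation, mirroring the clear_cloud_providers example above:

toolbar.select('Configuration',
               'Remove Cloud Providers from the VMDB',
               invokes_alert=True)
sel.handle_alert()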