Example #1
def appliance_browser(get_appliance, provider_crud, vm_template_name, os, fs_type):
    '''Overrides env.conf and points a browser to the appliance IP passed to it.

    Once the test finishes, it checks whether any remaining tests need the appliance and
    deletes it, unless it is the appliance specified in conf/env.yaml.
    '''
    logger.info("Starting appliance browser fixture")
    global test_list

    test_list.remove([provider_crud.key, vm_template_name, os, fs_type])

    with get_appliance.ipapp.db.transaction:
        with get_appliance.browser_session() as browser:
            yield browser

    # clean up the provisioned appliance if no more tests need it
    if provider_crud.key != main_provider:
        more_same_provider_tests = False
        for outstanding_test in test_list:
            if outstanding_test[0] == provider_crud.key:
                logger.info("More provider tests found")
                more_same_provider_tests = True
                break
        if not more_same_provider_tests:
            get_appliance.destroy()
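
A minimal sketch of the same yield-plus-teardown fixture pattern in plain pytest, with a hypothetical `outstanding` list standing in for the global test_list:

import pytest

outstanding = [('provider-a', 'tmpl-1'), ('provider-a', 'tmpl-2')]

@pytest.fixture
def tracked_resource():
    key = ('provider-a', 'tmpl-1')
    outstanding.remove(key)
    resource = object()  # stands in for the provisioned appliance
    yield resource       # the test body runs here
    # destroy only if no remaining tests need the same provider
    if not any(item[0] == key[0] for item in outstanding):
        print('destroying resource')
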
Example #2
def setup_provider(provider_key, validate=True, check_existing=True):
    """Add the named provider to CFME

    Args:
        provider_key: Provider key name from cfme_data
        validate: Whether or not to block until the provider stats in CFME
            match the stats gleaned from the backend management system
            (default: ``True``)
        check_existing: Check if this provider already exists, skip if it does

    Returns:
        An instance of :py:class:`cfme.cloud.provider.Provider` or
        :py:class:`cfme.infrastructure.provider.Provider` for the named provider, as appropriate.

    """
    provider = get_crud(provider_key)
    if check_existing and provider.exists:
        # no need to create provider if the provider exists
        # pass so we don't skip the validate step
        pass
    else:
        logger.info('Setting up provider: %s', provider.key)
        provider.create(validate_credentials=True)

    if validate:
        provider.validate()

    return provider
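
Assuming this framework's helpers are importable (the module path below is an assumption), a typical call site might look like this, with a hypothetical provider key:

# assumed import path within this test framework
from utils.providers import setup_provider

provider = setup_provider('vsphere55', validate=True, check_existing=True)
assert provider.exists
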
Example #3
    def cleanup_host():
        try:
            logger.info('Cleaning up host %s on provider %s' % (prov_host_name, provider_crud.key))
            mgmt_system = provider_crud.get_mgmt_system()
            host_list = mgmt_system.list_host()
            if host_provisioning['ip_addr'] in host_list:
                wait_for(mgmt_system.is_host_connected, [host_provisioning['ip_addr']])
                mgmt_system.remove_host_from_cluster(host_provisioning['ip_addr'])

            ipmi = test_host.get_ipmi()
            ipmi.power_off()

            # During host provisioning, the host name gets changed from what's specified at
            # creation time. If host provisioning succeeds, the original name is restored;
            # otherwise the changed names are retained upon failure.
            renamed_host_name1 = "{} ({})".format('IPMI', host_provisioning['ipmi_address'])
            renamed_host_name2 = "{} ({})".format('VMware ESXi', host_provisioning['ip_addr'])

            host_list_ui = host.get_all_hosts()
            if host_provisioning['hostname'] in host_list_ui:
                test_host.delete(cancel=False)
                host.wait_for_host_delete(test_host)
            elif renamed_host_name1 in host_list_ui:
                host_renamed_obj1 = host.Host(name=renamed_host_name1)
                host_renamed_obj1.delete(cancel=False)
                host.wait_for_host_delete(host_renamed_obj1)
            elif renamed_host_name2 in host_list_ui:
                host_renamed_obj2 = host.Host(name=renamed_host_name2)
                host_renamed_obj2.delete(cancel=False)
                host.wait_for_host_delete(host_renamed_obj2)
        except Exception:
            # The mgmt_sys classes raise Exception :\
            logger.warning('Failed to clean up host %s on provider %s' %
                           (prov_host_name, provider_crud.key))
Example #4
def groupModify_user(cur_user, user, form):
    global G_usermgr
    logger.info("handle request: user/groupModify/")
    G_lockmgr.acquire('__quotafile')
    result = G_usermgr.groupModify(newValue = form, cur_user = cur_user)
    G_lockmgr.release('__quotafile')
    return json.dumps(result)
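
The handlers in this listing bracket each manager call with explicit G_lockmgr.acquire/release pairs, which leak the lock if the call raises. A sketch of the same pattern with a plain threading.Lock used as a context manager, so the release survives exceptions (the manager object here is a hypothetical stand-in for G_usermgr):

import json
import threading

_quota_lock = threading.Lock()  # stands in for G_lockmgr's '__quotafile' lock

def group_modify_user_safe(cur_user, form, usermgr):
    with _quota_lock:  # released even if groupModify raises
        result = usermgr.groupModify(newValue=form, cur_user=cur_user)
    return json.dumps(result)
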
Example #5
    def _provisioner(template, provisioning_data, delayed=None):
        pytest.sel.force_navigate('infrastructure_provision_vms', context={
            'provider': provider,
            'template_name': template,
        })

        vm_name = provisioning_data["vm_name"]
        fill(provisioning_form, provisioning_data, action=provisioning_form.submit_button)
        flash.assert_no_errors()

        request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
        if delayed is not None:
            total_seconds = (delayed - datetime.utcnow()).total_seconds()
            row_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
            cells = {'Description': row_description}
            try:
                row, __ = wait_for(requests.wait_for_request, [cells],
                                   fail_func=requests.reload, num_sec=total_seconds, delay=5)
                pytest.fail("The provisioning was not postponed")
            except TimedOutError:
                pass
        logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider.key)
        wait_for(provider.mgmt.does_vm_exist, [vm_name], handle_exception=True, num_sec=600)

        # nav to requests page happens on successful provision
        logger.info('Waiting for cfme provision request for vm %s', vm_name)
        row_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
        cells = {'Description': row_description}
        row, __ = wait_for(requests.wait_for_request, [cells],
                           fail_func=requests.reload, num_sec=900, delay=20)
        assert row.last_message.text == 'Vm Provisioned Successfully'
        return VM.factory(vm_name, provider)
Example #6
    def close_console_window(self):
        """Attempt to close the Console window at the end of the test."""
        if self.console_handle is not None:
            self.switch_to_console()
            self.selenium.close()
            logger.info("Browser window/tab containing Console was closed.")
            self.switch_to_appliance()
Example #7
def cleanup_vm(vm_name, provider_key, provider_mgmt):
    try:
        logger.info('Cleaning up VM %s on provider %s' % (vm_name, provider_key))
        provider_mgmt.delete_vm(vm_name + "_0001")
    except Exception:
        # The mgmt_sys classes raise Exception :\
        logger.warning('Failed to clean up VM %s on provider %s' % (vm_name, provider_key))
Example #8
    def get_screen_text(self):
        """
        Return the text from a text console.

        Uses OCR to scrape the text from the console image taken at the time of the call.
        """
        image_str = self.get_screen()

        # Write the image string to a file as pytesseract requires
        # a file, and doesn't take a string.
        tmp_file = tempfile.NamedTemporaryFile(suffix='.jpeg')
        tmp_file.write(image_str)
        tmp_file.flush()
        tmp_file_name = tmp_file.name
        # Open the image file, resize it to a high resolution and sharpen it for
        # clearer text, then run image_to_string, which returns unicode that is
        # encoded to utf-8 to give us text (type(text) == 'str'). The higher
        # resolution allows tesseract to recognize the text correctly.
        image = Image.open(tmp_file_name)
        image = image.resize((7680, 4320), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
        text = image_to_string(image, lang='eng',
                               config='--user-words eng.user-words').encode('utf-8')
        tmp_file.close()

        logger.info('screen text: {}'.format(text))
        return text
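
A self-contained sketch of the same OCR flow with Pillow and pytesseract (upscale, sharpen, recognize); the file path is hypothetical and both packages are assumed installed:

from PIL import Image, ImageFilter
import pytesseract

def screen_text(path):
    image = Image.open(path)
    image = image.resize((image.width * 4, image.height * 4))  # upscale for better OCR
    image = image.filter(ImageFilter.SHARPEN)
    return pytesseract.image_to_string(image, lang='eng')

# print(screen_text('console.jpeg'))
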
Example #9
    def send_ctrl_alt_delete(self):
        """Press the ctrl-alt-delete button in the console tab."""
        self.switch_to_console()
        ctrl_alt_del_btn = self.provider.get_console_ctrl_alt_del_btn()
        logger.info("Sending CTRL+ALT+DEL keys to the Console")
        ctrl_alt_del_btn.click()
        self.switch_to_appliance()
Example #10
def test_retire_stack(provider, provisioning, create_template, catalog, request):
    """Tests stack provisioning

    Metadata:
        test_flag: provision
    """
    set_default_view("Stacks", "Grid View")
    dialog_name, template = create_template
    item_name = fauxfactory.gen_alphanumeric()
    catalog_item = CatalogItem(item_type="Orchestration", name=item_name,
                  description="my catalog", display_in=True, catalog=catalog.name,
                  dialog=dialog_name, orch_template=template.template_name)
    catalog_item.create()
    stack_data = prepare_stack_data(provider, provisioning)
    service_catalogs = ServiceCatalogs("service_name", stack_data)
    service_catalogs.order_stack_item(catalog.name, catalog_item)
    logger.info('Waiting for cfme provision request for service %s', item_name)
    row_description = item_name
    cells = {'Description': row_description}
    row, __ = wait_for(requests.wait_for_request, [cells, True],
                       fail_func=requests.reload, num_sec=2500, delay=20)
    assert row.last_message.text == 'Service Provisioned Successfully'
    stack = Stack(stack_data['stack_name'])
    stack.retire_stack()

    @request.addfinalizer
    def _cleanup_templates():
        template.delete_all_templates()
        stack_data['vm_name'].delete_from_provider()
Example #11
def cleanup_vm(vm_name, provider):
    try:
        logger.info('Cleaning up VM %s on provider %s', vm_name, provider.key)
        provider.mgmt.delete_vm(vm_name)
    except Exception:
        # The mgmt_sys classes raise Exception :\
        logger.warning('Failed to clean up VM %s on provider %s', vm_name, provider.key)
Example #12
def manage_folder(group, folder=None):
    """Context manager to use when modifying the folder contents.

    Args:
        group: User group.
        folder: Which folder to manage. If None, top-level will be managed.
    Returns: Context-managed :py:class:`cfme.intelligence.reports.ui_elements.FolderManager` inst.
    """
    sel.force_navigate("report_menus_group", context={"group": group})
    if folder is None:
        reports_tree.click_path("Top Level")
    else:
        reports_tree.click_path("Top Level", folder)
    try:
        yield manager
    except FolderManager._BailOut:
        logger.info("Discarding editation modifications on {}".format(str(repr(manager))))
        manager.discard()
    except:
        # In case of any exception, nothing will be saved
        manager.discard()
        raise  # And reraise the exception
    else:
        # If no exception happens, save!
        manager.commit()
        form_buttons.save()
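
The commit-or-discard flow above maps directly onto contextlib.contextmanager; a minimal sketch with a hypothetical Manager class standing in for the FolderManager:

from contextlib import contextmanager

class BailOut(Exception):
    pass  # deliberate signal to discard pending edits

class Manager:
    def commit(self):
        print('committed')
    def discard(self):
        print('discarded')

@contextmanager
def manage(manager):
    try:
        yield manager
    except BailOut:
        manager.discard()   # deliberate bail-out: swallow and discard
    except Exception:
        manager.discard()   # any other error: discard, then re-raise
        raise
    else:
        manager.commit()    # no exception: persist the edits

# with manage(Manager()) as m:
#     pass  # edits here; commit happens on clean exit
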
Example #13
def test_reconfigure_service(provider, provisioning, create_template, catalog, request):
    """Tests service reconfiguration for a provisioned stack

    Metadata:
        test_flag: provision
    """
    dialog_name, template = create_template
    item_name = fauxfactory.gen_alphanumeric()
    catalog_item = CatalogItem(item_type="Orchestration", name=item_name,
                  description="my catalog", display_in=True, catalog=catalog.name,
                  dialog=dialog_name, orch_template=template.template_name)
    catalog_item.create()
    stack_data = prepare_stack_data(provider, provisioning)

    @request.addfinalizer
    def _cleanup_vms():
        if provider.mgmt.stack_exist(stack_data['stack_name']):
            wait_for(lambda: provider.mgmt.delete_stack(stack_data['stack_name']),
             delay=10, num_sec=800, message="wait for stack delete")
        template.delete_all_templates()
        stack_data['vm_name'].delete_from_provider()

    service_catalogs = ServiceCatalogs("service_name", stack_data)
    service_catalogs.order_stack_item(catalog.name, catalog_item)
    logger.info('Waiting for cfme provision request for service %s', item_name)
    row_description = item_name
    cells = {'Description': row_description}
    row, __ = wait_for(requests.wait_for_request, [cells, True],
                       fail_func=requests.reload, num_sec=2000, delay=20)
    assert row.last_message.text == 'Service Provisioned Successfully'
    myservice = MyService(catalog_item.name)
    myservice.reconfigure_service()
Example #14
def provider_init(provider_key):
    """cfme/infrastructure/provider.py provider object."""
    try:
        setup_provider(provider_key)
    except Exception as e:
        logger.info("Exception detected on provider setup: " + str(e))
        pytest.skip("It's not possible to set up this provider, therefore skipping")
Example #15
    def _do_stats_match(self, client, stats_to_match=None):
        """ A private function to match a set of statistics, with a Provider.

        This function checks if the list of stats match, if not, the page is refreshed.

        Note: Provider mgmt_system uses the same key names as this Provider class to avoid
            having to map keyname/attributes e.g. ``num_template``, ``num_vm``.

        Args:
            client: A provider mgmt_system instance.
            stats_to_match: A list of key/attribute names to match.

        Raises:
            HostStatsNotContains: If the host stats do not contain the specified key.
            ProviderHasNoProperty: If the provider does not have the property defined.
        """
        host_stats = client.stats(*stats_to_match)

        for stat in stats_to_match:
            try:
                cfme_stat = getattr(self, stat)
                logger.info(' Matching stat [%s], Host(%s), CFME(%s)' %
                    (stat, host_stats[stat], cfme_stat))
                if host_stats[stat] != cfme_stat:
                    return False
            except KeyError:
                raise HostStatsNotContains("Host stats information does not contain '%s'" % stat)
            except AttributeError:
                raise ProviderHasNoProperty("Provider does not know how to get '%s'" % stat)
        return True
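
Stripped of the CFME specifics, the matching loop reduces to a dict/attribute comparison that converts lookup failures into distinct errors; a generic sketch:

def stats_match(source, host_stats, stats_to_match):
    for stat in stats_to_match:
        try:
            expected = getattr(source, stat)
            if host_stats[stat] != expected:
                return False
        except KeyError:
            raise LookupError('host stats missing %r' % stat)
        except AttributeError:
            raise LookupError('source has no property %r' % stat)
    return True
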
Example #16
def _test_vm_power_on():
    """Ensures power button is shown for a VM"""
    logger.info("Checking for power button")
    vm_name = virtual_machines.get_first_vm_title()
    logger.debug("VM " + vm_name + " selected")
    if not virtual_machines.is_pwr_option_visible(vm_name, option=virtual_machines.Vm.POWER_ON):
        raise OptionNotAvailable("Power button does not exist")
Example #17
    def _filter_required_flags(self, provider):
        """ Filters by required yaml flags """
        if self.required_flags is None:
            return None
        if self.required_flags:
            test_flags = [flag.strip() for flag in self.required_flags]

            defined_flags = conf.cfme_data.get('test_flags', '')
            if isinstance(defined_flags, six.string_types):
                defined_flags = defined_flags.split(',')
            defined_flags = [flag.strip() for flag in defined_flags]

            excluded_flags = provider.data.get('excluded_test_flags', '')
            if isinstance(excluded_flags, six.string_types):
                excluded_flags = excluded_flags.split(',')
            excluded_flags = [flag.strip() for flag in excluded_flags]

            allowed_flags = set(defined_flags) - set(excluded_flags)

            if set(test_flags) - allowed_flags:
                logger.info("Filtering Provider %s out because it does not have the right flags, "
                            "%s does not contain %s",
                            provider.name, list(allowed_flags),
                            list(set(test_flags) - allowed_flags))
                return False
        return True
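
The filter logic is set arithmetic: a provider is kept only when every required flag lands in defined minus excluded. A worked example with hypothetical flag names:

required = {'provision', 'power_control'}
defined = {'provision', 'power_control', 'rest'}
excluded = {'rest'}

allowed = defined - excluded
missing = required - allowed
print('filtered out' if missing else 'kept')  # prints 'kept' here
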
Example #18
def test_soft_reboot(setup_provider_funcscope, provider, testing_instance, soft_assert,
                     verify_vm_running):
    """ Tests instance soft reboot

    Metadata:
        test_flag: power_control, provision
    """
    testing_instance.wait_for_instance_state_change(desired_state=testing_instance.STATE_ON)
    state_change_time = testing_instance.get_detail(properties=('Power Management',
                                                                'State Changed On'))
    testing_instance.power_control_from_cfme(option=testing_instance.SOFT_REBOOT)
    flash.assert_message_contain('Restart Guest initiated')
    wait_for_state_change_time_refresh(testing_instance, provider, state_change_time, timeout=720)
    if provider.type == 'gce' \
            and testing_instance.get_detail(properties=('Power Management', 'Power State')) \
            == testing_instance.STATE_UNKNOWN:
        """Wait for one more state change as transitional state also
        changes "State Changed On" time on GCE provider
        """
        logger.info("Instance is still in \"{}\" state. please wait before CFME will show correct "
                    "state".format(testing_instance.get_detail(properties=('Power Management',
                                                                           'Power State'))))
        state_change_time = testing_instance.get_detail(properties=('Power Management',
                                                                    'State Changed On'))
        wait_for_state_change_time_refresh(testing_instance, provider, state_change_time,
                                           timeout=720)

    testing_instance.wait_for_instance_state_change(desired_state=testing_instance.STATE_ON)
    soft_assert(
        provider.mgmt.is_vm_running(testing_instance.name),
        "instance is not running")
Example #19
def _get_tasks(tab_destination, **filter_kwargs):
    """ Generic function to return contents of the tasks table

    Args:
        tab_destination: Tab destination for :py:mod:`ui_navigate`, i.e. where to get the data.
        **filter_kwargs: See :py:meth:`_filter`
    Returns: List of dicts.
    """
    navigate_to(Tasks, tab_destination)
    if any(value is not None for value in filter_kwargs.values()):
        _filter(**filter_kwargs)
    tasks = []

    if sel.is_displayed(tasks_table):
        for page in paginator.pages():
            for row in tasks_table.rows():
                tasks.append(
                    dict(
                        updated=parsetime.from_american_with_utc(
                            row.updated.text.encode('utf-8').strip()
                        ),
                        started=parsetime.from_american_with_utc(
                            row.started.text.encode('utf-8').strip()
                        ),
                        state=row.state.text.encode('utf-8').strip(),
                        message=row.message.text.encode('utf-8').strip(),
                        task_name=row.task_name.text.encode('utf-8').strip(),
                        user=row.user.text.encode('utf-8').strip()
                    )
                )
    else:
        logger.info('No Tasks collected on {}'.format(tab_destination))
    return tasks
Example #20
def test_ec2_catalog_item(provider_init, provider_key, provider_mgmt, provider_crud,
                          provider_type, provisioning, dialog, catalog, request):
    # tries to delete the VM that gets created here
    vm_name = 'test_ec2_servicecatalog-%s' % generate_random_string()
    image = provisioning['image']['name']
    item_name = "ec2_" + generate_random_string()

    ec2_catalog_item = ec2.Instance(
        item_type="Amazon",
        name=item_name,
        description="my catalog",
        display_in=True,
        catalog=catalog.name,
        dialog=dialog,
        catalog_name=image,
        vm_name=vm_name,
        instance_type=provisioning['instance_type'],
        availability_zone=provisioning['availability_zone'],
        security_groups=[provisioning['security_group']],
        provider_mgmt=provider_mgmt,
        provider=provider_crud.name,
        guest_keypair="shared")

    ec2_catalog_item.create()
    service_catalogs = ServiceCatalogs("service_name")
    service_catalogs.order(catalog.name, ec2_catalog_item)
    flash.assert_no_errors()
    logger.info('Waiting for cfme provision request for service %s' % item_name)
    row_description = 'Provisioning [%s] for Service [%s]' % (item_name, item_name)
    cells = {'Description': row_description}
    request.addfinalizer(lambda: cleanup_vm(vm_name, provider_key, provider_mgmt))
    row, __ = wait_for(requests.wait_for_request, [cells],
        fail_func=requests.reload, num_sec=600, delay=20)
    assert row.last_message.text == 'Request complete'
Example #21
def test_idle_default(request):
    """Runs an appliance at idle for a specific amount of time. Memory Monitor creates graphs
    and a summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()

    clean_appliance(ssh_client)

    monitor_thread = SmemMemoryMonitor(SSHClient(), 'workload-idle', 'default',
        'Idle with Default Roles', get_server_roles_workload_idle_default(separator=', '),
        'No Providers')

    def cleanup_workload(from_ts):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_default_dashboard_url(from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(from_ts))

    monitor_thread.start()

    wait_for_miq_server_ready(poll_interval=2)
    # No need to set server roles as we are using the default set of roles

    s_time = cfme_performance['workloads']['test_idle_default']['total_time']
    logger.info('Idling appliance for {}s'.format(s_time))
    time.sleep(s_time)

    logger.info('Test Ending...')
Example #22
def wait_for_alert(smtp, alert, delay=None, additional_checks=None):
    """DRY waiting function

    Args:
        smtp: smtp_test funcarg
        alert: Alert name
        delay: Optional delay to pass to wait_for
        additional_checks: Additional checks to perform on the mails. Keys are names of the mail
            sections, values the values to look for.
    """
    logger.info("Waiting for informative e-mail of alert %s to come", alert.description)
    additional_checks = additional_checks or {}

    def _mail_arrived():
        for mail in smtp.get_emails():
            if "Alert Triggered: {}".format(alert.description) in mail["subject"]:
                if not additional_checks:
                    return True
                else:
                    for key, value in additional_checks.iteritems():
                        if value in mail.get(key, ""):
                            return True
        return False
    wait_for(
        _mail_arrived,
        num_sec=delay,
        delay=5,
        message="wait for e-mail to come!"
    )
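
wait_for here comes from the framework's own utilities; the underlying idiom is a poll-until-deadline loop, sketched below with the signature reduced to the arguments used in this listing:

import time

def wait_for(condition, num_sec=60, delay=5, message='condition'):
    deadline = time.time() + num_sec
    while time.time() < deadline:
        if condition():
            return
        time.sleep(delay)
    raise TimeoutError('timed out waiting for %s' % message)

# wait_for(lambda: True, num_sec=30, message='e-mail to arrive')
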
Example #23
def chlxcsetting_user(cur_user, user, form):
    global G_usermgr
    logger.info("handle request: user/chlxcsetting/")
    G_lockmgr.acquire('__lxcsetting')
    result = G_usermgr.chlxcsetting(cur_user = cur_user, form = form)
    G_lockmgr.release('__lxcsetting')
    return json.dumps(result)
Example #24
def usageRelease_user(cur_user, user, form):
    global G_usermgr
    logger.info("handle request: user/usageInc/")
    G_lockmgr.acquire('__usage_'+str(user))
    result = G_usermgr.usageRelease(cur_user = cur_user, cpu = form.get('cpu'), memory = form.get('memory'), disk = form.get('disk'))
    G_lockmgr.release('__usage_'+str(user))
    return json.dumps(result)
Example #25
def usageRecover_user(cur_user, user, form):
    global G_usermgr
    logger.info("handle request: user/usageInc/")
    G_lockmgr.acquire('__usage_'+str(user))
    result = G_usermgr.usageRecover(cur_user = cur_user, modification = json.loads(form.get('setting')))
    G_lockmgr.release('__usage_'+str(user))
    return json.dumps(result)
Example #26
def quotaadd_user(cur_user, user, form):
    global G_usermgr
    logger.info("handle request: user/quotaadd/")
    G_lockmgr.acquire('__quotafile')
    result = G_usermgr.quotaadd(form = form, cur_user = cur_user)
    G_lockmgr.release('__quotafile')
    return json.dumps(result)
Example #27
def groupdel_user(cur_user, user, form):
    global G_usermgr
    logger.info("handle request: user/groupdel/")
    G_lockmgr.acquire('__quotafile')
    result = G_usermgr.groupdel(name = form.get('name', None), cur_user = cur_user)
    G_lockmgr.release('__quotafile')
    return json.dumps(result)
Example #28
def delete_notification(cur_user, user, form):
    global G_notificationmgr
    logger.info("handle request: notification/delete/")
    G_lockmgr.acquire('__notification')
    result = G_notificationmgr.delete_notification(cur_user=cur_user, form=form)
    G_lockmgr.release('__notification')
    return json.dumps(result)
Example #29
def chdefault(cur_user, user, form):
    global G_usermgr
    logger.info("handle request: user/chdefault/")
    G_lockmgr.acquire('__quotafile')
    result = G_usermgr.change_default_group(form = form, cur_user = cur_user)
    G_lockmgr.release('__quotafile')
    return json.dumps(result)
Example #30
    def _custom_click_handler(self, wait_ajax):
        """Handler called from pytest_selenium"""
        if self.is_dimmed and not self._force:
            logger.info("Not clicking %s because it is dimmed", repr(self))
            return
        sel.wait_for_element(self, timeout=5)
        return sel.click(self, no_custom_handler=True, wait_ajax=wait_ajax)
Example #31
    def test_admin_delete_user_with_projects_confirm(self):
        driver = self.driver

        # create a new user
        create_new_user_button = driver.find_element_by_id(
            "create_new_user_button")
        create_new_user_button.click()

        create_first_name = driver.find_element_by_id("create_first_name")
        create_last_name = driver.find_element_by_id("create_last_name")
        create_email = driver.find_element_by_id("create_email")

        create_first_name.send_keys(self.first_name)
        create_last_name.send_keys(self.last_name)
        create_email.send_keys(self.email)

        confirm_create_new_user_button = driver.find_element_by_id(
            "confirm_create_new_user_button")
        confirm_create_new_user_button.click()
        logger.info("create a user for deleting")
        time.sleep(1)
        driver.find_element_by_id("logout").click()
        time.sleep(1)

        # create a project with this user
        driver.find_element_by_link_text("User").click()
        time.sleep(1)
        driver.find_element(
            "xpath",
            "//div[@id='login-tabpane-user']/form/div/input").send_keys(
                self.email)
        driver.find_element(
            "xpath", "//div[@id='login-tabpane-user']/form/button").click()
        time.sleep(1)
        driver.find_element_by_id("create_project_button").click()
        project_name = random_project_name()
        driver.find_element_by_id("create_project_name").send_keys(
            self.project_name)
        driver.find_element_by_id("confirm_create_project_button").click()
        logger.info("create a project for the user")
        time.sleep(1)
        driver.find_element_by_id("logout").click()
        time.sleep(1)

        # delete the created user
        driver.find_element(
            "xpath",
            "//div[@id='login-tabpane-admin']/form/div/input").send_keys(
                "admin")
        driver.find_element(
            "xpath", "//div[@id='login-tabpane-admin']/form/button").click()
        time.sleep(1)

        search_user(self)
        delete_button = driver.find_element(
            "xpath",
            "//tr[td[contains(text(), '{}')]]//i[contains(@class, 'pe-7s-trash')]"
            .format(self.email))
        delete_button.click()
        logger.info("click the delete button")
        time.sleep(1)

        # Check delete modal pops up
        is_delete_modal = self.selenium.is_element_exist("id", "delete_modal")
        self.assertTrue(is_delete_modal)
        logger.info("delete modal pops up")

        # click confirm
        confirm_delete_button = driver.find_element_by_id("confirm_delete")
        confirm_delete_button.click()
        logger.info("click the confirm button")
        time.sleep(1)

        # Check delete success
        is_email = self.selenium.is_element_exist(
            "xpath", "//tr[td[contains(text(), '{}')]]".format(self.email))
        self.assertFalse(is_email)
        logger.info("delete the user successfully")
Example #32
"""
调用模版,暂时放在这儿,会清理
"""


import time

from utils.api import createJSONRPCRequestObject, postJSONRPCRequestObject, w3
from constants import *
from utils.log import logger

requestObject, requestId = createJSONRPCRequestObject('eth_getTransactionCount', [user, 'latest'], requestId)
responseObject = postJSONRPCRequestObject(URL, requestObject)
logger.info(responseObject)
myNonce = w3.toInt(hexstr=responseObject['result'])
logger.info('nonce of address {} is {}'.format(user, myNonce))

# prepare the data field of the transaction
# function selector and argument encoding
# https://solidity.readthedocs.io/en/develop/abi-spec.html#function-selector-and-argument-encoding
value1, value2 = 10, 32  # random numbers here
function = 'add(uint256,uint256)'  # from smart contract
methodId = w3.sha3(text=function)[0:4].hex()
param1 = value1.to_bytes(32, byteorder='big').hex()
param2 = value2.to_bytes(32, byteorder='big').hex()
data = '0x' + methodId + param1 + param2
contractAddress = state_addr
transaction_dict = {'from': user,
                    'to': contractAddress,
                    'chainId': CHAINID,
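
The data field built above concatenates a 4-byte function selector with 32-byte left-padded arguments. A worked example of that arithmetic; the selector value is an assumption here, since keccak-256 is not in the stdlib and the snippet derives it via w3.sha3:

method_id = '771602f7'  # assumed keccak-256 selector for add(uint256,uint256)
param1 = (10).to_bytes(32, byteorder='big').hex()
param2 = (32).to_bytes(32, byteorder='big').hex()
data = '0x' + method_id + param1 + param2
print(len(data))  # 2 ('0x') + 8 + 64 + 64 = 138 characters
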
Example #33
def test_provisioning(request, scenario):
    """Runs through provisioning scenarios using the REST API to
    continuously provision a VM for a specified period of time.
    Memory Monitor creates graphs and summary at the end of each scenario."""

    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {
        'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-provisioning',
        'test_name': 'Provisioning',
        'appliance_roles':
        get_server_roles_workload_provisioning(separator=', '),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    provision_order = []

    def cleanup_workload(scenario, from_ts, vms_to_cleanup, quantifiers,
                         scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        set_server_roles_workload_provisioning_cleanup(ssh_client)
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        final_vm_size = len(vms_to_cleanup)
        delete_provisioned_vms(vms_to_cleanup)
        monitor_thread.join()
        logger.info(
            '{} VMs were left over, and {} VMs were deleted in the finalizer.'.
            format(final_vm_size, final_vm_size - len(vms_to_cleanup)))
        logger.info(
            'The following VMs were left over after the test: {}'.format(
                vms_to_cleanup))
        quantifiers['VMs_To_Delete_In_Finalizer'] = final_vm_size
        quantifiers['VMs_Deleted_In_Finalizer'] = final_vm_size - len(
            vms_to_cleanup)
        quantifiers['Leftover_VMs'] = vms_to_cleanup
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(
        scenario, from_ts, provision_order, quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_provisioning(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    guid_list = get_template_guids(scenario['templates'])
    guid_cycle = cycle(guid_list)
    cleanup_size = scenario['cleanup_size']
    number_of_vms = scenario['number_of_vms']
    total_time = scenario['total_time']
    time_between_provision = scenario['time_between_provision']
    total_provisioned_vms = 0
    total_deleted_vms = 0
    provisioned_vms = 0
    starttime = time.time()

    while ((time.time() - starttime) < total_time):
        start_iteration_time = time.time()
        provision_list = []
        for i in range(number_of_vms):
            total_provisioned_vms += 1
            provisioned_vms += 1
            vm_to_provision = '{}-provision-{}'.format(
                test_ts,
                str(total_provisioned_vms).zfill(4))
            guid_to_provision, provider_name = next(guid_cycle)
            provider_to_provision = cfme_performance['providers'][
                provider_name]
            provision_order.append((vm_to_provision, provider_name))
            provision_list.append((vm_to_provision, guid_to_provision,
                                   provider_to_provision['vlan_network']))

        provision_vm(provision_list)
        creation_time = time.time()
        provision_time = round(creation_time - start_iteration_time, 2)
        logger.debug(
            'Time to initiate provisioning: {}'.format(provision_time))
        logger.info('{} VMs provisioned so far'.format(total_provisioned_vms))

        if provisioned_vms > cleanup_size * len(scenario['providers']):
            start_remove_time = time.time()
            if delete_provisioned_vm(provision_order[0]):
                provision_order.pop(0)
                provisioned_vms -= 1
                total_deleted_vms += 1
            deletion_time = round(time.time() - start_remove_time, 2)
            logger.debug('Time to initiate deleting: {}'.format(deletion_time))
            logger.info('{} VMs deleted so far'.format(total_deleted_vms))

        end_iteration_time = time.time()
        iteration_time = round(end_iteration_time - start_iteration_time, 2)
        elapsed_time = end_iteration_time - starttime
        logger.debug('Time to initiate provisioning and deletion: {}'.format(
            iteration_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if iteration_time < time_between_provision:
            wait_diff = time_between_provision - iteration_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0
                    and time_remaining < time_between_provision):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
            else:
                logger.warning(
                    'Time to initiate provisioning ({}) exceeded the time between '
                    'provisions ({})'.format(iteration_time, time_between_provision))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Provisionings'] = total_provisioned_vms
    quantifiers['Deleted_VMs'] = total_deleted_vms
    logger.info(
        'Provisioned {} VMs and deleted {} VMs during the scenario.'.format(
            total_provisioned_vms, total_deleted_vms))
    logger.info('Test Ending...')
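
The round-robin template selection above relies on itertools.cycle; a compact, runnable illustration with hypothetical GUIDs:

from itertools import cycle

guid_list = [('guid-1', 'provider-a'), ('guid-2', 'provider-b')]
guid_cycle = cycle(guid_list)
for i in range(5):
    guid, provider = next(guid_cycle)
    print('provision vm-{:04d} from {} on {}'.format(i + 1, guid, provider))
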
Example #34
    def end_probe(self):
        if self._result_list:
            logger.info(self)
Example #35
def main():
    try:
        # arg parse
        t1 = time.time()
        parser = argparse.ArgumentParser(
            prog=__title__,
            description=__introduction__.format(detail="Main Program"),
            epilog=__epilog__,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS)

        subparsers = parser.add_subparsers()

        parser_group_init = subparsers.add_parser(
            'init', help='Kunlun-M init before use.')
        parser_group_init.add_argument('-init',
                                       action='store_true',
                                       default=False)

        parser_group_core = subparsers.add_parser(
            'config',
            help='config for rule&tamper',
            description=__introduction__.format(
                detail='config for rule&tamper'),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS,
            add_help=True)
        parser_group_core.add_argument(
            'load',
            choices=['load', 'recover', 'loadtamper', 'retamper'],
            default=False,
            help='operate for rule&tamper')

        parser_group_scan = subparsers.add_parser(
            'scan',
            help='scan target path',
            description=__introduction__.format(detail='scan target path'),
            epilog=__scan_epilog__,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            add_help=True)
        parser_group_scan.add_argument('-t',
                                       '--target',
                                       dest='target',
                                       action='store',
                                       default='',
                                       metavar='<target>',
                                       help='file, folder')
        parser_group_scan.add_argument(
            '-f',
            '--format',
            dest='format',
            action='store',
            default='csv',
            metavar='<format>',
            choices=['html', 'json', 'csv', 'xml'],
            help='vulnerability output format (formats: %(choices)s)')
        parser_group_scan.add_argument(
            '-o',
            '--output',
            dest='output',
            action='store',
            default='',
            metavar='<output>',
            help='vulnerability output STREAM, FILE')
        parser_group_scan.add_argument('-r',
                                       '--rule',
                                       dest='special_rules',
                                       action='store',
                                       default=None,
                                       metavar='<rule_id>',
                                       help='specifies rules e.g: 1000, 1001')
        parser_group_scan.add_argument(
            '-tp',
            '--tamper',
            dest='tamper_name',
            action='store',
            default=None,
            metavar='<tamper_name>',
            help='tamper repair function e.g: wordpress')
        parser_group_scan.add_argument('-l',
                                       '--log',
                                       dest='log',
                                       action='store',
                                       default=None,
                                       metavar='<log>',
                                       help='log name')
        parser_group_scan.add_argument('-lan',
                                       '--language',
                                       dest='language',
                                       action='store',
                                       default=None,
                                       help='set target language')
        parser_group_scan.add_argument('-b',
                                       '--blackpath',
                                       dest='black_path',
                                       action='store',
                                       default=None,
                                       help='black path list')

        parser_group_scan.add_argument('-d',
                                       '--debug',
                                       dest='debug',
                                       action='store_true',
                                       default=False,
                                       help='open debug mode')

        parser_group_scan.add_argument('-uc',
                                       '--unconfirm',
                                       dest='unconfirm',
                                       action='store_false',
                                       default=False,
                                       help='show unconfirmed vuls')
        parser_group_scan.add_argument('-upc',
                                       '--unprecom',
                                       dest='unprecom',
                                       action='store_false',
                                       default=False,
                                       help='without Precompiled')

        parser_group_show = subparsers.add_parser(
            'show',
            help='show rule&tamper',
            description=__introduction__.format(detail='show rule&tamper'),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS,
            add_help=True)

        parser_group_show.add_argument('list',
                                       choices=['rule', "tamper"],
                                       action='store',
                                       default=None,
                                       help='show all rules & tanmpers')

        parser_group_show.add_argument(
            '-k',
            '--key',
            dest='listkey',
            action='store',
            default="all",
            help='key for show rule & tamper. eg: 1001/wordpress')

        parser_group_console = subparsers.add_parser(
            'console',
            help='enter console mode',
            description=__introduction__.format(detail='enter console mode'),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS,
            add_help=True)
        parser_group_console.add_argument('console',
                                          action='store_true',
                                          default=True,
                                          help='enter console mode')

        # Load the plugin argument list and help text

        parser_group_plugin = subparsers.add_parser(
            'plugin',
            help=plugins.PLUGIN_DESCS,
            description=__introduction__.format(detail=plugins.PLUGIN_DESCS),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS,
            add_help=True)
        parser_group_plugin.add_argument('plugin_name',
                                         choices=plugins.PLUGIN_LIST,
                                         default=False,
                                         help='enter plugin name')

        # web

        parser_group_web = subparsers.add_parser(
            'web',
            help='KunLun-m Web mode',
            description=__introduction__.format(detail='KunLun-m Web mode'),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS,
            add_help=True)

        parser_group_web.add_argument('-p',
                                      '--port',
                                      dest='port',
                                      action='store',
                                      default='9999',
                                      help='port for web')

        # args = parser.parse_args()
        args = parser.parse_known_args()[0]

        # log
        log(logging.INFO)

        # Plugins must be declared up front
        if hasattr(args, "plugin_name") and args.plugin_name:
            logger.info('[INIT] Load Plugin {}.'.format(args.plugin_name))
            plugins.PLUGIN_DICT[args.plugin_name](parser, parser_group_plugin)
            exit()

        # Everything else requires validation
        args = parser.parse_args()

        if hasattr(args, "debug") and args.debug:
            logger.setLevel(logging.DEBUG)
            logger.debug('[INIT] set logging level: debug')

        if hasattr(args, "init"):
            logger.info('Init Database for KunLun-M.')
            call_command('makemigrations')
            call_command('migrate')
            logger.info('Init Database Finished.')
            exit()

        if hasattr(args, "port"):
            logger.info('Start KunLun-M Web in Port: {}'.format(args.port))
            call_command('runserver', args.port)

        if hasattr(args, "load"):
            if args.load == "load":
                logger.info("[INIT] RuleCheck start.")
                RuleCheck().load()

                logger.info("[INIT] RuleCheck finished.")
                exit()

            elif args.load == "recover":
                logger.info("[INIT] RuleRecover start.")
                RuleCheck().recover()

                logger.info("[INIT] RuleRecover finished.")
                exit()

            elif args.load == "loadtamper":
                logger.info("[INIT] TamperCheck start.")
                TamperCheck().load()

                logger.info("[INIT] TamperCheck finished.")
                exit()

            elif args.load == "retamper":
                logger.info("[INIT] TamperRecover start.")
                TamperCheck().recover()

                logger.info("[INIT] TamperRecover finished.")
                exit()

            else:
                parser_group_core.print_help()
                exit()

        if hasattr(args, "list"):
            if args.list:
                logger.info("Show {}:\n{}".format(
                    args.list, show_info(args.list, args.listkey.strip(""))))
                exit()
            else:
                parser_group_show.print_help()
                exit()

        if hasattr(args, "console"):
            # check rule and tamper
            logger.info("[INIT] RuleCheck start.")
            RuleCheck().load()

            logger.info("[INIT] RuleCheck finished.")

            logger.info("[INIT] TamperCheck start.")
            TamperCheck().load()

            logger.info("[INIT] TamperCheck finished.")

            logger.info("[INIT] Enter KunLun-M console mode.")
            shell = KunlunInterpreter()
            shell.start()
            exit()

        if not hasattr(args, "target") or args.target == '':
            parser.print_help()
            exit()

        logger.debug('[INIT] start Scan Task...')

        # new scan task
        task_name = get_mainstr_from_filename(args.target)
        s = cli.check_scantask(task_name=task_name,
                               target_path=args.target,
                               parameter_config=sys.argv)

        if s.is_finished:
            logger.info("[INIT] Finished Task.")
            exit()

        # Record the task id
        sid = str(s.id)
        get_scan_id()

        if hasattr(args, "log") and args.log:
            logger.info("[INIT] New Log file {}.log .".format(args.log))
            log_add(logging.INFO, args.log)
        else:
            logger.info("[INIT] New Log file ScanTask_{}.log .".format(sid))
            log_add(logging.INFO, "ScanTask_{}".format(sid))

        if hasattr(args, "debug") and args.debug:
            logger.setLevel(logging.DEBUG)
            logger.debug('[INIT] set logging level: debug')

        data = {'status': 'running', 'report': ''}
        Running(sid).status(data)

        cli.start(args.target, args.format, args.output, args.special_rules,
                  sid, args.language, args.tamper_name, args.black_path,
                  args.unconfirm, args.unprecom)

        s.is_finished = True
        s.save()
        t2 = time.time()
        logger.info('[INIT] Done! Consume Time:{ct}s'.format(ct=t2 - t1))

    except KeyboardInterrupt:
        logger.warning("[KunLun-M] Stop KunLun-M.")
        sys.exit(0)

    except Exception as e:
        exc_msg = traceback.format_exc()
        logger.warning(exc_msg)
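
The command-line surface above is standard argparse subparsers dispatched via hasattr checks; a stripped-down, runnable sketch of the same layout:

import argparse

parser = argparse.ArgumentParser(prog='tool')
sub = parser.add_subparsers()

scan = sub.add_parser('scan', help='scan target path')
scan.add_argument('-t', '--target', dest='target', default='')
scan.add_argument('-d', '--debug', action='store_true', default=False)

show = sub.add_parser('show', help='show rule&tamper')
show.add_argument('list', choices=['rule', 'tamper'])

args = parser.parse_args(['scan', '-t', '/tmp/src', '-d'])
if hasattr(args, 'target'):
    print('scanning', args.target, 'debug:', args.debug)
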
Example #36
    def switch_to_console(self):
        """Switch focus to console tab/window."""
        logger.info("Switching to console: window handle = {}".format(
            self.console_handle))
        self.selenium.switch_to_window(self.console_handle)
Example #37
    def switch_to_appliance(self):
        """Switch focus to appliance tab/window."""
        logger.info("Switching to appliance: window handle = {}".format(
            self.appliance_handle))
        self.selenium.switch_to_window(self.appliance_handle)
Example #38
    def execute_task(self, username, taskid, instanceid, envs, lxcname,
                     pkgpath, command, timeout, outpath, ip, token,
                     mount_info):
        lxcfspath = "/var/lib/lxc/" + lxcname + "/rootfs/"
        scriptname = "batch_job.sh"
        try:
            scriptfile = open(lxcfspath + "root/" + scriptname, "w")
            scriptfile.write("#!/bin/bash\n")
            scriptfile.write("cd " + str(pkgpath) + "\n")
            scriptfile.write(command)
            scriptfile.close()
        except Exception as err:
            logger.error(traceback.format_exc())
            logger.error(
                "Failed to write script file with taskid(%s) instanceid(%s)" %
                (str(taskid), str(instanceid)))
        else:
            try:
                job_id = taskid.split('_')[1]
            except Exception as e:
                logger.error(traceback.format_exc())
                job_id = "_none"
            jobdir = "batch_" + job_id
            logdir = "%s/global/users/%s/data/" % (self.fspath,
                                                   username) + jobdir
            if not os.path.exists(logdir):
                logger.info("Directory:%s not exists, create it." % logdir)
                os.mkdir(logdir)
            stdoutname = str(taskid) + "_" + str(instanceid) + "_stdout.txt"
            stderrname = str(taskid) + "_" + str(instanceid) + "_stderr.txt"
            try:
                stdoutfile = open(logdir + "/" + stdoutname, "w")
                stderrfile = open(logdir + "/" + stderrname, "w")
                logger.info("Create stdout(%s) and stderr(%s) file to log" %
                            (stdoutname, stderrname))
            except Exception as e:
                logger.error(traceback.format_exc())
                stdoutfile = None
                stderrfile = None

            cmd = "lxc-attach -n " + lxcname
            for envkey, envval in envs.items():
                cmd = cmd + " -v %s=%s" % (envkey, envval)
            cmd = cmd + " -- /bin/bash \"" + "/root/" + scriptname + "\""
            logger.info('run task with command - %s' % cmd)
            p = subprocess.Popen(cmd,
                                 stdout=stdoutfile,
                                 stderr=stderrfile,
                                 shell=True)
            #logger.info(p)
            if timeout == 0:
                to = MAX_RUNNING_TIME
            else:
                to = timeout
            while p.poll() is None and to > 0:
                time.sleep(min(2, to))
                to -= 2
            if p.poll() is None:
                p.kill()
                logger.info(
                    "Running time(%d) is out. Task(%s-%s-%s) will be killed." %
                    (timeout, str(taskid), str(instanceid), token))
                self.add_msg(taskid, username, instanceid, rpc_pb2.TIMEOUT,
                             token, "Running time is out.")
            else:
                [success1,
                 msg1] = self.write_output(lxcname, jobdir + "/" + stdoutname,
                                           outpath[0])
                [success2,
                 msg2] = self.write_output(lxcname, jobdir + "/" + stderrname,
                                           outpath[1])
                if not success1 or not success2:
                    if not success1:
                        msg = msg1
                    else:
                        msg = msg2
                    logger.info("Output error on Task(%s-%s-%s)." %
                                (str(taskid), str(instanceid), token))
                    self.add_msg(taskid, username, instanceid,
                                 rpc_pb2.OUTPUTERROR, token, msg)
                else:
                    if p.poll() == 0:
                        logger.info("Task(%s-%s-%s) completed." %
                                    (str(taskid), str(instanceid), token))
                        self.add_msg(taskid, username, instanceid,
                                     rpc_pb2.COMPLETED, token, "")
                    else:
                        logger.info("Task(%s-%s-%s) failed." %
                                    (str(taskid), str(instanceid), token))
                        self.add_msg(taskid, username, instanceid,
                                     rpc_pb2.FAILED, token, "")

        container = lxc.Container(lxcname)
        if container.stop():
            logger.info("stop container %s success" % lxcname)
        else:
            logger.error("stop container %s failed" % lxcname)

        logger.info("deleting container:%s" % lxcname)
        if self.imgmgr.deleteFS(lxcname):
            logger.info("delete container %s success" % lxcname)
        else:
            logger.error("delete container %s failed" % lxcname)

        logger.info("release ip address %s" % ip)
        self.release_ip(ip)
        self.release_gpu_device(lxcname)

        #umount oss
        self.umount_oss("%s/global/users/%s/oss" % (self.fspath, username),
                        mount_info)
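
The manual poll-and-kill loop in execute_task can also be expressed with subprocess's built-in timeout support; a sketch with a hypothetical command and limit:

import subprocess

def run_with_timeout(cmd, timeout):
    p = subprocess.Popen(cmd, shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        out, err = p.communicate(timeout=timeout)
        return p.returncode, out, err
    except subprocess.TimeoutExpired:
        p.kill()
        p.communicate()  # reap the killed process
        raise

# run_with_timeout('echo hello', timeout=5)
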
Example #39
    def release_ip(self, ipstr):
        self.lock.acquire()
        ipnum = ip_to_int(ipstr.split('/')[0]) - self.ipbase
        self.free_ips.append(ipnum)
        logger.info(str(self.free_ips))
        self.lock.release()
Example #40
    def __init__(self):
        rpc_pb2_grpc.WorkerServicer.__init__(self)
        etcdaddr = env.getenv("ETCD")
        logger.info("using ETCD %s" % etcdaddr)

        clustername = env.getenv("CLUSTER_NAME")
        logger.info("using CLUSTER_NAME %s" % clustername)

        # init etcdlib client
        try:
            self.etcdclient = etcdlib.Client(etcdaddr, prefix=clustername)
        except Exception:
            logger.error(
                "connect etcd failed, maybe etcd address not correct...")
            sys.exit(1)
        else:
            logger.info("etcd connected")

        # get master ip and report port
        [success, masterip] = self.etcdclient.getkey("service/master")
        if not success:
            logger.error("Fail to get master ip address.")
            sys.exit(1)
        else:
            self.master_ip = masterip
            logger.info("Get master ip address: %s" % (self.master_ip))
        self.master_port = env.getenv('BATCH_MASTER_PORT')

        self.imgmgr = imagemgr.ImageMgr()
        self.fspath = env.getenv('FS_PREFIX')
        self.confpath = env.getenv('DOCKLET_CONF')

        self.taskmsgs = []
        self.msgslock = threading.Lock()
        self.report_interval = 2

        self.lock = threading.Lock()
        self.mount_lock = threading.Lock()
        self.cons_gateway = env.getenv('BATCH_GATEWAY')
        self.cons_ips = env.getenv('BATCH_NET')
        logger.info("Batch gateway ip address %s" % self.cons_gateway)
        logger.info("Batch ip pools %s" % self.cons_ips)

        self.cidr = 32 - int(self.cons_ips.split('/')[1])
        self.ipbase = ip_to_int(self.cons_ips.split('/')[0])
        self.free_ips = []
        for i in range(2, (1 << self.cidr) - 1):
            self.free_ips.append(i)
        logger.info("Free ip addresses pool %s" % str(self.free_ips))

        self.gpu_lock = threading.Lock()
        self.gpu_status = {}
        gpus = gputools.get_gpu_status()
        for gpu in gpus:
            self.gpu_status[gpu['id']] = ""

        self.start_report()
        logger.info('TaskController init success')
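
The free-IP pool built in the loop above is just the host numbers inside the BATCH_NET block; the stdlib ipaddress module does the same arithmetic (the network value below is hypothetical):

import ipaddress

net = ipaddress.ip_network('10.0.2.0/28')
cidr = 32 - net.prefixlen
free_ips = list(range(2, (1 << cidr) - 1))  # mirrors the loop above
print(cidr, free_ips)
print([str(h) for h in net.hosts()][1:])  # same hosts, skipping .1 (the gateway)
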
Example #41
def _test_vm_removal():
    logger.info("Testing for VM removal permission")
    vm_name = vms.get_first_vm()
    logger.debug("VM " + vm_name + " selected")
    vms.remove(vm_name, cancel=True)
Example #42
    def process_task(self, request, context):
        logger.info('execute task with parameter: ' + str(request))
        taskid = request.id
        instanceid = request.instanceid

        # get config from request
        command = request.parameters.command.commandLine  #'/root/getenv.sh'  #parameter['Parameters']['Command']['CommandLine']
        #envs = {'MYENV1':'MYVAL1', 'MYENV2':'MYVAL2'} #parameters['Parameters']['Command']['EnvVars']
        pkgpath = request.parameters.command.packagePath
        envs = request.parameters.command.envVars
        envs['taskid'] = str(taskid)
        envs['instanceid'] = str(instanceid)
        image = {}
        image['name'] = request.cluster.image.name
        if request.cluster.image.type == rpc_pb2.Image.PRIVATE:
            image['type'] = 'private'
        elif request.cluster.image.type == rpc_pb2.Image.PUBLIC:
            image['type'] = 'public'
        else:
            image['type'] = 'base'
        image['owner'] = request.cluster.image.owner
        username = request.username
        token = request.token
        lxcname = '%s-batch-%s-%s-%s' % (username, taskid, str(instanceid),
                                         token)
        instance_type = request.cluster.instance
        mount_list = request.cluster.mount
        outpath = [
            request.parameters.stdoutRedirectPath,
            request.parameters.stderrRedirectPath
        ]
        timeout = request.timeout
        gpu_need = int(request.cluster.instance.gpu)
        reused = request.reused

        #create container
        [success, ip] = self.create_container(instanceid, username, image,
                                              lxcname, instance_type)
        if not success:
            return rpc_pb2.Reply(status=rpc_pb2.Reply.REFUSED, message=ip)

        #mount oss
        self.mount_oss("%s/global/users/%s/oss" % (self.fspath, username),
                       mount_list)
        conffile = open("/var/lib/lxc/%s/config" % lxcname, 'a+')
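        # NOTE: `rootfs` below is assumed to be the container's rootfs path
        # (e.g. '/var/lib/lxc/<name>/rootfs'), defined elsewhere in this class.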
        mount_str = "lxc.mount.entry = %s/global/users/%s/oss/%s %s/root/oss/%s none bind,rw,create=dir 0 0"
        for mount in mount_list:
            conffile.write("\n" + mount_str %
                           (self.fspath, username, mount.remotePath, rootfs,
                            mount.remotePath))
        conffile.close()

        logger.info("Start container %s..." % lxcname)
        #container = lxc.Container(lxcname)
        ret = subprocess.run('lxc-start -n %s' % lxcname,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             shell=True)
        if ret.returncode != 0:
            logger.error('start container %s failed' % lxcname)
            self.release_ip(ip)
            self.imgmgr.deleteFS(lxcname)
            return rpc_pb2.Reply(status=rpc_pb2.Reply.REFUSED,
                                 message="Can't start the container")

        logger.info('start container %s success' % lxcname)

        #add GPU
        [success, msg] = self.add_gpu_device(lxcname, gpu_need)
        if not success:
            logger.error("Fail to add gpu device. " + msg)
            # the python-lxc Container object is commented out above, so stop
            # the container the same way it was started
            subprocess.run('lxc-stop -k -n %s' % lxcname,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT,
                           shell=True)
            self.release_ip(ip)
            self.imgmgr.deleteFS(lxcname)
            return rpc_pb2.Reply(status=rpc_pb2.Reply.REFUSED,
                                 message="Fail to add gpu device. " + msg)

        thread = threading.Thread(target=self.execute_task,
                                  args=(username, taskid, instanceid, envs,
                                        lxcname, pkgpath, command, timeout,
                                        outpath, ip, token, mount_list))
        thread.daemon = True
        thread.start()

        return rpc_pb2.Reply(status=rpc_pb2.Reply.ACCEPTED, message="")
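
# A minimal sketch of how the master side might invoke process_task over gRPC.
# The stub name WorkerStub and the worker_addr value are assumptions based on
# the WorkerServicer base class above, not code from the project:
import grpc

def submit_task(worker_addr, request):
    channel = grpc.insecure_channel(worker_addr)  # e.g. '10.16.0.2:50051'
    stub = rpc_pb2_grpc.WorkerStub(channel)
    reply = stub.process_task(request)            # returns an rpc_pb2.Reply
    return reply.status == rpc_pb2.Reply.ACCEPTED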
Exemple #43
0
    def train_one_epoch(self, epoch):
        epoch_start_time = time.time()
        for train_data in self.train_loader:
            iter_start_time = time.time()
            self.model.train(train_data)
            self.global_step += 1
            if self.global_step % self.print_freq == 0:
                errors = self.model.get_current_errors()
                t_comp = time.time() - iter_start_time
                message = 'experiment:%s, (epoch: %d, steps: %d, time: %.3f) ' % (
                    self.experiment_name, epoch, self.global_step, t_comp)
                for key, value in errors.items():
                    message += '%s: %.5f ' % (key, value)
                    self.writer.add_scalar(key, errors[key], self.global_step)
                logger.info(message)
            # if self.global_step % self.evaluate_freq == 0:
            #     evaluate_errors = self.model.get_evaluate_errors()
            #     t_comp = time.time() - iter_start_time
            #     message = 'experiment:%s, (epoch: %d, steps: %d, time: %.3f) ' % (self.experiment_name, epoch,
            #                                                                       self.global_step, t_comp)
            #     for key, value in evaluate_errors.items():
            #         message += '%s: %.5f ' % (key, value)
            #         self.writer.add_scalar(key, evaluate_errors[key], self.global_step)
            #     logger.info(message)
            if self.global_step % self.display_freq == 0:
                visual_input = self.model.get_current_visuals()
                grid = torchvision.utils.make_grid(list(visual_input), nrow=3)
                img_name = self.model.img_name
                self.writer.add_image(
                    'experiment_{}_train_epoch_{}_step_{}_img_name_{}'.format(
                        self.experiment_name, epoch, self.global_step,
                        img_name), grid, self.global_step)
            if self.global_step % self.evaluate_freq == 0:
                self.model.set_mode(mode="eval")
                fake_b = self.model.inference(train_data["A"])
                b, c, h, w = fake_b.size()
                input_image = (train_data["A"].data.cpu()[0, :, :, :] + 1) / 2.0
                fake_b = ((fake_b.data.cpu()[0, :, :, :] + 1) / 2.0).expand(
                    3, h, w)
                real_b = ((train_data["B"].data.cpu()[0, :, :, :] + 1) /
                          2.0).expand(3, h, w)
                visuals = [input_image, fake_b, real_b]
                grid = torchvision.utils.make_grid(visuals, nrow=3)
                img_name = self.model.img_name
                self.writer.add_image(
                    'experiment_{}_eval_epoch_{}_step_{}_img_name_{}'.format(
                        self.experiment_name, epoch, self.global_step,
                        img_name), grid, self.global_step + 1)
                self.model.set_mode()
            if self.save_epoch_freq == 0 and self.save_step_freq > 0 and self.global_step % self.save_step_freq == 0:
                logger.info('saving the model epoch:{}, step:{}'.format(
                    epoch, self.global_step))
                self.model.save_networks(epoch)
        if self.save_epoch_freq > 0 and epoch % self.save_epoch_freq == 0:
            logger.info(
                'saving the model at the end of epoch:{}'.format(epoch))
            self.model.save_networks(epoch)
        logger.info('End of epoch {} / {} \t Time Taken: {} sec'.format(
            epoch, self.niter + self.niter_decay,
            time.time() - epoch_start_time))
        self.model.update_learning_rate()
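
    # A minimal driver for the method above (a sketch; it assumes the trainer
    # exposes the same niter/niter_decay attributes used in the epoch log line):
    def train(self):
        for epoch in range(1, self.niter + self.niter_decay + 1):
            self.train_one_epoch(epoch)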
Exemple #44
0
def reset_request_queue():
    logging.info('reset api_taxzz_crawled_record [state=-1]')
    mongo_db['api_taxzz_crawled_record'].update_many({"state": 1},
                                                     {"$set": {
                                                         "state": -1
                                                     }})
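
# Counterpart sketch (an assumption about the crawl workflow, not project
# code): a worker atomically claims one reset record by flipping its state
# back to 1 before re-crawling it.
def claim_request():
    return mongo_db['api_taxzz_crawled_record'].find_one_and_update(
        {"state": -1}, {"$set": {"state": 1}})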
Exemple #45
0
    def connect_direct_lun_to_appliance(self, vm_name, disconnect):
        """Connects or disconnects the direct lun disk to an appliance.

        Args:
            vm_name: Name of the VM with the appliance.
            disconnect: If False, it will connect, otherwise it will disconnect
        """
        if "provider_key" in self.kwargs:
            provider_name = self.kwargs["provider_key"]
        else:
            raise TypeError("provider_key not supplied to the provider.")
        # check that the vm exists on the rhev provider, get the ip address if so
        try:
            vm = self.api.vms.get(vm_name)
            ip_addr = self.get_ip_address(vm_name)
        except Exception:
            raise NameError("{} not found on {}".format(
                vm_name, provider_name))

        # check for direct lun definition on provider's cfme_data.yaml
        if 'direct_lun' not in self.kwargs:
            raise ValueError(
                "direct_lun key not in cfme_data.yaml under provider {}, exiting..."
                .format(provider_name))

        # does the direct lun exist
        prov_data = self.kwargs
        dlun_name = prov_data['direct_lun']['name']
        dlun = self.api.disks.get(dlun_name)
        if dlun is None:

            #    Create the iSCSI storage connection:
            sc = params.StorageConnection()
            sc.set_address(prov_data['direct_lun']['ip_address'])
            sc.set_type("iscsi")
            sc.set_port(int(prov_data['direct_lun']['port']))
            sc.set_target(prov_data['direct_lun']['iscsi_target'])

            #    Add the direct LUN disk:
            lu = params.LogicalUnit()
            lu.set_id(prov_data['direct_lun']['iscsi_target'])
            lu.set_address(sc.get_address())
            lu.set_port(sc.get_port())
            lu.set_target(sc.get_target())
            storage = params.Storage()
            storage.set_type("iscsi")
            storage.set_logical_unit([lu])
            disk = params.Disk()
            disk.set_name(dlun_name)
            disk.set_interface("virtio")
            disk.set_type("iscsi")
            disk.set_format("raw")
            disk.set_lun_storage(storage)
            disk.set_shareable(True)
            disk = self.api.disks.add(disk)
            dlun = self.api.disks.get(dlun_name)

        # add it
        if not disconnect:
            retries = 0
            while retries < 3:
                retries += 1
                direct_lun = params.Disk(id=dlun.id)
                try:
                    # is the disk present and active?
                    vm_disk_list = vm.get_disks().list()
                    for vm_disk in vm_disk_list:
                        if vm_disk.name == dlun_name:
                            if vm_disk.active:
                                return
                            else:
                                vm_disk.activate()
                                return

                    # if not present, add it and activate, then leave the retry
                    # loop so the pvscan below picks up the new device
                    direct_lun = params.Disk(id=dlun.id)
                    added_lun = vm.disks.add(direct_lun)
                    added_lun.activate()
                    break
                except Exception as e:
                    logger.error("Exception caught: %s", str(e))
                    if retries == 3:
                        logger.error("exhausted retries and giving up")
                        raise
                    else:
                        logger.info(
                            "sleeping for 30s and retrying to connect direct lun"
                        )
                        time.sleep(30)

            # Init SSH client, run pvscan on the appliance
            ssh_kwargs = {
                'username': conf.credentials['ssh']['username'],
                'password': conf.credentials['ssh']['password'],
                'hostname': ip_addr
            }
            client = SSHClient(**ssh_kwargs)
            status, out = client.run_command('pvscan', timeout=5 * 60)

        # remove it
        else:
            vm_dlun = vm.disks.get(name=dlun_name)
            if vm_dlun is None:
                return
            else:
                detach = params.Action(detach=True)
                vm_dlun.delete(action=detach)
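
# Usage sketch (hypothetical provider key and VM name): attach the shared
# direct LUN to an appliance VM, then detach it again.
#
#   provider = get_mgmt('rhevm-qe')   # management system wrapping the RHEV API
#   provider.connect_direct_lun_to_appliance('cfme-appliance-1', disconnect=False)
#   provider.connect_direct_lun_to_appliance('cfme-appliance-1', disconnect=True)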
Exemple #46
0
def _test_vm_provision():
    logger.info("Checking for provision access")
    navigate_to(vms.Vm, 'VMsOnly')
    vms.lcl_btn("Provision VMs")
Exemple #47
0
def main():
    try:
        # arg parse
        t1 = time.time()
        parser = argparse.ArgumentParser(
            prog=__title__,
            description=__introduction__.format(detail="Main Program"),
            epilog=__epilog__,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS)

        subparsers = parser.add_subparsers()

        # init
        parser_group_init = subparsers.add_parser(
            'init', help='Kunlun-M init before use.')
        parser_group_init.add_argument('init',
                                       choices=['initialize', 'checksql'],
                                       default='init',
                                       help='check and migrate SQL')
        parser_group_init.add_argument(
            'appname',
            choices=['index', 'dashboard', 'backend', 'api'],
            nargs='?',
            default='index',
            help='Check App name')
        parser_group_init.add_argument('migrationname',
                                       default='migrationname',
                                       nargs='?',
                                       help='Check migration name')

        # load config into database
        parser_group_core = subparsers.add_parser(
            'config',
            help='config for rule&tamper',
            description=__introduction__.format(
                detail='config for rule&tamper'),
            epilog=__database_epilog__,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS,
            add_help=True)
        parser_group_core.add_argument(
            'load',
            choices=['load', 'recover', 'loadtamper', 'retamper'],
            default=False,
            help='operate for rule&tamper')

        parser_group_scan = subparsers.add_parser(
            'scan',
            help='scan target path',
            description=__introduction__.format(detail='scan target path'),
            epilog=__scan_epilog__,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            add_help=True)
        parser_group_scan.add_argument('-t',
                                       '--target',
                                       dest='target',
                                       action='store',
                                       default='',
                                       metavar='<target>',
                                       help='file, folder')
        parser_group_scan.add_argument(
            '-f',
            '--format',
            dest='format',
            action='store',
            default='csv',
            metavar='<format>',
            choices=['html', 'json', 'csv', 'xml'],
            help='vulnerability output format (formats: %(choices)s)')
        parser_group_scan.add_argument(
            '-o',
            '--output',
            dest='output',
            action='store',
            default='',
            metavar='<output>',
            help='vulnerability output STREAM, FILE')
        parser_group_scan.add_argument('-r',
                                       '--rule',
                                       dest='special_rules',
                                       action='store',
                                       default=None,
                                       metavar='<rule_id>',
                                       help='specifies rules e.g: 1000, 1001')
        parser_group_scan.add_argument(
            '-tp',
            '--tamper',
            dest='tamper_name',
            action='store',
            default=None,
            metavar='<tamper_name>',
            help='tamper repair function e.g: wordpress')
        parser_group_scan.add_argument('-l',
                                       '--log',
                                       dest='log',
                                       action='store',
                                       default=None,
                                       metavar='<log>',
                                       help='log name')
        parser_group_scan.add_argument('-lan',
                                       '--language',
                                       dest='language',
                                       action='store',
                                       default=None,
                                       help='set target language')
        parser_group_scan.add_argument('-b',
                                       '--blackpath',
                                       dest='black_path',
                                       action='store',
                                       default=None,
                                       help='black path list')

        # for api
        parser_group_scan.add_argument('-a',
                                       '--api',
                                       dest='api',
                                       action='store_true',
                                       default=False,
                                       help='without any output for shell')
        parser_group_scan.add_argument('-y',
                                       '--yes',
                                       dest='yes',
                                       action='store_true',
                                       default=False,
                                       help='without any output for shell')
        parser_group_scan.add_argument('--origin',
                                       dest='origin',
                                       action='store',
                                       default=None,
                                       metavar='<origin>',
                                       help='project origin')
        parser_group_scan.add_argument('-des',
                                       '--description',
                                       dest='description',
                                       action='store',
                                       default=None,
                                       metavar='<description>',
                                       help='project description')

        # for log
        parser_group_scan.add_argument('-d',
                                       '--debug',
                                       dest='debug',
                                       action='store_true',
                                       default=False,
                                       help='open debug mode')

        # for scan profile
        parser_group_scan.add_argument('-uc',
                                       '--unconfirm',
                                       dest='unconfirm',
                                       action='store_true',
                                       default=False,
                                       help='show unconfirmed vuls')
        parser_group_scan.add_argument('-upc',
                                       '--unprecom',
                                       dest='unprecom',
                                       action='store_true',
                                       default=False,
                                       help='without Precompiled')

        # for vendor vuln scan
        parser_group_scan.add_argument(
            '--without-vendor',
            dest='without_vendor',
            action='store_true',
            default=False,
            help='without scan vendor vuln (default open)')

        # show for rule & tamper
        parser_group_show = subparsers.add_parser(
            'show',
            help='show rule&tamper',
            description=__introduction__.format(detail='show rule&tamper'),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS,
            add_help=True)

        parser_group_show.add_argument('list',
                                       choices=['rule', "tamper"],
                                       action='store',
                                       default=None,
                                       help='show all rules & tampers')

        parser_group_show.add_argument(
            '-k',
            '--key',
            dest='listkey',
            action='store',
            default="all",
            help='key for show rule & tamper. eg: 1001/wordpress')

        # for search vendor
        parser_group_search = subparsers.add_parser(
            'search',
            help='search project by vendor/path/...',
            description=__introduction__.format(
                detail='search project by vendor/path/...'),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS,
            add_help=True)

        parser_group_search.add_argument('stype',
                                         choices=['vendor'],
                                         default='vendor',
                                         help='search type')

        parser_group_search.add_argument('keyword_name',
                                         default='flask',
                                         nargs='?',
                                         help='keyword name for search')

        parser_group_search.add_argument('keyword_value',
                                         default='1.0.0',
                                         nargs='?',
                                         help='keyword value for search')

        parser_group_search.add_argument('--with-vuls',
                                         dest='with_vuls',
                                         action='store_true',
                                         default=False,
                                         help='with vuls scan (default False)')

        # console
        parser_group_console = subparsers.add_parser(
            'console',
            help='enter console mode',
            description=__introduction__.format(detail='enter console mode'),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS,
            add_help=True)
        parser_group_console.add_argument('console',
                                          action='store_true',
                                          default=True,
                                          help='enter console mode')

        # load the plugin argument list and its help text

        parser_group_plugin = subparsers.add_parser(
            'plugin',
            help=plugins.PLUGIN_DESCS,
            description=__introduction__.format(detail=plugins.PLUGIN_DESCS),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS,
            add_help=True)
        parser_group_plugin.add_argument('plugin_name',
                                         choices=plugins.PLUGIN_LIST,
                                         default=False,
                                         help='enter plugin name')

        # web

        parser_group_web = subparsers.add_parser(
            'web',
            help='KunLun-m Web mode',
            description=__introduction__.format(detail='KunLun-m Web mode'),
            formatter_class=argparse.RawDescriptionHelpFormatter,
            usage=argparse.SUPPRESS,
            add_help=True)

        parser_group_web.add_argument('-p',
                                      '--port',
                                      dest='port',
                                      action='store',
                                      default='9999',
                                      help='port for web')

        # args = parser.parse_args()
        args = parser.parse_known_args()[0]

        # log
        log(logging.INFO)

        # plugins must be declared before full argument parsing
        if hasattr(args, "plugin_name") and args.plugin_name:
            logger.info('[INIT] Load Plugin {}.'.format(args.plugin_name))
            plugins.PLUGIN_DICT[args.plugin_name](parser, parser_group_plugin)
            exit()

        # everything else requires full argument validation
        args = parser.parse_args()

        if hasattr(args, "debug") and args.debug:
            logger.setLevel(logging.DEBUG)

        if hasattr(args, "init"):
            if args.init == 'checksql':
                logger.info('Show migrate sql.')
                call_command('sqlmigrate', args.appname, args.migrationname)
            else:
                logger.info('Init Database for KunLun-M.')
                call_command('makemigrations')
                call_command('migrate')
                logger.info('Init Database Finished.')
            exit()

        if hasattr(args, "port"):
            logger.info('Start KunLun-M Web in Port: {}'.format(args.port))
            call_command('runserver', args.port)

        if hasattr(args, "load"):
            if args.load == "load":
                logger.info("[INIT] RuleCheck start.")
                RuleCheck().load()

                logger.info("[INIT] RuleCheck finished.")
                exit()

            elif args.load == "recover":
                logger.info("[INIT] RuleRecover start.")
                RuleCheck().recover()

                logger.info("[INIT] RuleRecover finished.")
                exit()

            elif args.load == "loadtamper":
                logger.info("[INIT] TamperCheck start.")
                TamperCheck().load()

                logger.info("[INIT] TamperCheck finished.")
                exit()

            elif args.load == "retamper":
                logger.info("[INIT] TamperRecover start.")
                TamperCheck().recover()

                logger.info("[INIT] TamperRecover finished.")
                exit()

            else:
                parser_group_core.print_help()
                exit()

        if hasattr(args, "list"):
            if args.list:
                logger.info("Show {}:\n{}".format(
                    args.list, show_info(args.list, args.listkey.strip(""))))
                exit()
            else:
                parser_group_show.print_help()
                exit()

        if hasattr(args, "stype"):
            # search and show vuls
            if args.stype:
                logger.info("[SEARCH] Search Project by {} in {} {}".format(
                    args.stype, args.keyword_name, args.keyword_value))
                cli.search_project(args.stype, args.keyword_name,
                                   args.keyword_value, args.with_vuls)
                exit()
            else:
                parser_group_show.print_help()
                exit()

        if hasattr(args, "console"):
            # check rule and tamper
            logger.info("[INIT] RuleCheck start.")
            RuleCheck().load()

            logger.info("[INIT] RuleCheck finished.")

            logger.info("[INIT] TamperCheck start.")
            TamperCheck().load()

            logger.info("[INIT] TamperCheck finished.")

            logger.info("[INIT] Enter KunLun-M console mode.")
            shell = KunlunInterpreter()
            shell.start()
            exit()

        if not hasattr(args, "target") or args.target == '':
            parser.print_help()
            exit()

        # for api close log
        if hasattr(args, "api") and args.api:
            log_rm()

        logger.debug('[INIT] start Scan Task...')
        logger.debug('[INIT] set logging level: {}'.format(logger.level))

        # check for project data
        if hasattr(args, "origin") and args.origin:
            origin = args.origin
        else:
            origin = "File in {}".format(args.target)

        # new scan task
        task_name = get_mainstr_from_filename(args.target)
        s = cli.check_scantask(task_name=task_name,
                               target_path=args.target,
                               parameter_config=sys.argv,
                               project_origin=origin,
                               project_des=args.description,
                               auto_yes=args.yes)

        if s.is_finished:
            logger.info("[INIT] Finished Task.")
            exit()

        # record the task id
        sid = str(s.id)
        task_id = get_scan_id()

        #  for api
        if hasattr(args, "api") and args.api:
            print("TaskID: {}".format(task_id))
        else:
            logger.info("TaskID: {}".format(task_id))

        if hasattr(args, "log") and args.log:
            logger.info("[INIT] New Log file {}.log .".format(args.log))
            log_name = args.log
        else:
            logger.info("[INIT] New Log file ScanTask_{}.log .".format(sid))
            log_name = "ScanTask_{}".format(sid)

        log_add(logging.DEBUG, log_name)

        if hasattr(args, "without_vendor"):
            # shared setting
            import Kunlun_M.settings as settings
            settings.WITH_VENDOR = False if args.without_vendor else settings.WITH_VENDOR
            logger.info("[INIT] Vendor Vuls Scan Status: {}".format(
                settings.WITH_VENDOR))

        data = {'status': 'running', 'report': ''}
        Running(sid).status(data)

        cli.start(args.target, args.format, args.output, args.special_rules,
                  sid, args.language, args.tamper_name, args.black_path,
                  args.unconfirm, args.unprecom)

        s.is_finished = True
        s.save()
        t2 = time.time()

        # if uploading logs to a remote server is enabled, upload the log
        if IS_OPEN_REMOTE_SERVER:
            log_path = os.path.join(LOGS_PATH, "{}.log".format(log_name))

            upload_log(log_path)

        logger.info('[INIT] Done! Consume Time:{ct}s'.format(ct=t2 - t1))

    except KeyboardInterrupt:
        logger.warning("[KunLun-M] Stop KunLun-M.")
        sys.exit(0)

    except Exception as e:
        exc_msg = traceback.format_exc()
        logger.warning(exc_msg)
Exemple #48
0
def provider_by_type(metafunc, provider_types, *fields, **options):
    """Get the values of the named field keys from ``cfme_data.get('management_systems', {})``

    Args:
        provider_types: A list of provider types to include. If None, all providers are considered
        *fields: Names of keys in an individual provider dict whose values will be returned when
            used as test function arguments
        **options: Explained below

    The ``**options`` available are defined below:

    * ``required_fields``: when fields passed are not present, skip them
    * ``choose_random``: choose a single provider from the list
    * ``template_location``: Specification of where a required template lies in the yaml. If it
      is not found in the provider, a warning is printed and the test is not collected. The spec
      is a tuple or list where each item is a key to the next field (str or int).

    The following test function arguments are special:

        ``provider``
            the provider's CRUD object, either a :py:class:`cfme.cloud.provider.Provider`
            or a :py:class:`cfme.infrastructure.provider.Provider`

    Returns:
        A tuple of ``(argnames, argvalues, idlist)`` for use in a pytest_generate_tests hook, or
        with the :py:func:`parametrize` helper.

    Usage:

        # In the function itself
        def pytest_generate_tests(metafunc):
            argnames, argvalues, idlist = testgen.provider_by_type(
                metafunc, ['openstack', 'ec2'],
                'type', 'name', 'credentials', 'provider', 'hosts'
            )
            metafunc.parametrize(argnames, argvalues, ids=idlist, scope='module')

        # Using the parametrize wrapper
        pytest_generate_tests = testgen.parametrize(testgen.provider_by_type, ['openstack', 'ec2'],
            'type', 'name', 'credentials', 'provider', 'hosts', scope='module')

    Note:

        Using the default 'function' scope, each test will be run individually for each provider
        before moving on to the next test. To group all tests related to single provider together,
        parametrize tests in the 'module' scope.

    Note:

        testgen for providers now requires the usage of test_flags for collection to work.
        Please visit http://cfme-tests.readthedocs.org/guides/documenting.html#documenting-tests
        for more details.

    """

    metafunc.function = pytest.mark.uses_testgen()(metafunc.function)

    argnames = list(fields)
    argvalues = []
    idlist = []
    template_location = options.pop("template_location", None)

    if 'provider' in metafunc.fixturenames and 'provider' not in argnames:
        argnames.append('provider')

    for provider, data in cfme_data.get('management_systems', {}).iteritems():
        try:
            prov_obj = get_crud(provider)
        except UnknownProviderType:
            continue

        skip = False
        if provider_types is not None and prov_obj.type not in provider_types:
            # Skip unwanted types
            continue

        # Test to see the test has meta data, if it does and that metadata contains
        # a test_flag kwarg, then check to make sure the provider contains that test_flag
        # if not, do not collect the provider for this particular test.

        # Obtain the tests flags
        meta = getattr(metafunc.function, 'meta', None)

        test_flags = getattr(meta, 'kwargs', {}) \
            .get('from_docs', {}).get('test_flag', '').split(',')
        if test_flags != ['']:
            test_flags = [flag.strip() for flag in test_flags]

            defined_flags = cfme_data.get('test_flags', '').split(',')
            defined_flags = [flag.strip() for flag in defined_flags]

            excluded_flags = data.get('excluded_test_flags', '').split(',')
            excluded_flags = [flag.strip() for flag in excluded_flags]

            allowed_flags = set(defined_flags) - set(excluded_flags)

            if set(test_flags) - allowed_flags:
                logger.info("Skipping Provider {} for test {} in module {} because "
                    "it does not have the right flags, "
                    "{} does not contain {}".format(provider,
                                                    metafunc.function.func_name,
                                                    metafunc.function.__module__,
                                                    list(allowed_flags),
                                                    list(set(test_flags) - allowed_flags)))
                continue

        try:
            if prov_obj.type == "scvmm" and version.current_version() < "5.3":
                # Ignore SCVMM on 5.2
                continue
        except Exception:  # No SSH connection
            continue

        # Check provider hasn't been filtered out with --use-provider
        if prov_obj.key not in filtered:
            continue

        # Get values for the requested fields, filling in with None for undefined fields
        data_values = {field: data.get(field, None) for field in fields}

        # Go through the values and handle the special 'data' name
        # report the undefined fields to the log
        for key in data_values.keys():
            if data_values[key] is None:
                if 'require_fields' not in options:
                    options['require_fields'] = True
                if options['require_fields']:
                    skip = True
                    logger.warning('Field "%s" not defined for provider "%s", skipping' %
                        (key, provider)
                    )
                else:
                    logger.debug('Field "%s" not defined for provider "%s", defaulting to None' %
                        (key, provider)
                    )
        if skip:
            continue

        # Check the template presence if requested
        if template_location is not None:
            o = data
            try:
                for field in template_location:
                    o = o[field]
            except (IndexError, KeyError):
                logger.info("Cannot apply {} to {} in the template specification, ignoring.".format(
                    repr(field), repr(o)))
            else:
                if not isinstance(o, basestring):
                    raise ValueError("{} is not a string! (for template)".format(repr(o)))
                templates = TEMPLATES.get(provider, None)
                if templates is not None:
                    if o not in templates:
                        logger.info(
                            "Wanted template {} on {} but it is not there!\n".format(o, provider))
                        # Skip collection of this one
                        continue

        values = []
        for arg in argnames:
            if arg == 'provider':
                metafunc.function = pytest.mark.provider_related()(metafunc.function)
                values.append(prov_obj)
            elif arg in data_values:
                values.append(data_values[arg])

        # skip when a required field is not present and options['require_fields'] == True
        argvalues.append(values)

        # Use the provider name for idlist, helps with readable parametrized test output
        idlist.append(provider)

    # pick a single provider if options['choose_random'] == True
    if 'choose_random' not in options:
        options['choose_random'] = False
    if idlist and options['choose_random']:
        single_index = idlist.index(random.choice(idlist))
        new_idlist = ['random_provider']
        new_argvalues = [argvalues[single_index]]
        logger.debug('Choosing random provider, "%s" selected' % idlist[single_index])
        return argnames, new_argvalues, new_idlist

    return argnames, argvalues, idlist
Exemple #49
0
    def release_sysips(self, ip_or_ips):
        logger.info("release system ips: %s" % str(ip_or_ips))
        result = self.system.release(ip_or_ips)
        self.dump_system()
        return result
Exemple #50
0
def train(
    cfg,
    data_cfg,
    resume=False,
    epochs=100,
    batch_size=16,
    accumulated_batches=1,
    freeze_backbone=False,
    opt=None,
):
    weights = '../weights'  # moved one level up so the folder is easier to copy
    mkdir_if_missing(weights)
    latest = osp.join(weights,
                      'latest.pt')  # used to resume the last saved checkpoint -- take care not to overwrite it!

    torch.backends.cudnn.benchmark = True  # unsuitable for multiscale

    # Configure run
    print("loading data")
    sys.stdout.flush()
    f = open(data_cfg)
    data_config = json.load(f)
    trainset_paths = data_config['train']
    dataset_root = data_config['root']
    f.close()
    cfg_dict = parse_model_cfg(cfg)
    img_size = [int(cfg_dict[0]['width']), int(cfg_dict[0]['height'])]

    # Get dataloader
    transforms = T.Compose([T.ToTensor()])
    dataset = JointDataset(dataset_root,
                           trainset_paths,
                           img_size,
                           augment=True,
                           transforms=transforms)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             num_workers=8,
                                             pin_memory=False,
                                             drop_last=True,
                                             collate_fn=collate_fn)

    # Initialize model
    print("building model")
    sys.stdout.flush()
    model = Darknet(cfg_dict, dataset.nID)

    cutoff = -1  # backbone reaches to cutoff layer
    start_epoch = 0
    if resume:
        if opt.latest:
            latest_resume = "/home/master/kuanzi/weights/72_epoch_arcface.pt"
            print("Loading the latest weight...", latest_resume)
            checkpoint = torch.load(latest_resume, map_location='cpu')

            # Load weights to resume from
            model.load_state_dict(checkpoint['model'])
            model.cuda().train()

            # Set optimizer
            classifer_param_value = list(map(id,
                                             model.classifier.parameters()))
            classifer_param = model.classifier.parameters()
            base_params = filter(lambda p: id(p) not in classifer_param_value,
                                 model.parameters())
            print("classifer_param\n", classifer_param)  #  [2218660649072]
            print("classifer_param_value\n",
                  classifer_param_value)  #  [2218660649072]
            print("base_params\n",
                  base_params)  # <filter object at 0x0000020493D95048>
            sys.stdout.flush()
            # optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad, model.parameters()), lr=opt.lr * 0.1, momentum=.9)
            optimizer = torch.optim.SGD(
                [{
                    'params': filter(lambda x: x.requires_grad, base_params),
                    'lr': opt.lr * 0.01
                }, {
                    'params': classifer_param,
                    'lr': opt.lr
                }],
                momentum=.9)
            # optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad, model.parameters()), lr=opt.lr, momentum=.9)

            start_epoch = checkpoint['epoch'] + 1
            if checkpoint['optimizer'] is not None:
                # Anyway, if you’re “freezing” any part of your network, and your optimizer is only passed “unfrozen” model parameters
                # (i.e. your optimizer filters out model parameters whose requires_grad is False),
                # then when resuming, you’ll need to unfreeze the network again and re-instantiate the optimizer afterwards.
                optimizer.load_state_dict(checkpoint['optimizer'])

            del checkpoint  # current, saved

        else:
            # pretrain = "/home/master/kuanzi/weights/jde_1088x608_uncertainty.pt"
            pretrain = "/home/master/kuanzi/weights/jde_864x480_uncertainty.pt"  #576x320
            print("Loading jde finetune weight...", pretrain)
            sys.stdout.flush()
            checkpoint = torch.load(pretrain, map_location='cpu')

            model_dict = model.state_dict()
            pretrained_dict = {
                k: v
                for k, v in checkpoint['model'].items()
                if not k.startswith("classifier")
            }  # drop the fully connected classifier layer
            model_dict.update(pretrained_dict)
            model.load_state_dict(model_dict)
            model.cuda().train()
            print("model weight loaded")
            sys.stdout.flush()

            classifer_param_value = list(map(id,
                                             model.classifier.parameters()))
            classifer_param = model.classifier.parameters()
            base_params = filter(lambda p: id(p) not in classifer_param_value,
                                 model.parameters())
            print("classifer_param\n", classifer_param)  #  [2218660649072]
            print("classifer_param_value\n",
                  classifer_param_value)  #  [2218660649072]
            print("base_params\n",
                  base_params)  # <filter object at 0x0000020493D95048>
            sys.stdout.flush()
            # optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad, model.parameters()), lr=opt.lr * 0.1, momentum=.9)
            optimizer = torch.optim.SGD(
                [{
                    'params': filter(lambda x: x.requires_grad, base_params),
                    'lr': opt.lr * 0.01
                }, {
                    'params': classifer_param,
                    'lr': opt.lr
                }],
                momentum=.9)

            print("chk epoch:\n", checkpoint['epoch'])
            sys.stdout.flush()
            start_epoch = checkpoint['epoch'] + 1

    else:
        # Initialize model with backbone (optional)
        print("Loading backbone...")
        sys.stdout.flush()
        if cfg.endswith('yolov3.cfg'):
            load_darknet_weights(model, osp.join(weights, 'darknet53.conv.74'))
            cutoff = 75
        elif cfg.endswith('yolov3-tiny.cfg'):
            load_darknet_weights(model, osp.join(weights,
                                                 'yolov3-tiny.conv.15'))
            cutoff = 15

        model.cuda().train()

        # Set optimizer
        optimizer = torch.optim.SGD(filter(lambda x: x.requires_grad,
                                           model.parameters()),
                                    lr=opt.lr,
                                    momentum=.9,
                                    weight_decay=1e-4)

    model = torch.nn.DataParallel(model)
    # Set scheduler
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=[int(0.5 * opt.epochs),
                    int(0.75 * opt.epochs)],
        gamma=0.1)

    # An important trick for detection: freeze bn during fine-tuning
    if not opt.unfreeze_bn:
        for i, (name, p) in enumerate(model.named_parameters()):
            p.requires_grad = False if 'batch_norm' in name else True

    model_info(model)

    t0 = time.time()
    print("begin training...")
    sys.stdout.flush()
    for epoch in range(epochs):
        epoch += start_epoch

        logger.info(
            ('%8s%12s' + '%10s' * 6) % ('Epoch', 'Batch', 'box', 'conf', 'id',
                                        'total', 'nTargets', 'time'))

        # Freeze darknet53.conv.74 for first epoch
        if freeze_backbone and (epoch < 2):
            for i, (name, p) in enumerate(model.named_parameters()):
                if int(name.split('.')[2]) < cutoff:  # if layer < 75
                    p.requires_grad = False if (epoch == 0) else True

        ui = -1
        rloss = defaultdict(float)  # running loss
        optimizer.zero_grad()
        for i, (imgs, targets, _, _, targets_len) in enumerate(dataloader):
            if sum([len(x) for x in targets]) < 1:  # if no targets continue
                continue

            # SGD burn-in
            burnin = min(1000, len(dataloader))
            if (epoch == 0) & (i <= burnin):
                lr = opt.lr * (i / burnin)**4
                for g in optimizer.param_groups:
                    g['lr'] = lr

            # Compute loss, compute gradient, update parameters
            loss, components = model(imgs.cuda(), targets.cuda(),
                                     targets_len.cuda())
            components = torch.mean(components.view(-1, 5), dim=0)

            loss = torch.mean(loss)
            loss.backward()

            # accumulate gradient for x batches before optimizing
            if ((i + 1) % accumulated_batches
                    == 0) or (i == len(dataloader) - 1):
                optimizer.step()
                optimizer.zero_grad()

            # Running epoch-means of tracked metrics
            ui += 1

            for ii, key in enumerate(model.module.loss_names):
                rloss[key] = (rloss[key] * ui + components[ii]) / (ui + 1)

            s = ('%8s%12s' + '%10.3g' * 6) % (
                '%g/%g' % (epoch, epochs - 1), '%g/%g' %
                (i, len(dataloader) - 1), rloss['box'], rloss['conf'],
                rloss['id'], rloss['loss'], rloss['nT'], time.time() - t0)
            t0 = time.time()
            if i % opt.print_interval == 0:
                logger.info(s)

        # # Save latest checkpoint
        # checkpoint = {'epoch': epoch,
        #               'model': model.module.state_dict(),
        #               'optimizer': optimizer.state_dict()}
        # torch.save(checkpoint, latest)

        # Calculate mAP
        if epoch % opt.test_interval == 0 and epoch != 0:
            epoch_chk = osp.join(weights, str(epoch) + '_epoch_arc_margin.pt')
            checkpoint = {
                'epoch': epoch,
                'model': model.module.state_dict(),
                'optimizer': optimizer.state_dict()
            }
            torch.save(checkpoint, epoch_chk)
            # """ 训练与测试解耦,以下工作单独进行 """
            # with torch.no_grad():
            #     # mAP, R, P = test.test(cfg, data_cfg, weights=latest, batch_size=batch_size, print_interval=40)
            #     # print ("test.test:\t", mAP, "\t", R, "\t", P)
            #     test_mapgiou.test_giou(cfg, data_cfg, weights=latest, batch_size=batch_size, print_interval=40)
            #     test_mapgiou.test_emb(cfg, data_cfg, weights=latest, batch_size=batch_size, print_interval=40)

        # Call scheduler.step() after optimizer.step() with PyTorch >= 1.1.0
        scheduler.step()
Exemple #51
0
def setup_for_event_testing(ssh_client, db, listener_info, providers):
    domain_name = "EventTesting"
    domain = Domain(name=domain_name, enabled=True)
    if not domain.exists():
        domain.create()

    # FIX THE ENV ERROR IF PRESENT
    if ssh_client.run_command("ruby -v")[0] != 0:
        logger.info("Pathing env to correctly source EVM environment")
        success = ssh_client.run_command("echo 'source /etc/default/evm' >> .bashrc")[0] == 0
        assert success, "Issuing the patch command was unsuccessful"
        # Verify it works
        assert ssh_client.run_command("ruby -v")[0] == 0, "Patch failed"

    # INSTALL REST-CLIENT - REQUIRED FOR THE EVENT DISPATCHER SCRIPT
    if ssh_client.run_rails_command("\"require 'rest-client'\"")[0] != 0:
        # We have to install the gem
        logger.info("Installing rest-client ruby gem that is required by the event dispatcher.")
        success = ssh_client.run_command("gem install rest-client")[0] == 0
        assert success, "Could not install 'rest-client' gem"
        # Verify it works
        assert ssh_client.run_rails_command("\"require 'rest-client'\"")[0] == 0

    # IMPORT AUTOMATE NAMESPACE
    qe_automate_namespace_xml = "qe_event_handler.xml"
    qe_automate_namespace_script = "qe_event_handler.rb"
    local_automate_script = local(__file__)\
        .new(basename="../data/%s" % qe_automate_namespace_script)\
        .strpath
    local_automate_file = local(__file__)\
        .new(basename="../data/%s" % qe_automate_namespace_xml)\
        .strpath
    tmp_automate_file = "/tmp/%s" % qe_automate_namespace_xml

    # Change the information
    with open(local_automate_file, "r") as input_xml, \
            open(tmp_automate_file, "w") as output_xml:
        tree = etree.parse(input_xml)
        root = tree.getroot()

        def set_text(xpath, text):
            field = root.xpath(xpath)
            assert len(field) == 1
            field[0].text = text
        set_text("//MiqAeSchema/MiqAeField[@name='url']",
                 re.sub(r"^http://([^/]+)/?$", "\\1", listener_info.host))
        set_text("//MiqAeSchema/MiqAeField[@name='port']", str(listener_info.port))

        # Put the custom script from an external file
        with open(local_automate_script, "r") as script:
            set_text("//MiqAeMethod[@name='relay_events']",
                     etree.CDATA(script.read()))

        et = etree.ElementTree(root)
        et.write(output_xml)

    # copy xml file to appliance
    # but before that, let's check whether it's there because we may have already applied this file
    if ssh_client.run_command("ls /root/%s" % qe_automate_namespace_xml)[0] != 0:
        ssh_client.put_file(tmp_automate_file, '/root/')

        # We have to convert it first for new version
        convert_cmd = version.pick({
            version.LOWEST: None,

            "5.3.0.0":
            "evm:automate:convert DOMAIN={} FILE=/root/{} ZIP_FILE=/root/{}.zip".format(
                domain_name, qe_automate_namespace_xml, qe_automate_namespace_xml),
        })
        if convert_cmd is not None:
            logger.info("Converting namespace for use on newer appliance...")
            return_code, stdout = ssh_client.run_rake_command(convert_cmd)
            if return_code != 0:
                logger.error("Namespace conversion was unsuccessful")
                logger.error(stdout)
                # We didn't successfully do that so remove the file to know
                # that it's needed to do it again when run again
                ssh_client.run_command("rm -f /root/%s*" % qe_automate_namespace_xml)
                raise AutomateImportError(stdout)

        # run rake cmd on appliance to import automate namespace
        rake_cmd = version.pick({
            version.LOWEST: "evm:automate:import FILE=/root/{}".format(qe_automate_namespace_xml),

            "5.3.0.0":
            "evm:automate:import ZIP_FILE=/root/{}.zip DOMAIN={} OVERWRITE=true "
            "PREVIEW=false".format(qe_automate_namespace_xml, domain_name),
        })
        logger.info("Importing the QE Automation namespace ...")
        return_code, stdout = ssh_client.run_rake_command(rake_cmd)
        if return_code != 0:
            logger.error("Namespace import was unsuccessful")
            logger.error(stdout)
            # We didn't successfully do that so remove the file to know
            # that it's needed to do it again when run again
            ssh_client.run_command("rm -f /root/%s*" % qe_automate_namespace_xml)
            raise AutomateImportError(stdout)

    # CREATE AUTOMATE INSTANCE HOOK
    if db is None or db.session.query(db['miq_ae_instances'].name)\
            .filter(db['miq_ae_instances'].name == "RelayEvents").count() == 0:
        original_class = Class(
            name=version.pick({
                version.LOWEST: "Automation Requests (Request)",
                "5.3": "Request"
            }),
            namespace=Namespace("System", domain=Domain("ManageIQ (Locked)")))
        copied_class = original_class.copy_to(domain)
        instance = Instance(
            name="RelayEvents",
            display_name="RelayEvents",
            description="relationship hook to link to custom QE events relay namespace",
            values={
                "rel2": {
                    "value": "/QE/Automation/APIMethods/relay_events?event=$evm.object['event']"
                }
            },
            cls=copied_class,
        )
        instance.create()

    # IMPORT POLICIES
    policy_yaml = "profile_relay_events.yaml"
    policy_path = local(__file__).new(basename="../data/%s" % policy_yaml)
    if not is_imported("Automate event policies"):
        import_file(policy_path.strpath)

    # ASSIGN POLICY PROFILES
    for provider in providers:
        prov_obj = get_crud(provider)
        if not prov_obj.exists:
            prov_obj.create()
        prov_obj.assign_policy_profiles("Automate event policies")
        flash.assert_no_errors()
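
# A toy illustration of the XPath/CDATA pattern used by set_text above
# (standalone sketch with made-up XML, not project data):
from lxml import etree

root = etree.fromstring(
    "<MiqAeSchema><MiqAeField name='url'>old</MiqAeField></MiqAeSchema>")
field = root.xpath("//MiqAeField[@name='url']")
assert len(field) == 1
field[0].text = "listener.example.com"  # or etree.CDATA(...) for script bodies
print(etree.tostring(root).decode())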
Exemple #52
0
    def add_consumer(self, input_dict):
        """
        新增客户的基本信息
        :param input_dict:是一个dict
        {
            "customer_name": "山东蓝水能源有限公司",
            "short_customer_name": "山东蓝水",
            "customer_grade": "一类客户",
            "contact_person": "唐半闲",
            "contact_phone": ""19800000000,
            "sale_man_name": "刘鹏",
            "social_credit_code": ""ABCDEFGHJK12345678,
            "consumer_address": "山东省济南市工业南路57号",
            "should_pay_money": "300000",
            "is_payer": 1, #  0,代表选择已有的付款方,1,代表将新加的客户作为付款方
            "payer_select": "陕西元和石油天然气有限公司"
            "consumer_category_display": "贸易商",
            "is_business_contract": 1,  # 0,代表不填写业务信息,1,代表将填写业务信息, 2,代表填写业务信息和合同信息
            "free_hour": "12",
            "waiting_price": "300",
            "kui_tons_standard": "200",
            "settlement_cycle_display": "周结",
            "contract_no": "NO22222",
            "contract_start_date": ""2019-03-19,
            "contract_end_date": "2019-10-19",
        }
        :return:
        """
        self.driver.find_element_by_xpath(self.go_to_add).click()
        time.sleep(2)
        # print(self.driver.title)
        if self.driver.title == self.consumer_add_title:
            logger.info("进入%s页面成功!" % self.consumer_add_title)
            self.driver.find_element_by_xpath(self.customer_name).send_keys(
                input_dict["customer_name"])
            self.driver.find_element_by_xpath(
                self.short_customer_name).send_keys(
                    input_dict["short_customer_name"])
            self.select_input_function(self.customer_grade,
                                       self.select_input_xpath1,
                                       input_dict["customer_grade"],
                                       self.driver)
            time.sleep(1)
            self.driver.find_element_by_xpath(self.contact_person).send_keys(
                input_dict["contact_person"])
            self.driver.find_element_by_xpath(self.contact_phone).send_keys(
                input_dict["contact_phone"])
            self.select_input_function(self.sale_man_name,
                                       self.select_input_xpath2,
                                       input_dict["sale_man_name"],
                                       self.driver)
            time.sleep(1)
            self.driver.find_element_by_xpath(
                self.social_credit_code).send_keys(
                    input_dict["social_credit_code"])
            self.driver.find_element_by_xpath(self.consumer_address).send_keys(
                input_dict["consumer_address"])
            self.driver.find_element_by_xpath(self.should_pay_money).send_keys(
                input_dict["should_pay_money"])

            if input_dict["is_payer"] == 0:
                self.driver.find_element_by_xpath(self.payer_three).click()
                self.driver.find_element_by_xpath(
                    (self.payer_select)).send_keys(input_dict["payer_select"])
            elif input_dict["is_payer"] == 1:
                self.driver.find_element_by_xpath(self.payer_owner).click()
            else:
                logger.info("is_payer参数不符合规范,必须是0,1")

            self.select_input_function(self.consumer_category_display,
                                       self.select_input_xpath3,
                                       input_dict["consumer_category_display"],
                                       self.driver)
            time.sleep(1)

            if input_dict["is_business_contract"] == 0:
                self.driver.find_element_by_xpath(self.save_and_exit).click()
                message = self.driver.switch_to_alert().text()
                return message
            elif input_dict["is_business_contract"] == 1:
                self.driver.find_element_by_xpath(self.save_and_next).click()
                self.driver.find_element_by_xpath(self.free_hour).send_keys(
                    input_dict["free_hour"])
                self.driver.find_element_by_xpath(
                    self.waiting_price).send_keys(input_dict["waiting_price"])
                self.driver.find_element_by_xpath(
                    self.kui_tons_standard).send_keys(
                        input_dict["kui_tons_standard"])
                self.driver.find_element_by_xpath(
                    self.settlement_cycle_display).send_keys(
                        input_dict["settlement_cycle_display"])
                message = self.driver.switch_to_alert().text()
                return message
            elif input_dict["is_business_contract"] == 2:
                self.driver.find_element_by_xpath(self.save_and_next).click()
                self.driver.find_element_by_xpath(self.free_hour).send_keys(
                    input_dict["free_hour"])
                self.driver.find_element_by_xpath(
                    self.waiting_price).send_keys(input_dict["waiting_price"])
                self.driver.find_element_by_xpath(
                    self.kui_tons_standard).send_keys(
                        input_dict["kui_tons_standard"])
                self.driver.find_element_by_xpath(self.settlement_cycle_display).\
                    send_keys(input_dict["settlement_cycle_display"])

                self.driver.find_element_by_xpath(self.save_and_next).click()
                self.driver.find_element_by_xpath(self.contract_no).send_keys(
                    input_dict["contract_no"])
                self.driver.find_element_by_xpath(
                    self.contract_start_date).send_keys(
                        input_dict["contract_start_date"])
                self.driver.find_element_by_xpath(
                    self.contract_end_date).send_keys(
                        input_dict["contract_end_date"])

                self.driver.find_element_by_xpath(self.save_and_exit1).click()
                # 获取alter信息
                message = self.driver.switch_to_alert().text()
                return message
            else:
                logger.info("is_business_contract参数不符合规范,必须是0,1,2")
                return False
        else:
            logger.info("进入%s页面失败!" % self.consumer_add_title)
            return False
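
# A hedged usage sketch for add_consumer above. The page-object parameter
# "consumer_page" and the helper name are illustrative assumptions; only the
# input_dict keys and values come from the method's docstring.
def _example_add_consumer(consumer_page):
    sample = {
        "customer_name": "山东蓝水能源有限公司",
        "short_customer_name": "山东蓝水",
        "customer_grade": "一类客户",
        "contact_person": "唐半闲",
        "contact_phone": "19800000000",
        "sale_man_name": "刘鹏",
        "social_credit_code": "ABCDEFGHJK12345678",
        "consumer_address": "山东省济南市工业南路57号",
        "should_pay_money": "300000",
        "is_payer": 1,  # the new customer is also the payer, so no payer_select needed
        "consumer_category_display": "贸易商",
        "is_business_contract": 0,  # save the basic info only
    }
    message = consumer_page.add_consumer(sample)
    logger.info("alert message after saving the customer: %s" % message)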
Exemple #53
    def run_moveEmail_case(self, data):
        with allure.step("Open the search box for subjects containing '重构,勿动' and enter the first email's detail page"):
            self.recipientBoxPageCommon.get_emailBySubject(box="主题包含重构,勿动")
        with allure.step("Collect all information of the email"):
            with allure.step("Get the email subject"):
                emailSubject = self.emailDetailPageCommon.get_subjectOfEmailDetail()
                logger.info("Subject of the email to move: {}".format(emailSubject))
                if not emailSubject:
                    raise Exception("The email to move has no subject")
            with allure.step("Get the email sender"):
                emailSender = self.emailDetailPageCommon.get_senderOfEmailDetail()
                logger.info("Sender of the email to move: {}".format(emailSender))
                if not emailSender:
                    raise Exception("The email to move has no sender")
            with allure.step("Get all recipients"):
                emailRecipients = self.emailDetailPageCommon.get_recipientOfEmailDetail()
                logger.info("Recipients of the email to move: {}".format(emailRecipients))
                if len(emailRecipients) == 0:
                    raise Exception("The email to move has no recipients")
            with allure.step("Get all small attachments"):
                emailSmallAttachNames = self.emailDetailPageCommon.get_allSmallAttachNamesOfEmailDetail()
                logger.info("Small attachments of the email to move: {}".format(emailSmallAttachNames))
                if len(emailSmallAttachNames) == 0:
                    raise Exception("The email to move has no small attachments")
            # with allure.step("Get all big attachments"):
            #     emailBigAttachNames = self.emailDetailPageCommon.get_allBigAttachNamesOfEmailDetail()
            #     logger.info("Big attachments of the email to move: {}".format(emailBigAttachNames))
            #     if len(emailBigAttachNames) == 0:
            #         raise Exception("The email to move has no big attachments")
            with allure.step("Get the email body"):
                emailBody = self.emailDetailPageCommon.get_emailTextOfEmailDetail(index="all")
                logger.info("Body of the email to move: {}".format(emailBody))
                if len(emailBody) == 0:
                    raise Exception("The email to move has no body")
            with allure.step("Get all site snapshot links"):
                emailSites = self.emailDetailPageCommon.get_allSiteUrlsOfEmailDetail()
                logger.info("Marketing site snapshots of the email to move: {}".format(emailSites))
                if len(emailSites) == 0:
                    raise Exception("The email to move has no marketing snapshots")
            with allure.step("Get all product image URLs and codes"):
                emailProductImgUrls, emailProductCodes = self.emailDetailPageCommon.get_allProductImgUrlsOfEmailDetail()
                logger.info("Product image URLs of the email to move: {}, codes: {}".format(emailProductImgUrls, emailProductCodes))
                if len(emailProductImgUrls) == 0:
                    raise Exception("The email to move has no product images")
                if len(emailProductCodes) == 0:
                    raise Exception("The email to move has no product codes")
        with allure.step("Click the move button"):
            self.click_ele(self.emailDetailPage_moveBtn_loc, key="click the move button on the email detail page")
        with allure.step("Click {}".format(data["boxCategory"])):
            self.emailDetailPageCommon.clickBoxCategory_toMove(data["boxCategory"])
        # Sent-type boxes ("发" means "sent") need no target box to be chosen
        if "发" not in data["boxCategory"]:
            if data["boxCategory"] == "客户箱":  # customer box
                with allure.step("Click the first customer box"):
                    firstCustomerBoxEle = self.find_element(self.emailDetailPage_moveToCustomerBoxList_loc, key="get the first customer box")
                    moveToBoxName = firstCustomerBoxEle.text
                    logger.info(moveToBoxName)
                    firstCustomerBoxEle.click()
            elif data["boxCategory"] == "供应商箱":  # supplier box
                with allure.step("Click the first supplier box"):
                    firstSupplierBoxEle = self.find_element(self.emailDetailPage_moveToCustomerBoxList_loc, key="get the first supplier box")
                    moveToBoxName = firstSupplierBoxEle.text
                    logger.info(moveToBoxName)
                    firstSupplierBoxEle.click()
            elif data["boxCategory"] == "内部联系人箱":  # internal contact box
                with allure.step("Click the first internal contact box"):
                    time.sleep(1)
                    firstInnerBoxEle = self.find_element(self.emailDetailPage_moveToInnerBoxList_loc, key="get the first internal contact box")
                    moveToBoxName = firstInnerBoxEle.text.split("<")[0]
                    logger.info(moveToBoxName)
                    firstInnerBoxEle.click()
            elif data["boxCategory"] == "自定义箱":  # custom box
                with allure.step("Click the first custom box"):
                    firstCustomBoxEle = self.find_element(self.emailDetailPage_moveToCustomBoxList_loc, key="get the first custom box")
                    moveToBoxName = firstCustomBoxEle.text
                    logger.info(moveToBoxName)
                    firstCustomBoxEle.click()
            with allure.step("Click the confirm button"):
                self.click_ele(self.emailDetailPage_sureMoveBtn_loc, key="click the confirm-move button")
        else:
            moveToBoxName = data["boxCategory"]
        with allure.step("Close the email detail tab"):
            self.click_ele(self.closeTabBtn, key="close the email detail tab")
        with allure.step("Check whether box {} contains an email with subject {}".format(moveToBoxName, emailSubject)):
            self.recipientBoxPageCommon.get_emailBySubjectAndBox(email_subject=emailSubject, boxCategory=data["boxCategory"], boxName=moveToBoxName)
        with allure.step("Collect all information of the moved email and check nothing was lost"):
            with allure.step("Get the moved email's subject"):
                emailSubject_moved = self.emailDetailPageCommon.get_subjectOfEmailDetail()
                logger.info("emailSubject_moved: {}".format(emailSubject_moved))
                if emailSubject_moved != emailSubject:
                    raise Exception("Subject after move: {} does not match subject before move: {}".format(emailSubject_moved, emailSubject))
                else:
                    logger.info("Subject before move: {}, subject after move: {}".format(emailSubject, emailSubject_moved))
            with allure.step("Get the moved email's sender"):
                emailSender_moved = self.emailDetailPageCommon.get_senderOfEmailDetail()
                logger.info("emailSender_moved: {}".format(emailSender_moved))
                if emailSender_moved.split("<")[1].split(">")[0] != emailSender.split("<")[1].split(">")[0]:
                    raise Exception("Sender after move: {} does not match sender before move: {}".format(emailSender_moved, emailSender))
            with allure.step("Get all recipients after the move"):
                emailRecipients_moved = self.emailDetailPageCommon.get_recipientOfEmailDetail()
                logger.info("emailRecipients_moved: {}".format(emailRecipients_moved))
                if emailRecipients_moved != emailRecipients:
                    raise Exception("Recipients after move: {} do not match recipients before move: {}".format(emailRecipients_moved, emailRecipients))
            # with allure.step("Get all big attachments after the move"):
            #     emailBigAttachNames_moved = self.emailDetailPageCommon.get_allBigAttachNamesOfEmailDetail()
            #     logger.info("emailBigAttachNames_moved: {}".format(emailBigAttachNames_moved))
            #     if emailBigAttachNames_moved != emailBigAttachNames:
            #         raise Exception("Big attachments after move: {} do not match those before move: {}".format(emailBigAttachNames_moved, emailBigAttachNames))
            with allure.step("Get all small attachments after the move"):
                emailSmallAttachNames_moved = self.emailDetailPageCommon.get_allSmallAttachNamesOfEmailDetail()
                logger.info("emailSmallAttachNames_moved: {}".format(emailSmallAttachNames_moved))
                if emailSmallAttachNames_moved != emailSmallAttachNames:
                    raise Exception("Small attachments after move: {} do not match those before move: {}".format(emailSmallAttachNames_moved, emailSmallAttachNames))
            with allure.step("Get the email body after the move"):
                emailBody_moved = self.emailDetailPageCommon.get_emailTextOfEmailDetail(index="all")
                logger.info("emailBody_moved: {}".format(emailBody_moved))
                if emailBody_moved != emailBody:
                    raise Exception("Body after move: {} does not match body before move: {}".format(emailBody_moved, emailBody))
            with allure.step("Get all site snapshot links after the move"):
                emailSites_moved = self.emailDetailPageCommon.get_allSiteUrlsOfEmailDetail()
                logger.info("emailSites_moved: {}".format(emailSites_moved))
                if emailSites_moved != emailSites:
                    raise Exception("Site snapshot links after move: {} do not match those before move: {}".format(emailSites_moved, emailSites))
            with allure.step("Get all product image URLs and codes after the move"):
                emailProductImgUrls_moved, emailProductCodes_moved = self.emailDetailPageCommon.get_allProductImgUrlsOfEmailDetail()
                logger.info("emailProductImgUrls_moved: {}, emailProductCodes_moved: {}".format(emailProductImgUrls_moved, emailProductCodes_moved))
                if emailProductImgUrls_moved != emailProductImgUrls:
                    raise Exception("Product image URLs after move: {} do not match those before move: {}".format(emailProductImgUrls_moved, emailProductImgUrls))
                if emailProductCodes_moved != emailProductCodes:
                    raise Exception("Product codes after move: {} do not match those before move: {}".format(emailProductCodes_moved, emailProductCodes))
Exemple #54
    def acquire_sysips_cidr(self, num=1):
        logger.info("acquire system ips")
        result = self.system.acquire_cidr(num)
        self.dump_system()
        return result
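
# A hedged usage sketch; "netmgr" stands for an instance of the class that
# defines acquire_sysips_cidr above and is an assumption for illustration.
def _example_acquire_sysips(netmgr):
    sysips = netmgr.acquire_sysips_cidr(num=2)  # reserve two system IPs
    logger.info("acquired system ips: %s" % sysips)
    return sysips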
Exemple #55
    def run_resendEmail_case(self):
        with allure.step("Open the search box for subjects containing '重构,勿动' and enter the first email's detail page"):
            self.recipientBoxPageCommon.get_emailBySubject(box="主题包含重构,勿动")
        with allure.step("Collect all information of the email"):
            with allure.step("Get the email subject"):
                emailSubject = self.emailDetailPageCommon.get_subjectOfEmailDetail()
                logger.info("Subject of the email to resend: {}".format(emailSubject))
                if not emailSubject:
                    raise Exception("The email to resend has no subject")
            with allure.step("Get the email sender"):
                emailSender = self.emailDetailPageCommon.get_senderOfEmailDetail()
                logger.info("Sender of the email to resend: {}".format(emailSender))
                if not emailSender:
                    raise Exception("The email to resend has no sender")
            with allure.step("Get all recipients"):
                emailRecipients = self.emailDetailPageCommon.get_recipientOfEmailDetail()
                logger.info("Recipients of the email to resend: {}".format(emailRecipients))
                if not emailRecipients:
                    raise Exception("The email to resend has no recipients")
            # with allure.step("Get all big attachments"):
            #     emailBigAttachNames = self.emailDetailPageCommon.get_allBigAttachNamesOfEmailDetail()
            #     logger.info("Big attachments of the email to resend: {}".format(emailBigAttachNames))
            #     if not emailBigAttachNames:
            #         raise Exception("The email to resend has no big attachments")
            with allure.step("Get all small attachments"):
                emailSmallAttachNames = self.emailDetailPageCommon.get_allSmallAttachNamesOfEmailDetail()
                logger.info("Small attachments of the email to resend: {}".format(emailSmallAttachNames))
                if not emailSmallAttachNames:
                    raise Exception("The email to resend has no small attachments")
            with allure.step("Get the email body"):
                emailBody = self.emailDetailPageCommon.get_emailTextOfEmailDetail(index="all")
                logger.info("Body of the email to resend: {}".format(emailBody))
                if not emailBody:
                    raise Exception("The email to resend has no body")
            with allure.step("Get all site snapshot links"):
                emailSites = self.emailDetailPageCommon.get_allSiteUrlsOfEmailDetail()
                logger.info("Marketing site snapshots of the email to resend: {}".format(emailSites))
                if not emailSites:
                    raise Exception("The email to resend has no marketing snapshots")
            with allure.step("Get all product image URLs and codes"):
                emailProductImgUrls, emailProductCodes = self.emailDetailPageCommon.get_allProductImgUrlsOfEmailDetail()
                logger.info("Product image URLs of the email to resend: {}, codes: {}".format(emailProductImgUrls, emailProductCodes))
                if not emailProductImgUrls:
                    raise Exception("The email to resend has no product images")
                if not emailProductCodes:
                    raise Exception("The email to resend has no product codes")
        with allure.step("Click the more button"):
            self.click_ele(self.emailDetailPage_moreOperateBtn_loc, key="click the more button")
        with allure.step("Click the resend button"):
            # Derive the resend locator from the merge button locator by swapping
            # the label text ("归并" = merge, "重发" = resend)
            resend_loc = (By.XPATH, self.emailDetailPage_mergerBtn_loc[1].replace("归并", "重发"))
            self.click_ele(resend_loc, key="click the resend button")
        with allure.step("Close the email detail tab"):
            self.click_ele(self.closeTabBtn, key="close the email detail tab")
        with allure.step("Collect the resent email's information"):
            with allure.step("Get the resent email's recipients"):
                emailRecipients_resended = self.writeMailCommon.get_recipientsOfWriteEmailPage()
                logger.info("Recipients after resend: {}".format(emailRecipients_resended))
                if emailRecipients_resended[0].split("<")[1].split(">")[0] != emailRecipients[0].split("<")[1].split(">")[0]:
                    raise Exception("Recipients after resend: {} do not match recipients before resend: {}".format(emailRecipients_resended, emailRecipients))
            with allure.step("Get the resent email's subject"):
                emailSubject_resended = self.writeMailCommon.get_emailSubjectInSubjectInput()
                logger.info("Subject after resend: {}".format(emailSubject_resended))
                if emailSubject_resended != emailSubject:
                    raise Exception("Subject after resend: {} does not match subject before resend: {}".format(emailSubject_resended, emailSubject))
            with allure.step("Get the resent email's sender"):
                emailSender_resended = self.writeMailCommon.get_sendersOfWriteEmailPage()
                logger.info("Sender after resend: {}".format(emailSender_resended))
                # if emailSender_resended != emailSender:
                #     raise Exception("Sender after resend: {} does not match sender before resend: {}".format(emailSender_resended, emailSender))
            with allure.step("Get the resent email's small attachments"):
                emailSmallAttachNames_resended = self.writeMailCommon.get_allAttachNamesOfWriteEmailPage()
                logger.info("Small attachments after resend: {}".format(emailSmallAttachNames_resended))
                if emailSmallAttachNames_resended != emailSmallAttachNames:
                    raise Exception("Small attachments after resend: {} do not match those before resend: {}".format(emailSmallAttachNames_resended, emailSmallAttachNames))
            with allure.step("Get the resent email's body"):
                emailBody_resended = self.writeMailCommon.get_emailBodyOfWriteEmailPage()
                logger.info("Body after resend: {}".format(emailBody_resended))
                if emailBody_resended != emailBody:
                    raise Exception("Body after resend: {} does not match body before resend: {}".format(emailBody_resended, emailBody))
            with allure.step("Get the resent email's product image URLs and codes"):
                emailProductImgUrls_resended, emailProductCodes_resended = self.writeMailCommon.get_productInfoOfWriteEmailPage()
                logger.info("Product image URLs after resend: {}, codes: {}".format(emailProductImgUrls_resended, emailProductCodes_resended))
                if emailProductImgUrls_resended != emailProductImgUrls:
                    raise Exception("Product image URLs after resend: {} do not match those before resend: {}".format(emailProductImgUrls_resended, emailProductImgUrls))
                if emailProductCodes_resended != emailProductCodes:
                    raise Exception("Product codes after resend: {} do not match those before resend: {}".format(emailProductCodes_resended, emailProductCodes))
            with allure.step("Get the resent email's site snapshot links"):
                emailSites_resended = self.writeMailCommon.get_sitesOfWriteEmailPage()
                logger.info("Site snapshot links after resend: {}".format(emailSites_resended))
                # The original compared the value to itself; compare against the pre-resend links instead.
                if emailSites_resended != emailSites:
                    raise Exception("Site snapshot links after resend: {} do not match those before resend: {}".format(emailSites_resended, emailSites))
Exemple #56
import time

from selenium.webdriver.common.by import By

from utils.config import Config
from utils.log import logger
# NOTE: AbisMainPage comes from the project's page-object package; its import
# is missing from the original snippet and the exact module path is not shown.


class Login:
    def __init__(self):
        URL = Config().get('URL')
        self.page = AbisMainPage(browser_type='ie').get(URL,
                                                        maximize_window=True)
        self.UserName = Config().get('UserName')
        self.PassWord = Config().get('PassWord')

    def run(self):
        self.login()
        time.sleep(3)
        return self.page

    def login(self):
        # Log in
        locator_UserName = (By.ID, 'UserName')
        locator_PassWord = (By.ID, 'PassWord')
        locator_login = (By.ID, 'loginbutton')
        self.page.find_element(*locator_UserName).send_keys(self.UserName)
        self.page.find_element(*locator_PassWord).send_keys(self.PassWord)
        self.page.find_element(*locator_login).click()


if __name__ == '__main__':
    page = Login().run()
    logger.info(page)
    # page.quit()
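
# A hedged sketch of reusing Login above in a pytest session fixture; the
# pytest usage and the fixture name are assumptions, not part of the original
# example.
import pytest

@pytest.fixture(scope='session')
def logged_in_page():
    page = Login().run()
    yield page
    page.quit()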
Exemple #57
    def check_for_badness(self, fn, _tries, nav_args, *args, **kwargs):
        if getattr(fn, '_can_skip_badness_test', False):
            # self.log_message('Op is a Nop! ({})'.format(fn.__name__))
            return

        if self.VIEW:
            self.view.flush_widget_cache()
        go_kwargs = kwargs.copy()
        go_kwargs.update(nav_args)
        self.appliance.browser.open_browser(url_key=self.obj.appliance.server.address())

        # check for MiqQE javascript patch on first try and patch the appliance if necessary
        if self.appliance.is_miqqe_patch_candidate and not self.appliance.miqqe_patch_applied:
            self.appliance.patch_with_miqqe()
            self.appliance.browser.quit_browser()
            _tries -= 1
            self.go(_tries, *args, **go_kwargs)

        br = self.appliance.browser

        try:
            br.widgetastic.execute_script('miqSparkleOff();', silent=True)
        except:  # noqa
            # miqSparkleOff undefined, so it's definitely off.
            # Or maybe it is alerts? Let's only do this when we get an exception.
            self.appliance.browser.widgetastic.dismiss_any_alerts()
            # If we went so far, let's put diapers on one more miqSparkleOff just to be sure
            # It can be spinning in the back
            try:
                br.widgetastic.execute_script('miqSparkleOff();', silent=True)
            except:  # noqa
                pass

        # Check if the page is blocked with blocker_div. If yes, let's headshot the browser right
        # here
        if (
                br.widgetastic.is_displayed("//div[@id='blocker_div' or @id='notification']") or
                br.widgetastic.is_displayed(".modal-backdrop.fade.in")):
            logger.warning("Page was blocked with blocker div on start of navigation, recycling.")
            self.appliance.browser.quit_browser()
            self.go(_tries, *args, **go_kwargs)

        # Check if modal window is displayed
        if (br.widgetastic.is_displayed(
                "//div[contains(@class, 'modal-dialog') and contains(@class, 'modal-lg')]")):
            logger.warning("Modal window was open; closing the window")
            br.widgetastic.click(
                "//button[contains(@class, 'close') and contains(@data-dismiss, 'modal')]")

        # Check if jQuery present
        try:
            br.widgetastic.execute_script("jQuery", silent=True)
        except Exception as e:
            if "jQuery" not in str(e):
                logger.error("Checked for jQuery but got something different.")
                logger.exception(e)
            # Restart some workers
            logger.warning("Restarting UI and VimBroker workers!")
            with self.appliance.ssh_client as ssh:
                # Blow off the Vim brokers and UI workers
                ssh.run_rails_command("\"(MiqVimBrokerWorker.all + MiqUiWorker.all).each &:kill\"")
            logger.info("Waiting for web UI to come back alive.")
            sleep(10)   # Give it some rest
            self.appliance.wait_for_web_ui()
            self.appliance.browser.quit_browser()
            self.appliance.browser.open_browser(url_key=self.obj.appliance.server.address())
            self.go(_tries, *args, **go_kwargs)

        # Same with rails errors
        view = br.widgetastic.create_view(ErrorView)
        rails_e = view.get_rails_error()

        if rails_e is not None:
            logger.warning("Page was blocked by rails error, renavigating.")
            logger.error(rails_e)
            # RHEL7 top does not know -M and -a
            logger.debug('Top CPU consumers:')
            logger.debug(store.current_appliance.ssh_client.run_command(
                'top -c -b -n1 | head -30').output)
            logger.debug('Top Memory consumers:')
            logger.debug(store.current_appliance.ssh_client.run_command(
                'top -c -b -n1 -o "%MEM" | head -30').output)  # noqa
            logger.debug('Managed known Providers:')
            logger.debug(
                '%r', [prov.key for prov in store.current_appliance.managed_known_providers])
            self.appliance.browser.quit_browser()
            self.appliance.browser.open_browser()
            self.go(_tries, *args, **go_kwargs)
            # If there is a rails error past this point, something is really awful

        # Set this to True in the handlers below to trigger a browser restart
        recycle = False

        # Set this to True in handlers to restart evmserverd on the appliance
        # Includes recycling so you don't need to specify recycle = False
        restart_evmserverd = False

        try:
            self.log_message(
                "Invoking {}, with {} and {}".format(fn.func_name, args, kwargs), level="debug")
            return fn(*args, **kwargs)
        except (KeyboardInterrupt, ValueError):
            # KeyboardInterrupt: Don't block this while navigating
            raise
        except UnexpectedAlertPresentException:
            if _tries == 1:
                # There was an alert, accept it and try again
                br.widgetastic.handle_alert(wait=0)
                self.go(_tries, *args, **go_kwargs)
            else:
                # There was still an alert when we tried again, shoot the browser in the head
                logger.debug('Unexpected alert, recycling browser')
                recycle = True
        except (ErrorInResponseException, InvalidSwitchToTargetException):
            # Unable to switch to the browser at all, need to recycle
            logger.info('Invalid browser state, recycling browser')
            recycle = True
        except exceptions.CFMEExceptionOccured as e:
            # We hit a Rails exception
            logger.info('CFME Exception occurred')
            logger.exception(e)
            recycle = True
        except exceptions.CannotContinueWithNavigation as e:
            # Some of the navigation steps cannot succeed
            logger.info('Cannot continue with navigation due to: {}; '
                'Recycling browser'.format(str(e)))
            recycle = True
        except (NoSuchElementException, InvalidElementStateException, WebDriverException,
                StaleElementReferenceException) as e:
            from cfme.web_ui import cfme_exception as cfme_exc  # To prevent circular imports
            # First check - if jquery is not found, there can be also another
            # reason why this happened so do not put the next branches in elif
            if isinstance(e, WebDriverException) and "jQuery" in str(e):
                # UI failed in some way, try recycling the browser
                logger.exception(
                    "UI failed in some way, jQuery not found, (probably) recycling the browser.")
                recycle = True
            # If the page is blocked, then recycle...
            # TODO .modal-backdrop.fade.in catches the 'About' modal resulting in nav loop
            if (
                    br.widgetastic.is_displayed("//div[@id='blocker_div' or @id='notification']") or
                    br.widgetastic.is_displayed(".modal-backdrop.fade.in")):
                logger.warning("Page was blocked with blocker div, recycling.")
                recycle = True
            elif cfme_exc.is_cfme_exception():
                logger.exception("CFME Exception before force navigate started!: {}".format(
                    cfme_exc.cfme_exception_text()))
                recycle = True
            elif br.widgetastic.is_displayed("//body/h1[normalize-space(.)='Proxy Error']"):
                # 502
                logger.exception("Proxy error detected. Killing browser and restarting evmserverd.")
                req = br.widgetastic.elements("/html/body/p[1]//a")
                req = br.widgetastic.text(req[0]) if req else "No request stated"
                reason = br.widgetastic.elements("/html/body/p[2]/strong")
                reason = br.widgetastic.text(reason[0]) if reason else "No reason stated"
                logger.info("Proxy error: {} / {}".format(req, reason))
                restart_evmserverd = True
            elif br.widgetastic.is_displayed("//body[./h1 and ./p and ./hr and ./address]"):
                # 503 and similar sort of errors
                title = br.widgetastic.text("//body/h1")
                body = br.widgetastic.text("//body/p")
                logger.exception("Application error {}: {}".format(title, body))
                sleep(5)  # Give it a little bit of rest
                recycle = True
            elif br.widgetastic.is_displayed("//body/div[@class='dialog' and ./h1 and ./p]"):
                # Rails exception detection
                logger.exception("Rails exception before force navigate started!: %r:%r at %r",
                    br.widgetastic.text("//body/div[@class='dialog']/h1"),
                    br.widgetastic.text("//body/div[@class='dialog']/p"),
                    getattr(manager.browser, 'current_url', "error://dead-browser")
                )
                recycle = True
            elif br.widgetastic.elements("//ul[@id='maintab']/li[@class='inactive']") and not\
                    br.widgetastic.elements("//ul[@id='maintab']/li[@class='active']/ul/li"):
                # If upstream and is the bottom part of menu is not displayed
                logger.exception("Detected glitch from BZ#1112574. HEADSHOT!")
                recycle = True
            elif not self.obj.appliance.server.logged_in():
                # Session timeout or whatever like that, login screen appears.
                logger.exception("Looks like we are logged out. Try again.")
                recycle = True
            else:
                logger.error("Could not determine the reason for failing the navigation. " +
                    " Reraising.  Exception: {}".format(str(e)))
                logger.debug(store.current_appliance.ssh_client.run_command(
                    'systemctl status evmserverd').output)
                raise

        if restart_evmserverd:
            logger.info("evmserverd restart requested")
            self.appliance.restart_evm_service()
            self.appliance.wait_for_web_ui()
            self.go(_tries, *args, **go_kwargs)

        if recycle or restart_evmserverd:
            self.appliance.browser.quit_browser()
            logger.debug('browser killed on try {}'.format(_tries))
            # If given a "start" nav destination, it won't be valid after quitting the browser
            self.go(_tries, *args, **go_kwargs)
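
# A distilled, self-contained sketch of the recycle-and-retry pattern that
# check_for_badness above applies: run a step, and on a known-bad browser
# state quit the browser and retry a bounded number of times. The step/browser
# arguments and the single exception type are illustrative assumptions; the
# real method inspects many more failure modes before deciding to recycle.
from selenium.common.exceptions import WebDriverException

def run_with_recycle(step, browser, tries=3):
    """Call step(); on WebDriverException recycle the browser and retry."""
    for attempt in range(1, tries + 1):
        try:
            return step()
        except WebDriverException:
            if attempt == tries:
                raise  # out of tries, surface the failure
            logger.debug('browser killed on try {}'.format(attempt))
            browser.quit()  # "headshot" the browser; the next attempt re-runs step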
Exemple #58
    def run_markEmail_case(self, caseid, casename):
        # if caseid == 1:
        #     with allure.step("Get the unread email count"):
        #         '''unread email count on the home page'''
        #         unReadEmail_num_v1 = self.recipientBoxPageCommon.get_unReadEmailNum()
        if caseid == 2:
            with allure.step("Get the starred email count"):
                '''starred email count on the home page'''
                starEmail_num_v1 = self.recipientBoxPageCommon.get_starEmailNum()
        elif caseid == 3:
            with allure.step("Get the no-reply-needed email count"):
                freeReplyEmail_num_v1 = self.recipientBoxPageCommon.get_freeReplyEmailNum()
        with allure.step("Enter the first email in the inbox"):
            self.recipientBoxPageCommon.get_emailBySubjectAndBox()
        with allure.step("Get the email subject"):
            emailSubject = self.emailDetailPageCommon.get_subjectOfEmailDetail()
            logger.info("Case {}-{}: subject of the email to mark: {}".format(caseid, casename, emailSubject))
        if caseid == 1:
            with allure.step("Go back to the email home page"):
                self.click_ele(self.emailHomePage_loc, key="click the email home page")
            with allure.step("Get the unread email count and check it"):
                '''unread count on the home page after opening one unread email'''
                unReadEmail_num_v2 = self.recipientBoxPageCommon.get_unReadEmailNum()
                # assert int(unReadEmail_num_v1) == int(unReadEmail_num_v2) + 1
            with allure.step("Go back to the email detail tab"):
                self.click_ele(self.emailDetailPage_detailTabBtn_loc, key="go back to the email detail tab")
        with allure.step("Click the more-operations button"):
            self.click_ele(self.emailDetailPage_moreOperateBtn_loc, key="click the more-operations button")
        time.sleep(1)
        with allure.step("Hover over the mark-as button"):
            self.mouseHover(self.emailDetailPage_moreOperateMarkBtn_loc)
        if caseid == 1:
            self.click_ele(self.emailDetailPage_moreOperateList_loc, key="click the unread button")
        elif caseid == 2:
            self.click_ele(self.emailDetailPage_moreOperateList_loc, index=1, key="click the star button")
        elif caseid == 3:
            self.click_ele(self.emailDetailPage_moreOperateList_loc, index=2, key="click the no-reply-needed button")
        with allure.step("Go back to the email home page"):
            self.click_ele(self.emailHomePage_loc, key="click the email home page")
        if caseid == 1:
            with allure.step("Get the unread email count"):
                unReadEmail_num_v3 = self.recipientBoxPageCommon.get_unReadEmailNum()
                assert int(unReadEmail_num_v3) == int(unReadEmail_num_v2) + 1
        elif caseid == 2:
            with allure.step("Get the starred email count"):
                starEmail_num_v2 = self.recipientBoxPageCommon.get_starEmailNum()
                assert int(starEmail_num_v2) == int(starEmail_num_v1) + 1
        elif caseid == 3:
            with allure.step("Get the no-reply-needed email count"):
                freeReplyEmail_num_v2 = self.recipientBoxPageCommon.get_freeReplyEmailNum()
                assert int(freeReplyEmail_num_v2) == int(freeReplyEmail_num_v1) + 1
        with allure.step("Go back to the email detail"):
            self.click_ele(self.emailDetailPage_detailTabBtn_loc, key="click the email detail tab")
        with allure.step("Click the more-operations button"):
            self.click_ele(self.emailDetailPage_moreOperateBtn_loc, key="click the more-operations button")
        time.sleep(1)
        with allure.step("Hover over the mark-as button"):
            self.mouseHover(self.emailDetailPage_moreOperateMarkBtn_loc)
        if caseid == 1:
            self.click_ele(self.emailDetailPage_moreOperateList_loc, key="click the read button")
        elif caseid == 2:
            self.click_ele(self.emailDetailPage_moreOperateList_loc, index=1, key="click the unstar button")
        elif caseid == 3:
            self.click_ele(self.emailDetailPage_moreOperateList_loc, index=2, key="click the cancel-no-reply button")
        with allure.step("Go back to the email home page"):
            self.click_ele(self.emailHomePage_loc, key="click the email home page")
        if caseid == 1:
            with allure.step("Get the unread email count"):
                unReadEmail_num_v4 = self.recipientBoxPageCommon.get_unReadEmailNum()
                assert int(unReadEmail_num_v4) == int(unReadEmail_num_v2)
        elif caseid == 2:
            with allure.step("Get the starred email count"):
                starEmail_num_v3 = self.recipientBoxPageCommon.get_starEmailNum()
                assert int(starEmail_num_v3) == int(starEmail_num_v1)
        elif caseid == 3:
            with allure.step("Get the no-reply-needed email count"):
                freeReplyEmail_num_v3 = self.recipientBoxPageCommon.get_freeReplyEmailNum()
                assert int(freeReplyEmail_num_v3) == int(freeReplyEmail_num_v1)
Exemple #59
    def __init__(self, networkmgr, etcdclient, addr, mode):
        self.addr = addr
        logger.info ("begin initialize on %s" % self.addr)
        self.networkmgr = networkmgr
        self.etcd = etcdclient
        self.mode = mode
        self.workerport = env.getenv('WORKER_PORT')
        self.tasks = {}

        # delete the existing network
        logger.info ("delete the existing network")
        [success, bridges] = ovscontrol.list_bridges()
        if success:
            for bridge in bridges:
                if bridge.startswith("docklet-br"):
                    ovscontrol.del_bridge(bridge)
        else:
            logger.error(bridges)

        '''if self.mode == 'new':
            if netcontrol.bridge_exists('docklet-br'):
                netcontrol.del_bridge('docklet-br')
            netcontrol.new_bridge('docklet-br')
        else:
            if not netcontrol.bridge_exists('docklet-br'):
                logger.error("docklet-br not found")
                sys.exit(1)'''

        # get allnodes
        self.allnodes = self._nodelist_etcd("allnodes")
        self.runnodes = []
        self.batchnodes = []
        self.allrunnodes = []
        [status, runlist] = self.etcd.listdir("machines/runnodes")
        for node in runlist:
            nodeip = node['key'].rsplit('/',1)[1]
            if node['value'] == 'ok':
                logger.info ("running node %s" % nodeip)
                self.runnodes.append(nodeip)

        logger.info ("all nodes are: %s" % self.allnodes)
        logger.info ("run nodes are: %s" % self.runnodes)

        # start new thread to watch whether a new node joins
        logger.info ("start thread to watch new nodes ...")
        self.thread_watchnewnode = threading.Thread(target=self._watchnewnode)
        self.thread_watchnewnode.start()
        # wait for all nodes to join
        # while(True):
        for i in range(10):
            allin = True
            for node in self.allnodes:
                if node not in self.runnodes:
                    allin = False
                    break
            if allin:
                logger.info("all nodes necessary joins ...")
                break
            time.sleep(1)
        logger.info ("run nodes are: %s" % self.runnodes)
Exemple #60
    """
    在代码中,配置扫描目录列表及敏感词列表
    :return:
    """
    dir_list = [
        r'C:\Users\Administrator\PycharmProjects\Sensitive_Prober\test',
        r'C:\Users\Administrator\PycharmProjects\Sensitive_Prober\utils'
    ]
    sensitive_list = ['PyYAML', 'abc']

    return dir_list, sensitive_list


def get_config():
    """
    优先使用config.yaml文件的配置项
    :return:
    """
    config_file = "./../config.yaml"
    if os.path.exists(config_file):
        return get_config_from_yaml(config_file)
    else:
        return get_config_from_code()
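
# A hypothetical config.yaml matching the fallback above; the exact schema
# get_config_from_yaml expects is an assumption inferred from
# get_config_from_code returning (dir_list, sensitive_list):
#
#   dir_list:
#     - C:\Users\Administrator\PycharmProjects\Sensitive_Prober\test
#     - C:\Users\Administrator\PycharmProjects\Sensitive_Prober\utils
#   sensitive_list:
#     - PyYAML
#     - abc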


if __name__ == "__main__":
    config_file = "./../config.yaml"
    dir_list, sensitive_list = get_config_from_yaml(config_file)
    logger.info(dir_list)
    logger.info(sensitive_list)