Code Example #1
 def ip_address(self, value):
     if self.default_endpoint:
         if value:
             self.default_endpoint.ipaddress = value
     else:
         logger.warn(
             "can't set ipaddress because default endpoint is absent")
Code Example #2
    def handle_pn(self, pn_dict, sentence_count):
        """Extract positive and negative tags and convert them to dict of Tensor.

        Args:
            pn_dict: dict, containing positive and negative tags.
            sentence_count: int, total sentence count in the batch data.

        Returns:
            pn2vec: dict, containing Tensors of positive and negative tags.
        """
        pn2vec = {}
        # Init pn2vec.
        for label_name, label_config in self.label_config_dict.items():
            if label_config['type'] != 'multi_label':
                pn2vec[label_name] = torch.ones(sentence_count)
        # update pn2vec.
        for pn_name, pn_config in self.pn_config_dict.items():
            label_name = pn_config['label']
            if label_name not in pn2vec:
                logger.warn(
                    f'The positive and negative symbol column for {label_name} is not used.'
                )
                continue
            # Get pn list.
            if self.multi_turn:
                pn_vec = []
                for pns in pn_dict[pn_name]:
                    for pn in pns.split(self.multi_turn_separator):
                        pn_vec.append(int(pn))
            else:
                pn_vec = [int(pns) for pns in pn_dict[pn_name]]
            pn2vec[label_name] = torch.tensor(pn_vec)
        return pn2vec
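A hedged sketch of the core conversion handle_pn performs, lifted out of the class context (the dicts below are hypothetical stand-ins for pn_dict and self.pn_config_dict):

    import torch

    pn_dict = {'sentiment_pn': ['1', '0', '1']}               # one tag string per sentence
    pn_config_dict = {'sentiment_pn': {'label': 'sentiment'}}

    pn_vec = [int(pn) for pn in pn_dict['sentiment_pn']]
    pn2vec = {pn_config_dict['sentiment_pn']['label']: torch.tensor(pn_vec)}
    print(pn2vec)  # {'sentiment': tensor([1, 0, 1])}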
Code Example #3
def check_and_new_project_id(scantask_id,
                             task_name,
                             project_origin,
                             project_des=""):
    st = ScanTask.objects.filter(id=scantask_id).first()
    p = Project.objects.filter(project_hash=md5(task_name)).first()

    if not p:
        p = Project(project_name=st.task_name,
                    project_des=project_des,
                    project_hash=md5(task_name),
                    project_origin=project_origin)
        p.save()

        st.project_id = p.id
        st.save()
    else:
        p.project_des = project_des
        p.project_origin = project_origin
        try:
            p.save()
        except IntegrityError:
            logger.warn("[Model Save] Project model not changed")

    return p.id
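The md5(...) helper called here (and in the later database examples) is project code, not the standard library; presumably it wraps hashlib. A plausible stand-in, offered as an assumption:

    import hashlib

    def md5(data):
        # Hypothetical equivalent of the project's md5 helper used above.
        return hashlib.md5(str(data).encode('utf-8')).hexdigest()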
Code Example #4
    def handle(self, *args, **options):

        try:
            if not options['test']:
                SpiderCoreBackend()
            else:
                logger.info('[Spider] start test spider.')

                test_target_list = Queue()
                test_target_list.put({
                    'url': "http://testphp.vulnweb.com",
                    'type': 'link',
                    'cookies':
                    "PHPSESSID=malpd9t9qf68a861ppu17pfhf6; user_auth=95ced4640632f7f556a35ce1e0ed0bb7%3A5904be90c39e9f98196f41664a1c4efb;dy_did=e3ac6928cdaaed85c07bc19700061501; acf_did=e3ac6928cdaaed85c07bc19700061501; smidV2=2020042317420813a4a60257434f8522f4e2bc305ceb8600a8b33a84ef2dd40; PHPSESSID=43h3okfkdj10fegm021t9i0k44; acf_auth=2696tljFRsnqcLIzGYQGUlhz91VKMIIQsVxfp1H6WKJX%2Fjwud0vQL7lS06U8Y2e6gVcWkUsH2QvyEaaqSc9%2F8qCutF%2FTcBVZo5lel7IDqG3oPwG2709hTAE; dy_auth=a89eAynmL3g4svYibpYjL2XYAcmV8lEdDCMjcJRxA8qVYMlb42uiiLSXvu%2Bj1s2xKsAs9RomxRAdD5WwJ73X3t83sQIlnshQnuTfvsPXzQbtkQcOGAnkstA; wan_auth37wan=8b35b6ece202gfx3TOUMFS1LITut%2B6mHHB1VaLD7%2F0nP8GuOqIIxbgXHQxW0UT8CG6Q4dJsvBi2ZuEoOqXzN5eOFfz68QJn%2FbH41fWbyD8%2B%2FDSzQ; acf_uid=3634059; acf_username=qq_z5eCyVjt; acf_nickname=LoRexxar; acf_own_room=1; acf_groupid=1; acf_phonestatus=1; acf_avatar=https%3A%2F%2Fapic.douyucdn.cn%2Fupload%2Favanew%2Fface%2F201711%2F20%2F20%2F68e7d0fe88c6f175eb345458b789c64b_; acf_ct=0; acf_ltkid=33139121; acf_biz=1; acf_stk=4fff6ee864f5aaeb; Hm_lvt_e99aee90ec1b2106afe7ec3b199020a7=1587634920,1587634944; Hm_lpvt_e99aee90ec1b2106afe7ec3b199020a7=1587634944",
                    'deep': 0
                })

                spidercore = SpiderCore(test_target_list)
                spidercore.scan_for_queue()

        except KeyboardInterrupt:
            logger.warn("[Spider] stop scan.")
            exit(0)
        except:
            logger.error("[Spider] something error, {}".format(
                traceback.format_exc()))
Code Example #5
 def hostname(self, value):
     if self.default_endpoint:
         if value:
             self.default_endpoint.hostname = value
     else:
         logger.warn(
             "can't set hostname because default endpoint is absent")
Code Example #6
File: ner.py Project: MrGuo2/MyProject
 def restore_model(self):
     try:
         self.model.load_state_dict(
             torch.load(os.path.join(self.model_path, 'params.pkl')))
         logger.info('model restore success!')
     except Exception as error:
         logger.warn(f'model restore failed! {error}')
Code Example #7
    def start_probe(self):
        print("=====> %s" % self._file)

        if self._file.endswith("ppt"):
            logger.warn("not support file type, file: %s", self._file)
            return

        def read_ppt(ppt):
            prs = Presentation(ppt)
            text_runs = []

            for slide in prs.slides:
                for shape in slide.shapes:
                    if not shape.has_text_frame:
                        continue
                    for paragraph in shape.text_frame.paragraphs:
                        for run in paragraph.runs:
                            text_runs.append(run.text)
            return text_runs

        try:
            text_runs = read_ppt(self._file)
            for text in text_runs:
                for sensitive in self._sensitive_list:
                    if sensitive in text and sensitive not in self._result_list:
                        self._result_list.append(sensitive)
        except Exception as e:
            logger.error(e)
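Note that the suffix check above rejects only the legacy binary ".ppt" format; python-pptx's Presentation reads the XML-based .pptx files that pass through, so the early return is correct rather than inverted.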
Code Example #8
File: hackerone.py Project: ztencmcp/LSpider
    def html_parse(self):
        result_list = []

        td_list = self.chromeclass.driver.find_elements_by_xpath("//td[@class='daisy-table__cell table__row--align-top break-word']")

        for td in td_list:

            try:
                domain = td.find_element_by_tag_name("strong").text.strip()
                result_list.append(domain.replace("*.", ""))

                if td.find_element_by_tag_name('p'):
                    url_list = td.find_element_by_tag_name('p').text
                    for url in url_list.split("\n"):
                        if url.startswith("/"):
                            u = "http://" + domain + url.strip()

                            # replace {} () <>
                            u = re.sub(r'[({<][^)}>]*[)}>]', '1', u)

                            result_list.append(u.replace("*.", ""))

            except NoSuchElementException:
                logger.warn("[Hackerone spider][parse] Not Found child element.")
                continue
            except:
                logger.warn("[Hackerone spider][parse] url data parse error. {}".format(traceback.format_exc()))
                continue

        return result_list
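The re.sub call above collapses templated URL segments such as {id} or <name> into a literal 1, so the spider ends up with a concrete, requestable URL. The same substitution in isolation:

    import re

    u = 'http://example.com/items/{id}/edit/<name>'
    print(re.sub(r'[({<][^)}>]*[)}>]', '1', u))
    # http://example.com/items/1/edit/1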
Code Example #9
 def _getSession(self):
     """Fetch the requests.session object for the logged-in account."""
     try:
         session_mongo = singleMongo.sessionMongo
         session_dict = session_mongo.find_one({'_id': USERNAME}, {
             '_id': 0,
             'session_pyobj': 1
         })
     except Exception as e:
         logger.exception(e)
         raise Exception('<process terminated> mongodb ERROR')
     if session_dict is not None:  # a session exists in mongodb
         self.s = pickle.loads(session_dict['session_pyobj'])
         logger.info('Fetched requests.session object for account {} from mongodb: {}'.format(
             USERNAME, self.s))
     else:  # no session in mongodb: log in and build a new one
         logger.info('requests.session object for account {} has expired; logging in again'.format(USERNAME))
         l = Login()
         self.s = l.session()
         try:
             l.saveSession(self.s)
         except:
             logger.warn(
                 'mongodb connection error: unable to save the requests.session '
                 'object for account {} to mongodb'.format(USERNAME))
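This persistence scheme relies on requests.Session objects being picklable. A round-trip sketch with the MongoDB wiring omitted:

    import pickle
    import requests

    s = requests.Session()
    blob = pickle.dumps(s)         # bytes, storable as the binary 'session_pyobj' field
    restored = pickle.loads(blob)  # a working Session carrying the saved cookies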
Code Example #10
def is_task_finished(destination,
                     task_name,
                     expected_status,
                     clear_tasks_after_success=True):
    view = navigate_to(Tasks, destination)
    tab_view = getattr(view.tabs, destination.lower())
    try:
        row = tab_view.table.row(task_name=task_name, state=expected_status)
    except IndexError:
        logger.warn(
            'IndexError exception suppressed when searching for task row, no match found.'
        )
        return False

    # throw exception if error in message
    message = row.message.text.lower()
    if 'error' in message:
        raise Exception("Task {} error: {}".format(task_name, message))
    elif 'timed out' in message:
        raise TimedOutError("Task {} timed out: {}".format(task_name, message))
    elif 'failed' in message:
        raise Exception("Task {} has a failure: {}".format(task_name, message))

    if clear_tasks_after_success:
        # Remove all finished tasks so they wouldn't poison other tests
        delete_all_tasks(destination)

    return True
Code Example #11
def test_refresh_providers(request, scenario):
    """Refreshes providers then waits for a specific amount of time. Memory Monitor creates graphs
    and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    monitor_thread = SmemMemoryMonitor(SSHClient(), 'workload-refresh-providers', scenario['name'],
        'refresh-providers', get_server_roles_workload_refresh_providers(separator=','),
        ', '.join(scenario['providers']))

    def cleanup_workload(scenario, from_ts):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts))

    monitor_thread.start()

    wait_for_miq_server_ready(poll_interval=2)
    set_server_roles_workload_refresh_providers(ssh_client)
    add_providers(scenario['providers'])
    id_list = get_all_provider_ids()

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_providers(id_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue Refreshes ({}) exceeded time between Refreshes ({})'.format(
                refresh_time, time_between_refresh))

    logger.info('Test Ending...')
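The loop above is a fixed-cadence scheduler: queue the refreshes, then sleep away whatever is left of time_between_refresh, clamped so the run never overshoots total_time. The bare pattern as a runnable sketch:

    import time

    def paced_loop(work, interval, total_time):
        # Run work() roughly every `interval` seconds until total_time elapses.
        start = time.time()
        while (time.time() - start) < total_time:
            t0 = time.time()
            work()
            spent = time.time() - t0
            remaining = total_time - (time.time() - start)
            if spent < interval and remaining > 0:
                time.sleep(min(interval - spent, remaining))

    paced_loop(lambda: print('tick'), interval=1, total_time=3)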
Code Example #12
    def __init__(self, **kwargs):
        for key, val in kwargs.items():
            if key == 'credentials' and not isinstance(
                    val, (Credential, TokenCredential)):
                val = self.credential_class.from_config(val)
            setattr(self, key, val)

        if not hasattr(self, 'credentials'):
            logger.warn("credentials weren't passed "
                        "for endpoint {}".format(self.__class__.__name__))
Code Example #13
    def handle(self, *args, **options):

        try:
            LBotCoreBackend()

        except KeyboardInterrupt:
            logger.warn("[Bot] stop bot.")
            exit(0)
        except:
            logger.error("[Bot] something error, {}".format(traceback.format_exc()))
Code Example #14
 def on_post(self, req, resp):
     requestObj = req.media
     responseObj = {"responseId": 111, "message": "", "data": {}}
     # validate schema
     afterValidation = self.validateSchema(requestObj)
     if not afterValidation[0]:
         log.warn((thisFilename, inspect.currentframe().f_code.co_name,
                   "schema validation failed"))
         responseObj["responseId"] = 110
         responseObj["message"] = afterValidation[1]
     else:
         log.info((thisFilename, inspect.currentframe().f_code.co_name,
                   "schema validation successful"))
         try:
             # check if user is superuser
             if not dbu.checkIfUserIsSuperuser(
                     req.params["kartoon-fapi-incoming"]["_id"]):
                 log.warn(
                     (thisFilename, inspect.currentframe().f_code.co_name,
                      "user is not superuser"))
                 # if not
                 responseObj["responseId"] = 109
                 responseObj["message"] = "Unauthorized access"
             else:
                 log.info(
                     (thisFilename, inspect.currentframe().f_code.co_name,
                      "user is superuser"))
                 log.info(
                     (thisFilename, inspect.currentframe().f_code.co_name,
                      "preparing data to insert"))
                 # if yes
                 # 01. get index for new metaPulse
                 index = dbc.getNewMetaPulseIndex()
                 # 02. increment metaPulse counter
                 dbc.incrementMetaPulseIndex()
                 # 03. prepare dataToBeInserted
                 dataToBeInserted = self.prepareDataToBeInserted(
                     index, requestObj,
                     req.params["kartoon-fapi-incoming"]["_id"])
                 # 04. insert dataToBeInserted in metaPulses and attach metaPulseId in response
                 log.info(
                     (thisFilename, inspect.currentframe().f_code.co_name,
                      "inserting data"))
                 responseObj["data"]["_id"] = dbpu.insertMetaPulse(
                     dataToBeInserted)
                 # 05. set responseId to success
                 responseObj["responseId"] = 211
         except Exception as ex:
             log.error(
                 (thisFilename, inspect.currentframe().f_code.co_name),
                 exc_info=True)
             responseObj["message"] = str(ex)
     resp.media = responseObj
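These on_post handlers follow the Falcon web framework's responder signature (assuming Falcon is the framework in use here): req.media deserializes the JSON request body, and assigning resp.media serializes the response object back to the client.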
Code Example #15
 def validateAssigneesInAProject(self, linkedProjectId: str, assignees: "list of str") -> [list, list]:
     validAssignees = []
     invalidAssignees = []
     # for all assignees, check if they are member of linkedProjectId
     for assigneeId in assignees:
         try:
             ObjectId(assigneeId)
             if dbpr.hasThisMember(linkedProjectId, assigneeId): validAssignees.append(assigneeId)
             else: invalidAssignees.append(assigneeId)
         except Exception as ex:
             log.warn((thisFilename, inspect.currentframe().f_code.co_name, "invalid object id"))
             invalidAssignees.append(assigneeId)
     return [validAssignees, invalidAssignees]
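Calling ObjectId(assigneeId) doubles as validation: bson's ObjectId constructor raises InvalidId for anything that is not a 12-byte value or a 24-character hex string, a pattern the validate* methods below all lean on. Standalone:

    from bson import ObjectId          # ships with pymongo
    from bson.errors import InvalidId

    try:
        ObjectId('not-a-valid-id')
    except InvalidId:
        print('invalid object id')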
Code Example #16
File: provider.py Project: dajohnso/cfme_tests
    def __init__(self, **kwargs):
        for key, val in kwargs.items():
            if key == 'credentials' and isinstance(val, str):
                val = self.credential_class.from_config(val)
            elif key == 'credentials' and isinstance(val, Iterable):
                val = self.credential_class.from_plaintext(val)
            elif key == 'credentials' and isinstance(val, (Credential, TokenCredential)):
                pass
            setattr(self, key, val)

        if not hasattr(self, 'credentials'):
            logger.warn("credentials weren't passed "
                        "for endpoint {}".format(self.__class__.__name__))
Code Example #17
def un_zip(target_path):
    """
    解压缩目标压缩包
    实现新需求,解压缩后相应的js文件做代码格式化
    :return:
    """

    logger.info("[Pre][Unzip] Upzip file {}...".format(target_path))

    if not os.path.isfile(target_path):
        logger.warn("[Pre][Unzip] Target file {} is't exist...pass".format(
            target_path))
        return False

    zip_file = zipfile.ZipFile(target_path)
    target_file_path = target_path + "_files/"

    if os.path.isdir(target_file_path):
        logger.debug("[Pre][Unzip] Target directory {} already exists... using it".format(
            target_file_path))
        zip_file.close()
        return target_file_path
    else:
        os.mkdir(target_file_path)

    for names in zip_file.namelist():
        zip_file.extract(names, target_file_path)

        # 对其中部分文件中为js的时候,将js代码格式化便于阅读
        if names.endswith(".js"):
            file_path = os.path.join(target_file_path, names)
            file = codecs.open(file_path,
                               'r+',
                               encoding='utf-8',
                               errors='ignore')
            file_content = file.read()
            file.close()

            new_file = codecs.open(file_path,
                                   'w+',
                                   encoding='utf-8',
                                   errors='ignore')

            opts = jsbeautifier.default_options()
            opts.indent_size = 2

            new_file.write(jsbeautifier.beautify(file_content, opts))
            new_file.close()

    zip_file.close()

    return target_file_path
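For reference, the jsbeautifier calls used above, in isolation:

    import jsbeautifier

    opts = jsbeautifier.default_options()
    opts.indent_size = 2
    print(jsbeautifier.beautify('function f(){return 1;}', opts))
    # function f() {
    #   return 1;
    # }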
Code Example #18
 def validateProjectMetaId(self, projectMetaId: "str or None",
                           fields: "list of dict") -> [bool, str]:
     success = True
     message = ""
     # it's wrong if:
     #  - projectMetaId is None and fields exist, or
     #  - projectMetaId is not None and fields do not exist
     if (not projectMetaId and len(fields) > 0) or (projectMetaId
                                                    and len(fields) == 0):
         log.warn((thisFilename, inspect.currentframe().f_code.co_name,
                   "projectMetaId and fields mismatch"))
         success = False
         message = "Invalid projectMetaId"
     else:
         # validate projectMetaId
         if projectMetaId:
             try:
                 ObjectId(projectMetaId)
             except Exception as ex:
                 log.warn(
                     (thisFilename, inspect.currentframe().f_code.co_name,
                      "invalid object id"))
                 success = False
                 message = "Invalid projectMetaId"
             if success:
                 # prepare fieldsDict map
                 fieldsDict = {}
                 for field in fields:
                     fieldsDict[field["key"]] = field["value"]
                 # get dbFields using projectMetaId
                 dbFields = dbpr.getFieldsById(projectMetaId)
                 # validate fields
                 if len(fieldsDict) == len(dbFields):
                     for dbField in dbFields:
                         if dbField["key"] in fieldsDict:
                             if dbField["valueType"] == "select":
                                 if not fieldsDict[dbField[
                                         "key"]] in dbField["value"]:
                                     success = False
                                     message = "Invalid value for " + dbField[
                                         "key"]
                                     break
                         else:
                             success = False
                             message = "Missing " + dbField["key"]
                             break
                 else:
                     success = False
                     message = "Invalid fields count"
     return [success, message]
Code Example #19
File: volume.py Project: dajohnso/cfme_tests
 def step(self, *args, **kwargs):
     self.prerequisite_view.toolbar.view_selector.select('List View')
     try:
         row = self.prerequisite_view.paginator.find_row_on_pages(
             self.prerequisite_view.entities.table,
             name=self.obj.name, cloud_provider=self.obj.provider.name)
     except NoSuchElementException:
         logger.warn('Cannot identify volume by name and provider, looking by name only')
         try:
             row = self.prerequisite_view.paginator.find_row_on_pages(
                 self.prerequisite_view.entities.table, name=self.obj.name)
         except NoSuchElementException:
             raise VolumeNotFound
     row.click()
Code Example #20
def update_and_new_project_vendor(project_id,
                                  name,
                                  version,
                                  language,
                                  source=None,
                                  ext=None):
    hash = md5("{},{},{},{}".format(project_id, name, language, source))
    vendor = ProjectVendors.objects.filter(project_id=project_id,
                                           name=name,
                                           language=language).first()

    if vendor:
        # Compatibility handling: if source isn't set yet, backfill it first
        if not vendor.source:
            vendor.version = version
            vendor.source = source
            vendor.ext = ext
            vendor.hash = hash

            try:
                vendor.save()
            except IntegrityError:
                logger.warn("[Model Save] vendor model not changed")

    else:
        vendor = ProjectVendors.objects.filter(project_id=project_id,
                                               hash=hash).first()

    if vendor:
        if vendor.version != version and version != 'unknown':
            logger.debug("[Vendors] Component {} update to version {}".format(
                name, version))

            vendor.version = version
            try:
                vendor.save()
            except IntegrityError:
                logger.warn("[Model Save] vendor model not changed")

    else:
        v = ProjectVendors(project_id=project_id,
                           name=name,
                           version=version,
                           language=language,
                           ext=ext)
        v.save()

    return True
Code Example #21
def get_default_dashboard_url(from_ts, to_ts, output_to_log=True):
    """Builds the string URL for a Grafana Dashboard if enabled."""
    if cfme_performance['tools']['grafana']['enabled']:
        g_ip = cfme_performance['tools']['grafana']['ip_address']
        g_port = cfme_performance['tools']['grafana']['port']
        appliance_name = cfme_performance['appliance']['appliance_name']
        dashboard_name = cfme_performance['tools']['grafana']['default_dashboard']
        grafana_url = 'http://{}:{}/dashboard/db/{}?from={}&to={}&var-Node={}'.format(g_ip, g_port,
            dashboard_name, from_ts, to_ts, appliance_name)
        if output_to_log:
            logger.info('Grafana URL: {}'.format(grafana_url))
        return grafana_url
    else:
        logger.warn('Grafana integration is not enabled')
        return ''
Code Example #22
File: mongo_pool.py Project: xiaoweihong/proxy_pool
 def insert_one(self, proxy):
     """
     插入代理数据
     使用proxy.ip作为主键_id插入
     :param proxy:
     :return:
     """
     count = self.proxies.count({"_id": proxy.ip})
     if count == 0:
         dic = proxy.__dict__
         dic['_id'] = proxy.ip
         self.proxies.insert_one(dic)
         logger.info("{} 插入成功".format(proxy.ip))
     else:
         logger.warn("{} 已经存在".format(proxy.ip))
Code Example #23
def count_events(target, vm):
    timelines_view = navigate_to(target, 'Timelines')
    timelines_view.filter.time_position.select_by_visible_text('centered')
    timelines_view.filter.apply.click()
    found_events = []
    for evt in timelines_view.chart.get_events():
        if not hasattr(evt, 'source_vm'):
            # BZ(1428797)
            logger.warn("event {evt} doesn't have source_vm field. Probably issue".format(evt=evt))
            continue
        elif evt.source_vm == vm.name:
            found_events.append(evt)

    logger.info("found events: {evt}".format(evt="\n".join([repr(e) for e in found_events])))
    return len(found_events)
Code Example #24
 def _saveParams(self, type_name: str, item: dict):
     """保存参数:redis and file"""
     if item != {}:
         update_item = deepcopy(item)
         update_item['update_time'] = datetime.utcnow()  # 更新时间(utc)
         try:
             self.param_mongo.update_one(item, {'$set': update_item},
                                         upsert=True)
         except Exception as e:
             logger.exception(e)
             logger.warn('没有写入的数据: {}'.format(item))
             logger.error('<进程终止>: mongodb ERROR')
             raise Exception('<进程终止>: mongodb ERROR')
     else:
         logger.info('<无数据>:{}'.format(type_name))
Code Example #25
 def validateProjectId(self, projectId: str) -> [bool, str]:
     success = True
     message = ""
     try:
         ObjectId(projectId)
     except Exception as ex:
         log.warn((thisFilename, inspect.currentframe().f_code.co_name, "invalid object id"))
         success = False
         message = "Invalid projectId"
     if success:
         if dbpr.countDocumentsById(projectId) != 1:
             log.warn((thisFilename, inspect.currentframe().f_code.co_name, "projectId does not exist"))
             success = False
             message = "Invalid projectId"
     return [success, message]
Code Example #26
 def validatePulseId(self, linkedMilestoneId: str, pulseId: str) -> [bool, str]:
     success = True
     message = ""
     try:
         ObjectId(pulseId)
     except Exception as ex:
         log.warn((thisFilename, inspect.currentframe().f_code.co_name, "invalid object id"))
         success = False
         message = "Invalid pulseId"
     if success:
         # check if pulseId exists and has linkedMilestoneId
         if dbpu.countDocumentsById(pulseId) != 1 or not dbpu.hasThisLinkedMilestoneId(linkedMilestoneId, pulseId):
             log.warn((thisFilename, inspect.currentframe().f_code.co_name, "pulseId does not exist or does not have linkedMilestoneId"))
             success = False
             message = "Invalid pulseId"
     return [success, message]
Code Example #27
def get_default_dashboard_url(from_ts, to_ts, output_to_log=True):
    """Builds the string URL for a Grafana Dashboard if enabled."""
    if cfme_performance['tools']['grafana']['enabled']:
        g_ip = cfme_performance['tools']['grafana']['ip_address']
        g_port = cfme_performance['tools']['grafana']['port']
        appliance_name = cfme_performance['appliance']['appliance_name']
        dashboard_name = cfme_performance['tools']['grafana'][
            'default_dashboard']
        grafana_url = 'http://{}:{}/dashboard/db/{}?from={}&to={}&var-Node={}'.format(
            g_ip, g_port, dashboard_name, from_ts, to_ts, appliance_name)
        if output_to_log:
            logger.info('Grafana URL: {}'.format(grafana_url))
        return grafana_url
    else:
        logger.warn('Grafana integration is not enabled')
        return ''
Code Example #28
 def validateLocationId(self, locationId: str) -> [bool, str]:
     success = True
     message = ""
     try:
         ObjectId(locationId)
     except Exception as ex:
         log.warn((thisFilename, inspect.currentframe().f_code.co_name, "invalid object id"))
         success = False
         message = "Invalid locationId"
     if success:
         # check if locationId exists
         if dbl.countDocumentsById(locationId) != 1:
             log.warn((thisFilename, inspect.currentframe().f_code.co_name, "locationId does not exist"))
             success = False
             message = "Invalid locationId"
     return [success, message]
Code Example #29
File: test_timelines.py Project: dajohnso/cfme_tests
def count_events(target, vm):
    timelines_view = navigate_to(target, 'Timelines')
    if isinstance(target, Server):
        timelines_view = timelines_view.timelines
    timelines_view.filter.time_position.select_by_visible_text('centered')
    timelines_view.filter.apply.click()
    found_events = []
    for evt in timelines_view.chart.get_events():
        if not hasattr(evt, 'source_vm'):
            # BZ(1428797)
            logger.warn("event {evt} doesn't have source_vm field. Probably issue".format(evt=evt))
            continue
        elif evt.source_vm == vm.name:
            found_events.append(evt)

    logger.info("found events: {evt}".format(evt="\n".join([repr(e) for e in found_events])))
    return len(found_events)
Code Example #30
    def load_model(self, checkpoint):
        """Load model from checkpoint.

        Args:
            checkpoint: str, checkpoint path.

        Returns:
            None.
        """
        logger.info(f'Loading model from {checkpoint}.')
        epoch = int(os.path.basename(checkpoint)[6:-3])
        self.current_epoch = epoch + 1
        self.model = torch.load(checkpoint, map_location=self.device)
        if self.current_epoch > self.config.epoch_num:
            logger.warn(
                f'The current epoch {self.current_epoch} > total epoch number {self.config.epoch_num}.'
            )
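The slice [6:-3] implies checkpoint basenames with a fixed six-character prefix and a three-character '.pt' suffix, e.g. model_12.pt; the exact naming scheme is an assumption:

    import os

    checkpoint = '/ckpt/model_12.pt'  # hypothetical path following that scheme
    print(int(os.path.basename(checkpoint)[6:-3]))  # 12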
Code Example #31
File: hackerone.py Project: ztencmcp/LSpider
    def spider(self, appname):
        url = self.url + appname

        # login
        self.login()
        time.sleep(5)

        code, content, title = self.chromeclass.get_resp(url, isclick=False)
        time.sleep(5)

        if "Page not found" in content:
            logger.warn("[Hackerone spider] Not Found App {}".format(appname))
            return []

        result = self.html_parse()

        return result
Code Example #32
 def on_post(self, req, resp):
     requestObj = req.media
     responseObj = {"responseId": 111, "message": "", "data": {}}
     # validate schema
     afterValidation = self.validateSchema(requestObj)
     if not afterValidation[0]:
         log.warn((thisFilename, inspect.currentframe().f_code.co_name,
                   "schema validation failed"))
         responseObj["responseId"] = 110
         responseObj["message"] = afterValidation[1]
     else:
         log.info((thisFilename, inspect.currentframe().f_code.co_name,
                   "schema validation successful"))
         try:
             # check if user exists
             userCount = dbu.countDocumentsByUsername(
                 requestObj["username"])
             if userCount == 1:
                 log.info(
                     (thisFilename, inspect.currentframe().f_code.co_name,
                      "user exists, update to superuser"))
                 # if yes, updateUserToSuperuser
                 dbu.updateUserToSuperuser(requestObj["username"])
             elif userCount == 0:
                 log.info(
                     (thisFilename, inspect.currentframe().f_code.co_name,
                      "user does not exist, add as superuser"))
                 # if no
                 # 01. get index for new user
                 index = dbc.getNewUserIndex()
                 # 02. increment user counter
                 dbc.incrementUserIndex()
                 # 03. prepare dataToBeInserted
                 dataToBeInserted = self.prepareDataToBeInserted(
                     index, requestObj)
                 # 04. insert dataToBeInserted in users and attach userId in response
                 responseObj["data"]["_id"] = dbu.insertUser(
                     dataToBeInserted)
             # 05. set responseId to success
             responseObj["responseId"] = 211
         except Exception as ex:
             log.error(
                 (thisFilename, inspect.currentframe().f_code.co_name),
                 exc_info=True)
             responseObj["message"] = str(ex)
     resp.media = responseObj
Code Example #33
 def step(self, *args, **kwargs):
     self.prerequisite_view.toolbar.view_selector.select('List View')
     try:
         row = self.prerequisite_view.paginator.find_row_on_pages(
             self.prerequisite_view.entities.table,
             name=self.obj.name,
             cloud_provider=self.obj.provider.name)
     except NoSuchElementException:
         logger.warn(
             'Cannot identify volume by name and provider, looking by name only'
         )
         try:
             row = self.prerequisite_view.paginator.find_row_on_pages(
                 self.prerequisite_view.entities.table, name=self.obj.name)
         except NoSuchElementException:
             raise VolumeNotFound
     row.click()
Code Example #34
def check_update_or_new_scanresult(scan_task_id, cvi_id, language,
                                   vulfile_path, source_code, result_type,
                                   is_unconfirm, is_active):
    # Optimize basic scan results
    if str(cvi_id).startswith('5'):
        vulfile_path = vulfile_path.split(':')[0]

    # If the vulnerability hash exists, update the record; otherwise create a new vulnerability
    scan_project_id = get_and_check_scantask_project_id(scan_task_id)
    vul_hash = md5("{},{},{},{},{}".format(scan_project_id, cvi_id, language,
                                           vulfile_path, source_code))

    sr = ScanResultTask.objects.filter(vul_hash=vul_hash).first()
    if sr:
        logger.debug("[Database] Scan Result id {} exist. update.".format(
            sr.id))

        sr.scan_task_id = scan_task_id
        sr.cvi_id = cvi_id
        sr.language = language
        sr.vulfile_path = vulfile_path
        sr.source_code = source_code
        sr.result_type = result_type
        sr.is_unconfirm = is_unconfirm

        try:
            sr.save()
        except IntegrityError:
            logger.warn("[Model Save] Model param not changed")

        return False

    else:
        sr = ScanResultTask(scan_project_id=scan_project_id,
                            scan_task_id=scan_task_id,
                            cvi_id=cvi_id,
                            language=language,
                            vulfile_path=vulfile_path,
                            source_code=source_code,
                            result_type=result_type,
                            is_unconfirm=is_unconfirm,
                            is_active=is_active)
        sr.save()

    return sr
Code Example #35
def is_task_finished(destination, task_name, expected_status, clear_tasks_after_success=True):
    view = navigate_to(Tasks, destination)
    tab_view = getattr(view.tabs, destination.lower())
    try:
        row = tab_view.table.row(task_name=task_name, state=expected_status)
    except IndexError:
        logger.warn('IndexError exception suppressed when searching for task row, no match found.')
        return False

    # throw exception if error in message
    if 'error' in row.message.text.lower():
        raise Exception("Task {} error".format(task_name))

    if clear_tasks_after_success:
        # Remove all finished tasks so they wouldn't poison other tests
        view.delete.item_select('Delete All', handle_alert=True)

    return True
Code Example #36
File: grafana.py Project: akrzos/cfme-performance
def get_scenario_dashboard_urls(scenario, from_ts, to_ts, output_to_log=True):
    """Builds a dictionary of URLs to Grafana Dashboards of relevant appliances for a single
    workload's scenario.  It accounts for when a replication_master appliance is under test too."""
    if cfme_performance['tools']['grafana']['enabled']:
        g_ip = cfme_performance['tools']['grafana']['ip_address']
        g_port = cfme_performance['tools']['grafana']['port']
        appliance_name = cfme_performance['appliance']['appliance_name']
        dashboard_name = cfme_performance['tools']['grafana']['default_dashboard']
        grafana_urls = {}
        if 'grafana_dashboard' in scenario:
            dashboard_name = scenario['grafana_dashboard']
        grafana_urls['appliance'] = 'http://{}:{}/dashboard/db/{}?from={}&to={}&var-Node={}'.format(g_ip,
            g_port, dashboard_name, from_ts, to_ts, appliance_name)
        if 'replication_master' in scenario:
            grafana_urls['replication_master'] = \
                'http://{}:{}/dashboard/db/{}?from={}&to={}&var-Node={}'.format(g_ip, g_port,
                dashboard_name, from_ts, to_ts, scenario['replication_master']['appliance_name'])
        if output_to_log:
            logger.info('Grafana URLs: {}'.format(grafana_urls))
        return grafana_urls
    else:
        logger.warn('Grafana integration is not enabled')
        return ''
Code Example #37
File: tasks.py Project: dajohnso/cfme_tests
def is_task_finished(destination, task_name, expected_status, clear_tasks_after_success=True):
    view = navigate_to(Tasks, destination)
    tab_view = getattr(view.tabs, destination.lower())
    try:
        row = tab_view.table.row(task_name=task_name, state=expected_status)
    except IndexError:
        logger.warn('IndexError exception suppressed when searching for task row, no match found.')
        return False

    # throw exception if error in message
    message = row.message.text.lower()
    if 'error' in message:
        raise Exception("Task {} error: {}".format(task_name, message))
    elif 'timed out' in message:
        raise TimedOutError("Task {} timed out: {}".format(task_name, message))
    elif 'failed' in message:
        raise Exception("Task {} has a failure: {}".format(task_name, message))

    if clear_tasks_after_success:
        # Remove all finished tasks so they wouldn't poison other tests
        delete_all_tasks(destination)

    return True
Code Example #38
 def _finalize():
     try:
         vm_obj.delete_from_provider()
     except Exception:
         logger.warn('Failed deleting VM from provider: %s', vm_name)
Code Example #39
def test_refresh_vms(request, scenario):
    """Refreshes all vm's then waits for a specific amount of time. Memory Monitor creates
    graphs and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-vm',
        'test_name': 'Refresh VMs',
        'appliance_roles': get_server_roles_workload_refresh_vms(separator=', '),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_refresh_vms(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    full_refresh_threshold_set = False
    if 'full_refresh_threshold' in scenario:
        if scenario['full_refresh_threshold'] != FULL_REFRESH_THRESHOLD_DEFAULT:
            set_full_refresh_threshold(ssh_client, scenario['full_refresh_threshold'])
            full_refresh_threshold_set = True
    if not full_refresh_threshold_set:
        logger.debug('Keeping full_refresh_threshold at default ({}).'.format(
            FULL_REFRESH_THRESHOLD_DEFAULT))

    refresh_size = scenario['refresh_size']
    vm_ids = get_all_vm_ids()
    vm_ids_iter = cycle(vm_ids)
    logger.debug('Number of VM IDs: {}'.format(len(vm_ids)))

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_vms = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_list = [next(vm_ids_iter) for x in range(refresh_size)]
        refresh_provider_vms_bulk(refresh_list)
        total_refreshed_vms += len(refresh_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue VM Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue VM Refreshes ({}) exceeded time between '
                '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Refreshes'] = total_refreshed_vms
    logger.info('Test Ending...')
Code Example #40
File: provider.py Project: dajohnso/cfme_tests
 def ip_address(self, value):
     if self.default_endpoint:
         if value:
             self.default_endpoint.ipaddress = value
     else:
         logger.warn("can't set ipaddress because default endpoint is absent")
Code Example #41
def test_refresh_providers(request, scenario):
    """Refreshes providers then waits for a specific amount of time. Memory Monitor creates graphs
    and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-providers',
        'test_name': 'Refresh Providers',
        'appliance_roles': get_server_roles_workload_refresh_providers(separator=', '),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_refresh_providers(ssh_client)
    add_providers(scenario['providers'])
    id_list = get_all_provider_ids()

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_providers = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_providers_bulk(id_list)
        total_refreshed_providers += len(id_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue Refreshes ({}) exceeded time between '
                '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_Provider_Refreshes'] = total_refreshed_providers
    logger.info('Test Ending...')
Code Example #42
def test_workload_smartstate_analysis(request, scenario):
    """Runs through provider based scenarios initiating smart state analysis against VMs, Hosts,
    and Datastores"""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))
    install_vddk(ssh_client)

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-ssa',
        'test_name': 'SmartState Analysis',
        'appliance_roles': get_server_roles_workload_smartstate(separator=', '),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_smartstate(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    # Add host credentials and set CFME relationship for RHEVM SSA
    for provider in scenario['providers']:
        add_host_credentials(cfme_performance['providers'][provider], ssh_client)
        if (cfme_performance['providers'][provider]['type'] ==
                "ManageIQ::Providers::Redhat::InfraManager"):
            set_cfme_server_relationship(ssh_client,
                cfme_performance['appliance']['appliance_name'])

    # Get list of VM ids by mapping provider name + vm name to the vm id
    vm_ids_to_scan = map_vms_to_ids(scenario['vms_to_scan'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']
    total_scanned_VMs = 0

    while ((time.time() - starttime) < total_time):
        start_ssa_time = time.time()
        scan_provider_vms_bulk(vm_ids_to_scan)
        total_scanned_VMs += len(vm_ids_to_scan)
        iteration_time = time.time()

        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue SmartState Analyses: {}'.format(ssa_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue SmartState Analyses ({}) exceeded time between '
                '({})'.format(ssa_time, time_between_analyses))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Scans'] = total_scanned_VMs
    logger.info('Test Ending...')
Code Example #43
File: provider.py Project: dajohnso/cfme_tests
 def hostname(self, value):
     if self.default_endpoint:
         if value:
             self.default_endpoint.hostname = value
     else:
         logger.warn("can't set hostname because default endpoint is absent")
Code Example #44
def test_workload_smartstate_analysis(appliance, request, scenario):
    """Runs through provider based scenarios initiating smart state analysis against VMs, Hosts,
    and Datastores"""
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))
    appliance.install_vddk()

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-ssa',
        'test_name': 'SmartState Analysis',
        'appliance_roles': ', '.join(roles_smartstate),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_smartstate})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    # Add host credentials and set CFME relationship for RHEVM SSA
    for provider in scenario['providers']:
        for api_host in appliance.rest_api.collections.hosts.all:
            test_host = host.Host(name=api_host.name, provider=provider)
            host_data = get_host_data_by_name(get_crud(provider), api_host.name)
            credentials = host.get_credentials_from_config(host_data['credentials'])
            test_host.update_credentials_rest(credentials)
        appliance.set_cfme_server_relationship(cfme_performance['appliance']['appliance_name'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']
    total_scanned_vms = 0

    while ((time.time() - starttime) < total_time):
        start_ssa_time = time.time()
        for vm in scenario['vms_to_scan'].values()[0]:
            vm_api = appliance.rest_api.collections.vms.get(name=vm)
            vm_api.action.scan()
            total_scanned_vms += 1
        iteration_time = time.time()

        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue SmartState Analyses: {}'.format(ssa_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue SmartState Analyses ({}) exceeded time between '
                '({})'.format(ssa_time, time_between_analyses))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Scans'] = total_scanned_vms
    logger.info('Test Ending...')
Code Example #45
File: testgen.py Project: weissjeffm/cfme_tests
def provider_by_type(metafunc, provider_types, *fields):
    """Get the values of the named field keys from ``cfme_data['management_systems']``

    Args:
        provider_types: A list of provider types to include. If None, all providers are considered
        *fields: Names of keys in an individual provider dict whose values will be returned when
            used as test function arguments

    The following test function arguments are special:

        ``provider_data``
            the entire provider data dict from cfme_data.

        ``provider_key``
            the provider's key in ``cfme_data['management_systems']``

        ``provider_crud``
            the provider's CRUD object, either a :py:class:`cfme.cloud.provider.Provider`
            or a :py:class:`cfme.infrastructure.provider.Provider`

        ``provider_mgmt``
            the provider's backend manager, from :py:class:`utils.mgmt_system`

    Returns:
        A tuple of ``(argnames, argvalues, idlist)`` for use in a pytest_generate_tests hook, or
        with the :py:func:`parametrize` helper.

    Usage:

        # In the function itself
        def pytest_generate_tests(metafunc):
            argnames, argvalues, idlist = testgen.provider_by_type(
                ['openstack', 'ec2'],
                'type', 'name', 'credentials', 'provider_data', 'hosts'
            )
        metafunc.parametrize(argnames, argvalues, ids=idlist, scope='module')

        # Using the parametrize wrapper
        pytest_generate_tests = testgen.parametrize(testgen.provider_by_type, ['openstack', 'ec2'],
            'type', 'name', 'credentials', 'provider_data', 'hosts', scope='module')

    Note:

        Using the default 'function' scope, each test will be run individually for each provider
        before moving on to the next test. To group all tests related to single provider together,
        parametrize tests in the 'module' scope.

    """
    argnames = list(fields)
    argvalues = []
    idlist = []

    special_args = ('provider_key', 'provider_data', 'provider_crud',
        'provider_mgmt', 'provider_type')
    # Hook on special attrs if requested
    for argname in special_args:
        if argname in metafunc.fixturenames and argname not in argnames:
            argnames.append(argname)

    for provider, data in cfme_data['management_systems'].iteritems():
        prov_type = data['type']
        if provider_types is not None and prov_type not in provider_types:
            # Skip unwanted types
            continue

        # Use the provider name for idlist, helps with readable parametrized test output
        idlist.append(provider)

        # Get values for the requested fields, filling in with None for undefined fields
        values = [data.get(field) for field in fields]

        # Go through the values and handle the special 'data' name
        # report the undefined fields to the log
        for field, value in zip(fields, values):
            if value is None:
                logger.warn('Field "%s" not defined for provider "%s", defaulting to None' %
                    (field, provider)
                )

        if prov_type in cloud_provider_type_map:
            crud = get_cloud_provider(provider)
        elif prov_type in infra_provider_type_map:
            crud = get_infra_provider(provider)
        # else: wat? You deserve the NameError you're about to receive

        mgmt = provider_factory(provider)

        special_args_map = dict(zip(special_args, (provider, data, crud, mgmt, prov_type)))
        for arg in special_args:
            if arg in argnames:
                values.append(special_args_map[arg])
        argvalues.append(values)

    return argnames, argvalues, idlist
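
The ``parametrize`` helper referenced in the docstring is not part of this snippet. A minimal sketch of what such a wrapper might look like, assuming it simply supplies ``metafunc`` and applies the generated parameters (a hypothetical reconstruction, not the actual cfme_tests code):

def parametrize(gen_func, *args, **kwargs):
    # Hypothetical reconstruction: turn a testgen generator (such as
    # provider_by_type) into a ready-made pytest_generate_tests hook.
    scope = kwargs.pop('scope', 'function')

    def pytest_generate_tests(metafunc):
        argnames, argvalues, idlist = gen_func(metafunc, *args, **kwargs)
        if argnames:
            metafunc.parametrize(argnames, argvalues, ids=idlist, scope=scope)

    return pytest_generate_tests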
Code Example #46
def test_provisioning(appliance, request, scenario):
    """Runs through provisioning scenarios using the REST API to
    continuously provision a VM for a specified period of time.
    Memory Monitor creates graphs and summary at the end of each scenario."""

    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-provisioning',
        'test_name': 'Provisioning',
        'appliance_roles': ', '.join(roles_provisioning),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    provision_order = []

    def cleanup_workload(scenario, from_ts, vms_to_cleanup, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        appliance.update_server_roles({role: True for role in roles_provisioning_cleanup})
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        final_vm_size = len(vms_to_cleanup)
        appliance.rest_api.collections.vms.action.delete(vms_to_cleanup)
        monitor_thread.join()
        logger.info('{} VMs were left over, and {} VMs were deleted in the finalizer.'
            .format(final_vm_size, final_vm_size - len(vms_to_cleanup)))
        logger.info('The following VMs were left over after the test: {}'
            .format(vms_to_cleanup))
        quantifiers['VMs_To_Delete_In_Finalizer'] = final_vm_size
        quantifiers['VMs_Deleted_In_Finalizer'] = final_vm_size - len(vms_to_cleanup)
        quantifiers['Leftover_VMs'] = vms_to_cleanup
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, provision_order, quantifiers,
            scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_provisioning})
    prov = get_crud(scenario['providers'][0])
    prov.create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    guid_list = prov.get_template_guids(scenario['templates'])
    guid_cycle = cycle(guid_list)
    cleanup_size = scenario['cleanup_size']
    number_of_vms = scenario['number_of_vms']
    total_time = scenario['total_time']
    time_between_provision = scenario['time_between_provision']
    total_provisioned_vms = 0
    total_deleted_vms = 0
    provisioned_vms = 0
    starttime = time.time()

    while ((time.time() - starttime) < total_time):
        start_iteration_time = time.time()
        provision_list = []
        for i in range(number_of_vms):
            total_provisioned_vms += 1
            provisioned_vms += 1
            vm_to_provision = '{}-provision-{}'.format(
                test_ts, str(total_provisioned_vms).zfill(4))
            guid_to_provision, provider_name = next(guid_cycle)
            provision_order.append((vm_to_provision, provider_name))
            provision_list.append((vm_to_provision, guid_to_provision,
                prov.data['provisioning']['vlan']))

        template = prov.data.get('small_template')
        provision_data = get_provision_data(appliance.rest_api, prov, template)
        vm_name = provision_data["vm_fields"]["vm_name"]
        response = appliance.rest_api.collections.provision_requests.action.create(**provision_data)
        assert appliance.rest_api.response.status_code == 200
        provision_request = response[0]

        def _finished():
            provision_request.reload()
            if "error" in provision_request.status.lower():
                pytest.fail("Error when provisioning: `{}`".format(provision_request.message))
            return provision_request.request_state.lower() in ("finished", "provisioned")

        wait_for(_finished, num_sec=800, delay=5, message="REST provisioning finishes")

        vm = appliance.rest_api.collections.vms.get(name=vm_name)
        creation_time = time.time()
        provision_time = round(creation_time - start_iteration_time, 2)
        logger.debug('Time to initiate provisioning: {}'.format(provision_time))
        logger.info('{} VMs provisioned so far'.format(total_provisioned_vms))

        if provisioned_vms > cleanup_size * len(scenario['providers']):
            start_remove_time = time.time()
            if appliance.rest_api.collections.vms.action.delete(vm):
                provision_order.pop(0)
                provisioned_vms -= 1
                total_deleted_vms += 1
            deletion_time = round(time.time() - start_remove_time, 2)
            logger.debug('Time to initiate deleting: {}'.format(deletion_time))
            logger.info('{} VMs deleted so far'.format(total_deleted_vms))

        end_iteration_time = time.time()
        iteration_time = round(end_iteration_time - start_iteration_time, 2)
        elapsed_time = end_iteration_time - starttime
        logger.debug('Time to initiate provisioning and deletion: {}'.format(iteration_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if iteration_time < time_between_provision:
            wait_diff = time_between_provision - iteration_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_provision):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to initiate provisioning ({}) exceeded time between '
                '({})'.format(iteration_time, time_between_provision))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Provisionings'] = total_provisioned_vms
    quantifiers['Deleted_VMs'] = total_deleted_vms
    logger.info('Provisioned {} VMs and deleted {} VMs during the scenario.'
        .format(total_provisioned_vms, total_deleted_vms))
    logger.info('Test Ending...')
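
The ``wait_for`` call above polls the provision request until it reaches a terminal state. A simplified sketch of that polling idiom, assuming a bare-bones implementation (the real cfme_tests helper supports many more options):

import time

def wait_for(condition, num_sec, delay, message=''):
    # Poll `condition` every `delay` seconds until it returns truthy, giving
    # up after roughly `num_sec` seconds. Simplified sketch only.
    deadline = time.time() + num_sec
    while time.time() < deadline:
        if condition():
            return True
        time.sleep(delay)
    raise RuntimeError('Timed out waiting for: {}'.format(message))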
Code Example #47
def test_workload_smartstate_analysis(request, scenario):
    """Runs through provider based scenarios initiating smart state analysis against VMs, Hosts,
    and Datastores"""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))
    install_vddk(ssh_client)

    clean_appliance(ssh_client)

    monitor_thread = SmemMemoryMonitor(SSHClient(), 'workload-ssa', scenario['name'],
        'SmartState Analysis', get_server_roles_workload_smartstate(separator=', '),
        ', '.join(scenario['providers']))

    def cleanup_workload(scenario, from_ts):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts))

    monitor_thread.start()

    wait_for_miq_server_ready(poll_interval=2)
    set_server_roles_workload_smartstate(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    for provider in scenario['providers']:
        add_host_credentials(cfme_performance['providers'][provider])
        if (cfme_performance['providers'][provider]['type'] ==
                "ManageIQ::Providers::Redhat::InfraManager"):
            set_cfme_server_relationship(ssh_client,
                cfme_performance['appliance']['appliance_name'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']

    vm_ids_to_scan = []
    for vm_name in scenario['vms_to_scan']:
        vm_ids_to_scan.append(get_vm_id(vm_name))

    while ((time.time() - starttime) < total_time):
        start_ssa_time = time.time()

        for vm_id in vm_ids_to_scan:
            scan_provider_vm(vm_id)

        iteration_time = time.time()
        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to initiate SmartState Analyses: {}'.format(ssa_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to initiate SmartState Analyses ({}) exceeded time between '
                '({})'.format(ssa_time, time_between_analyses))

    logger.info('Test Ending...')
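
Examples #46 through #48 all close their main loop with the same interval-pacing pattern. Distilled into a standalone function it might look like this (a sketch; the function name and signature are illustrative, not part of the original module):

import time

def pace_iteration(iteration_time, elapsed_time, total_time, interval, log=None):
    # Sleep so iterations start roughly `interval` seconds apart, without
    # sleeping past the scheduled end of the scenario.
    if iteration_time < interval:
        time_remaining = total_time - elapsed_time
        if 0 < time_remaining < interval:
            time.sleep(time_remaining)  # final partial window: stop on schedule
        elif time_remaining > 0:
            time.sleep(interval - iteration_time)  # top up to the full interval
    elif log is not None:
        log.warn('Iteration took {}s, exceeding the {}s interval'.format(
            iteration_time, interval))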
Code Example #48
def test_provisioning(request, scenario):
    """Runs through provisioning scenarios using the REST API to
    continuously provision a VM for a specified period of time.
    Memory Monitor creates graphs and summary at the end of each scenario."""

    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-provisioning',
        'test_name': 'Provisioning',
        'appliance_roles': get_server_roles_workload_provisioning(separator=', '),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    provision_order = []

    def cleanup_workload(scenario, from_ts, vms_to_cleanup, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        set_server_roles_workload_provisioning_cleanup(ssh_client)
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        final_vm_size = len(vms_to_cleanup)
        delete_provisioned_vms(vms_to_cleanup)
        monitor_thread.join()
        logger.info('{} VMs were left over, and {} VMs were deleted in the finalizer.'
            .format(final_vm_size, final_vm_size - len(vms_to_cleanup)))
        logger.info('The following VMs were left over after the test: {}'
            .format(vms_to_cleanup))
        quantifiers['VMs_To_Delete_In_Finalizer'] = final_vm_size
        quantifiers['VMs_Deleted_In_Finalizer'] = final_vm_size - len(vms_to_cleanup)
        quantifiers['Leftover_VMs'] = vms_to_cleanup
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, provision_order, quantifiers,
            scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_provisioning(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    guid_list = get_template_guids(scenario['templates'])
    guid_cycle = cycle(guid_list)
    cleanup_size = scenario['cleanup_size']
    number_of_vms = scenario['number_of_vms']
    total_time = scenario['total_time']
    time_between_provision = scenario['time_between_provision']
    total_provisioned_vms = 0
    total_deleted_vms = 0
    provisioned_vms = 0
    starttime = time.time()

    while ((time.time() - starttime) < total_time):
        start_iteration_time = time.time()
        provision_list = []
        for i in range(number_of_vms):
            total_provisioned_vms += 1
            provisioned_vms += 1
            vm_to_provision = '{}-provision-{}'.format(
                test_ts, str(total_provisioned_vms).zfill(4))
            guid_to_provision, provider_name = next(guid_cycle)
            provider_to_provision = cfme_performance['providers'][provider_name]
            provision_order.append((vm_to_provision, provider_name))
            provision_list.append((vm_to_provision, guid_to_provision,
                provider_to_provision['vlan_network']))

        provision_vm(provision_list)
        creation_time = time.time()
        provision_time = round(creation_time - start_iteration_time, 2)
        logger.debug('Time to initiate provisioning: {}'.format(provision_time))
        logger.info('{} VMs provisioned so far'.format(total_provisioned_vms))

        if provisioned_vms > cleanup_size * len(scenario['providers']):
            start_remove_time = time.time()
            if delete_provisioned_vm(provision_order[0]):
                provision_order.pop(0)
                provisioned_vms -= 1
                total_deleted_vms += 1
            deletion_time = round(time.time() - start_remove_time, 2)
            logger.debug('Time to initiate deleting: {}'.format(deletion_time))
            logger.info('{} VMs deleted so far'.format(total_deleted_vms))

        end_iteration_time = time.time()
        iteration_time = round(end_iteration_time - start_iteration_time, 2)
        elapsed_time = end_iteration_time - starttime
        logger.debug('Time to initiate provisioning and deletion: {}'.format(iteration_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if iteration_time < time_between_provision:
            wait_diff = time_between_provision - iteration_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_provision):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to initiate provisioning ({}) exceeded time between '
                '({})'.format(iteration_time, time_between_provision))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Provisionings'] = total_provisioned_vms
    quantifiers['Deleted_VMs'] = total_deleted_vms
    logger.info('Provisioned {} VMs and deleted {} VMs during the scenario.'
        .format(total_provisioned_vms, total_deleted_vms))
    logger.info('Test Ending...')
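
The rolling cleanup inside the loop keeps at most ``cleanup_size`` VMs per provider alive by deleting the oldest entry in ``provision_order``. A distilled sketch of that pattern, with ``delete_fn`` standing in for ``delete_provisioned_vm`` (and generalized from the one-delete-per-iteration form above into a catch-up loop):

def rolling_cleanup(provision_order, provisioned_vms, cleanup_size,
                    n_providers, delete_fn):
    # Delete the oldest provisioned VMs (FIFO) until the live count is back
    # under cleanup_size per provider. Sketch of the pattern used above.
    deleted = 0
    while provisioned_vms > cleanup_size * n_providers:
        if not delete_fn(provision_order[0]):
            break  # stop rather than spin if a delete fails
        provision_order.pop(0)
        provisioned_vms -= 1
        deleted += 1
    return provisioned_vms, deleted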