def parse_url(url):
    """
    Parse an RSS XML URL and extract the basic details of each entry.
    :param url: RSS XML URL
    :return: iterator of item dicts
    """
    try:
        items = []
        column_list = [
            'title', 'summary', 'id', 'language', 'link', 'description',
            'published', 'media_content', 'image'
        ]
        feed = feedparser.parse(url)
        if not feed.entries:
            return iter([])
        logger.info("Please wait, parsing through the RSS feeds...")
        for entry in feed.entries:
            # Keep only the whitelisted fields.
            item = {key: entry.get(key) for key in entry if key in column_list}
            # Prefer the first media_content URL; fall back to the image field.
            if 'media_content' in entry:
                item['image_url'] = entry.media_content[0]['url']
            else:
                item['image_url'] = entry.get('image')
            item = get_metadata_newspaper(item)
            if item:
                items.append(item)
        return iter(items)
    except Exception as error:
        logger.error(error)
        return iter([])
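# Usage sketch (assumes feedparser is installed and get_metadata_newspaper is
# defined elsewhere; the feed URL below is illustrative, not from the source):
for item in parse_url("https://example.com/rss.xml"):
    print(item.get('title'), item.get('image_url'))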
Example #2
def checkFabricServiceStatus(yamlContent):
    logger.info("Waiting for BoK to start up ...")
    clusterName = yamlContent["clusterName"]
    peerOrgs = yamlContent["crypto-config.yaml"]["PeerOrgs"]
    timeCount = 0
    config.load_kube_config()
    v1 = client.CoreV1Api()
    while True:
        ifReady = 1
        ret = v1.list_pod_for_all_namespaces(watch=False)
        for peerOrg in peerOrgs:
            namespace = peerOrg["Name"].lower() + "-" + clusterName.lower()
            # An org is ready only if at least one pod exists in its
            # namespace and none of its pods is outside the Running phase.
            orgReady = 0
            for i in ret.items:
                if namespace == i.metadata.namespace:
                    if i.status.phase != "Running":
                        orgReady = 0
                        break
                    orgReady = 1
            if orgReady == 0:
                ifReady = 0
                break
        if ifReady == 1:
            logger.info("BoK is up and running.")
            break
        timeCount += INTERVAL
        time.sleep(INTERVAL)
        if timeCount > TIMEOUT:
            logger.error("Error: Failed to start BoK service.")
            sys.exit(1)
Example #3
def load_paperdata(distance_f):
    '''
    Load distances from a data file.

    Args:
        distance_f : distance file; each line is "index1 index2 distance"

    Returns:
        distances dict, max distance, min distance, max continuous id
    '''
    logger.info("PROGRESS: load data")
    distances = {}
    min_dis, max_dis = sys.float_info.max, 0.0
    max_id = 0
    with open(distance_f, 'r') as fp:
        for line in fp:
            x1, x2, d = line.strip().split(' ')
            x1, x2 = int(x1), int(x2)
            max_id = max(max_id, x1, x2)
            dis = float(d)
            min_dis, max_dis = min(min_dis, dis), max(max_dis, dis)
            distances[(x1, x2)] = dis
            distances[(x2, x1)] = dis
    # Points are 1-indexed; give every point a zero self-distance.
    for i in range(1, max_id + 1):
        distances[(i, i)] = 0.0
    logger.info("PROGRESS: load end")
    return distances, max_dis, min_dis, max_id
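# Worked example (a minimal sketch; the file name and values are illustrative).
# Each input line is "index1 index2 distance", with indices starting at 1:
with open("distances.dat", "w") as f:
    f.write("1 2 0.5\n1 3 1.2\n2 3 0.8\n")
distances, max_dis, min_dis, max_id = load_paperdata("distances.dat")
print(max_id)             # 3
print(distances[(2, 1)])  # 0.5 -- distances are stored symmetrically
print(distances[(3, 3)])  # 0.0 -- self-distances are filled with zero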
Example #4
def evaluate(args, model, eval_dataloader, metrics):
    # Eval!
    logger.info("  Num batches = %d", len(eval_dataloader))
    logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = AverageMeter()
    metrics.reset()
    preds = []
    targets = []
    pbar = ProgressBar(n_total=len(eval_dataloader), desc='Evaluating')
    model.eval()
    for bid, batch in enumerate(eval_dataloader):
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2],
                      'labels': batch[3]}
            outputs = model(**inputs)
            loss, logits = outputs[:2]
            eval_loss.update(loss.item(), n=1)
        preds.append(logits.detach().cpu())
        targets.append(inputs['labels'].detach().cpu())
        pbar(bid)
    preds = torch.cat(preds, dim=0)
    targets = torch.cat(targets, dim=0)
    metrics(preds, targets)
    eval_log = {'eval_acc': metrics.value(),
                'eval_loss': eval_loss.avg}
    return eval_log
Example #5
def uploadspeech(localpath):
    """
    Upload a file to qiniu; if the upload fails, retry up to 3 times.
    If it still fails after the retries, return None.
    """
    access_key = '_D2Iavhr-DRKHHhW0BTT7-liQ2jO-1cC_lqKn0eF'
    secret_key = 'E3QKF99mgA8HAyGF1nMlKWVVaKlIxRpTZvEb1CiO'
    global qiniuAuth
    if not isinstance(qiniuAuth, Auth):
        qiniuAuth = Auth(access_key, secret_key)
    bucket_name = 'pipixia'
    key = "audio_" + str(int(time.time())) + ".mp3"
    uptoken = qiniuAuth.upload_token(bucket_name, key, 700000)
    ret, info = put_file(uptoken, key, localpath)
    global uploadspeechRetryCount
    if ret is None:
        logger.info(info)
        if uploadspeechRetryCount < 3:
            uploadspeechRetryCount += 1
            return uploadspeech(localpath)
        uploadspeechRetryCount = 0
        return None
    uploadspeechRetryCount = 0
    return "http://oty38yumz.bkt.clouddn.com/" + key
Example #6
    def _download(self, url):
        download_dir = global_define.XML_DIR
        if not os.path.exists(download_dir):
            os.mkdir(download_dir)

        day = global_define.TODAY
        download_dir = os.path.abspath(os.path.join(download_dir, str(day)))
        if not os.path.exists(download_dir):
            os.mkdir(download_dir)

        # Derive a flat file name from the URL path.
        file_name = url.replace(self.__url_prefix, "").replace("/", "")
        file_path = os.path.join(download_dir, file_name)

        wgetcmd = "wget %s --timeout=10 --quiet -O %s" % (url, file_path)
        if not subprocess.call(wgetcmd, shell=True):
            logger.debug('wget url[%s] finished.' % url)
        else:
            logger.critical('cmd[%s] failed.' % wgetcmd)
            return None

        logger.info('download [%s] successfully' % url)
        return os.path.abspath(file_path)
Example #7
    def setup(self, fname):
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        logger.addHandler(self.file_handler)
        logger.addHandler(self.stream_handler)
        logger.info('Begin logging to {}'.format(fname))
        return logger
Example #8
    def test_persons_get(self):
        for setting in [
                'minimal', 'normal', 'detailed', 'extensive', 'default'
        ]:
            for mode in [False, True]:
                query = urllib.parse.urlencode({
                    'select': setting,
                    'dict': str(mode).lower()
                })
                check = self.app.get('/persons?{}'.format(query),
                                     follow_redirects=True)
                logger.info(check.data)
                if setting == 'extensive' and mode:
                    comparison = check.data

        selected = random.choice(json.loads(comparison.decode())['content'])
        for setting in ['full', 'default']:
            query = urllib.parse.urlencode({'select': setting})
            check = self.app.get('/persons/{}?{}'.format(
                selected['uuid'], query),
                                 follow_redirects=True)
            self.assertEqual(
                json.loads(check.data.decode())['content'], selected)

        print('\nfinished persons')
Example #9
    def send_email(setting: dict, file_path):
        """
        Takes a settings dict; the keys below are read from it.
        :param user: sender's email address
        :param password: SMTP authorization code
        :param host: sender's SMTP server, e.g. smtp.163.com
        :param contents: message body
        :param addressees: list of recipients
        :param title: email subject
        :param enclosures: attachment list
        :param file_path: folder to compress and attach
        :return:
        """
        EmailServe.zip_report(
            file_path=file_path,
            out_path=setting['enclosures'])
        yag = yagmail.SMTP(
            setting['user'],
            setting['password'],
            setting['host'])
        # Send the email
        yag.send(
            setting['addressees'],
            setting['title'],
            setting['contents'],
            setting['enclosures'])
        # Close the SMTP session
        yag.close()
        logger.info("Email sent successfully!")
Example #10
 def simulate(self):
     target = Plant.get(Plant.localhost == True)
     source = Plant.get(Plant.name == 'marta')
     for sensor in Sensor.select():
         logger.info(sensor.name)
         PlantSimulate().run(target, sensor, source)
     logger.info('finished')
Example #11
def run():
    if os.path.exists('report/'):
        shutil.rmtree(path='report/')
    logger.add(file_path['log'], enqueue=True, encoding='utf-8')
    # ASCII-art "apiAutoTest" startup banner (condensed here).
    logger.info("apiAutoTest     Starting      ...     ...     ...")
    pytest.main(
        args=['test/test_api.py', f'--alluredir={file_path["report"]}/data'])
    # Serve the report automatically
    # os.system(f'allure serve {report}/data')

    # Generate the report locally
    os.system(
        f'allure generate {file_path["report"]}/data -o {file_path["report"]}/html --clean'
    )
    logger.success('Report generated')

    # Email the report as an attachment
    EmailServe.send_email(email, file_path['report'])

    # Delete the local attachment
    os.remove(email['enclosures'])
Example #12
def delete_all_data_in_tables():
    Product.objects.all().delete()
    Category.objects.all().delete()
    logger.info("All table data has been deleted")

    # def bulk_insert_product_category():
    #     """
    #     insert all products and their categories
    #     """
    #     # Create two objects (product & category) for a bulk insert
    #
    #     p1 = Product.objects.create(name="Pizza fromage",
    #                                 image_product="https://image.fr",
    #                                 stores="OpenClassrooms",
    #                                 url=None,
    #                                 nutriscore="D",
    #                                 image_reperes_nutrionnels="https://image_repere.fr")
    #
    #     p1.save()
    #     c1 = Category.objects.create(name='Pizza')
    #     c1.save()
    #     p1.category.add(c1)
    #     c2 = Category.objects.create(name='Fromage')
    #     c2.save()
    #     p1.category.add(c2)
    #
    #     get = Product.objects.filter(pk=221)
    #     print(get)
Example #13
    def bert_epoch_step(self, state, current):
        '''
        For BERT-style models; designed for the pytorch_transformer module.
        :param state:
        :param current:
        :return:
        '''
        model_to_save = state['model']
        if self.save_best_only:
            if self.monitor_op(current, self.best):
                logger.info(
                    f"\nEpoch {state['epoch']}: {self.monitor} improved from {self.best:.5f} to {current:.5f}"
                )
                self.best = current
                state['best'] = self.best
                model_to_save.save_pretrained(str(self.base_path))
                output_config_file = self.base_path / 'configs.json'
                with open(str(output_config_file), 'w') as f:
                    f.write(model_to_save.config.to_json_string())
                state.pop("model")
                torch.save(state, self.base_path / 'checkpoint_info.bin')
        else:
            if state['epoch'] % self.epoch_freq == 0:
                save_path = self.base_path / f"checkpoint-epoch-{state['epoch']}"
                save_path.mkdir(exist_ok=True)
                logger.info(f"\nEpoch {state['epoch']}: save model to disk.")
                model_to_save.save_pretrained(save_path)
                output_config_file = save_path / 'configs.json'
                with open(str(output_config_file), 'w') as f:
                    f.write(model_to_save.config.to_json_string())
                state.pop("model")
                torch.save(state, save_path / 'checkpoint_info.bin')
Example #14
    def run(cls):
        if os.path.exists('../report'):
            shutil.rmtree(path='../report')
        logger.add(logfile, enqueue=True, encoding='utf-8')
        logger.info('Starting tests...')
        pytest.main(args=[f'--alluredir={report}/data'])
        os.system(f'allure generate {report}/data -o {report}/html --clean')
        logger.success('Report generated')
Example #15
    def save_response(cls, key: str, value: object) -> None:
        """
        Save the actual response.
        :param key: key in the response dict, usually the test case id
        :param value: value in the response dict, the json response
        """
        cls.response_dict[key] = value
        logger.info(f'Saved key: {key}, value: {value}')
Example #16
def stopFabricExplorer(yamlContent):
    clusterName = yamlContent["clusterName"]
    mountPoint = yamlContent["nfsServer"]["mountPoint"]
    explorerYaml = mountPoint + "/" + clusterName + "/resources/explorer-artifacts/fabric_1_0_explorer.yaml"
    command = "kubectl delete --grace-period=0 --force -f " + explorerYaml
    ret = os.system(command)
    if ret != 0:
        logger.error(command + " exec failed")
Example #17
def notifyDFgeneration(deploymentYaml, template):
    # Notify that the deployment file was generated.
    # Only show the paths under deployment/ and templates/.
    deploymentYaml = deploymentYaml.rsplit("deployment/")[-1]
    template = template.rsplit("templates/")[-1]
    logger.info("Generating {} from template: {}".format(deploymentYaml, template))
Example #18
    def execute_cmd(self, cmd: str):
        """
        :param cmd: the command to run on the server; may be a list or a str
        """
        stdin, stdout, stderr = self.ssh.exec_command(cmd)
        error = stderr.read().decode()
        logger.info(f"Command: {cmd} -> output: {stdout.read().decode()}")
        if error:
            logger.error(f"Error output: {error}")
        return error
Example #19
def showAddress(yamlContent):
    urls = getUrls(yamlContent)

    dashboard = "You can view the Kubernetes dashboard at: " + urls["k8sDashboardUrl"]
    logger.info(dashboard)

    if urls["fabricExplorerUrl"]:
        explorerMsg = "You can view Fabric Explorer at: " + urls["fabricExplorerUrl"]
    else:
        explorerMsg = "Cannot get the Ingress Controller IP. Please check your Ingress controller settings."
    logger.info(explorerMsg)
Example #20
def init_db():
    '''
    Create the db with table 'flt',
    or do nothing if they already exist.
    '''
    try:
        Base.metadata.create_all(bind=engine)
        logger.info('Init DB - OK')
    except OperationalError:
        logger.error('Init DB - Error')
        raise MyLocalException
Example #21
def run():
    if os.path.exists('report/'):
        shutil.rmtree(path='report/')
    logger.add(logfile, enqueue=True, encoding='utf-8')
    logger.info('Starting tests...')
    pytest.main(args=['test/test_api.py', f'--alluredir={report}/data'])
    # Serve the report automatically
    # os.system(f'allure serve {report}/data')

    # Generate the report locally
    os.system(f'allure generate {report}/data -o {report}/html --clean')
    logger.success('Report generated')
Example #22
    def set(self):
        result = VariousTools.offline_check('display', hardware=False)
        if result is True:
            execute = False
            sensor = SensorHardware.get(label='display')

            if sensor.last_execution is not None:
                offset = datetime.datetime.now() - sensor.last_execution
                if offset.seconds >= 30 * 60:
                    execute = True
            else:
                execute = True

            if execute is True:
                sensor.last_execution = datetime.datetime.now()
                sensor.save()

                logger.debug('display: updated')

                bus = 1
                gpio_count = 16
                address = 0x20

                self.get()
                self.calculate()

                # Create MCP230xx GPIO adapter.
                mcp = MCP230XX_GPIO(bus, address, gpio_count)
                logger.debug('display: creating text')
                # Create LCD, passing in MCP GPIO adapter.
                lcd = Adafruit_CharLCD(pin_rs=8,
                                       pin_e=9,
                                       pins_db=[10, 11, 12, 13],
                                       GPIO=mcp)

                lcd.clear()
                lcd.message(self.data['display']['text'])
            else:
                logger.debug('display: not updated')

        else:
            logger.info('display: in offline timeframe - clearing')
            bus = 1
            gpio_count = 16
            address = 0x20  # same MCP23017 address as above (decimal 32)

            mcp = MCP230XX_GPIO(bus, address, gpio_count)
            lcd = Adafruit_CharLCD(pin_rs=8,
                                   pin_e=9,
                                   pins_db=[10, 11, 12, 13],
                                   GPIO=mcp)
            lcd.clear()
Example #23
 def run(self, cmd, sudo=True):
     if sudo:
         cmd = 'sudo %s' % cmd
     logger.info('\nRunning %s' % cmd)
     _, stdout, stderr = self.connection.exec_command(cmd, get_pty=True)
     out = {
         'stdout': stdout.read().decode('utf-8'),
         'stderr': stderr.read().decode('utf-8'),
         'es': stdout.channel.recv_exit_status()
     }
     if out['stderr']:
         logger.info(out['stderr'])
     return out
Example #24
    def files_action(self, post: bool, local_path: str = os.getcwd(), remote_path: str = "/root"):
        """
        :param post: True to upload, False to download
        :param local_path: local file path; defaults to the current working directory
        :param remote_path: file path on the server; defaults to /root
        """
        if post:  # upload
            remote_file = f"{remote_path}/{os.path.split(local_path)[1]}"
            self.ftp_client.put(localpath=local_path, remotepath=remote_file)
            logger.info(f"Upload succeeded: {local_path} -> {self.host}:{remote_file}")
        else:  # download
            file_path = os.path.join(local_path, os.path.split(remote_path)[1])
            self.ftp_client.get(remotepath=remote_path, localpath=file_path)
            logger.info(f"Download succeeded: {self.host}:{remote_path} -> {file_path}")
Example #25
def main(conf_file_name):
    pid = os.getpid()
    logger = tools.logger.Logger('mainlog', str(pid) + 'main.log')
    work_path = os.path.abspath(os.curdir)
    config_path = os.path.join(work_path, conf_file_name)
    conf = configparser.ConfigParser()
    conf.read(config_path)
    redis_ip = conf.get('conf', 'redis_ip')
    redis_port = conf.get('conf', 'redis_port')
    platform = conf.get('conf', 'platform')
    key = conf.get('conf', 'key')
    is_send_message = conf.get('conf', 'is_send_message')
    currency_list = json.loads(conf.get('conf', 'currency_list'))

    # host is the redis server; both the redis server and client must be
    # running. The default redis port is 6379.
    pool = redis.ConnectionPool(
        host=redis_ip, port=redis_port, decode_responses=True)
    r = redis.Redis(connection_pool=pool)

    # Start the currency-processing pool.
    p = Pool(len(currency_list))
    path = os.path.join(os.path.abspath(os.curdir), 'data', 'paths_result.dat')
    logger.info(path)
    path_list = []
    with open(path, 'r') as f:
        for line in f:
            path_list.append(json.loads(line.strip()))

    for x in currency_list:
        currency_path = (x, path_list, platform, redis_ip, redis_port)
        p.apply_async(calc_core.calc_fork, args=(currency_path, ))
    logger.info('Waiting for all subprocesses done...')
    p.close()
    logger.info('All subprocesses done.')

    # # recalc process pool
    # p_recalc = Pool(4)
    # for i in range(4):
    #     p_recalc.apply_async(calc_core.recalc, args=((redis_ip, redis_port),))

    while 1:
        try:
            main_process(r, key, platform, currency_list, is_send_message,
                         logger, redis_ip, redis_port)
        except Exception as e:
            logger.info(e)
            msg = traceback.format_exc()
            logger.info(msg)
        time.sleep(15)
Example #26
    def connect(self):
        client = SSHClient()
        client.set_missing_host_key_policy(AutoAddPolicy)
        logger.info('\nConnecting to %s' % self.host)
        for _ in range(self.tries):
            try:
                client.connect(hostname=self.host,
                               username='******',
                               key_filename=os.path.expanduser(
                                   conf.get('environment', 'ssh_key')))
                return client
            except Exception as e:
                logger.info('%s occurred' % e)
                time.sleep(10)
Example #27
    def send_api(
            cls,
            url,
            method,
            parametric_key,
            header=None,
            data=None,
            file=None) -> dict:
        """
        :param method: request method
        :param url: request url
        :param parametric_key: how to pass parameters: params (query-string
        parameters, sent in plain text as url?name=value), data (typically for
        form parameters), json (typically for json request bodies)
        :param data: parameter data, defaults to None
        :param file: file object
        :param header: request headers
        :return: the response as a dict
        """
        session = cls.get_session()

        if parametric_key == 'params':
            res = session.request(
                method=method,
                url=url,
                params=data,
                headers=header)
        elif parametric_key == 'data':
            res = session.request(
                method=method,
                url=url,
                data=data,
                files=file,
                headers=header)
        elif parametric_key == 'json':
            res = session.request(
                method=method,
                url=url,
                json=data,
                files=file,
                headers=header)
        else:
            raise ValueError(
                "parametric_key must be one of: params, data, json")
        response = res.json()
        logger.info(
            f'\nFinal request url: {res.url}\nMethod: {method}\nHeaders: {header}\nParams: {data}\nFile: {file}\nResponse: {response}')
        allure_step_no(f'Response time (s): {res.elapsed.total_seconds()}')
        allure_step('Response body', response)
        return response
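# Usage sketch (the hosting class name `HttpClient` is hypothetical --
# substitute whichever class defines send_api; the URL and payload are
# illustrative):
response = HttpClient.send_api(
    url='https://httpbin.org/post',
    method='post',
    parametric_key='json',
    header={'Content-Type': 'application/json'},
    data={'name': 'demo'})
print(response)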
Example #28
    def assert_result(cls, response: dict, expect_str: str):
        """ Assert the actual result against the expected result.
        :param response: actual response dict
        :param expect_str: expected response content, read from excel
        return None
        """
        expect_dict = convert_json(expect_str)
        index = 0
        for k, v in expect_dict.items():
            actual = extractor(response, k)
            index += 1
            logger.info(
                f'Assertion {index}: actual {actual} | expected {v} \nresult {actual == v}')
            allure_step(f'Assertion {index}', f'actual: {actual} = expected: {v}')
            assert actual == v
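# Usage sketch (assumes assert_result lives on the DataProcess class referenced
# elsewhere in these examples, and that extractor resolves jsonpath-style keys
# such as "$.code" against the response dict; values are illustrative):
resp = {'code': 0, 'data': {'id': 42}}
DataProcess.assert_result(resp, '{"$.code": 0, "$.data.id": 42}')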
Example #29
 def make_json(self):
     '''
     Expects abspath to .csv file
     Returns json data
     '''
     flt = self.parse_filename()
     passengers = self.csv_to_dict()
     flt['prl'] = passengers
     try:
         json_data = json.dumps(flt)
         logger.info(f'{self.file_name}; Convert to json - OK')
         return json_data
     except (AttributeError, TypeError):
         logger.error(f'{self.file_name}; Convert to json - Error')
         raise MyLocalException
Example #30
 def parseChannel(self, response):
     info("-----------------kejiliechannels url:" + response.url)
     title = response.xpath(
         "//div[@class='am_news_list_all']//div[@class='am-titlebar am-titlebar-default mt-0']/h1/text()"
     ).extract_first()
     logo = response.xpath(
         "//div[@class='am_news_list_all']//div[@class='mt-10']/div[@class='am-fl']/img/@src"
     ).extract_first()
     self.redis_db.sadd(
         "kejiliechannels",
         json.dumps({
             'url': response.url,
             'title': title,
             'logo': logo
         }))
Example #31
    def handle_sql(cls, sql: str, db: object):
        """Process the sql and write the result into the response dict."""
        if sql not in ['no', '']:
            sql = rep_expr(sql, DataProcess.response_dict)
        else:
            sql = None
        allure_step('Run sql', sql)
        logger.info(sql)
        if sql is not None:
            # Run the post-condition query.
            result = db.fetch_one(sql)
            allure_step('sql result', {"sql_result": result})
            logger.info(f'Result: {result}')
            if result is not None:
                # Merge the query result into the response dict, so a field of
                # the API response can be compared directly against a database
                # field; the expected result extracts it with the same syntax.
                DataProcess.response_dict.update(result)
Example #32
    def ret_func(*args, **kwargs):
        logger.info("func [%s.%s] begin ..." % (func.__module__, func.__name__))
        begin_t = datetime.datetime.now()
        result = func(*args, **kwargs)
        end_t = datetime.datetime.now()
        logger.info("func [%s.%s] end ... " % (func.__module__, func.__name__))
        logger.info("consume time %s" % (end_t - begin_t))
        return result