Exemplo n.º 1
0
    def _download(self, url):
        """Download *url* into XML_DIR/<today>/ using wget.

        :param url: source URL; the flat file name is derived from it
            by stripping the configured URL prefix and all slashes.
        :return: absolute path of the downloaded file, or None when
            the wget command fails.
        """
        download_dir = global_define.XML_DIR
        if not os.path.exists(download_dir):
            os.mkdir(download_dir)

        # One sub-directory per day keeps downloads grouped.
        day = global_define.TODAY
        download_dir = os.path.abspath(os.path.join(download_dir, str(day)))
        if not os.path.exists(download_dir):
            os.mkdir(download_dir)

        file_name = url.replace(self.__url_prefix, "")
        file_name = file_name.replace("/", "")
        file_path = os.path.join(download_dir, file_name)

        # Argument list + shell=False: a crafted URL can no longer
        # inject shell commands (the original interpolated into a
        # shell string with shell=True).
        wgetcmd = ["wget", url, "--timeout=10", "--quiet", "-O", file_path]
        if not subprocess.call(wgetcmd):
            logger.debug('wget url[%s] finished.' % url)
        else:
            logger.critical('cmd[%s] failed.' % " ".join(wgetcmd))
            return None

        logger.info('download [%s] successfully' % (url))
        return os.path.abspath(file_path)
Exemplo n.º 2
0
 def extrasRequest(self, **kwargs):
     """Upload the last screenshot extra to *file_url* via HTTP PUT.

     Expects kwargs: 'file_url', 'token', and 'extras' containing
     'log' and 'screenshot_at_last'. Failures are logged and swallowed
     (best-effort upload).
     """
     file_url = kwargs.pop('file_url')
     token = kwargs.pop('token')
     log = kwargs['extras']['log']
     snapshot = kwargs['extras']['screenshot_at_last']
     headers = {'content-type': 'image/png', 'Ext-Type': '%s%s%s' % ('expect', ':', 'step'), 'accept': 'application/json'}
     try:
         # "with" guarantees the image file is closed even when the
         # request raises (the original leaked the handle).
         with open(snapshot, 'rb') as snapshot_file:
             ret = request(method='put', url=file_url, headers=headers, data=snapshot_file, timeout=10)
     except Exception as e:
         # "as e" replaces the Python-2-only "except Exception, e" form.
         logger.debug('error: extraRequest snapshot\n%s' % str(e))
Exemplo n.º 3
0
 def basicPayloadRequest(self, **kwargs):
     """POST the result payload to *result_url*.

     Expects kwargs: 'result_url', 'token', and 'payload'.
     :return: True when the server replies result == 'ok',
         False otherwise (including request/parse failures).
     """
     headers = {'content-type': 'application/json', 'accept': 'application/json'}
     result_url = kwargs.pop('result_url')
     token = kwargs.pop('token')
     values = json.dumps({'subc':'update','token':token, 'data': kwargs['payload']})
     ret = request(method='post', url=result_url, data=values, headers=headers, timeout=REQ_TIMEOUT)
     # expected reply: {u'msg': u'', u'data': {}, u'result': u'ok'}
     try:
         if ret['result'] == 'ok':
             return True
         # Explicit False instead of the original implicit None
         # fall-through; truthiness for callers is unchanged.
         return False
     except Exception as e:
         logger.debug('error: basicRequest\n%s' % str(e))
         return False
Exemplo n.º 4
0
 def run(self):
     '''
     The work method: upload the basic result payload and, when that
     succeeds, the extras (screenshot) for any terminal result state.
     '''
     try:
         # 'pass', 'fail' and 'error' had three byte-identical
         # branches; a single membership test is equivalent.
         if self.kwargs['payload']['result'] in ('pass', 'fail', 'error'):
             if self.basicPayloadRequest(**self.kwargs):
                 self.extrasRequest(**self.kwargs)
     except Exception as e:
         logger.debug('error: upload thread run\n%s' % str(e))
Exemplo n.º 5
0
 def regist(self, **kwargs):
     '''
     Get an auth token from the server using the configured
     username/password. The password is sent as an MD5 hexdigest, as
     the server-side API requires. On success the token is stored in
     self.token; on failure the error is logged and self.token is
     left untouched.
     '''
     m = hashlib.md5()
     m.update(self.__dict__['password'])
     pwd = m.hexdigest()
     values = json.dumps({'subc': 'login', 'data':{'appid':'01', 'username':self.__dict__['username'], 'password':pwd}})
     headers = {'content-type': 'application/json', 'accept': 'application/json'}
     auth_url = self.__dict__['auth']
     ret = request(method='post', url=auth_url, data=values, headers=headers, timeout=AUTH_REQ_TIMEOUT)
     # reply shape: {u'msg': u'', u'data': {u'token': ..., u'uid': ...}, u'result': u'ok|error'}
     try:
         self.token = ret['data']['token']
     except Exception as e:
         # also covers ret being None when the request itself failed;
         # "as e" replaces the Python-2-only except syntax.
         logger.debug('error: regist\n%s' % str(e))
Exemplo n.º 6
0
def request(method, url, data=None, **kwargs):
    '''
    Sends a request.
    :param url: URL for the request.
    :param method: the request type of http method(get, post, put, delete)
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of http protocol
    :param \*\*kwargs: Optional arguments that request takes
    :return: dict (parsed JSON body) or None on failure or unknown method
    '''
    ret = None
    m = method.lower()
    # Guard: an unsupported method used to leave `req` unbound and
    # raise NameError at the call site below.
    req = getattr(requests, m, None) if m in ('get', 'post', 'put', 'delete') else None
    if req is None:
        return None
    try:
        r = req(url=url, data=data, **kwargs)
        if r:
            ret = r.json()
    except requests.exceptions.Timeout as e:
        logger.debug(str(e))
    # The original never returned `ret`, contradicting the docstring.
    return ret
Exemplo n.º 7
0
 def basicPayloadRequest(**kwargs):
     """POST the result payload to *result_url*.

     Expects kwargs: 'result_url', 'token', and 'payload'.
     :return: True when the server replies result == 'ok',
         False otherwise (including request/parse failures).
     """
     headers = {
         'content-type': 'application/json',
         'accept': 'application/json'
     }
     result_url = kwargs.pop('result_url')
     token = kwargs.pop('token')
     values = json.dumps({
         'subc': 'update',
         'token': token,
         'data': kwargs['payload']
     })
     try:
         ret = request(method='post',
                       url=result_url,
                       data=values,
                       headers=headers,
                       timeout=REQ_TIMEOUT)
         # expected reply: {u'msg': u'', u'data': {}, u'result': u'ok'}
         return ret['result'] == 'ok'
     except Exception as err:
         # "as err" replaces the Python-2-only except syntax; return an
         # explicit False instead of the original implicit None.
         logger.debug('error: basicRequest\n%s' % str(err))
         return False
Exemplo n.º 8
0
    def updateSession(self, **kwargs):
        '''
        Update an existing session on the server.

        Expected kwargs, e.g.:
            {'sid': session_id, 'product': 'p', 'revision': 'r',
             'deviceid': 'devid', 'planname': 'test.plan',
             'starttime': ...}
        'sid' is consumed here (and remembered on self.session_id);
        all remaining kwargs are sent verbatim as the update payload.
        '''
        self.session_id = kwargs.pop('sid')
        url = self.__dict__['session_update'] % self.session_id
        headers = {'content-type': 'application/json', 'accept': 'application/json'}
        values = json.dumps({'subc': 'update',
                             'token': self.token,
                             'data': kwargs})
        ret = request(method='post', url=url, data=values, headers=headers, timeout=REQ_TIMEOUT)
        # expected reply: {u'msg': u'', u'data': {}, u'result': u'ok'}
        try:
            if ret['result'] == 'ok':
                # Success needs no further action; the try/except only
                # guards against ret being None or malformed.
                pass
        except Exception as e:
            # "as e" replaces the Python-2-only except syntax.
            logger.debug('error: update session\n%s' % str(e))
Exemplo n.º 9
0
def checkConsensusServiceStatus(yamlContent):
    """Poll the Fabric ordering service until it answers a channel fetch.

    Runs `peer channel fetch 0 ... -c testchainid` inside each peer
    org's cli pod as the orderer MSP; any "Error" in the output means
    the orderer is not ready yet. Retries every INTERVAL seconds and
    exits the process (sys.exit(1)) once TIMEOUT is exceeded.

    :param yamlContent: parsed deployment config; must contain
        'clusterName' and the embedded 'crypto-config.yaml' section.
    """
    logger.info("Checking consensus service status.")
    clusterName = yamlContent["clusterName"]
    ordererOrgName=yamlContent["crypto-config.yaml"]["OrdererOrgs"][0]["Name"]
    ORDERMSPID="OrdererMSP"
    ordererDomain = yamlContent["crypto-config.yaml"]["OrdererOrgs"][0]["Domain"]
    MSPCONFIGPATH = "/etc/hyperledger/ordererOrganizations/" + ordererDomain + "/orderers/orderer0." + ordererDomain + "/msp"
    ordererNamespace=ordererOrgName.lower() + "-" + clusterName.lower()
    peerOrgs = yamlContent["crypto-config.yaml"]["PeerOrgs"]
    # Fetch block 0 of the system channel; failure output contains "Error".
    checkCommand="env CORE_PEER_LOCALMSPID=" + ORDERMSPID + " " + "CORE_PEER_MSPCONFIGPATH=" + MSPCONFIGPATH + " " + \
        "peer channel fetch 0 -o " + "orderer0." + ordererNamespace + ":7050 -c testchainid"
    # NOTE(review): `re` shadows the stdlib regex module name here; it
    # holds the index of "Error" in the command output (-1 == success).
    re = 0
    timeCount = 0
    config.load_kube_config()
    v1 = client.CoreV1Api()
    # Pod list is fetched once, before the retry loop — assumes the cli
    # pods already exist and keep their names; confirm this is intended.
    ret = v1.list_pod_for_all_namespaces(watch=False)
    while True:
        for peerOrg in peerOrgs:
            peerOrgName=peerOrg["Name"]
            namespace=peerOrgName.lower() + "-" + clusterName.lower()
            for i in ret.items:
                if (i.metadata.namespace == namespace and i.metadata.name.startswith("cli")):
                    cliPodName = i.metadata.name
                    resp = execCmdInPod(cliPodName, namespace, checkCommand)
                    logger.debug(resp)
                    re = resp.find("Error")
        # Only the last probed pod's result decides; -1 means no "Error".
        if re == -1:
            break
        else:
            timeCount = timeCount + INTERVAL
            time.sleep(INTERVAL)
        if timeCount > TIMEOUT:
           errMsg="Error: Consensus service is not healthy"
           logger.error(errMsg)
           sys.exit(1)
    logger.info("Consensus service is OK.")
Exemplo n.º 10
0
Arquivo: main.py Projeto: indietyp/IoP
    def next_execution_seconds(self):
        """Seconds until the next scheduled run: roughly five minutes
        ahead, with the minute snapped onto the round_base grid."""
        now = datetime.now()

        # Look five minutes into the future, then align the minute.
        target = now + timedelta(seconds=5 * 60)

        # Snap so executions land on 5, 10, ... rather than 6, 11, ...
        aligned_minute = self.round_base(target.minute)
        if aligned_minute == 60:
            aligned_minute = 59

        # Rebuild the execution time with the aligned minute, zero seconds.
        target = datetime(target.year, target.month, target.day,
                          target.hour, aligned_minute, 0)

        logger.info('next planned execution: {}'.format(target))
        remaining = target - now
        logger.debug(
            'exact time till next execution: {}'.format(remaining))

        return remaining.seconds
Exemplo n.º 11
0
class BaseRequest(object):
    # Shared requests.Session, created lazily by get_session().
    session = None

    @classmethod
    def get_session(cls):
        # Lazy singleton: one Session (one connection pool) per run.
        if cls.session is None:
            cls.session = requests.Session()
        return cls.session

    @classmethod
    def send_request(cls, case: list, env: str = 'dev') -> object:
        """Turn one raw test-case row into an HTTP request and send it.

        :param case: one row of case data, unpacked positionally below
        :param env: environment name; selects the base URL under the
            config.yaml 'server' section (default 'dev')
        return: (response json, expected result, post-condition sql)
        """
        case_number, case_title, path, token, method, parametric_key, file_obj, data, sql, expect, is_save = case
        logger.debug(
            f"用例进行处理前数据: \n 接口路径: {path} \n 请求参数: {data} \n 后置sql: {sql} \n 预期结果: {expect} \n 保存响应: {is_save}"
        )
        # allure report: case title
        allure_title(case_title)
        # Pre-processing for url / header / data / file.
        url = ReadFile.read_config(
            f'$.server.{env}') + DataProcess.handle_path(path)
        allure_step('请求地址', url)
        header = DataProcess.handle_header(token)
        allure_step('请求头', header)
        data = DataProcess.handle_data(data)
        allure_step('请求参数', data)
        file = DataProcess.handler_files(file_obj)
        allure_step('上传文件', file_obj)
        # Send the request.
        res = cls.send_api(url, method, parametric_key, header, data, file)
        allure_step('响应耗时(s)', res.elapsed.total_seconds())
        allure_step('响应内容', res.json())
        # Post-response handling: the '写' marker means "capture the
        # token from this response for subsequent requests".
        if token == '写':
            DataProcess.have_token['Authorization'] = extractor(
                res.json(), ReadFile.read_config('$.expr.token'))
            allure_step('请求头中添加Token', DataProcess.have_token)
        # Persist the actual response when the case asks for it ('是' = yes).
        if is_save == "是":
            DataProcess.save_response(case_number, res.json())
        allure_step('存储实际响应', DataProcess.response_dict)
        return res.json(), expect, sql
Exemplo n.º 12
0
 def run(self, data):
     """Fetch sensor data and, once more than 1000 samples exist,
     compute a prediction and persist it. Each phase's duration is
     logged at debug level."""
     t0 = datetime.datetime.now()
     sd = self.get_sensor_data(data)
     logger.debug(
         '(206-208) time elapsed: {}'.format(datetime.datetime.now() - t0))

     if sd.count() > 1000:
         t0 = datetime.datetime.now()
         data['prediction'] = self.predict(data, sd)
         logger.debug(
             '(210-212) time elapsed: {}'.format(datetime.datetime.now() - t0))

         t0 = datetime.datetime.now()
         self.insert_database(data)
         logger.debug(
             '(213-215) time elapsed: {}'.format(datetime.datetime.now() - t0))
Exemplo n.º 13
0
class BaseRequest(object):
    # Shared requests.Session, created lazily by get_session().
    session = None

    @classmethod
    def get_session(cls):
        """
        Singleton: ensure the whole test run reuses one Session object.
        :return: the shared requests.Session
        """
        if cls.session is None:
            cls.session = requests.Session()
        return cls.session

    @classmethod
    def send_request(cls, case: list, env: str = 'dev') -> object:
        """Turn one raw test-case row into an HTTP request and send it.

        :param case: one row of case data, unpacked positionally below
        :param env: environment name; selects the base URL configured
            under config.yaml's server section (default 'dev')
        return: (response, expected result, post-condition sql)
        """
        case_number, case_title, header, path, method, parametric_key, file_obj, data, sql, expect, is_save = case
        logger.debug(
            f"用例进行处理前数据: \n 接口路径: {path} \n 请求参数: {data} \n 后置sql: {sql} \n 预期结果: {expect} \n 保存响应: {is_save}")
        # allure report: case title
        allure_title(case_title)
        # Pre-processing for url / header / data / file.
        url = DataProcess.handle_path(path, env)
        header = DataProcess.handle_header(header)
        data = DataProcess.handle_data(data)
        allure_step('请求数据', data)
        file = DataProcess.handler_files(file_obj)
        # Send the request.
        response = cls.send_api(url, method, parametric_key, header, data, file)

        # Persist the actual response when the case asks for it ('是' = yes).
        if is_save == "是":
            DataProcess.save_response(case_number, response)
        return response, expect, sql
Exemplo n.º 14
0
  def __retrieve_data(self, target, source, sensor):
    """Return (data, saved) sensor readings for *target*.

    When *target* has no readings for *sensor*, clone up to 1000 recent
    readings from *source*, re-own and re-timestamp them (stepping back
    30 minutes per sample from now), persist them, and re-query.

    :param target: plant to retrieve/seed data for
    :param source: plant to borrow history from when target is empty
    :param sensor: sensor whose readings are selected
    :return: (peewee query of target's readings, bool saved flag)
    """
    logger.info('retrieving data')
    data = SensorData.select().where(SensorData.plant == target,
                                     SensorData.sensor == sensor) \
                              .order_by(SensorData.created_at.desc())
    saved = True

    if data.count() < 1:
      saved = False
      # No own data: borrow the source plant's history for this sensor.
      data = SensorData.select().where(SensorData.plant == source,
                                       SensorData.sensor == sensor) \
                                .order_by(SensorData.created_at.desc())

      logger.debug('other plant data entries: {}'.format(data.count()))
      logger.info('harvesting additional data')
      if data.count() > 1000:
        logger.debug('more than 1000 assets')
        # Take a random contiguous window of 1000 samples.
        offset = random.randint(0, data.count() - 1000)
        data = data.offset(offset).limit(1000)

      data = data.dicts()
      prepared = list(data)

      current = datetime.datetime.now()
      for sample in prepared:
        # Re-own each sample and drop the old primary key so the
        # inserts below create new rows for the target plant.
        sample['plant'] = target
        sample['created_at'] = current
        del sample['id']

        current -= datetime.timedelta(minutes=30)

      logger.debug('amount of selected data: {}'.format(len(prepared)))

      # Batch inserts (100 rows per statement) inside one transaction.
      with db.atomic():
        for idx in range(0, len(prepared), 100):
          SensorData.insert_many(prepared[idx:idx + 100]).execute()

      data = SensorData.select().where(SensorData.plant == target,
                                       SensorData.sensor == sensor) \
                                .order_by(SensorData.created_at.desc())

      saved = True

    return data, saved
Exemplo n.º 15
0
    def insert_database(self, data):
        """Replace the stored prediction rows for one plant/sensor pair.

        Deletes all existing SensorDataPrediction rows for the
        plant+sensor in *data*, then batch-inserts the new predictions
        from data['prediction'] ('prediction' and 'date' are parallel
        lists). Elapsed times for each phase are logged at debug level.
        """
        deletion = datetime.datetime.now()
        SensorDataPrediction.delete().where(SensorDataPrediction.plant == data['plant']) \
                                     .where(SensorDataPrediction.sensor == data['sensor']) \
                                     .execute()
        logger.debug(
            'elapsed time deleting: {}'.format(datetime.datetime.now() -
                                               deletion))

        time_insert = datetime.datetime.now()
        # Build plain dicts for insert_many instead of saving model
        # instances one by one.
        prepared = []
        for key, prediction in enumerate(data['prediction']['prediction']):
            prepared.append({
                'plant': data['plant'],
                'sensor': data['sensor'],
                'value': prediction,
                'time': data['prediction']['date'][key]
            })

            # entry = SensorDataPrediction()
            # entry.plant = data['plant']
            # entry.sensor = data['sensor']
            # entry.value = prediction
            # entry.time = data['prediction']['date'][key]
            # entry.save()
        # Local import — presumably avoids a circular import at module
        # load time; confirm before moving to the top of the file.
        from models.plant import db

        # Batch inserts (100 rows per statement) inside one transaction.
        with db.atomic():
            for idx in range(0, len(prepared), 100):
                SensorDataPrediction.insert_many(prepared[idx:idx +
                                                          100]).execute()
            # SensorDataPrediction.insert_many(prepared).execute()
        logger.debug('insert time elapsed: {}'.format(datetime.datetime.now() -
                                                      time_insert))
        logger.debug(
            'overall time elapsed: {}'.format(datetime.datetime.now() -
                                              deletion))
Exemplo n.º 16
0
 def log(self, output):
     """Append *output* (stringified) plus a line separator to mylog.txt.

     Best-effort: any failure is logged at debug level and ignored.
     """
     try:
         with open('mylog.txt', 'a') as f:
             f.write('%s%s' % (str(output), os.linesep))
     except Exception:
         # Narrowed from a bare `except:` so KeyboardInterrupt and
         # SystemExit are no longer swallowed.
         logger.debug('error: open log file error')
Exemplo n.º 17
0
    def predict(self, data, sd):
        """Forecast time-based sensor data with an ExtraTreesRegressor.

        :param data: dict with the current plant/sensor objects (the
            name is reused below for the training frame)
        :param sd: sensor-data query ordered by creation time, exposing
            .count() and .dicts()
        :return: dict with parallel 'date' and 'prediction' lists, or
            [] when fewer than 1000 samples are available
        """
        data = {}
        data['date'] = []
        data['value'] = []
        data['average'] = []

        future = {}
        future['date'] = []

        between = datetime.datetime.now()
        if sd.count() < 1000:
            # BUGFIX: the original concatenated str + int
            # ('amount of data: ' + len(sd)) and raised TypeError.
            logger.debug('amount of data: {}'.format(sd.count()))
            logger.error('not enough samples')
            return []
        logger.debug(
            '(101-106) time elapsed: {}'.format(datetime.datetime.now() -
                                                between))

        between = datetime.datetime.now()
        sd = sd.dicts()
        sd = list(sd)

        for entry in sd:
            data['date'].append(entry['created_at'])
            data['value'].append(entry['value'])

        last_datetime = data['date'][-1]
        logger.debug(
            '(106-120) time elapsed: {}'.format(datetime.datetime.now() -
                                                between))

        between = datetime.datetime.now()
        # Forecast horizon: 10% of the sample count, capped at 144
        # steps (3 days at 30-minute resolution).
        cap = int(len(data['date']) / 100 * 10)
        if cap > 144:
            cap = 144

        for i in range(0, cap):
            current = last_datetime + datetime.timedelta(minutes=30)
            future['date'].append(current)
            last_datetime = current
        logger.debug(
            '(122-131) time elapsed: {}'.format(datetime.datetime.now() -
                                                between))

        between = datetime.datetime.now()
        # Expand datetimes into model features (helper defined elsewhere).
        data = self.datetime_to_dict(data)
        future = self.datetime_to_dict(future)
        logger.debug(
            '(131-134) time elapsed: {}'.format(datetime.datetime.now() -
                                                between))

        between = datetime.datetime.now()
        # Smooth the raw values with a 12-sample rolling mean; the model
        # is trained on the smoothed series.
        index = pd.DatetimeIndex(data['date'])
        time_series = pd.Series(data['value'], index=index)
        rolmean = time_series.rolling(center=False, window=12).mean()
        logger.debug(
            '(136-140) time elapsed: {}'.format(datetime.datetime.now() -
                                                between))

        between = datetime.datetime.now()
        for entry in rolmean:
            data['average'].append(entry)

        data_frame = pd.DataFrame(data)
        # Drop the rows where the rolling mean is not defined yet.
        data_frame = data_frame[data_frame.average.notnull()]

        # Feature columns = everything except the raw/target/date columns.
        columns = data_frame.columns.tolist()
        columns = [c for c in columns if c not in ['value', 'average', 'date']]
        logger.debug(
            '(142-151) time elapsed: {}'.format(datetime.datetime.now() -
                                                between))

        between = datetime.datetime.now()
        model = ExtraTreesRegressor()
        model.fit(data_frame[columns].values, data_frame['average'].values)

        pred_data_frame = pd.DataFrame(future)
        predictions = model.predict(pred_data_frame[columns].values)
        logger.debug(
            '(153-160) time elapsed: {}'.format(datetime.datetime.now() -
                                                between))

        between = datetime.datetime.now()
        future['prediction'] = []
        for prediction in predictions:
            future['prediction'].append(prediction)
        logger.debug(
            '(162-166) time elapsed: {}'.format(datetime.datetime.now() -
                                                between))

        if GRAPH is True:
            self.show_graph(data['date'], data['average'], future['date'],
                            predictions)

        return future
Exemplo n.º 18
0
    :return: dict or None
    '''
    ret = None
    meth = method.lower()
    if method.lower() in ('get', 'post', 'put', 'delete'):
        req = getattr(requests, meth, None)
    try:
        ret_code = req(url=url, data=data, **kwargs)
        if ret_code:
            ret = ret_code.json()
    except requests.exceptions.Timeout, err:
        #sys.stderr.write(str(e))
        logger.debug(str(err))
    except requests.exceptions.TooManyRedirects, err:
        #sys.stderr.write(str(e))
        logger.debug(str(err))
    except requests.exceptions.RequestException, err:
        logger.debug(str(err))
        #sys.stderr.write(str(e))
    except Exception, err:
        logger.debug(str(err))
        #sys.stderr.write(str(e))
    return ret


class ReportClient(object):
    '''
    client to communicate with server
    '''
    def __init__(self, **kwargs):
        '''init with keywords'''
Exemplo n.º 19
0
    def insert_data(self, data, mesh=True, prediction=True):
        """Store one sensor reading and run the follow-up pipeline.

        dict of data:
          'sensor': object of sensor
          'value': value - float
          'plant': current selected plant

        The reading is saved immediately (persistant=False), then its
        persistance flag is decided from the offset to the previous
        value (or from persistant_evaluation once >6 entries exist).
        Persistant readings additionally trigger satisfaction update,
        mail evaluation, optional forecasting, and optional mesh
        dispatch (including to slave plants when this is localhost).

        :return: the final persistant flag (bool)
        """
        start = datetime.datetime.now()
        current_entries = SensorData.select()\
                                    .where(SensorData.sensor == data['sensor'])\
                                    .count()

        persistant = False
        data['value'] = round(data['value'], 2)

        # Save first so the row exists; the flag is corrected below.
        sensor_db = SensorData()
        sensor_db.value = data['value']
        sensor_db.plant = data['plant']
        sensor_db.sensor = data['sensor']
        sensor_db.persistant = False
        sensor_db.save()

        # Fall back to the new value itself when there is no prior entry
        # (offset 0 -> only persistant_evaluation can set the flag).
        last_entry = self.get_second_last_entry(sensor_db, data['plant'])
        last_value = last_entry.value if last_entry is not None else data[
            'value']

        # A jump of at least the sensor's configured offset makes the
        # reading persistant outright.
        offset = abs(data['value'] - last_value)
        if offset >= data['sensor'].persistant_offset:
            persistant = True
        elif current_entries > 6:
            persistant = self.persistant_evaluation(data['plant'],
                                                    data['sensor'])

        sensor_db.persistant = persistant
        sensor_db.save()

        self.delete_non_persistant_overflow(data['sensor'], data['plant'])
        logger.debug('{} - {} persistant: {}'.format(data['plant'].name,
                                                     data['sensor'].name,
                                                     persistant))

        if persistant:
            data['satisfaction'] = self.modify_sensor_status(data, mesh)
            self.mail_evaluation(data)

            if prediction:
                SensorDataForecast().run(data)

            if mesh:
                # Local import — presumably avoids a circular import;
                # confirm before hoisting.
                from mesh_network.dedicated import MeshDedicatedDispatch
                MeshDedicatedDispatch().new_data(data['sensor'],
                                                 plant=data['plant'])

                # Localhost plants also push the update to their slaves.
                if data['plant'].localhost:
                    print('slave data')
                    slaves = Plant.select().where(
                        Plant.role == str(data['plant'].uuid))
                    slaves = list(slaves)

                    for slave in slaves:
                        print('slaved')
                        MeshDedicatedDispatch().slave_data(
                            slave, data['sensor'])

        logger.debug('time elapsed: {}'.format(datetime.datetime.now() -
                                               start))
        return persistant
Exemplo n.º 20
0
# Resolve SPF records (falling back to MX) for every domain, carrying
# the caches of queried names and returned IPs across iterations.
queried_domains = set()
returned_ips = set()

for domain in domains:
    # Skip domains whose SPF record was already fetched.
    if 'spf:' + domain in queried_domains:
        continue

    logger.info("\t+ [{}]".format(domain))

    # Query SPF record
    spf_result = dnsspf.query_spf(domain, queried_domains=queried_domains)
    spf = spf_result['spf']
    queried_domains = spf_result['queried_domains']

    if not spf:
        # No SPF record: whitelist hosts listed in MX records instead.
        qr = dnsspf.query_mx([domain],
                             queried_domains=queried_domains,
                             returned_ips=returned_ips)
    else:
        logger.debug("\t\t+ SPF -> {}".format(spf))

        # Parse the returned SPF record.
        qr = dnsspf.parse_spf(domain,
                              spf,
                              queried_domains=queried_domains,
                              returned_ips=returned_ips)

    ips = qr['ips']
    queried_domains = qr['queried_domains']
    returned_ips = qr['returned_ips']
Exemplo n.º 21
0
def joinChannelAndUpdate(yamlContent):
    """Create the channel (once), join every peer of every org to it,
    and update each org's anchor peers.

    For the first org processed, the channel is created from its cli
    pod and the genesis block copied into channel-artifacts; every org
    then joins all of its peers and submits its anchor-peer update tx.
    Any "Error" in a pod command's output aborts via sys.exit(1).

    :param yamlContent: parsed deployment config with 'clusterName',
        'channelName' and the embedded 'crypto-config.yaml' section.
    """
    clusterName = yamlContent["clusterName"]
    ordererOrgName = yamlContent["crypto-config.yaml"]["OrdererOrgs"][0]["Name"]
    channelName = yamlContent["channelName"]
    logger.info("Joining " + channelName + " and updating.")
    ordererNamespace=ordererOrgName.lower() + "-" + clusterName.lower()
    OrdererUrl = "orderer0." + ordererNamespace + ":7050"
    peerOrgs = yamlContent["crypto-config.yaml"]["PeerOrgs"]
    createChannel = "peer channel create -c " + channelName +  " -o " + OrdererUrl + \
                    " " + "-t 15 -f resources/channel-artifacts/channel.tx"
    copyBlock = "cp ./" + channelName + ".block ./resources/channel-artifacts -rf"
    # 0 until the channel has been created by the first org processed.
    channelCreateFlag = 0
    config.load_kube_config()
    v1 = client.CoreV1Api()
    ret = v1.list_pod_for_all_namespaces(watch=False)
    # NOTE(review): `re` shadows the stdlib regex module name; it holds
    # str.find results (-1 == marker not found == success).
    re = 0
    for peerOrg in peerOrgs:
        peerOrgName=peerOrg["Name"]
        peersNum=peerOrg["Template"]["Count"]
        namespace=peerOrgName.lower() + "-" + clusterName.lower()
        ret = v1.list_namespaced_pod(namespace, watch=False)
        # Find this org's cli pod (assumes exactly one "cli*" pod exists).
        for i in ret.items:
            if i.metadata.name.startswith("cli"):
                cliPodName = i.metadata.name
        if channelCreateFlag == 0:
            # First org: create the channel and stage the genesis block.
            resp = execCmdInPod(cliPodName, namespace, createChannel)
            logger.debug(resp)
            re = resp.find("Error")
            if re != -1:
                logger.error("Failed to create channel " + channelName + ".")
                sys.exit(1)
            resp = execCmdInPod(cliPodName, namespace, copyBlock)
            re = resp.find("cannot")
            if re != -1:
                logger.error("Failed to config channel " + channelName + ".")
                sys.exit(1)
            # Join every peer of this org to the channel.
            for n in range(peersNum):
                peerUrl = "peer" + str(n) + "." + namespace + ":7051"
                joinChannel="env CORE_PEER_ADDRESS=" + peerUrl + " peer channel join -b resources/channel-artifacts/" + channelName + ".block"
                resp = execCmdInPod(cliPodName, namespace, joinChannel)
                logger.debug(resp)
                re = resp.find("Error")
                if re != -1:
                    errMsg = "peer" + str(n) + " fail to join channel " + channelName + " on " + namespace
                    logger.error(errMsg)
                    sys.exit(1)
            # Submit this org's anchor-peer update transaction.
            updateChannel="peer channel update -o " + OrdererUrl + " -c " + channelName + " -f resources/channel-artifacts/" + peerOrgName + "MSPanchors.tx"
            resp = execCmdInPod(cliPodName, namespace, updateChannel)
            logger.debug(resp)
            re = resp.find("Error")
            if re != -1:
                errMsg = "Fail to update channel " + channelName + " on " + namespace
                logger.error(errMsg)
                sys.exit(1)
            channelCreateFlag = 1
        else:
            # Subsequent orgs: channel exists already, only join + update.
            for n in range(peersNum):
                peerUrl = "peer" + str(n) + "." + namespace + ":7051"
                joinChannel="env CORE_PEER_ADDRESS=" + peerUrl + " " + "peer channel join -b resources/channel-artifacts/" + channelName + ".block"
                resp = execCmdInPod(cliPodName, namespace, joinChannel)
                logger.debug(resp)
                re = resp.find("Error")
                if re != -1:
                    errMsg = "peer" + str(n) + " fail to join " + channelName + " on " + namespace
                    logger.error(errMsg)
                    sys.exit(1)
            updateChannel="peer channel update -o " + OrdererUrl + " -c " + channelName + " -f resources/channel-artifacts/" + peerOrgName + "MSPanchors.tx"
            resp = execCmdInPod(cliPodName, namespace, updateChannel)
            logger.debug(resp)
            re = resp.find("Error")
            if re != -1:
                errMsg = "Failed to update channel " + channelName + " on " + namespace
                logger.error(errMsg)
                sys.exit(1)
    logger.info("Joining " + channelName + " and updating is over.")
Exemplo n.º 22
0
# Resolve SPF records (falling back to MX) for every domain, carrying
# the caches of queried names and returned IPs across iterations.
queried_domains = set()
returned_ips = set()

for domain in domains:
    # Skip domains whose SPF record was already fetched.
    if 'spf:' + domain in queried_domains:
        continue

    logger.info(f"\t+ [{domain}]")

    # Query SPF record
    qr = dnsspf.query_spf(domain, queried_domains=queried_domains)
    spf = qr['spf']
    queried_domains = qr['queried_domains']

    if spf:
        logger.debug(f"\t\t+ SPF -> {spf}")

        # Parse returned SPF record
        qr = dnsspf.parse_spf(domain,
                              spf,
                              queried_domains=queried_domains,
                              returned_ips=returned_ips)
    else:
        # Whitelist hosts listed in MX records.
        qr = dnsspf.query_mx([domain],
                             queried_domains=queried_domains,
                             returned_ips=returned_ips)

    # Carry forward the accumulated caches for the next iteration.
    ips = qr['ips']
    queried_domains = qr['queried_domains']
    returned_ips = qr['returned_ips']
Exemplo n.º 23
0
# Obtain the shared 'mesh' logger. Importing tools.logger is presumably
# what installs the project's logging configuration (handlers and
# formatters) as a side effect — confirm against tools/logger.
import logging
import tools.logger

logger = logging.getLogger('mesh')
logger.debug('test')
Exemplo n.º 24
0
 def log(self, output):
     """Append *output* (stringified) plus a line separator to mylog.txt.

     Best-effort: any failure is logged at debug level and ignored.
     """
     try:
         with open('mylog.txt', 'a') as f:
             f.write('%s%s' % (str(output), os.linesep))
     except Exception:
         # Narrowed from a bare `except:` so KeyboardInterrupt and
         # SystemExit are no longer swallowed.
         logger.debug('error: open log file error')
Exemplo n.º 25
0
def text2speech(text):
    """Convert *text* into a single MP3 via the Baidu TTS API.

    The text is split on punctuation into chunks of <= 300 characters,
    each chunk is synthesised separately, the pieces are level-matched
    and concatenated, and the combined file is uploaded.

    return: the uploaded mp3 path, or None when synthesis/login fails.
    """
    # Split with a capturing group so the punctuation is kept and chunk
    # boundaries stay natural.
    splitArr = re.split(r'(\.|\?|!|,|;|。|?|!|,|、|;)', text)
    textArr = []
    subtext = ""
    for substr in splitArr:
        # Keep each URL-quoted chunk under the ~300 character API limit.
        if len(subtext + substr) > 300 and len(subtext) != 0:
            textArr.append(parse.quote(subtext))
            subtext = substr
            continue
        elif len(subtext + substr) > 300 and len(subtext) == 0:
            subtext += substr
            textArr.append(parse.quote(subtext))
            subtext = ""
        else:
            subtext += substr
    textArr.append(parse.quote(subtext))

    access_token = login()

    # BUGFIX: uploadPath used to be unbound when access_token was empty,
    # raising NameError at the final return.
    uploadPath = None

    if len(access_token) == 0:
        error("百度语音 API token 为空")
    else:
        ttsurl = "http://tsn.baidu.com/text2audio?lan=zh&tok=" + \
            access_token + "&ctp=1&cuid=aaaaaaaaaaaa&tex="
        song = None
        # `out_dir` instead of `dir`, which shadowed the builtin.
        out_dir = os.getcwd() + "/ttsdata/"
        if os.path.isdir(out_dir) is False:
            os.mkdir(out_dir)
        out_dir = out_dir + "ttsdata" + \
            str(int(time.time() * 100000000000000)) + "/"
        if os.path.isdir(out_dir) is False:
            os.mkdir(out_dir)
        textfilepath = out_dir + str(int(time.time()))
        i = 0
        for sbtext in textArr:
            url = ttsurl + sbtext
            res = requests.get(url)
            if res.headers['content-type'] == 'audio/mp3':
                filepath = textfilepath + "_" + str(i) + ".mp3"
                # BUGFIX: write and close the chunk BEFORE pydub reads
                # it — the original called from_mp3 while the file was
                # still open/unflushed.
                with open(filepath, 'wb') as mp3fileobj:
                    mp3fileobj.write(res.content)
                songtmp = AudioSegment.from_mp3(filepath)
                if song is not None:
                    # Level-match the quieter segment before concatenating.
                    dbplus = song.dBFS - songtmp.dBFS
                    if dbplus < 0:
                        song += abs(dbplus)
                    elif dbplus > 0:
                        songtmp += abs(dbplus)
                    song = song + songtmp
                else:
                    song = songtmp
            else:
                # BUGFIX: Response.text is a property, not a method —
                # the original `res.text()` raised TypeError.
                error("文本<" + sbtext + ">转换音频失败,错误原因:" + res.text)
                return None
            debug("生成 MP3文件:第" + str(i) + "碎片:" + parse.unquote(sbtext))
            i += 1
        resultPath = out_dir + "/res_" + str(int(time.time())) + ".mp3"
        song.export(resultPath, format="mp3")
        info("音频文件生成成功")
        uploadPath = uploadspeech(resultPath)
        if uploadPath is not None:
            shutil.rmtree(out_dir)
    return uploadPath
Exemplo n.º 26
0
    '''
    ret = None
    m = method.lower()
    if m in ('get', 'post', 'put', 'delete'):
        req = getattr(requests, m, None)
    try:
        r = req(url=url, data=data, **kwargs)
        if r:
            ret = r.json()
    except requests.exceptions.Timeout, e:
        #sys.stderr.write(str(e))
        logger.debug(str(e))
        pass
    except requests.exceptions.TooManyRedirects , e:
        #sys.stderr.write(str(e))
        logger.debug(str(e))
        pass
    except requests.exceptions.RequestException , e:
        logger.debug(str(e))
        #sys.stderr.write(str(e))
        pass
    except Exception, e:
        logger.debug(str(e))
        #sys.stderr.write(str(e))
        pass
    return ret

REQ_TIMEOUT = 3
class ReportClient(object):
    '''
    client to communicate with server