Example 1
 def findProfile(self, **kwargs):
     r = requests.post(self.url + '/account-profile/find', data=json.dumps(kwargs), headers=self.headers)
     j = r.json()
     if r.status_code != 200:
         logger.error(j)
         return None
     return j
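Every snippet on this page presupposes a module-level `logger` object configured elsewhere. A minimal sketch of that setup with the standard `logging` module (the format string is an assumption, not taken from these projects):

# Hypothetical logger setup assumed by the examples on this page.
import logging

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(name)s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)  # referenced as `logger.error(...)` below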
Example 2
 def getDevices(self, page, page_size):
     r = requests.get(self.url + '/account-dev/device_ids?page=%d&page_size=%d' % (page, page_size))
     d = r.json()
     if r.status_code != 200:
         logger.error(d)
         return ErrNo.INTERNAL_SERVER_ERROR
     return d['dids']
Example 3
def init(conn, data):
    try:
        dev = DeviceInfo()
        dev.ParseFromString(data)
    except Exception as e:
        logger.error(e)
        return None
Example 4
 def verifyMsg(self, app_key, verification_code, msg):
     payload = {'app_key': app_key, 'hash_code': verification_code, 'verify_msg': msg}
     r = requests.post(self.url + '/account-app/checkmsg', data=json.dumps(payload), headers=self.headers)
     if r.status_code != 200:
         logger.error(r.json())
         return ErrNo.UNAUTHORIZED
     return True
Example 5
 def pushObject(self, cmdID, msg, sendList):
     for target in sendList:
         try:
             conn = self.getConnectionByID(target)
             if conn:
                 conn.safeToWriteData(cmdID, msg)
         except Exception as e:
             logger.error(e)
Example 6
def push_ack(conn, data):
    try:
        ack = PushMessageAck()
        ack.ParseFromString(data)
        ids = json.loads(ack.ids)
    except Exception as e:
        logger.error(e)
        return None
Example 7
 def findApplication(self, user_id, app_key=None):
     payload = {'user_id': user_id, 'app_key': app_key}
     r = requests.post(self.url + '/account-app/find', data=json.dumps(payload), headers=self.headers)
     j = r.json()
     if r.status_code != 200:
         logger.error(j)
         return None
     return j['apps']
Example 8
 def callback(data, defer, page, page_size):
     if 'Error_code' in data:
         logger.error('get audience %s error(%d, %s)' % (audience,
                                                         data['Error_code'],
                                                         data['Error_msg']))
     else:
         dids = data['dids']
         self._sendto(dids, msg)
         if len(dids) == page_size:
             defer.addCallback(callback, defer, page + 1, page_size)
Example 9
def deleteApp(userID, app_key):
    try:
        with db_session:
            app = Application.get(app_key=app_key)
            if app and app.owner.id == userID:
                app.delete()
        return SuccessPage()
    except Exception as e:
        logger.error(e)
        return ErrorPage(ErrNo.INTERNAL_SERVER_ERROR)
Example 10
 def callback(data):
     if 'Error_code' in data:
         logger.error('register device error(%d: %s)' % (data['Error_code'], data['Error_msg']))
         return None
     dev.device_id = data['did']
     conn.device_id = dev.device_id
     ConnectionMapping[dev.device_id] = conn.transport.sessionno
     defer = remote.callRemote('login', remote.getName(), dev.device_id)
     defer.addCallback(lambda ret: dev.SerializeToString() if ret == RetNo.SUCCESS else None)
     return defer
Example 11
def createProfile(email, password):
    try:
        with db_session:
            user = Profile.get(email=email)
            if user:
                return ErrorPage(ErrNo.DUP_OPERATE)
            user = Profile(email=email, password=password)
            return SuccessPage(user.to_dict())
    except Exception as e:
        logger.error(e)
        return ErrorPage(ErrNo.INTERNAL_SERVER_ERROR)
Example 12
def subscribe(app_key, did):
    try:
        with db_session:
            app = Application.get(app_key=app_key)
            dev = Device.get(did=did)
            if not app or not dev:
                return ErrorPage(ErrNo.INVALID_PARAMETER)
            dev.apps.add(app)
            return SuccessPage()
    except Exception as e:
        logger.error(e)
        return ErrorPage(ErrNo.INTERNAL_SERVER_ERROR)
Example 13
def proccessEvent(data):
    """
    Proccess Event from QuakeWatcher
    :param data: quake entry
    :return: void
    """

    for rec in config.EMAIL_RECIPENTS:
        try:
            send_email(rec["email"], "template.html", "template.txt", "Earthquake Detected",
                       **{"data": data, "name": rec["name"]})
        except Exception as e:
            logger.error("Error sending mail to " + str(rec["email"]) + ". Stack: " + e.message)
Example 14
def register_dev(imei, platform, dev_type):
    string = imei + platform + dev_type
    did = uuid.uuid3(uuid.NAMESPACE_DNS, str(string)).hex
    logger.debug('register device info:')
    logger.debug('\timei: %s, platform: %s, dev_type: %s, did: %s' % (imei, platform, dev_type, did))
    try:
        with db_session:
            dev = Device.get(did=did)
            if not dev:
                mast_secret = uuid.uuid4().hex
                dev = Device(did=did, platform=platform, dev_type=dev_type, mast_secret=mast_secret)
            return SuccessPage(dev.to_dict())
    except Exception as e:
        logger.error(e)
        return ErrorPage(ErrNo.INTERNAL_SERVER_ERROR)
Example 15
def authorizeMessage(app_key, hash_code, verify_msg):
    try:
        with db_session:
            app = Application.get(app_key=app_key)
            if not app:
                return ErrorPage(ErrNo.UNAUTHORIZED)
            mast_secret = app.mast_secret
        mobj = hashlib.md5()
        verification_str = json.dumps(verify_msg) + mast_secret
        mobj.update(verification_str.encode('utf-8'))
        code = mobj.hexdigest().upper()
        return SuccessPage() if code == hash_code else ErrorPage(ErrNo.UNAUTHORIZED)
    except Exception as e:
        logger.error(e)
        return ErrorPage(ErrNo.INTERNAL_SERVER_ERROR)
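Example 15 implies that the client must build hash_code the same way: MD5 over the JSON-serialized message concatenated with the app's master secret, upper-cased hex digest. A minimal client-side sketch under that assumption (the json.dumps defaults must match the server's for the digests to agree):

# Hypothetical client-side counterpart of authorizeMessage().
import hashlib
import json

def compute_hash_code(verify_msg, mast_secret):
    # Serialize the message and append the secret, as the server does.
    verification_str = json.dumps(verify_msg) + mast_secret
    return hashlib.md5(verification_str.encode('utf-8')).hexdigest().upper()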
Example 16
def createApp(userID, appName):
    try:
        with db_session:
            app = Application.get(app_name=appName)
            if app:
                return ErrorPage(ErrNo.DUP_OPERATE)
            current_time = int(time.time())
            app_key = uuid.uuid3(uuid.NAMESPACE_DNS, str(appName)).hex
            mast_secret = uuid.uuid4().hex
            user = Profile[userID]
            app = Application(app_name=appName, app_key=app_key, mast_secret=mast_secret,
                              create_time=current_time, update_time=current_time, owner=user)
            return SuccessPage(app.to_dict())
    except Exception as e:
        logger.error(e)
        return ErrorPage(ErrNo.INTERNAL_SERVER_ERROR)
Example 17
 def _parse_response(response: str) -> List[Listing]:
     try:
         # NOTE: we are getting some chars we cannot parse with xmltodict, so getting rid of them
         listings_cleaned = re.sub(r"\\r\\n|\\", '', response[1:-1])
         listings = xmltodict.parse(
             listings_cleaned)['Container']['Results']['Offer']
     except (ExpatError, TypeError) as e:
         logger.error(e)
         raise ValueError
     else:
         parsed_offers = []
         if listings:
             for listing in listings:
                 try:
                     parsed_offers.append(
                         Listing(
                             **{
                                 k.strip('@').lower():
                                 urllib.parse.unquote_plus(v)
                                 for k, v in listing.items()
                             }))
                 except TypeError as e:
                     logger.error(f"Missing field or wrong name. {e}")
         return parsed_offers
Example 18
    def receive(self, text=None, bytes=None, **kwargs):
        try:
            if text:
                data = json.loads(text)
                if data[0] in ['stdin', 'stdout']:
                    self.queue().publish(self.message.reply_channel.name,
                                         json.loads(text)[1])
                elif data[0] == u'set_size':
                    self.queue().publish(self.message.reply_channel.name, text)
#                 else:
#                     self.message.reply_channel.send({"text":json.dumps(['stdout','\033[1;3;31mUnknow command found!\033[0m'])},immediately=True)
            elif bytes:
                self.queue().publish(self.message.reply_channel.name,
                                     json.loads(bytes)[1])
        except socket.error:
            self.closessh()
            self.close()
        except Exception:
            import traceback
            traceback.print_exc()
            logger.error(msg="webssh receive failed: {ex}".format(
                ex=traceback.format_exc()))
            self.closessh()
            self.close()
Example 19
 def parse_sql(self, request):
     if not self.__check_user_perms(
             request, 'databases.databases_binlog_database_server_config'):
         return "您没有权限操作此项"
     sqlList = []
     try:
         dbServer = self.__get_db(request)
         timeRange = request.POST.get('binlog_time').split(' - ')
         conn_setting = {
             'host': dbServer.db_assets.server_assets.ip,
             'port': dbServer.db_port,
             'user': dbServer.db_user,
             'passwd': dbServer.db_passwd,
             'charset': 'utf8'
         }
         binlog2sql = Binlog2sql(
             connection_settings=conn_setting,
             back_interval=1.0,
             only_schemas=dbServer.db_name,
             end_file='',
             end_pos=0,
             start_pos=4,
             flashback=True,
             only_tables='',
             no_pk=False,
             only_dml=True,
             stop_never=False,
             sql_type=['INSERT', 'UPDATE', 'DELETE'],
             start_file=request.POST.get('binlog_db_file'),
             start_time=timeRange[0],
             stop_time=timeRange[1],
         )
         sqlList = binlog2sql.process_binlog()
     except Exception as ex:
         logger.error(msg="binglog解析失败: {ex}".format(ex=ex))
     return sqlList
Example 20
    def query_db_tree(self, request=None):

        user_database_list = []
        if request.user.is_superuser:
            dbList = DataBase_Server_Config.objects.all()
        else:
            dbList = DataBase_Server_Config.objects.filter(id__in=[
                ds.db
                for ds in Database_User.objects.filter(user=request.user.id)
            ])
        for ds in dbList:
            if ds.db_rw not in ["read", "r/w"]: continue
            try:
                data = self.convert_to_dict(ds)
                try:
                    data["ip"] = ds.db_assets.server_assets.ip
                except Exception as ex:
                    data["ip"] = "未知"
                env_data = ds.get_types()
                data["id"] = data["id"] + 100000
                data["type"] = "database"
                data["text"] = "{env} - {name} - {type} - {mark}".format(
                    env=env_data[data["db_env"]],
                    type=data["db_type"],
                    name=data["db_name"],
                    mark=data["db_mark"])
                data["icon"] = "fa fa-database"
                data["opened"] = "false"
                data["children"] = []
                data.pop("db_assets_id")
                data.pop("db_passwd")
                data.pop("db_user")
                user_database_list.append(data)
            except Exception as ex:
                logger.error(msg="查询数据库失败: {ex}".format(ex=str(ex)))
        return user_database_list
Example 21
def parsing_work(crawler, rumor_info):
    try:
        fetched = False
        if not args.update:
            for rumor in RumorModel.source_create_date_index.query(
                    crawler.source,
                    RumorModel.create_date == rumor_info["date"],
                    RumorModel.link == rumor_info["link"]):
                fetched = True

        if not fetched:
            rumor_content = crawler.parse_rumor_content(rumor_info)
            rumor_item = RumorModel(
                id=rumor_content['id'],
                clarification=rumor_content['clarification'],
                create_date=rumor_content['create_date'],
                title=rumor_content['title'],
                original_title=rumor_content['original_title'],
                rumors=rumor_content['rumors'],
                preface=rumor_content['preface'],
                tags=rumor_content['tags'],
                image_link=rumor_content['image_link'],
                link=rumor_content['link'],
                source=rumor_content['source'])
            logger.info(
                "Add rumor_item with id {}, link {} to rumor ddb table.".
                format(rumor_item.id, rumor_item.link))
            rumor_item.save()
            return (_NEW_RUMOR, rumor_info)
        else:
            return (_OLD_RUMOR, rumor_info)

    except Exception:
        msg = traceback.format_exc()
        logger.error(f"Error: {msg}")
        return (_FAILED, rumor_info)
Example 22
class Inception():
    def __init__(self,host=None,name=None,user=None,passwd=None,port=None):
        self.db_host = host
        self.db_name = name
        self.db_user = user
        self.db_passwd = passwd
        self.db_port = port

    def run(self,action,auditSql):
        try:
            incept = Inception_Server_Config.objects.get(id=1)
        except Exception as ex:
            return {"status":'error',"errinfo":str(ex)}
        sql='''/*--user={db_user};--password={db_passwd};--host={db_host};{action}--port={db_port};*/\
            inception_magic_start;
            use {db_name};
            {auditSql}
            inception_magic_commit;'''.format(
                                              db_user=self.db_user,db_host=self.db_host,
                                              db_passwd = self.db_passwd,db_port = self.db_port,
                                              db_name=self.db_name,
                                              action=action,auditSql=auditSql
                                              )
        try:
            conn = MySQLdb.connect(host=incept.db_host,user='',passwd='',db='',port=int(incept.db_port))
            cur = conn.cursor()
            ret = cur.execute(sql)
            result = cur.fetchall()
            dataList = []
            for row in result:
                data = dict()
                data['stage'] = row[1]
                data['errlevel'] = row[2]
                data['stagestatus'] = row[3]
                data['errmsg'] = row[4]
                data['sql'] = row[5]
                data['affected_rows'] = row[6]
                data['sequence'] = row[7]
                data['backup_dbname'] = row[8]
                data['execute_time'] = row[9]
                data['sqlsha1'] = row[10]
                dataList.append(data)
            cur.close()
            conn.close()
            return {"status":'success','data':dataList}
        except MySQLdb.Error as e:
            logger.error(msg="Mysql Error %d: %s" % (e.args[0], e.args[1]))
            return {"status":'error',"errinfo":"Mysql Error %d: %s" % (e.args[0], e.args[1])}
Example 23
def update_domains(update_all=False):
    logger.debug('start to update domains')
    try:
        update_butian_src_domains(update_all)
    except RequestException:
        logger.error('update failed with bad network, please retry')
    except JSONDecodeError:
        logger.error('update failed with json decode error, please retry')
    except Exception:
        logger.error('an unexpected error occurred, please retry')
    logger.debug('finish update domains')
Example 24
def generate_and_send_payments(payment_message_id: str = None):
    PASS_CULTURE_IBAN = os.environ.get('PASS_CULTURE_IBAN', None)
    PASS_CULTURE_BIC = os.environ.get('PASS_CULTURE_BIC', None)
    PASS_CULTURE_REMITTANCE_CODE = os.environ.get(
        'PASS_CULTURE_REMITTANCE_CODE', None)

    TRANSACTIONS_RECIPIENTS = parse_email_addresses(
        os.environ.get('TRANSACTIONS_RECIPIENTS', None))
    PAYMENTS_REPORT_RECIPIENTS = parse_email_addresses(
        os.environ.get('PAYMENTS_REPORT_RECIPIENTS', None))
    PAYMENTS_DETAILS_RECIPIENTS = parse_email_addresses(
        os.environ.get('PAYMENTS_DETAILS_RECIPIENTS', None))
    WALLET_BALANCES_RECIPIENTS = parse_email_addresses(
        os.environ.get('WALLET_BALANCES_RECIPIENTS', None))

    not_processable_payments, payments_to_send = generate_or_collect_payments(
        payment_message_id)

    try:
        logger.info('[BATCH][PAYMENTS] STEP 3 : send transactions')
        send_transactions(payments_to_send, PASS_CULTURE_IBAN,
                          PASS_CULTURE_BIC, PASS_CULTURE_REMITTANCE_CODE,
                          TRANSACTIONS_RECIPIENTS)
    except Exception as e:
        logger.error('[BATCH][PAYMENTS] STEP 3: %s', e)

    try:
        logger.info('[BATCH][PAYMENTS] STEP 4 : send payments report')
        send_payments_report(payments_to_send + not_processable_payments,
                             PAYMENTS_REPORT_RECIPIENTS)
    except Exception as e:
        logger.error('[BATCH][PAYMENTS] STEP 4: %s', e)

    try:
        logger.info('[BATCH][PAYMENTS] STEP 5 : send payments details')
        send_payments_details(payments_to_send, PAYMENTS_DETAILS_RECIPIENTS)
    except Exception as e:
        logger.error('[BATCH][PAYMENTS] STEP 5: %s', e)

    try:
        logger.info('[BATCH][PAYMENTS] STEP 6 : send wallet balances')
        send_wallet_balances(WALLET_BALANCES_RECIPIENTS)
    except Exception as e:
        logger.error('[BATCH][PAYMENTS] STEP 6: %s', e)
Example 25
def test(arg):
    print(arg)
    from myapp.models import User
    from utils.logger import logger

    logger.error(arg)
    try:
        user = User.query.get(1)
    except AttributeError as e:
        logger.error(str(e), exc_info=True)
        return  # user would be unbound here, so bail out before using it
    user.first_name = arg
    db_session.commit()
    logger.error("Lastname fixed")
Example 26
def err(resp_dict):
    """ Error message handling with additional logging
    :return: Error response
    """
    logger.error('API related error being sent to client')
    logger.error('HTTP Status: %s' % str(resp_dict['code']))
    logger.error('Message Returned: %s' % resp_dict['message'])
    payload = {
        'error': {
            'code': resp_dict['code'],
            'message': resp_dict['message']
        }
    }
    return handle_response(payload, resp_dict['code'])
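A hypothetical call site for err, assuming the view assembles resp_dict itself and that handle_response is the project's usual response wrapper (lookup_profile is an invented helper, used only for illustration):

def get_profile(profile_id):
    profile = lookup_profile(profile_id)  # hypothetical lookup helper
    if profile is None:
        return err({'code': 404, 'message': 'Profile not found'})
    return handle_response(profile, 200)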
Example 27
def word2url(search_word, max_page, session, output_dir):
    """
        根据关键词搜索文库内容,获取文件url和对应的文件类型
    :param search_word: 待搜索的关键词
    :param max_page: 最大搜索页面数量,建议不宜过大,会导致搜索结果不准确
    :param session:
    :param output_dir: url输出的文件夹
    :return: 文件的输出位置
    """
    search_word_encode = str(search_word.encode('GB2312'))[2:].replace('\\x', '%').upper()[:-1]  # 编码search word
    basic_url = 'https://wenku.baidu.com/search?word={}&org=0&fd=0&lm=0&od=0&pn='.format(search_word_encode)
    logger.debug('basic url is {}'.format(basic_url))

    def has_title_parent_dd(tag):
        return tag.parent.name == 'dt'

    file_content = []  # store the Wenku file urls and file types

    for page_num in range(max_page):  # collect urls and types
        url = basic_url + str(page_num * 10)
        try:
            r = session.get(url)
            html = r.text.encode(r.encoding).decode(encoding='gbk')
            soup = BeautifulSoup(html, 'html.parser')
            content_list = soup.find_all(has_title_parent_dd, attrs={'class': 'fl'})
            for item in content_list:
                file_type = item.span['title']
                file_url = item.a['href']
                if file_type == 'doc' or file_type == 'txt':
                    file_content.append((file_type, file_url))
            logger.debug('finish process {}'.format(url))
        except Exception as e:
            logger.error('error while fetching urls! failing location:')
            logger.error('url:{}'.format(url))
            logger.error(e)

    # dedupe the urls
    file_content = [{'type': x[0], 'url': x[1]} for x in set(file_content)]
    output_path = os.path.join(output_dir, '{}.json'.format(search_word))  # name the output file after the keyword
    with open(output_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(file_content, ensure_ascii=False))
    logger.debug('finish word to url!!!')
    return output_path
Example 28
    def __init__(self):
        requests_times = global_config.getRaw('config', 'requests_times')
        self.cookie = global_config.getRaw('config', 'Cookie')
        self.ua = global_config.getRaw('config', 'user-agent')

        self.ua_engine = Factory.create()
        if self.ua is None:
            logger.error('user agent cannot be empty for now')
            sys.exit()

        self.cookie_pool = global_config.getRaw('config', 'use_cookie_pool')
        self.cookie_pool = self.cookie_pool == 'True'
        if self.cookie_pool:
            logger.info('using the cookie pool')
            if not os.path.exists('cookies.txt'):
                logger.error('cookies.txt does not exist')
                sys.exit()
        try:
            self.stop_times = self.parse_stop_time(requests_times)
        except Exception:
            logger.error('failed to parse requests_times in the config file; check the input (ASCII punctuation required)')
            sys.exit()
        self.global_time = 0
Example 29
def deploy_inventory_groups(request, id,format=None):  

    try:
        inventory = Deploy_Inventory.objects.get(id=id)
    except Deploy_Inventory.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND) 
    
    if request.method == 'GET':        
        dataList = []
        for ds in inventory.inventory_group.all():
            dataList.append({"name":ds.group_name,"id":ds.id})
        return JsonResponse({"code":200,"msg":"success","data":dataList})  
       
    elif request.method == "POST":     
        if not  request.user.has_perm('deploy.deploy_add_deploy_inventory'):
            return Response(status=status.HTTP_403_FORBIDDEN)            
        try:
            ext_vars = json.dumps(eval(request.POST.get('ext_vars')))
        except Exception as ex:
            ext_vars = None
            logger.error(msg="添加资产组,转化外部变量失败: {ex}".format(ex=str(ex)))
        try:
            inventoryGroups = Deploy_Inventory_Groups.objects.create(inventory=inventory,
                                             group_name=request.POST.get('group_name'),
                                             ext_vars=ext_vars)
        except Exception as ex:
            logger.error(msg="添加资产组失败: {ex}".format(ex=str(ex)))
            return JsonResponse({'msg':"添加资产组失败: {ex}".format(ex=ex),"code":500,'data':[]})   
        try:
            for aid in request.POST.get('server_list').split(','):
                Deploy_Inventory_Groups_Server.objects.create(groups=inventoryGroups,server=aid)
        except Exception as ex:
            inventoryGroups.delete()
            logger.error(msg="添加资产组成员失败: {ex}".format(ex=str(ex)))
            return JsonResponse({'msg':"添加资产组成员失败: {ex}".format(ex=ex),"code":500,'data':[]})    
        return JsonResponse({'msg':"添加成功","code":200,'data':[]}) 
Example 30
def process_start(scripts_cls, name='', process_num=None, help=True, code_key=None):
    """
    从配置中读取JD_COOKIES,开启多进程执行func。
    :param code_key:
    :param help:
    :param scripts_cls: 脚本类
    :param process_num: 进程数量
    :param name: 活动名称
    :return:
    """
    multiprocessing.freeze_support()
    process_count = multiprocessing.cpu_count()

    if process_count < PROCESS_NUM:
        process_count = PROCESS_NUM

    if process_count > len(JD_COOKIES):
        process_count = len(JD_COOKIES)

    if process_num:
        process_count = process_num

    if process_count < 1:
        println('jd_cookie is not configured and the script cannot run; please set jd_cookie in conf/config.yaml!')
        return

    pool = multiprocessing.Pool(process_count)  # process pool
    process_list = []  # process list

    println("开始执行{}, 共{}个账号, 启动{}个进程!\n".format(name, len(JD_COOKIES), process_count), style='bold green')

    kwargs_list = []

    for i in range(len(JD_COOKIES)):
        jd_cookie = JD_COOKIES[i]

        account = jd_cookie.pop('remark')
        if not account:
            account = unquote(jd_cookie['pt_pin'])

        if jd_cookie.get('ws_key'):  # use ws_key
            jd_cookie['pt_key'] = ws_key_to_pt_key(jd_cookie.get('pt_pin'), jd_cookie.get('ws_key'))
            if not jd_cookie['pt_key']:
                println('{}. account: {}, ws_key expired, cannot run {}'.format(i + 1, account, name))
                continue
        else:
            ok = sync_check_cookie(jd_cookie)
            if not ok:  # check cookie status; no notification here, a scheduled job reports expired cookies!
                println('{}. account: {}, cookie expired, cannot run {}!'.format(i + 1, account, name))
                continue
        kwargs = {
            'name': name,
            'sort': i,   # ordering, affects the order of the help codes
            'account': account
        }
        kwargs.update(jd_cookie)
        kwargs_list.append(kwargs)
        process = pool.apply_async(start, args=(scripts_cls, ), kwds=kwargs)
        process_list.append(process)

    pool.close()
    pool.join()  # wait for the processes to finish

    notify_message = ''   # notification message content

    for process in process_list:   # collect notifications
        try:
            message = process.get()
        except Exception as e:
            logger.error(e.args)
            continue
        if not message:
            continue
        notify_message += message + '\n'

    if code_key:
        timeout = random.random() * 10
        println('Submitting help codes, waiting a random {} seconds!'.format(timeout))
        time.sleep(timeout)
        if isinstance(code_key, list):
            for key in code_key:
                post_code_list(key)
                time.sleep(random.random())
        else:
            post_code_list(code_key)

    if hasattr(scripts_cls, 'run_help') and help:
        pool = multiprocessing.Pool(process_count)  # process pool
        for kwargs in kwargs_list:
            pool.apply_async(start_help, args=(scripts_cls,), kwds=kwargs)

        pool.close()
        pool.join()  # wait for the processes to finish

    if notify_message != '':
        title = '\n======📣{}📣======\n'.format(name)
        notify(title, notify_message)

    println('\nAll accounts have finished {}, exiting the program\n'.format(name))
Example 31
    def start_mining(self, mempool: Set[Transaction], chain: Chain,
                     wallet: Wallet):
        if not self.is_mining():
            if is_my_turn(wallet):
                if len(mempool) > consts.MINING_TRANSACTION_THRESHOLD or (
                        len(mempool) > 0 and abs(
                            get_time_difference_from_now_secs(
                                chain.header_list[-1].timestamp)) >
                        consts.MINING_INTERVAL_THRESHOLD):
                    vjti_chain_relayer = VJTIChainRelayer(wallet)
                    if not vjti_chain_relayer.chain_is_trusted(chain):
                        logger.error("Miner: Chain is not trusted")
                        return
                    logger.debug("Miner: Chain is trusted")
                    local_utxo = copy.deepcopy(chain.utxo)

                    manager = Manager()
                    mempool_list = manager.list()

                    def add_contract_tx_to_mempool(transaction) -> bool:
                        if transaction in mempool_list:
                            logger.debug(
                                f"Tx {transaction} already exists in mempool")
                            return True
                        else:
                            ok, error_msg = self.remove_utxo_of_tx(
                                transaction, local_utxo)
                            if ok:
                                mempool_list.append(transaction)
                                logger.info(
                                    f"Added tx {transaction} to mempool")
                                return True
                            else:
                                logger.error(
                                    f"Not adding contract tx {transaction} to mempool: {error_msg}"
                                )
                                return False

                    interface = BlockchainVMInterface(
                        add_contract_tx_to_mempool)
                    for tx in list(mempool):
                        ok, error_msg = self.remove_utxo_of_tx(tx, local_utxo)
                        if not ok:
                            logger.error(
                                f"Removing tx {tx} from mempool: {error_msg}")
                            mempool.remove(tx)
                            continue

                        if tx.contract_code != "":
                            contract_address = tx.get_contract_address()
                            if not is_valid_contract_address(contract_address):
                                logger.error(
                                    f"Removed tx {tx} from mempool: tx receiver address is invalid contract address"
                                )
                                mempool.remove(tx)
                            else:
                                try:
                                    output = interface.run_contract_code(
                                        tx.contract_code, tx.contract_priv_key)
                                    logger.debug(
                                        f"Output of contract {contract_address}: {output}"
                                    )
                                    for txn in mempool:
                                        if txn.get_contract_address(
                                        ) == contract_address:
                                            txn.contract_output = output
                                            break
                                except Exception as e:
                                    logger.error(
                                        f"Error while running code of contact: {contract_address}: {e}"
                                    )
                                    logger.error(
                                        f"Removed tx {tx} from mempool: Error while running contract code"
                                    )
                                    mempool.remove(tx)

                    mempool = mempool.union(mempool_list)
                    self.p = Process(target=self.__mine,
                                     args=(mempool, chain, wallet))
                    self.p.start()
                    logger.debug("Miner: Started mining")
Example 32
def update_butian_src_domains(update_all=False):
    domain_dict = loads_butian_domains()
    domain_first_content = requests.post(domain_info_url, {
        's': 1,
        'p': 1
    },
                                         timeout=DEFAULT_TIMEOUT)
    page_count = json.loads(
        domain_first_content.content.decode())['data']['count']

    logger.debug('start to get {} company pages'.format(page_count))

    pbar = ColorTqdm(total=page_count)

    company_id_list = []
    finish_get_page = False
    for i in range(page_count):
        current_page_num = i + 1
        domain_content = requests.post(domain_info_url, {
            's': 1,
            'p': current_page_num
        },
                                       timeout=DEFAULT_TIMEOUT)
        json_content = json.loads(domain_content.content.decode())
        company_list = json_content['data']['list']

        # update progress
        pbar.update(1)

        for company_item in company_list:
            company_id = company_item['company_id']

            # when updating everything, every company id goes in the list
            # otherwise, only new company ids are added
            if not update_all and company_id in domain_dict:
                finish_get_page = True
                logger.debug('company_id found in dict, skip after')
                break
            else:
                company_id_list.append(company_id)
        if finish_get_page:
            break
    pbar.close()

    result_list = []
    # these need a detail request before they can be added to the domains dict
    need_get_list = []

    # only add new company ids
    if not update_all:
        for company_id in domain_dict:
            result_list.append(domain_dict[company_id])
        for company_id in company_id_list:
            if company_id not in domain_dict:
                need_get_list.append(company_id)
    # update everything using all company ids
    else:
        for company_id in company_id_list:
            if company_id in domain_dict:
                result_list.append(domain_dict[company_id])
            else:
                need_get_list.append(company_id)

    detail_request_list = []
    logger.debug('start to request new {} domains detail'.format(
        len(need_get_list)))
    try:
        for id in need_get_list:
            request_config = {}
            request_config['method'] = 'get'
            request_config['url'] = domain_detail_url + id
            detail_request_list.append(request_config)
        butian_company_detail_grab = ButianCompanyDetailGrab(
            detail_request_list)
        butian_company_detail_grab.set_cookie(BUTIAN_SRC_COOKIES)
        butian_company_detail_grab.event_loop()
        company_detail_list = butian_company_detail_grab.results
    except Exception:
        logger.error(
            'request domain details failed in butian src, cookies may be invalid')
        company_detail_list = []
    else:
        logger.info('add {} new domains from butian src'.format(
            len(company_detail_list)))
    result_list.extend(company_detail_list)
    with open(domains_path, 'w+') as f:
        for detail in result_list:
            f.write(json.dumps(detail, ensure_ascii=False) + '\n')
    logger.debug('save {} domains to butian_src_domains.txt'.format(
        len(result_list)))
Example 33
 def callNode(self, *args, **kwargs):
     node = self.loadBalance()
     if not node:
         logger.error("node doesn't exists")
         return
     return node.callbackNode(*args, **kwargs)
Example 34
 def callNodeByID(self, nodeID, *args, **kwargs):
     node = self._nodes.get(nodeID, None)
     if not node:
         logger.error("nodeID %s doesn't exists" % nodeID)
         return
     return node.callbackNode(*args, **kwargs)
Example 35
        if status == 0:
            log_url = get_log_url(rst_json)
            print("log url:%s" % log_url)
            job_url = get_job_url(log_url)
            print("job url:%s" % job_url)
            job_log_url = job_url + "\n" + log_url
            #job_status_update.job_update(_job_key, "acceptted", log_url, "")
            job_status_update.job_update(_job_key, "acceptted", job_log_url,
                                         "")
            #job_space = helpers.job_space(_job_key)
            #log_cmd = "arena logs -f " + rst_json.get("job_name") + " >> " + job_space + "/weiflow-from-weiclient.log"
            #print("logs cmd:%s" % log_cmd)
            #status, output = commands.getstatusoutput(log_cmd)
            #job_url_update.job_url_update(_job_key)
            #job_status_update.job_status_update(_job_key = _job_key, _status = "finished")
        else:
            job_status_update.job_status_update(_job_key=_job_key,
                                                _status="failed")
            print(output)
    except Exception as e:
        logger.warning(e)
        job_status_update.job_status_update(_job_key=_job_key,
                                            _status="failed")


if __name__ == '__main__':
    if len(sys.argv) != 2:
        logger.error("parameter error when running %s." % __file__)
        sys.exit(1)
    job_key = sys.argv[1]
    print(job_submit(job_key))
Example 36
def extract_clarification_and_rumor(content_soup):
    try:
        # clarification & rumor
        obj_list = content_soup.find(class_="field-type-text-with-summary")
        allP = obj_list.find_all(['p', 'h2', 'strong'])

        rumor_list = []
        clarification_list = []
        startToCrawlRumor = False
        startToCrawlClarification = False
        for p in allP:
            content = remove_space(p.text)
            if p.name == 'strong':
                if content == '背景':
                    startToCrawlRumor = True
                    startToCrawlClarification = False
                elif content == '查核':
                    startToCrawlRumor = False
                    startToCrawlClarification = True
                elif content == '結論':
                    break
            elif p.name == 'h2':
                if content == '背景':
                    startToCrawlRumor = True
                    startToCrawlClarification = False
                elif content == '查核':
                    startToCrawlRumor = False
                    startToCrawlClarification = True
                elif content == '結論':
                    break
            else:
                if content == '背景':
                    startToCrawlRumor = True
                    startToCrawlClarification = False

                if content == '結論':
                    continue

                if content == '參考資料':
                    break

                if content == '資料來源':
                    break

                if content == '補充資料':
                    break
                TAG_RE = re.compile(r'^圖.*:')
                if TAG_RE.search(content):
                    continue

                TAG_RE = re.compile(r'^表.*:')
                if TAG_RE.search(content):
                    continue

                if startToCrawlRumor:
                    rumor_list.append(content)

                if startToCrawlClarification:
                    clarification_list.append(content)

        if len(rumor_list) > 0:
            first_found = re.findall(
                ".+指出:|.+訊息:|.+指稱:|.+傳言:|.+宣稱:|.+流傳:|.+如下:", rumor_list[0])
            if len(first_found) > 0:
                rumor_list[0] = rumor_list[0].replace(first_found[0], "")

        clarification = "".join(clarification_list)
        rumor = "".join(rumor_list)
        return clarification, rumor

    except Exception:
        msg = traceback.format_exc()
        logger.error(f"Error: {msg}")
        return None, None
Example 37
 def __init__(self, data: str = None):
     self.data = data
     logger.error(self.__str__())
Example 38
 def dropConnectionByID(self, connID):
     try:
         del self.connects[connID]
     except Exception as e:
         logger.error(e)
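The try/except above only guards against a missing key; dict.pop with a default reaches the same result without raising, as in this sketch:

 def dropConnectionByID(self, connID):
     # pop() with a default never raises, so no except block is needed
     self.connects.pop(connID, None)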
Example 39
        docker = Docker(new_image)
        docker.remove_container()
        docker.remove()

        util_rm_file_or_folder(DOCGEN_CLIENT_PATH, verbose=True)
        util_rm_file_or_folder(SITE_PACKAGES, verbose=True)
        print("Docgen is been uninstalled")
        return
    except OSError:
        print(
            "To uninstall docgen completely, Try the same command with sudo (root permissions)."
        )


if __name__ == "__main__":
    option = sys.argv[1].strip(',')
    if option == "install":
        install_docgen()
    elif option == "uninstall":
        uninstall_docgen()
    elif option == "log":
        cmd_exec = subprocess.Popen("docgen",
                                    shell=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
        out, err = cmd_exec.communicate()
        if err:
            print("Please check logs at %s" % LOG_PATH)
            logger.error(err)
Example 40
    def run(self):
        """
        Run
        :return:
        """
        start_time = time.time()
        subs = self.load_sub_domain_dict()
        self.remainder = len(subs)
        logger.debug('Sub domain dict count: {c}'.format(c=len(subs)))
        logger.debug('Generate coroutines...')
        # Verify that all DNS server results are consistent
        stable_dns = []
        wildcard_ips = None
        last_dns = []
        only_similarity = False
        for dns in self.dns_servers:
            delay = self.check(dns)
            if not delay:
                logger.warning("@{dns} is not available, skip this DNS server".format(dns=dns))
                continue
            self.resolver = aiodns.DNSResolver(loop=self.loop, nameservers=[dns], timeout=self.resolve_timeout)
            job = self.query(self.wildcard_sub)
            sub, ret = self.loop.run_until_complete(job)
            logger.debug('@{dns} {sub} {ips}'.format(dns=dns, sub=sub, ips=ret))
            if ret is not None:
                ret = sorted(ret)

            if dns in self.stable_dns_servers:
                wildcard_ips = ret
            stable_dns.append(ret)

            if ret:
                equal = [False for r in ret if r not in last_dns]
                if len(last_dns) != 0 and False in equal:
                    only_similarity = self.is_wildcard_domain = True
                    logger.debug('Is a random resolve subdomain.')
                    break
                else:
                    last_dns = ret

        is_all_stable_dns = stable_dns.count(stable_dns[0]) == len(stable_dns)
        if not is_all_stable_dns:
            logger.debug('Is all stable dns: NO, use the default dns server')
            self.resolver = aiodns.DNSResolver(loop=self.loop, nameservers=self.stable_dns_servers, timeout=self.resolve_timeout)
        # Wildcard domain
        is_wildcard_domain = not (stable_dns.count(None) == len(stable_dns))
        if is_wildcard_domain or self.is_wildcard_domain:
            if not self.skip_rsc:
                logger.debug('This is a wildcard domain, will enumeration subdomains use by DNS+RSC.')
            else:
                logger.debug('This is a wildcard domain, but it is --skip-rsc mode now, it will be drop all random resolve subdomains in results')
            self.is_wildcard_domain = True
            if wildcard_ips is not None:
                self.wildcard_ips = wildcard_ips
            else:
                self.wildcard_ips = stable_dns[0]
            logger.debug('Wildcard IPS: {ips}'.format(ips=self.wildcard_ips))
            if not self.skip_rsc:
                try:
                    self.wildcard_html = requests.get('http://{w_sub}.{domain}'.format(w_sub=self.wildcard_sub, domain=self.domain), headers=self.request_headers, timeout=10, verify=False).text
                    self.wildcard_html = self.data_clean(self.wildcard_html)
                    self.wildcard_html_len = len(self.wildcard_html)
                    self.wildcard_html3 = requests.get('http://{w_sub}.{domain}'.format(w_sub=self.wildcard_sub3, domain=self.domain), headers=self.request_headers, timeout=10, verify=False).text
                    self.wildcard_html3 = self.data_clean(self.wildcard_html3)
                    self.wildcard_html3_len = len(self.wildcard_html3)
                    logger.debug('Wildcard domain response html length: {len} 3length: {len2}'.format(len=self.wildcard_html_len, len2=self.wildcard_html3_len))
                except requests.exceptions.SSLError:
                    logger.warning('SSL Certificate Error!')
                except requests.exceptions.ConnectTimeout:
                    logger.warning('Request response content failed, check network please!')
                except requests.exceptions.ReadTimeout:
                    self.wildcard_html = self.wildcard_html3 = ''
                    self.wildcard_html_len = self.wildcard_html3_len = 0
                    logger.warning('Request response content timeout, {w_sub}.{domain} and {w_sub3}.{domain} maybe not a http service, content will be set to blank!'.format(w_sub=self.wildcard_sub,
                                                                                                                                                                             domain=self.domain,
                                                                                                                                                                             w_sub3=self.wildcard_sub3))
                except requests.exceptions.ConnectionError:
                    logger.error('ESD can\'t get the response text so the rsc will be skipped. ')
                    self.skip_rsc = True
        else:
            logger.debug('Not a wildcard domain')

        if not only_similarity:
            self.coroutine_count = self.coroutine_count_dns
            tasks = (self.query(sub) for sub in subs)
            self.loop.run_until_complete(self.start(tasks, len(subs)))
            logger.debug("Brute Force subdomain count: {total}".format(total=self.count))
        dns_time = time.time()
        time_consume_dns = int(dns_time - start_time)

        # DNSPod JSONP API
        logger.debug('Collect DNSPod JSONP API\'s subdomains...')
        dnspod_domains = self.dnspod()
        logger.debug('DNSPod JSONP API Count: {c}'.format(c=len(dnspod_domains)))

        # CA subdomain info
        ca_subdomains = []
        logger.debug('Collect subdomains in CA...')
        ca_subdomains = CAInfo(self.domain).get_subdomains()
        if len(ca_subdomains):
            tasks = (self.query(sub) for sub in ca_subdomains)
            self.loop.run_until_complete(self.start(tasks, len(ca_subdomains)))
        logger.debug('CA subdomain count: {c}'.format(c=len(ca_subdomains)))

        # DNS Transfer Vulnerability
        transfer_info = []
        logger.debug('Check DNS Transfer Vulnerability in {domain}'.format(domain=self.domain))
        transfer_info = DNSTransfer(self.domain).transfer_info()
        if len(transfer_info):
            logger.info('DNS Transfer Vulnerability found in {domain}!'.format(domain=self.domain))
            tasks = (self.query(sub) for sub in transfer_info)
            self.loop.run_until_complete(self.start(tasks, len(transfer_info)))
        logger.info('DNS Transfer subdomain count: {c}'.format(c=len(transfer_info)))

        # Use search engines to enumerate subdomains (support Baidu,Bing,Google,Yahoo)
        subdomains = []
        if self.engines:
            logger.debug('Enumerating subdomains with search engine')
            subdomains_queue = multiprocessing.Manager().list()
            enums = [enum(self.domain, q=subdomains_queue, verbose=False, proxy=self.proxy) for enum in self.engines]
            for enum in enums:
                enum.start()
            for enum in enums:
                enum.join()
            subdomains = set(subdomains_queue)
            if len(subdomains):
                tasks = (self.query(sub) for sub in subdomains)
                self.loop.run_until_complete(self.start(tasks, len(subdomains)))
            logger.info('Search engines subdomain count: {subdomains_count}'.format(subdomains_count=len(subdomains)))

        if self.is_wildcard_domain and not self.skip_rsc:
            # Response similarity comparison
            total_subs = set(subs + dnspod_domains + list(subdomains) + transfer_info + ca_subdomains)
            self.wildcard_subs = list(set(subs).union(total_subs))
            logger.info('Enumerates {len} sub domains by DNS mode in {tcd}.'.format(len=len(self.data), tcd=str(datetime.timedelta(seconds=time_consume_dns))))
            logger.debug('Will continue to test the distinct({len_subs}-{len_exist})={len_remain} domains used by RSC, the speed will be affected.'.format(len_subs=len(subs), len_exist=len(self.data),
                                                                                                                                                          len_remain=len(self.wildcard_subs)))
            self.coroutine_count = self.coroutine_count_request
            self.remainder = len(self.wildcard_subs)
            tasks = (self.similarity(sub) for sub in self.wildcard_subs)
            self.loop.run_until_complete(self.start(tasks, len(self.wildcard_subs)))

            # Distinct last domains use RSC
            # Maybe misinformation
            # self.distinct()

            time_consume_request = int(time.time() - dns_time)
            logger.debug('Requests time consume {tcr}'.format(tcr=str(datetime.timedelta(seconds=time_consume_request))))
        # RS(redirect/response) domains
        while len(self.domains_rs) != 0:
            logger.debug('RS(redirect/response) domains({l})...'.format(l=len(self.domains_rs)))
            tasks = (self.similarity(''.join(domain.rsplit(self.domain, 1)).rstrip('.')) for domain in self.domains_rs)

            self.loop.run_until_complete(self.start(tasks, len(self.domains_rs)))

        # write output
        tmp_dir = '/tmp/esd'
        if not os.path.isdir(tmp_dir):
            os.mkdir(tmp_dir, 0o777)
        output_path_with_time = '{td}/.{domain}_{time}.esd'.format(td=tmp_dir, domain=self.domain, time=datetime.datetime.now().strftime("%Y-%m_%d_%H-%M"))
        output_path = '{td}/.{domain}.esd'.format(td=tmp_dir, domain=self.domain)
        if len(self.data):
            max_domain_len = max(map(len, self.data)) + 2
        else:
            max_domain_len = 2
        output_format = '%-{0}s%-s\n'.format(max_domain_len)
        with open(output_path_with_time, 'w') as opt, open(output_path, 'w') as op:
            for domain, ips in self.data.items():
                # The format is consistent with other scanners to ensure that they are
                # invoked at the same time without increasing the cost of
                # resolution
                if ips is None or len(ips) == 0:
                    ips_split = ''
                else:
                    ips_split = ','.join(ips)
                con = output_format % (domain, ips_split)
                op.write(con)
                opt.write(con)

        logger.debug('Output: {op}'.format(op=output_path))
        logger.debug('Output with time: {op}'.format(op=output_path_with_time))
        logger.info('Total domain: {td}'.format(td=len(self.data)))
        time_consume = int(time.time() - start_time)
        logger.debug('Time consume: {tc}'.format(tc=str(datetime.timedelta(seconds=time_consume))))
        return self.data
Example 41
def sephora_start(url, **kwargs):
    try:
        return parse_sephora(fetch_sephora(url), url)
    except Exception as e:
        logger.error(f"sephora: unknown error: {e}")
Example 42
    robot = WuziRobot(logger)
    try:
        robot.prepare()
        robot.set_tool_power_type(power_type=RobotToolPowerType.OUT_0V)
        import time

        time.sleep(5)
        robot.move_to_init()
        robot.move_to_zero_z()
        # robot.set_tool_power_type(power_type=RobotToolPowerType.OUT_24V)
        robot.move_to_init()
        robot.move_to_zero_z()
        robot.set_tool_power_type(power_type=RobotToolPowerType.OUT_24V)
        robot.move_to_init()

        # disconnect from the server
        robot.disconnect()
    except RobotError as e:
        logger.error("{0} robot Event:{1}".format(robot.get_local_time(), e))

    finally:
        # disconnect from the server
        if robot.connected:
            # shut down the robot arm
            robot.robot_shutdown()
            # disconnect from the robot arm
            robot.disconnect()
        # release library resources
        Auboi5Robot.uninitialize()
        logger.info("{0} test completed.".format(Auboi5Robot.get_local_time()))
Example 43
 def parse(self, response):
     """
     解析入口
     :param response:
     :return:
     """
     # 有提取码
     if self.has_key:
         try:
             data = json.loads(response.text)
             if data['errno'] != 0:
                 logger.error(
                     'parse has-key fail, url:{}, pwd:{}, errno:{}'.format(
                         response.url, self.pwd, str(data['errno'])))
                 return
             cookie_jar = CookieJar()
             cookie_jar.extract_cookies(response, response.request)
             self.save_cookie(cookie_jar)
             url = 'https://pan.baidu.com/s/{}'.format(response.meta['key'])
             meta = {'shorturl': response.meta['key']}
             logger.info('parse has-key succ, url:{}, key:{}'.format(
                 url, self.pwd))
             yield Request(url,
                           cookies=self.cookies,
                           callback=self.parse_data_key,
                           dont_filter=True,
                           meta=meta)
         except Exception as e:
             logger.error(
                 'parse has-key fail: exception, url:{}, pwd:{}, err_msg:{}'
                 .format(response.url, self.pwd, e))
     # without an extraction code
     else:
         try:
             data = json.loads(response.text)
             if data['errno'] == -3:
                 url = 'https://pan.baidu.com/share/list?web=5&app_id=250528&channel=chunlei&clienttype=5&desc=1' \
                       '&showempty=0&order=time&root=1&shorturl={}'.format(response.meta['key'][1:])
                 meta = {
                     'share_username': data['share_username'],
                     'share_photo': data['share_photo'],
                     'ctime': data['ctime'],
                     'shorturl': data['shorturl'],
                     'expiredtype': data['expiredtype']
                 }
                 logger.info('parse no-key succ, url:{}, key:{}'.format(
                     url, self.pwd))
                 yield Request(url,
                               dont_filter=True,
                               callback=self.parse_data_nokey,
                               meta=meta)
             elif data['errno'] == -21:
                 logger.error(
                     'parse no-key fail: share cancel, url:{}'.format(
                         response.url))
             elif data['errno'] == -105 or data['errno'] == 2:
                 logger.error(
                     'parse no-key fail: link error, url:{}'.format(
                         response.url))
             else:
                 logger.error(
                     'parse no-key fail: unknown error, url:{}'.format(
                         response.url))
         except Exception as e:
             logger.error(
                 'parse no-key fail: exception, url:{}, err_msg:{}'.format(
                     response.url, e))
Example 44
def report_it(e):
    logger.error("产生了无法预知的错误")
    logger.error("错误内容如下:")
    error = form_report(e)
    logger.error(error['string'])
    logger.error('文件 %s' % error['file'])
    logger.error('行号 %s' % error['line'])
    logger.info('正在尝试反馈错误...')
    logger.info('尝试发送bug报告邮件...')
    send_report(error)
    logger.info('发送bug报告邮件成功')
    try:
        logger.info('尝试把bug发送到远程数据库...')
        from database.database import DataBase
        _db = DataBase()
        _db.error_report(error)
    except Exception as e2:
        logger.error('把bug发送到远程数据库失败')
        send_report(e2)
    logger.info('发送bug报告完成。')
    sys.exit(1)
Example 45
    def parse_data_key(self, response):
        """
        解析并保存有提取码类型的第一级目录/文件
        :param data:
        :return:
        """
        try:
            res = re.search(r'yunData.setData\((.*?)\);', response.text)
            if not res:
                logger.error(
                    'parse has-key first data fail, url:{}, pwd:{}'.format(
                        response.url, self.pwd))
                return

            data = json.loads(res.group(1))
            if data and data['file_list']['errno'] != 0:
                logger.error(
                    'parse data fail, url:{}, pwd:{}, errno:{}'.format(
                        response.url, self.pwd, data['file_list']['errno']))
                return

            for file in data['file_list']['list']:
                yield FileItem(url=response.meta['shorturl'],
                               pwd=self.pwd,
                               expiredtype=data['expiredType'],
                               fs_id=file['fs_id'],
                               parent_id=0,
                               size=file['size'],
                               isdir=int(file['isdir']),
                               local_ctime=file['local_ctime'],
                               local_mtime=file['local_mtime'],
                               md5=file['md5'],
                               path=file['path'],
                               server_ctime=file['server_ctime'],
                               server_filename=file['server_filename'],
                               share_id=data['shareid'],
                               uk=data['uk'])

                if int(file['isdir']) == 1:
                    url = 'https://pan.baidu.com/share/list?uk={}&shareid={}&order=other&desc=1&showempty=0&web=1&' \
                          'dir=/sharelink{}-{}/{}&channel=chunlei&web=1&app_id=250528'.format(data['uk'], data['shareid'],
                                data['uk'], file['fs_id'], file['server_filename'])
                    meta = {
                        'uk': data['uk'],
                        'share_id': data['shareid'],
                        'fs_id': file['fs_id'],
                        'parent_id': file['fs_id'],
                        'filepath': file['server_filename']
                    }
                    yield Request(url=url,
                                  cookies=self.cookies,
                                  dont_filter=True,
                                  callback=self.parse_dir,
                                  meta=meta)

            yield UserItem(url=response.meta['shorturl'],
                           pwd=self.pwd,
                           share_username=data['linkusername'],
                           share_photo=data['photo'],
                           ctime=data['ctime'])
            logger.info(
                'parse has-key first data succ, url:{}, key:{}, share_id:{}, uk:{}'
                .format(response.url, self.pwd, data['shareid'], data['uk']))
        except Exception as e:
            logger.error(
                'parse has-key first data fail: exception, url:{}, err_msg:{}'.
                format(response.url, e))
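parse_dir, the callback registered above, lives elsewhere in the spider. A sketch of the shape such a callback could take, assuming the /share/list endpoint returns JSON with an errno field and a list of file dicts; the field names mirror the ones used above, but this is an illustration, not the project's actual callback:

    def parse_dir(self, response):
        # Sketch: emit items for a directory listed by /share/list.
        data = json.loads(response.text)
        if data.get('errno', -1) != 0:
            logger.error('parse dir fail, url:{}, errno:{}'.format(
                response.url, data.get('errno')))
            return
        for file in data.get('list', []):
            yield FileItem(url=response.url,
                           fs_id=file['fs_id'],
                           parent_id=response.meta['parent_id'],
                           isdir=int(file['isdir']),
                           path=file['path'],
                           server_filename=file['server_filename'],
                           share_id=response.meta['share_id'],
                           uk=response.meta['uk'])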
Esempio n. 46
0
def DefferedErrorHandle(e):
    # Twisted-style errback: log the failure and swallow it (returning
    # None turns the failure into a plain result for later callbacks).
    logger.error(str(e))
    return
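Judging by its name, this function is meant as an errback for a Twisted Deferred. A minimal usage sketch under that assumption:

from twisted.internet.defer import Deferred

d = Deferred()
d.addErrback(DefferedErrorHandle)
# Firing the deferred with an exception routes the Failure into the
# handler, which logs it; returning None stops further propagation.
d.errback(RuntimeError('boom'))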
Esempio n. 47
0
 def callNodeByName(self, nodeName, *args, **kwargs):
     node = self.getNodeByName(nodeName)
     if not node:
         logger.error("nodeName %s doesn't exists" % nodeName)
         return
     return node.callbackNode(*args, **kwargs)
Esempio n. 48
0
def grant(request):
    """微信公众号授权回调
    """
    try:
        auth_code = request.GET['auth_code']
        ex = int(request.GET['expires_in'])
    except (KeyError, ValueError, TypeError):
        logger.warning('Invalid call|%s', request.GET)
        return HttpResponse('error')

    logger.info('Official Account authorization callback|%s|%d', auth_code, ex)

    component_access_token = global_store.get_component_access_token()
    if not component_access_token:
        logger.error('component_access_token is missing')
        return HttpResponse('error')

    # 1. Exchange auth_code for the Official Account's API call credentials

    api = 'https://api.weixin.qq.com/cgi-bin/component/api_query_auth?' \
          'component_access_token={0}'.format(component_access_token)
    data = {
        'component_appid': config.AppID,
        'authorization_code': auth_code,
    }
    resp = requests.post(api, data=json.dumps(data), timeout=1)
    resp_data = json.loads(resp.text)

    auth_info = resp_data['authorization_info']
    auth_appid = auth_info['authorizer_appid']
    auth_access_token = auth_info['authorizer_access_token']
    auth_refresh_token = auth_info['authorizer_refresh_token']
    auth_ex = int(auth_info['expires_in'])

    logger.info('Obtained API credentials for Official Account|%s', auth_appid)

    # 2. Fetch the authorizer's basic account info:
    # avatar, nickname, account type, verification type, WeChat ID,
    # original ID and QR-code image URL

    api = 'https://api.weixin.qq.com/cgi-bin/component/api_get_authorizer_info?' \
          'component_access_token={0}'.format(component_access_token)
    data = {'component_appid': config.AppID, 'authorizer_appid': auth_appid}
    resp = requests.post(api, data=json.dumps(data), timeout=1)
    resp_data = json.loads(resp.text)

    auth_info = resp_data['authorizer_info']
    nick_name = auth_info['nick_name']
    head_img = auth_info['head_img']
    service_type_info = int(auth_info['service_type_info']['id'])  # 2 means a service account
    user_name = auth_info['user_name']
    principal_name = auth_info['principal_name']
    qrcode_url = auth_info['qrcode_url']

    logger.info('Obtained authorizer account info|%s|%s', nick_name, principal_name)
    if service_type_info != 2:
        logger.error('The connected Official Account is not a service account|%s|%s', nick_name, principal_name)
        return HttpResponse('Authorization failed: please make sure your WeChat Official Account is a service account')

    # 3. Connect the Official Account to this server.
    # If it is already connected, update its basic info (works around
    # authorization codes expiring before they are refreshed).

    # Expire the token 11 minutes early
    expire_ts = int(time.time()) + auth_ex - 11 * 60
    auth_wechat, created = AuthWechat.objects.update_or_create(
        appid=auth_appid,
        defaults={
            'access_token': auth_access_token,
            'refresh_token': auth_refresh_token,
            'expire_ts': expire_ts,
            'nick_name': nick_name,
            'head_img': head_img,
            'user_name': user_name,
            'principal_name': principal_name,
            'qrcode_url': qrcode_url,
        })
    if created:
        logger.info('Authorized and connected WeChat Official Account|%s|%s', nick_name, principal_name)
    else:
        logger.info('Updated authorized WeChat Official Account|%s|%s', nick_name, principal_name)

    # 4. After a successful connection, clear the pre-auth code:
    # a pre-auth code can only be consumed by one Official Account
    global_store.delete_pre_auth_code()

    return HttpResponse('Congratulations, authorization succeeded')
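The view is wired up like any other Django view. A minimal URLconf sketch; the module layout and route name below are illustrative, not taken from the project:

# urls.py (illustrative)
from django.urls import path
from . import views

urlpatterns = [
    # WeChat redirects the browser here after the user confirms authorization
    path('wechat/grant/', views.grant, name='wechat-grant'),
]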
Esempio n. 49
0
    else:
        trainer.evaluate()
        logger.info("Finish evaluating")


def make_log_files(config):
    """
    Sets up log directories and saves git diff and command line.
    """
    config.run_name = '{}.{}.{}'.format(config.prefix, config.seed,
                                        config.suffix)

    config.log_dir = os.path.join(config.log_root_dir, config.run_name)
    logger.info('Create log directory: %s', config.log_dir)
    os.makedirs(config.log_dir, exist_ok=True)

    if config.is_train:
        # log config
        param_path = os.path.join(config.log_dir, 'params.json')
        logger.info('Store parameters in %s', param_path)
        with open(param_path, 'w') as fp:
            json.dump(config.__dict__, fp, indent=4, sort_keys=True)


if __name__ == '__main__':
    args, unparsed = argparser()
    if len(unparsed):
        logger.error('Unparsed argument is detected:\n%s', unparsed)
    else:
        run(args)
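argparser is a project helper; judging by the (args, unparsed) pair it returns, it likely wraps argparse's parse_known_args. A minimal sketch under that assumption, with flag names inferred from the config fields used in make_log_files:

import argparse

def argparser():
    parser = argparse.ArgumentParser()
    parser.add_argument('--prefix', type=str, default='run')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--suffix', type=str, default='')
    parser.add_argument('--log_root_dir', type=str, default='logs')
    parser.add_argument('--is_train', action='store_true')
    # parse_known_args returns (namespace, unrecognized-tokens), the
    # exact pair unpacked and checked in the __main__ block above.
    return parser.parse_known_args()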
Esempio n. 50
0
def article_edit(request, pid):
    try:
        article = Post.objects.select_related().get(id=pid)
    except Exception as ex:
        logger.error(msg="文章不存在: {ex}".format(ex=ex))
        return render(request, 'wiki/wiki_edit.html', {
            "user": request.user,
            "errorInfo": ex
        })
    if request.method == "GET":
        tagList = Tag.objects.all()
        categoryList = Category.objects.all()
        article.tag = [t.id for t in article.tags.all()]
        return render(
            request, 'wiki/wiki_edit.html', {
                "user": request.user,
                "tagList": tagList,
                "categoryList": categoryList,
                "article": article
            })
    elif request.method == "POST":
        title = request.POST.get('title')
        content = request.POST.get('content')
        category = request.POST.get('category')
        tags = request.POST.getlist('tag[]')
        try:
            category = Category.objects.get(id=category)
        except Exception as ex:
            logger.warn(msg="获取分类失败: {ex}".format(ex=str(ex)))
            return JsonResponse({
                'msg': "获取分类失败: {ex}".format(ex=str(ex)),
                "code": 500,
                'data': []
            })
        try:
            Post.objects.filter(id=pid).update(
                title=title,
                content=content,
                category=category,
                author=User.objects.get(username=request.user))
        except Exception as ex:
            logger.warn(msg="更新文章失败: {ex}".format(ex=str(ex)))
            return JsonResponse({
                'msg': "更新文章失败: {ex}".format(ex=str(ex)),
                "code": 500,
                'data': []
            })
        try:
            newTagsList = []
            for tg in tags:
                newTagsList.append(int(tg))
        except Exception as ex:
            logger.warn(msg="获取文章标签失败: {ex}".format(ex=ex))
        try:
            oldTagsList = [t.id for t in article.tags.all()]
            addTagsList = list(set(newTagsList).difference(set(oldTagsList)))
            delTagsList = list(set(oldTagsList).difference(set(newTagsList)))
            for tg in addTagsList:
                tag = Tag.objects.get(id=tg)
                article.tags.add(tag)
            for tg in delTagsList:
                tag = Tag.objects.get(id=tg)
                article.tags.remove(tag)
        except Exception as ex:
            logger.warn(msg="更新文章标签失败: {ex}".format(ex=ex))
        return JsonResponse({'msg': "Article updated successfully", "code": 200, 'data': []})
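The hand-rolled add/remove diff over tag ids can be collapsed into one call: Django's related manager ships set(), which computes the same difference internally. An equivalent of the last try block, using the same variables as above:

# set() accepts Tag instances or primary keys and performs the
# add-the-missing / remove-the-stale reconciliation in one step.
article.tags.set(newTagsList)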
Esempio n. 51
0
def handler_404(error):
    logger.error(f"{error}")
    return f"<p>{error}</p><br><p>You have wandered into a wasteland of knowledge...</p>"