Example #1
    def state_changed(self, new_state):
        global AudioQueue

        if new_state == group.State.BUFFERING:
            self.silence_samples = 0
            self.silence_samples_pending = 0
            self.silence_samples_sent = 0
            self.development_audio_drop_prescaler = 100

        if self.m_state != new_state:
            log.debug('state changing from %s to %s' %
                      (self.m_state, new_state))
            self.m_state = new_state
            play_groups = self.playing_groups()

            for _group in play_groups:
                if new_state == group.State.BUFFERING:
                    _group.send({'runtime': {'command': 'buffering'}})
                elif new_state == group.State.STOPPED:
                    _group.stop_playing()
                elif new_state == group.State.PLAYING:
                    _group.start_playing()
                else:
                    log.critical('internal error #0082')
        else:
            log.debug('ignoring a state change request from %s to %s' %
                      (self.m_state, new_state))
Example #2
    def run(self):
        try:
            while not self.terminated:
                try:
                    event = self.input_mux.event_poll()
                except queue.Empty:
                    continue

                key = event['key']
                value = event['value']

                if key == 'audio':
                    self.play_sequencer.new_audio(value)
                elif key == 'codec':
                    self.play_sequencer.set_codec(value)
                elif key == 'state':
                    self.play_sequencer.set_state(value)
                elif key == 'volume':
                    self.play_sequencer.set_volume(value)
                else:
                    log.critical('got an unknown key %s' % key)

        except Exception as e:
            log.critical("server loop caught exception '%s'" % str(e))
            self.terminate()

        log.debug('server exits')
Example #3
 def __init__(self):
     self.param = Param()
     self.args = self.param.options()
     logger.debug("All args' final value is:\n{}".format(
         self.args.__dict__))
     self.raw_data = RawDataFactory()
     self.comp_data = ComposeData(self.args, self.raw_data)
Example #4
 def send_messages(self, email_messages, **kwargs):
     logger.debug("send_messages %s" % len(email_messages))
     results = []
     kwargs['_backend_init_kwargs'] = self.init_kwargs
     for msg in email_messages:
         results.append(send_email.delay(msg, **kwargs))
     return results
Example #5
def url_split(url):
    obj = {}
    if url:
        r = urlparse.urlparse(url.lower())
        obj = {
            'scheme': r.scheme,
            'hostname': r.hostname,
            'path': r.path,
            'params': {}
        }

        if r.query:
            for q in re.split('&', r.query):
                if q:
                    # decode Chinese characters in the URL
                    try:
                        q = targetDecode(q)  # urlparse.unquote(q).decode('utf-8')
                    except Exception as e:
                        logger.debug(q)
                        logger.exception(e)

                    pa = re.split('=', q)
                    if pa[0]:
                        obj['params'][pa[0].strip()] = pa[1].strip(
                        ) if len(pa) > 1 else ''

    return obj
Example #6
 def __init__(self, alsa_src_config):
     super(SourceAlsa, self).__init__(name='alsa')
     self.config = alsa_src_config
     self.client_buffer = self.config['client_buffer']
     log.debug('starting alsasource')
     self.is_playing = False
     self.start()
Example #7
def play_function_7012(request):
    """
    Change the player's status.
    """
    req = GamePlayerReadyReq()
    req.ParseFromString(request['body'])

    logger.debug('req:%s\n, desk:%s\n', req, request['desk'])
    request['desk'].player_status_change(request['user'].uin, req)

    evt = GamePlayerReadyEvt()
    evt.deskid = request['desk'].id
    if 'game_start' in request['desk'].timeout_info and request[
            'desk'].type == config.DESK_TYPE_MJ_WZ:
        evt.pre_remain_time = request['desk'].timeout_info['game_start'] - int(
            time.time())
    else:
        evt.pre_remain_time = -1

    for player in request['desk'].player_group.valid_players:
        evt_info_list = evt.users.add()
        if req.status == 2 or req.status == 3:
            evt_info_list.status = player.delete_status
        else:
            evt_info_list.status = player.status
        evt_info_list.piaofen = player.piaofen
        evt_info_list.shanghuo = player.shanghuo
        evt_info_list.uin = player.uin

    logger.debug('status evt:\n%s', evt)
    evt = evt.SerializeToString()
    for player in request['desk'].player_group.valid_players:
        write_to_users(player.uin, proto.CMD_PLAYER_STATUS_CHANGE, evt)
Example #8
    def add_cli(self, username: str, port: str, tree_sent: str, transport):

        tree_database = database.find_user(username, 'pass')

        if tree_database is None:
            logger.info('user {} does not exist'.format(username))
            return None

        ts = tree.Tree(tree_sent)
        td = tree.Tree(tree_database)

        cli = Client(username, port, ts, transport)

        if username not in self.clients:
            self.clients[username] = [cli]
        else:
            self.clients[username].append(cli)
            logger.debug("new instance of client {}".format(username))
        self.transports[transport] = cli

        tree.Tree.process_subtree(cli, ts.raw_tree, td.raw_tree)

        # TODO: think about client conflicts + database sync between instances
        logger.debug('tree has been processed for {}'.format(username))

        database.save_new_tree(username, ts.raw_tree)

        return cli
Example #9
 def get(self,tablename):
     try:
         session = Session()
         M = make_table_model(tablename.encode('utf-8'))   
         #filter_args = [ getattr(M,k)==v[0] for k,v in self.request.arguments.items()]
         _params = {k: v[0] for k, v in self.request.arguments.items()}
         logger.info(tablename+str(_params))
         filter_args = utils.get_filter_args(M, _params)
         if filter_args:
             models =  session.query(M).filter(*filter_args).order_by(desc('id')).limit(100)
         else:
             models =  session.query(M).order_by(desc('id')).limit(100)
         logger.debug(models)
         models = [ [ getattr(model,c.name) for c in M.__table__._columns] for model in models]
         clms = map(lambda x:x.name, M.__table__._columns)
         # hide the primary_key 
         result = map(lambda x: dict(zip(clms[1:], x[1:])), models)
         for item in result:
             for k in item:
                 if  type(item[k])==datetime:
                     item[k] = item[k].strftime("%Y-%m-%d %H:%M:%S")
                 elif type(item[k])==unicode:
                     item[k] = item[k].strip()
         self.write(json.dumps(result))
     except BaseException:
         self.write(json.dumps({'msg': 'Request Error'}))
         logger.error(traceback.format_exc())
Example #10
    def add_cli(self, username: str, port: str, tree_sent: str, transport):

        tree_database = database.find_user(username, 'pass')

        if tree_database is None:
            logger.info('user {} does not exist'.format(username))
            return None

        ts = tree.Tree(tree_sent)
        td = tree.Tree(tree_database)

        cli = Client(username, port, ts, transport)

        if username not in self.clients:
            self.clients[username] = [cli]
        else:
            self.clients[username].append(cli)
            logger.debug("new instance of client {}".format(username))
        self.transports[transport] = cli

        tree.Tree.process_subtree(cli, ts.raw_tree, td.raw_tree)

        # TODO: think about client conflicts + database sync between instances
        logger.debug('tree has been processed for {}'.format(username))

        database.save_new_tree(username, ts.raw_tree)

        return cli
Example #11
 def send_websocket_result(command, result):
     log.debug('server returns %s = %s' % (command, result))
     websocket.WebSocket.send_message(None, {
         'command': command,
         'from': 'server',
         'result': result
     })
Example #12
def store_chunk(chk, data: bytes):
    if not os.path.exists(store_name):
        os.makedirs(store_name)

    with open(store_name + chk, "wb") as file:
        file.write(data)
        logger.debug("chunk written in {}".format(store_name + chk))
Example #13
 def gst_mainloop_thread(self):
     try:
         self.mainloop = GLib.MainLoop()
         self.mainloop.run()
         log.debug('gst mainloop exits')
     except:
         util.die('caught a gst mainloop exception, exiting..', 1, True)
Example #14
def get_inner_host_info(request):
    """

    :return: [{"ip": 10.10.0.10, "os_type": "linux", "hostname": "default"}...]
    """
    # fetch host info (ip, os_type, hostname) from the internal database
    logger.info(u"fetching host info (ip, os_type, hostname) from the internal database")
    client = get_client_by_request(request)
    kwargs = {
        "app_code": APP_ID,
        "app_secret": APP_TOKEN,
        "app_id": 1,
        "bk_token": request.COOKIES['bk_token']
    }
    result = client.bhcp.get_ip(kwargs)
    data = result['data']
    logger.debug(data)
    response_data = []
    for item in data:
        response_data.append({
            "ip": item['ip'],
            "os_type": item['os_type'],
            "hostname": item['hostname']
        })
    logger.debug(response_data)
    return response_data
Example #15
 def multicast_rx(self, message):
     device = "unknown"
     try:
         command = message['command']
         if command == 'get_server_socket':
             if message['version'] != util.LUDIT_VERSION:
                 raise Exception(
                     'server version is %s but client version is %s' %
                     (util.LUDIT_VERSION, message['version']))
             device = message['from']
             groupname, devicename = device.split(':')
             endpoint = self.play_sequencer.get_group(groupname).get_device(
                 devicename).get_endpoint()
             log.debug('sending tcp socket endpoint %s to device %s' %
                       (str(endpoint), device))
             self.multicast.send({
                 'command': 'server_socket',
                 'from': 'server',
                 'to': device,
                 'endpoint': str(endpoint)
             })
     except Exception as e:
         log.error('connection failed from unknown device %s (%s)' %
                   (device, str(e)))
         self.multicast.send({
             'command': 'server_socket',
             'from': 'server',
             'to': device,
             'endpoint': "None"
         })
Example #16
 def multicast_rx(self, message):
     if self.terminated:
         return
     device_id = "unknown"
     try:
         command = message['command']
         if command == 'get_server_socket':
             if message['version'] != util.LUDIT_VERSION:
                 raise Exception(
                     'server version is %s but client version is %s' %
                     (util.LUDIT_VERSION, message['version']))
             device_id = message['from']
             groupname, devicename = device_id.split(':')
             device = self.play_sequencer.get_group(groupname).get_device(
                 devicename)
             if device.connected:
                 log.warning(
                     f'ignoring second connection request from {device_id}, already have a device with that id'
                 )
                 self.multicast_tx('server_socket', device_id, 'endpoint',
                                   'None')
                 return
             endpoint = device.get_endpoint()
             log.debug('sending tcp socket endpoint %s to device %s' %
                       (str(endpoint), device_id))
             self.multicast_tx('server_socket', device_id, 'endpoint',
                               str(endpoint))
     except Exception as e:
         log.error('connection failed from unknown device %s (%s)' %
                   (device_id, str(e)))
         self.multicast_tx('server_socket', device_id, 'endpoint', 'None')
Example #17
    def __init__(self, file=None):
        errmsg = ''
        cfgdir = os.path.dirname(
            os.path.dirname(__file__)) + os.sep + 'config' + os.sep
        cfg = cp.ConfigParser()
        if file:
            self.cfg_path = cfgdir + file
        else:
            env = cfgdir + 'env.ini'
            cfg.read(env)
            self.cfg_path = cfgdir + 'envconf' + os.sep + cfg.get(
                'env', 'env') + '.ini'

        if os.path.exists(self.cfg_path):
            logger.debug('reading config file %s' % self.cfg_path)
            try:
                cfg.read(self.cfg_path)
                self.cfg = cfg
            except:
                errmsg = 'exception while reading config file'
                logger.error(errmsg)
        else:
            errmsg = 'invalid config file path'
            logger.error(errmsg)
        if errmsg:
            assert False, errmsg
Example #18
 def __enter__(self):
     logger.debug("Start class OutputData!")
     self.content_format = self.args.op_format.upper()
     self.output_type = self.args.op_type.upper()
     if self.output_type == "FILE":
         self.fp = open(self.args.op_file, "w")
         self.csv_list = []
     elif self.output_type == "KAFKA":
         # check topic exists
         self.topic = self.args.op_topic
         kafka_topic = NewTopic(name=self.topic,
                                num_partitions=1,
                                replication_factor=1)
         client = KafkaAdminClient(bootstrap_servers=self.args.op_bootstrap)
         try:
             client.delete_topics([self.topic])
             client.create_topics([kafka_topic])
         except KafkaError:
             logger.warn(
                 "delete or create kafka topic raised error, ignore it!")
         self.producer = KafkaProducer(
             bootstrap_servers=self.args.op_bootstrap)
     elif self.output_type in ("ES", "ELASTICSEARCH"):
         self.es = Elasticsearch(
             hosts=self.args.op_es_hosts,
             sniff_on_start=True,
             # sniff_on_connection_fail=True,
             sniffer_timeout=20,
             # http_auth=('user', 'secret')
         )
         self.es_index = self.args.op_index
     return self
Example #19
    def is_vcenter_in_secret_manager(self, vcenter_dns: str) -> bool:
        modified_vcenter_dns_name = vcenter_dns.rstrip(".")

        secrets_resp = self.__secrets_manager.list_secrets()
        secrets_list = secrets_resp["SecretList"]

        while "NextToken" in secrets_resp:
            secrets_resp = self.__secrets_manager.list_secrets(
                NextToken=secrets_resp["NextToken"])
            secrets_list.extend(secrets_resp["SecretList"])

        num_of_vcenter_secrets = len([
            secret for secret in secrets_list
            if (modified_vcenter_dns_name in secret["Name"]) and (
                secret["Name"].startswith(f"/rpcv/{self.__stage}"))
        ])

        if num_of_vcenter_secrets == 0:
            logger.debug(
                f"Vcenter DNS: {vcenter_dns} not found in the secrets manager")
            return False
        else:
            logger.debug(
                f"Vcenter DNS: {vcenter_dns} found in the secrets manager")
            return True
Example #20
    def set_volume(self, volume, local_cec=False):
        if local_cec:
            if self.filter_pipeline:
                log.debug('local cec volume %.3f' % volume)
                for channel in self.channel_list:
                    volume_element = self.filter_pipeline.get_by_name('vol%s' %
                                                                      channel)
                    volume_element.set_property('volume', volume)
        else:
            if volume is not None:
                self.user_volume = volume

            for channel in self.channel_list:
                channel_int = int(channel)
                balance = 1.0
                if channel_int == 0 and self.balance > 0.0:
                    balance = 1.0 - self.balance
                elif channel_int == 1 and self.balance < 0.0:
                    balance = 1.0 + self.balance

                channel_vol = max(
                    0.0005, self.user_volume * self.source_gain * balance)
                self.remote_streaming_volumes[channel_int] = channel_vol
                if self.filter_pipeline:
                    log.debug('channel %s volume %.3f' %
                              (channel, channel_vol))
                    volume_element = self.filter_pipeline.get_by_name('vol%s' %
                                                                      channel)
                    volume_element.set_property('volume', channel_vol)
Example #21
def store_chunk(chk, data: bytes):
    if not os.path.exists(store_name):
        os.makedirs(store_name)

    with open(store_name + chk, "wb") as file:
        file.write(data)
        logger.debug("chunk written in {}".format(store_name + chk))
Example #22
 def get_session(self):
     logger.debug('logging in to obtain the academic-affairs backend session')
     data = {
         'redirect': self.redirect,
         'username': self.username,
         'password': self.password,
         'isNextLoad': 'true'
     }
     session = requests.session()
     try:
         ret = session.post(url=self.url, data=data)
         if ret.status_code == 200:
             token = ret.json()['data']
             jumprst = session.get('%s?token=%s' % (self.redirect, token))
             if jumprst.status_code == 200:
                 return session
             else:
                 errmsg = 'login redirect failed'
                 logger.error(errmsg)
         else:
             errmsg = 'login failed, status code: %s' % ret.status_code
             logger.error(errmsg)
     except Exception as err:
         if str(err).find('Failed to establish a new connection') != -1:
             errmsg = 'failed to reach the login endpoint: could not establish a connection; check the endpoint address and network'
             logger.error(errmsg)
         else:
             errmsg = 'login raised an exception\n%s' % str(err)
             logger.error(errmsg)
     if errmsg:
         assert False, errmsg
Example #23
def classifyFactors(req_data):
    """ 
    Compute measure_list and mension_list.
    Each list entry is structured as (name, kind, cmd, x_y):
    name: the attribute name;
    kind: whether the column is text or numeric, 0 means
    cmd:  the aggregation operator, e.g. 'sum', 'avg'
    x_y:  which axis it belongs to, one of 'x', 'y' or 'group'
    """
    logger.debug("function classifyFactors() is called")

    axis_factor_list, group_factor_list = extractFactor(req_data)

    # find the text and time columns on the axes; their union is msn_factor_list

    msn_factor_list = [axis_factor for axis_factor in axis_factor_list \
                                if Protocol.NoneFunc == axis_factor.getProperty(Protocol.Func) \
                                    or 2 == axis_factor.getProperty(Protocol.Kind)]
    msu_factor_list = [axis_factor for axis_factor in axis_factor_list \
                                if Protocol.NoneFunc != axis_factor.getProperty(Protocol.Func) \
                                    and 0 == axis_factor.getProperty(Protocol.Kind)]

    return {
        'msn':      msn_factor_list
        , 'msu':    msu_factor_list
        , 'group':  group_factor_list
    }
Example #24
    def updateRRD(self, rowobj):
        if str(rowobj["HTTP_CODE"]) == "200":
            unavailablevalue = 0
        else:
            unavailablevalue = 1
        FID = rowobj["FID"]

        time_rrdpath = RRDPATH + '/' + str(
            self.getURL(FID)) + '/' + str(FID) + '_' + str(
                self.rrdfiletype[0]) + '.rrd'
        download_rrdpath = RRDPATH + '/' + str(
            self.getURL(FID)) + '/' + str(FID) + '_' + str(
                self.rrdfiletype[1]) + '.rrd'
        unavailable_rrdpath = RRDPATH + '/' + str(
            self.getURL(FID)) + '/' + str(FID) + '_' + str(
                self.rrdfiletype[2]) + '.rrd'

        try:
            rrdtool.updatev(
                time_rrdpath, '%s:%s:%s:%s:%s:%s' %
                (str(rowobj["DATETIME"]), str(rowobj["NAMELOOKUP_TIME"]),
                 str(rowobj["CONNECT_TIME"]), str(rowobj["PRETRANSFER_TIME"]),
                 str(rowobj["STARTTRANSFER_TIME"]), str(rowobj["TOTAL_TIME"])))
            rrdtool.updatev(
                download_rrdpath, '%s:%s' %
                (str(rowobj["DATETIME"]), str(rowobj["SPEED_DOWNLOAD"])))
            rrdtool.updatev(
                unavailable_rrdpath,
                '%s:%s' % (str(rowobj["DATETIME"]), str(unavailablevalue)))
            logger.debug(rrdtool.last(time_rrdpath))
            self.setMARK(rowobj["ID"])
        except Exception as e:
            logger.error('Update rrd error:' + str(e))
Example #25
def genWidgetImageData(req_data, hk):
    """
    Generate the data returned to the front end.
    """
    logger.debug("function genWidgetImageData() is called")
    st = SqlExecutorMgr.stRestore(hk)

    # handle maps as a special case first
    if 'china_map' == req_data.get(u'graph') or \
            'world_map' == req_data.get(u'graph'):
        data = formatData('', '', '', '', req_data.get(u'graph'))
        return {u'type': 'map', u'data': data}

    shape_list, shape_in_use    = judgeWhichShapes(req_data)
    shape_in_use                = req_data.get(u'graph', u'bar')

    # gather the data required to draw the chart
    factors_lists_dict = classifyFactors(req_data)
    sql_obj         = transReqDataToSqlObj(req_data, st)
    result          = st.conn.execute(sql_obj).fetchall()
    data_from_db    = cleanDataFromDb(result)
    strf_data_from_db = strfDataAfterFetchDb(data_from_db)
    echart_data     = formatData(strf_data_from_db, factors_lists_dict['msu'], \
                                    factors_lists_dict['msn'], factors_lists_dict['group'], \
                                    shape_in_use)

    return {u'type': shape_in_use, u'data': echart_data}
Example #26
def fast_execute_script(bk_biz_id,
                        script_type=None,
                        script_content=None,
                        account="root",
                        ip_list=[],
                        request=None,
                        user=None):
    """Fast-execute a script.

    :param bk_biz_id: business ID
    :param script_type: script type: 1 (shell), 2 (bat), 3 (perl), 4 (python), 5 (Powershell)
    :param script_content: script content, Base64 encoded; if both script_id and script_content are passed, script_id takes precedence
    :param request:
    :param user:
    :return:
    """
    client, kwargs = client_and_common_args(request, user)
    kwargs['script_type'] = script_type
    kwargs['bk_biz_id'] = bk_biz_id
    kwargs['ip_list'] = ip_list
    kwargs['account'] = account
    kwargs['script_timeout'] = 3
    if script_content:
        kwargs['script_content'] = base64.encodestring(script_content)
    result = client.job.fast_execute_script(kwargs)
    logger.debug('fast execute script, result is {}'.format(result))
    if result['result'] is False:
        logger.warning('fast execute script, msg is {}'.format(result))
    # add a way to fetch the execution result
    return result['result'], result['data'], result['message']
Example #27
def makeupFilterSql(filter_list):
    """
    Generate the corresponding SQL from the filter list.
    """
    logger.debug("function makeupFilterSql() is called")

    if type(filter_list) != list \
        or 0 == len(filter_list):
        return u''

    sens = []
    for filter_dict in filter_list:
        property = filter_dict.get(u'property')
        calc     = filter_dict.get(u'calc', '')

        val_list = json.loads( filter_dict.get(u'val_list') )

        lll = []
        for x in val_list:
            x = x if isinstance(x, unicode) else unicode(x)
            lll.append( property + u'=' + x ) 

        sens.append( u' or '.join(lll) ) 
    
    return u'where ' + u' and '.join(sens)
Example #28
def online_users(request):
    dict_desk_id = defaultdict(int)
    for _, desk_type, desk_list in MjDeskMgrController.get_all_desks():
        dict_desk_id[desk_list[0]] += desk_list[1]

    keys = rds.keys(MemUser.KEY_PRE + '*')

    # logger.debug('keys:%s, user_json_list:%s', keys, user_json_list)
    hall_online = 0
    desk_online = 0
    for key in keys:
        user_dict = rds.hgetall(key)
        logger.debug('user_dict:%s, status:%s', user_dict,
                     user_dict.get('status'))
        if 'status' not in user_dict:
            continue
        if user_dict.get('status') == str(config.LOGIN_USER_STATUS_ONLINE):
            hall_online += 1
        if user_dict.get('status') == str(config.LOGIN_USER_STATUS_PLAYING):
            desk_online += 1

    # logger.debug('total_online : %s', total_online)
    list_room_id = [(k, dict_desk_id[k]) for k in sorted(dict_desk_id.keys())]

    # there is no heartbeat yet to keep the status field up to date, so a desk may have players while the total count is 0
    return render(
        request, 'online_info.html',
        dict(
            type_items=list_room_id,
            hall_online=hall_online,
            desk_online=desk_online,
        ))
Example #29
def dump():

    for user, chks in users.items():
        logger.debug('user {} has {}'
                     .format(user.username, tuple(c.pretty() for c in chks)))  # TODO: use builtin instead of comprehension
    for chks, usrs in chunks.items():
        logger.debug('chk {} is owned by {}'.format(chks.pretty(), usrs))
Example #30
def get_task_ip_log(client, task_instance_id, user_name):
    kwargs = {
        "app_code": APP_ID,
        "app_secret": APP_TOKEN,
        "username": user_name,
        "task_instance_id": task_instance_id
    }
    result = client.job.get_task_ip_log(kwargs)
    logger.debug(result["data"][0])
    if result["result"]:
        if result["data"][0]["isFinished"]:
            # return_result = [{"result":False,"ips":''},{"result":True,"ips":''}]
            return_result=[]
            log_content = []
            logger.debug(result["data"][0]["stepAnalyseResult"])
            logger.debug(result["data"][0]["stepAnalyseResult"][0]["resultType"])
            logger.debug(len(result["data"][0]["stepAnalyseResult"]))
            for i in result["data"][0]["stepAnalyseResult"]:
                if i["resultType"] != 9:
                    logger.error(u"script execution failed, error codes follow:")
                    logger.error(i["resultType"])
                    logger.error(i["resultTypeText"])
                    return_result.append({"result":False,"ips":[u["ip"] for u in i["ipLogContent"]]})
                else:
                    log_content += i["ipLogContent"]
                    return_result.append({"result": True, "ips": [(u["ip"], u['logContent']) for u in i["ipLogContent"]]})
                    logger.debug(return_result)
            return return_result
        else:
            import time
            time.sleep(10)
            return get_task_ip_log(client, task_instance_id, user_name)
    else:
        logger.error(result["message"])
        return ""
Example #31
def upload_file(request):
    # logger.debug("enter")
    if request.method == 'POST':
        logger.debug("enter upload_file")
        path_root = "/data/release/download"  # root directory for uploaded files
        myFile = request.FILES.get("file", None)  # the uploaded file; None if nothing was uploaded
        if not myFile:
            dstatus = "Please select a file to upload!"
        else:
            path_ostype = os.path.join(path_root, request.POST.get("ostype"))
            path_version = os.path.join(path_ostype,
                                        str(config.APP_UPDATE_VERSION))
            # better not to rename the file like this; keep whatever name it has
            # if request.POST.get("ostype") == 'code':
            #     myFile.name = 'game_code_{0}.zip'.format(config.APP_UPDATE_VERSION)
            path_dst_file = os.path.join(path_version, myFile.name)
            if os.path.isfile(path_dst_file):
                dstatus = "%s already exists!" % (myFile.name)
            else:
                if not os.path.isdir(path_version):
                    os.mkdir(path_version)

                with open(path_dst_file, 'wb+') as destination:  # open the target file for binary writing
                    for chunk in myFile.chunks():  # write the upload in chunks
                        destination.write(chunk)
                dstatus = "%s uploaded successfully!" % (myFile.name)
        return HttpResponse(str(dstatus))

    return render(request, 'upload_file.html')
Example #32
    def _create_ordered_tfrecords(save_d_path, f_name, data, batch_size,
                                  tgt_len):
        save_path = os.path.join(save_d_path, f_name)
        record_writer = tf.io.TFRecordWriter(save_path)
        batched_data = TFRecordMaker._batchify(data, batch_size)
        num_batch = 0
        for t in range(0, batched_data.shape[1] - 1, tgt_len):
            cur_tgt_len = min(batched_data.shape[1] - 1 - t, tgt_len)
            if cur_tgt_len < tgt_len:
                break

            if num_batch % 500 == 0:
                logger.debug("  processing batch {}".format(num_batch))
            for idx in range(batch_size):
                inputs = batched_data[idx, t:t + cur_tgt_len]
                labels = batched_data[idx, t + 1:t + cur_tgt_len + 1]

                # features dict
                feature = {
                    "inputs": _int64_feature(inputs),
                    "labels": _int64_feature(labels),
                }

                example = tf.train.Example(features=tf.train.Features(
                    feature=feature))
                record_writer.write(example.SerializeToString())

            num_batch += 1

        record_writer.close()
        logger.debug("Done writing {}. batches: {}".format(f_name, num_batch))

        return f_name, num_batch
Example #33
    def _check_intersections_related_to_airports(self, departure_airport,
                                                 destination_airport):
        intersections = []

        sections = self._sections_from_airports(departure_airport,
                                                destination_airport)
        longitude_based = Airport.should_be_longitude_based(
            departure_airport, destination_airport)
        follow_ascending_order = Airport.follow_ascending_order(
            departure_airport, destination_airport)

        bbox = bounding_box_related_to_airports(departure_airport,
                                                destination_airport)
        sorting_key = (lambda x: reference_point(x, longitude_based))
        sorting_reverse = (not follow_ascending_order)
        cells = self._stsc.cells_within_bounding_box(
            bbox, sorting_key=sorting_key, sorting_reverse=sorting_reverse)

        if not cells or not sections:
            return intersections

        iter_sections, iter_cells = iter(sections), iter(cells)
        section, cell = next(iter_sections), next(iter_cells)

        def move_section_iterator(section, cell):
            return ((follow_ascending_order and section.section_point <
                     reference_point(cell, longitude_based))
                    or (not follow_ascending_order and section.section_point >
                        reference_point(cell, longitude_based)))

        while True:
            try:
                distance = (ObstacleDetector.distance_between_section_and_cell(
                    section, cell))

                if distance < cell.radius:
                    intersection = (ObstacleDetector.
                                    _intersection_between_section_and_cell(
                                        section, cell))
                    if intersection.flight_ids:
                        intersections.append(intersection)
                    section = next(iter_sections)
                else:
                    if move_section_iterator(section, cell):
                        # section is placed before cell
                        section = next(iter_sections)
                    else:
                        cell = next(iter_cells)
            except StopIteration:
                break

        merged_intersections = self.merge_intersections_with_the_same_convection_cell(
            intersections)
        logger.debug(
            'Found following intersections {0} from departure {1} to destination {2}'
            .format(merged_intersections, departure_airport,
                    destination_airport))

        return merged_intersections
Example #34
def send_payload(ip, port, data):

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((ip, port))

    final = format_cmd(b'CSTR ' + data)
    logger.debug('sending: {}'.format(log.nomore(final)))
    sock.send(final)
Example #35
def send_cmd(msg):

    if type(msg) is str:
        msg = msg.encode()

    final_cmd = format_cmd(msg)
    logger.debug('sending: {}'.format(log.nomore(final_cmd)))
    server_transport.write(final_cmd)
Example #36
def formatData(data_from_db, msu_factor_list, msn_factor_list, group_list, shape_in_use):
    """
    Format the data.
    """
    logger.debug("function formatData() is called")

    echart = EChartManager().get_echart(shape_in_use)
    return echart.makeData(data_from_db, msu_factor_list, msn_factor_list, group_list)
Example #37
def send_payload(ip, port, data):

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((ip, port))

    final = format_cmd(b'CSTR ' + data)
    logger.debug('sending: {}'.format(log.nomore(final)))
    sock.send(final)
Example #38
def connect_send_logger(*args, **kw):
    global count
    if count < 15:
        count += 1
        logger.debug(*args, **kw)
    else:
        count = 0
        logger.error(*args, **kw)
Example #39
 def broadcast_new_configuration(self):
     log.debug('ws: sending current configuration')
     current_config = self.play_sequencer.current_configuration()
     _ = json.dumps(current_config, indent=4, sort_keys=True)
     self.ws.send_message(None, {
         'command': 'configuration',
         'current_conf': current_config
     })
Example #40
def send_cmd(msg):

    if type(msg) is str:
        msg = msg.encode()

    final_cmd = format_cmd(msg)
    logger.debug('sending: {}'.format(log.nomore(final_cmd)))
    server_transport.write(final_cmd)
Example #41
def remove_chunk(chk):
    if chk not in chunks:
        logger.debug('chunk {} does not exist'.format(chk))
        return

    for cli in chunks[chk]:
        users[cli].remove(chk)

    del chunks[chk]
Example #42
def FADD(client, args):
    logger.debug('fadd : {}'.format(args))
    tr = json.loads(args.decode())

    directory, filename = client.find_in_tree(tr)

    engine.FADD(directory, 'client', filename, tr, client)

    logger.info('new user tree has been saved for {}'.format(client.username))
    database.save_new_tree(client.username, client.user_tree.raw_tree)
Example #43
def remove_user(frm):

    if frm not in users:
        logger.debug('user {} has zero chunks'.format(frm.username))
        return

    for chk in users[frm]:
        chunks[chk].remove(frm)
        keep_chunk_alive(frm, chk)

    del users[frm]
Example #44
def add_user(who, hashes: list):

    logger.debug('{} has {} chunks to register'.
                 format(who.username, len(hashes)))

    for h in hashes:
        if h == '':
            logger.warn('{} sent invalid chunk hash'.format(who.username))
            continue
        h = Hash(h)
        register_chunk(h, who)
        keep_chunk_alive(who, h)
Example #45
    def send_cmd(self, msg):

        if self.transport is None:
            logger.error('client has no transport registered')
            return

        if type(msg) is str:
            msg = msg.encode()

        final_cmd = msg[:5] + bytes(str(len(msg) - 5) + ' ', 'ascii') + msg[5:]
        logger.debug('to {}: {}'.format(self.username, final_cmd.decode()))
        self.transport.write(final_cmd)
Example #46
def send_chunk_to(client: hash.Hash, chk):
    chk = check_for_string(chk)

    from_cli = chunk.get_chunk_owner(chk)
    if from_cli is None:
        logger.warn('could not find any user hosting {}'.format(chk.pretty()))
        return

    logger.debug('{} is being sent from {} to {}'
                 .format(chk.pretty(), from_cli.username, client.username))
    protocol.send_CSND(from_cli, client, 1, chk)
    protocol.send_CSND(client, from_cli, 0, chk)
    chunk.register_chunk(chk, client)
Example #47
def CSTR(data):

    logger.debug('CSTR for {} bytes'.format(len(data), data))

    # TODO: store only if we are waiting for it
    hasher = hashlib.sha256()
    hasher.update(data)
    chk = hasher.hexdigest()

    if chk in chunk.chunk_awaiting:
        with open(chunk.chunk_awaiting[chk], "wb") as file:
            file.write(data)
        del chunk.chunk_awaiting[chk]
    else:
        chunk.store_chunk(chk, data)
Example #48
def save_records(records):
    session = Session()
    for r in records:
        try:
            r = eval(r)
            tablename = r['tablename']
            data = r['data']
            M = make_table_model(tablename.encode('utf-8'))   
            m = M()
            for k,v in data.items():
                setattr(m,k,v)
            session.add(m)
            logger.debug('%s save %s'%(tablename,str(m)))
        except Exception:
            logger.error(traceback.format_exc())
Example #49
def parse(cmd, size, args):

    class Command:

        def __init__(self, fcall, isstr):
            self.function_call = fcall
            self.is_string = isstr

    cmds = {b'FADD': Command(FADD, True),
            b'FUPT': Command(FUPT, True),
            b'CSND': Command(CSND, True),
            # TODO: rethink the whole stuff
            b'CSTR': Command(CSTR, False),
            b'CDEL': Command(CDEL, True)}

    logger.debug('somebody sent {} with args {}'.format(cmd, log.nomore(args)))
    cmds[cmd].function_call(args)
Example #50
def login(client):

    json_tree = tree.usertree
    my_chks = list(chunk.my_chunks.keys())
    my_store = []  # TODO: later read the content of .store

    hashes = 'None' if len(my_chks) == 0 else ':'.join(my_chks + my_store)
  
    try:
        network.send_cmd('JOIN {} {} {} {}'
                         .format(client.username, client.port,
                                 hashes, json.dumps(json_tree)))
    except Exception as e:
        logger.error('2: {} was raised'.format(log.nomore(e)))
        for l in traceback.format_tb(e.__traceback__):
            logger.debug(l)
        raise e
Example #51
def transReqDataToSqlObj(req_data, st):
    """
    Get the charting parameters.
    """
    logger.debug("function transReqDataToSqlObj() is called")

    # first see how many text-type and numeric-type attributes the request contains
    factors_lists_dict = classifyFactors(req_data)

    # select from the database the data needed to draw this chart
    axis_factor_list = factors_lists_dict['msu'] + factors_lists_dict['msn'] 
    group_factor_list = factors_lists_dict['group']

    sql_obj = st.getSwither().makeSelectSql( \
            **mapFactorToSqlPart(axis_factor_list, group_factor_list))

    return sql_obj
Example #52
def FADD(directory, from_who, filename, tree, client):
    logger.debug('user {} adding {} from {}'
                 .format(client.username, tree['path'], from_who))

    if from_who == 'server':
        protocol.send_FADD(tree, client)
    elif tree['kind'] != 0:
        hsh = chunk.register_chunk(hash.Hash(tree['unique_hash']), client)
        chunk.keep_chunk_alive(client, hsh)

        for instance in shared.climanager.clients[client.username]:
            if instance is not client:
                FADD(directory, 'server', filename, tree, instance)
    else:
        pass  # TODO: handle adding a directory with some content

    directory[filename] = tree
Example #53
    def connection_made(self, transp):
        
        NetworkClient.connection_made(self, transp)

        global server_transport

        logger.info("connected to master server")
        server_transport = transp

        try:
            protocol.login(self)
        except Exception as e:
            logger.error(e)
            logger.error('{} was raised'.format(log.nomore(e)))
            for l in traceback.format_tb(e.__traceback__):
                logger.debug(l)
            raise e
Example #54
    def data_received(self, data):

        def parse(cmd, size, args):
            self.parse_cmd(cmd, size, args, self.transp)

        if self.incoming_bytes > 0:
            self.data_buffer += data
            self.incoming_bytes -= len(data)
            logger.debug("waiting for {}".format(self.incoming_bytes))
            if self.incoming_bytes <= 0:
                if self.incoming_bytes < 0:
                    logger.warn("incoming_bytes should not be less than zero")
                self.incoming_bytes = 0
                parse(b'CSTR', 0, self.data_buffer)
                self.data_buffer = bytes()

        else:
            bytes_left = data
            while len(bytes_left) > 0:
                parsed = bytes_left.split(b' ', 2)
                if len(parsed) < 3:
                    logger.warn('invalid command {}'.format(bytes_left))
                    return

                cmd = parsed[0]
                size = parsed[1]
                args = parsed[2]
                # TODO: handle other commands as well
                size_int = int(size.decode())
                if cmd == b'CSTR' and size_int > len(args):
                    self.data_buffer = args
                    self.incoming_bytes = size_int - len(self.data_buffer)
                    break
                else:
                    args_temp = args[:size_int]
                    bytes_left = args[size_int:]
                    args = args_temp
                    try:
                        parse(cmd, size, args)
                    except Exception as e:
                        logger.error(e)
                        for l in traceback.format_tb(e.__traceback__):
                            logger.error(l)
Example #55
def register_chunk(chk, cli):

    if isinstance(chk, str):
        chk = Hash(chk)

    logger.debug("registering chunk {} for {}".format(chk.pretty(), cli.username))

    # TODO: use smart dict

    if chk not in chunks:
        chunks[chk] = {cli}
    else:
        chunks[chk].add(cli)

    if cli not in users:
        users[cli] = {chk}
    else:
        users[cli].add(chk)

    return chk
Example #56
def handleDraw(request):
    """
    Get the data needed to draw the chart.
    """
    req_data = json.loads(request.POST.get('data', '{}'), 
                                object_pairs_hook=OrderedDict)

    rsu = checkExtentData(req_data)
    if not rsu[0]:
        return MyHttpJsonResponse({'succ': False, 'msg': rsu[1]})

    hk = request.session.get('hk')
    try:
        producer = DrawDataProducer(hk)
        result = producer.produce(req_data)
    except Exception as e:
        logger.debug("catch Exception: %s" % e)
        logExcInfo()
        error_dict = {'succ': False, 'msg': str(e)}
        return MyHttpJsonResponse(error_dict)
Example #57
    def get(self, key):
        '''
        Retrieve an element from the cache using its key. If it is not found,
        use the retrieval_method to find it
        '''
        try:
            results, node = self.elements[key]
        except KeyError:
            logger.debug('Cache miss for key "{0}"'.format(key))
            results = yield self.retrieval_method(key)

            # Check that another request hasn't added the key already
            try:
                results, node = self.elements[key]
            except KeyError:
                self._add_to_cache(key, results)
                raise Return(results)

        logger.debug('Cache hit for key "{0}"'.format(key))
        self._reorder_lru(node)
        raise Return(results)
Example #58
def make_query(name, params={}, default_conf={}):
    session = Session()
    Schema = make_table_model("system_chartschema")
    schema = session.query(Schema).filter(Schema.name == name).one()
    schema = yaml.load(schema.schema)

    XModel, xfield = get_field(schema[name]["X"])
    y = schema[name]["Y"]
    time_interval = schema[name].get("interval", 12)

    field_query = []
    filter_args = []
    group_by = []

    for table_field, options in y.items():
        if options is None:
            options = {}
        Model, field = get_field(table_field)
        filter_args = get_filter_args(Model, params, time_interval)
        if options and options.get("func"):
            function = getattr(func, options["func"])
            field = function(field)

        field_query.append(field)
    field_query.append(xfield)
    query = session.query(*field_query)
    filter_function = getattr(query, "filter")

    query = filter_function(*filter_args)
    if schema[name].get("group"):
        fields = get_group_fields(schema[name]["group"])
        query = query.group_by(*fields)
    query = query.order_by(asc("datetime"))
    logger.debug(str(query))
    option = schema[name].get("option", {})
    option.update(default_conf)
    result = _parse_query(query, field_query, option)
    session.close()
    return json.dumps(result)
Example #59
def loop(protocol_factory, ip, port):

    loop = asyncio.get_event_loop()

    # Each client connection will create a new protocol instance
    coro = loop.create_server(protocol_factory, ip, port)
    try:
        server = loop.run_until_complete(coro)

        # Serve requests until Ctrl+C is pressed
        logger.info('Serving on {}'.format(server.sockets[0].getsockname()))
        loop.run_forever()
    except Exception:
        logger.debug("something happened")

    try:
        # Close the server
        server.close()
        loop.run_until_complete(server.wait_closed())
        loop.close()

    except KeyboardInterrupt:
        logger.warn('keyboard interrupt')
        exit(1)
Example #60
    def post(self,tablename):
        logger.info(str(self.request.arguments))
        cli = self.redis_cli

        apidata = self.get_arguments('apidata')
        if apidata:
            logger.debug(str(apidata))
            data_list = json.loads(apidata[0])
            for data in data_list:
                cli.rpush('ag:post',{'tablename':tablename.encode('utf-8'),'data':data})
        else:
            data = { k:v[0] for k,v in self.request.arguments.items()}
            logger.debug('redis cli start rpush %s'%time.time())
            cli.rpush(CACHE_NAME,{'tablename':tablename.encode('utf-8'),'data':data})
            logger.debug('redis cli end rpush %s'%time.time())
        self.write({'status':'OK'})