Example #1
    def __init__(self, configfile, spool=None):
        # avoid a shared mutable default argument; create a fresh
        # SoundPool per instance unless one is supplied
        if spool is None:
            spool = SoundPool()

        # load configuration file
        with open(configfile, "r") as f:
            data = f.read()
        root = XmlEt.fromstring(data).find("Ambient")

        # set the name of the ambient
        self.name = root.get("name")

        LOGGER.logInfo("Ambient '{}'".format(self.name))

        # set the update rate from the volatility
        self.urate = 1.0 / float(root.get("volatility"))
        self.urate = constrain(self.urate, 0.0, 5.0)

        # flag indicating whether ambient is currently running
        self.loaded = False
        self.running = False

        # load sounds and sound configuration
        self.sounds = list()
        self.spool = spool
        for soundcfg in root.findall("Sound"):
            sfile = soundcfg.get("file")
            base = float(soundcfg.get("base"))
            drift = float(soundcfg.get("drift"))

            self.sounds.append((sfile, base, drift))

            LOGGER.logInfo("'{}': [{}] +/- ({})".format(sfile, base, drift))
Example #2
    def __init__(self, vocab_file, max_size):
        self._word_to_id = {}
        self._id_to_word = {}
        self._count = 0

        counter = 0
        with open(vocab_file, 'rb') as vocab_f:
            for line in vocab_f:
                counter += 1
                if counter % 1000 == 0:
                    LOGGER.debug("processing line %d", counter)

                pieces = line.split()
                # guard against blank lines before indexing pieces[0]
                if not pieces:
                    sys.stderr.write('bad line: %s\n' % line)
                    continue

                word = pieces[0].strip()
                if word in self._word_to_id:
                    raise ValueError('duplicated word: %s' % word)

                self._word_to_id[word] = self._count
                self._id_to_word[self._count] = word
                self._count += 1

                if self._count > max_size:
                    raise ValueError('too many words: >%d' % max_size)

        assert self.check_vocab(PAD_TOKEN) > 0
        assert self.check_vocab(UNKNOWN_TOKEN) >= 0
        assert self.check_vocab(SENTENCE_START) > 0
        assert self.check_vocab(SENTENCE_END) > 0
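
A hedged usage sketch: the vocabulary file format is inferred from the parsing above (one entry per line, the first whitespace-separated field is the word), PAD_TOKEN is a module-level constant, and judging by the asserts check_vocab presumably returns the word's id:

vocab = Vocab('vocab.txt', max_size=200000)
pad_id = vocab.check_vocab(PAD_TOKEN)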
Example #3
    def __init__(self, filename):
        if not os.path.isfile(filename):
            raise IOError("No such file: " + filename)
        # TODO check if file is actual sound file

        # internal state
        self.loaded = False

        # attributes
        self.volume = 1.0
        self.length = None

        # commands
        self.cstop = False
        self.cstart = False
        self.cvolume = False

        # internal sound object
        self.sobj = None

        # init lock around object
        self.lock = Lock()

        # remember own filename
        self.filename = filename

        LOGGER.logDebug("loading: " + filename)

        # load the sound
        Thread(target=self.__load).start()
Example #4
def convert_text_to_binary():
    """convert text data to binary

    input data format:
    each line looks like:
    article=<d> <p> <s> word1 word2 ... </s> <s> ... </s> </p> ... </d>\tabstract=<d> <p> <s> ... </s> </p> ... </d>
    """
    text_data_path = FLAGS.in_file
    binary_data_path = FLAGS.out_file

    assert text_data_path and binary_data_path, 'filename of text data or binary data should be provided'

    if not gfile.Exists(binary_data_path):
        LOGGER.debug('convert text to binary format: %s => %s', text_data_path, binary_data_path)

        reader = open(text_data_path, mode='rb')
        writer = open(binary_data_path, mode='wb')

        for line in reader:
            tf_example = example_pb2.Example()
            for feature in line.strip().split(FLAGS.feature_separator):
                (k, v) = feature.split('=')
                tf_example.features.feature[k].bytes_list.value.extend([v])

            tf_example_str = tf_example.SerializeToString()
            str_len = len(tf_example_str)
            writer.write(struct.pack('q', str_len))
            writer.write(struct.pack('%ds' % str_len, tf_example_str))

        writer.close()
        reader.close()
    else:
        LOGGER.error('binary data already exists: %s', binary_data_path)
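
The records written above are length-prefixed: an 8-byte signed length (struct format 'q') followed by the serialized tf.Example bytes. A minimal sketch of reading a single record back (Example #54 does this in a loop):

with open(binary_data_path, 'rb') as reader:
    len_bytes = reader.read(8)
    if len_bytes:
        str_len = struct.unpack('q', len_bytes)[0]
        tf_example = example_pb2.Example.FromString(reader.read(str_len))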
Example #5
async def reply(client, message):
    try:
        inline = await client.get_inline_bot_results(Config.BOT_USERNAME,
                                                     "ETHO_ORUTHAN_PM_VANNU")
        m = await client.send_inline_bot_result(message.chat.id,
                                                query_id=inline.query_id,
                                                result_id=inline.results[0].id,
                                                hide_via=True)
        old = Config.msg.get(message.chat.id)
        if old:
            await client.delete_messages(message.chat.id,
                                         [old["msg"], old["s"]])
        Config.msg[message.chat.id] = {
            "msg": m.updates[1].message.id,
            "s": message.message_id
        }
    except BotInlineDisabled:
        LOGGER.error(
            f"Error: Inline Mode for @{Config.BOT_USERNAME} is not enabled. Enable from @Botfather to enable PM Permit."
        )
        await message.reply(
            f"{Config.REPLY_MESSAGE}\n\n<b>You can't use this bot in your group, for that you have to make your own bot from the [SOURCE CODE](https://github.com/subinps/VCPlayerBot) below.</b>",
            disable_web_page_preview=True)
    except Exception as e:
        LOGGER.error(e, exc_info=True)
Example #6
 def _http_query(self, query, timeout=None):
     """
     Query Transmission through HTTP.
     """
     headers = {'x-transmission-session-id': str(self.session_id)}
     result = {}
     request_count = 0
     if timeout is None:
         timeout = self._query_timeout
     while True:
         LOGGER.debug(json.dumps({'url': self.url, 'headers': headers, 'query': query, 'timeout': timeout}, indent=2))
         try:
             result = self.http_handler.request(self.url, query, headers, timeout)
             break
         except HTTPHandlerError as error:
             if error.code == 409:
                 LOGGER.info('Server responded with 409, trying to set session-id.')
                 if request_count > 1:
                     raise TransmissionError('Session ID negotiation failed.', error)
                 session_id = None
                 for key in list(error.headers.keys()):
                     if key.lower() == 'x-transmission-session-id':
                         session_id = error.headers[key]
                         self.session_id = session_id
                         headers = {'x-transmission-session-id': str(self.session_id)}
                 if session_id is None:
                     debug_httperror(error)
                     raise TransmissionError('Unknown conflict.', error)
             else:
                 debug_httperror(error)
                 raise TransmissionError('Request failed.', error)
         request_count += 1
     return result
Example #7
 def _rpc_version_warning(self, version):
     """
     Add a warning to the log if the Transmission RPC version is lower than the provided version.
     """
     if self.rpc_version < version:
         LOGGER.warning('Using feature not supported by server. RPC version for server %d, feature introduced in %d.'
                        % (self.rpc_version, version))
Example #8
async def service_msg(client, message):
    if message.service == 'voice_chat_started':
        Config.IS_ACTIVE = True
        k = scheduler.get_job(str(Config.CHAT),
                              jobstore=None)  #scheduled records
        if k:
            await start_record_stream()
            LOGGER.info("Resuming recording..")
        elif Config.WAS_RECORDING:
            LOGGER.info(
                "Previous recording ended unexpectedly; resuming now."
            )
            await start_record_stream()  #for unscheduled
        a = await client.send(
            GetFullChannel(channel=(await client.resolve_peer(Config.CHAT))))
        if a.full_chat.call is not None:
            Config.CURRENT_CALL = a.full_chat.call.id
        LOGGER.info("Voice chat started.")
        await sync_to_db()
    elif message.service == 'voice_chat_scheduled':
        LOGGER.info("VoiceChat Scheduled")
        Config.IS_ACTIVE = False
        Config.HAS_SCHEDULE = True
        await sync_to_db()
    elif message.service == 'voice_chat_ended':
        Config.IS_ACTIVE = False
        LOGGER.info("Voicechat ended")
        Config.CURRENT_CALL = None
        if Config.IS_RECORDING:
            Config.WAS_RECORDING = True
            await stop_recording()
        await sync_to_db()
Example #9
 def __init__(self,  dim1, dim2, initializer, expand=True):
     """The initializer can be an array, an object with [][]
     accessor, a file path (string), a single floating point number
     within [0,1] (the array is uniformly initialized to the same
     value), or a user-provided callable that takes two integers x
     and y in [0, dim1[ and [0, dim2[ respectively, and returns the
     value to be stored in the array at [x][y]. The optional
     parameter expand affects the case where the initializer is a
     callable, an object with __getitem__, or a single number. In
     those cases, setting expand to False prevents the
     precomputation of the whole array, and the InputSample
     accessor encapsulates the function call, the object accessor,
     or always returns the given number. If expand is True, the
     InputSample created is mutable. If expand is False, the
     InputSample is immutable."""
     self._array = []
     self._getitem = lambda k: self._array[k]
     self._setitem = self._assign_to_array
     if isinstance(initializer, basestring):
         try:
             self._array = read_input_data(initializer, dim1, dim2)
         except IOError as e:
             LOGGER.error("Could not read file %s.", initializer)
             raise e
     elif isinstance(initializer, types.FileType):
         raise TypeError("Pass a string with the filepath to the " 
                         "InputSample initializer, instead of a "
                         "file descriptor.")
     elif isinstance(initializer, list): 
         self._array = initializer
     elif hasattr(initializer, '__getitem__'):
         if expand:
             for x in xrange(dim1):
                 self._array.append([])
                 for y in xrange(dim2):
                     self._array[x].append(initializer[x][y])
         else:
             self._array = initializer
             self._setitem = self._raise_immutable
     elif hasattr(initializer, '__call__'): 
         # to restrict to functions:
         # isinstance(initializer, 
         #            (types.FunctionType, types.BuiltinFunctionType))
         if expand:
             for x in xrange(dim1):
                 self._array.append([])
                 for y in xrange(dim2):
                     self._array[x].append(initializer(x,y))
         else:
             class InitCont(object):
                 def __init__(self, x):
                     self._x = x
                 def __getitem__(self, y): 
                     return initializer(self._x, y)
             self._getitem = lambda x: InitCont(x)
             self._setitem = self._raise_immutable
     self._dim1 = dim1
     self._dim2 = dim2
     if expand:
         verify_input_array(self._array, dim1, dim2)
Example #10
 def _rpc_version_warning(self, version):
     """
     Add a warning to the log if the Transmission RPC version is lower than the provided version.
     """
     if self.rpc_version < version:
         LOGGER.warning('Using feature not supported by server. RPC version for server %d, feature introduced in %d.'
             % (self.rpc_version, version))
Example #11
    def get_reference(self, avro_type):
        if avro_type not in self.types:
            msg = 'No schema for type %s' % (avro_type)
            LOGGER.error(msg)
            raise KeyError(msg)

        return self.types.get(avro_type).get_reference()
Example #12
    def _watch_threads(self):
        while True:
            time.sleep(60)

            input_threads = []
            for t in self._input_threads:
                if t.is_alive():
                    input_threads.append(t)
                else:
                    LOGGER.error('found input thread dead')
                    new_t = Thread(target=self._fill_input_queue)
                    input_threads.append(new_t)
                    input_threads[-1].daemon = True
                    input_threads[-1].start()

            self._input_threads = input_threads

            bucketing_threads = []
            for t in self._bucketing_threads:
                if t.is_alive():
                    bucketing_threads.append(t)
                else:
                    LOGGER.error('found bucketing thread dead')
                    new_t = Thread(target=self._fill_bucket_input_queue)
                    bucketing_threads.append(new_t)
                    bucketing_threads[-1].daemon = True
                    bucketing_threads[-1].start()

            self._bucketing_threads = bucketing_threads
Example #13
def json_pack(snippets_dir, video_name, frame_width, frame_height, label='unknown', label_index=-1):
    sequence_info = []
    p = Path(snippets_dir)
    LOGGER.info(snippets_dir)
    for path in p.glob(video_name+'*.json'):
        json_path = str(path)
        LOGGER.info(path)
        frame_id = int(path.stem.split('_')[-2])
        frame_data = {'frame_index': frame_id}
        with open(json_path) as json_f:
            data = json.load(json_f)
        skeletons = []
        for person in data['people']:
            score, coordinates = [], []
            skeleton = {}
            keypoints = person['pose_keypoints_2d']
            for i in range(0, len(keypoints), 3):
                coordinates += [keypoints[i]/frame_width, keypoints[i + 1]/frame_height]
                score += [keypoints[i + 2]]
            skeleton['pose'] = coordinates
            skeleton['score'] = score
            skeletons += [skeleton]
        frame_data['skeleton'] = skeletons
        sequence_info += [frame_data]

    video_info = dict()
    video_info['data'] = sequence_info
    video_info['label'] = label
    video_info['label_index'] = label_index

    return video_info
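
A hedged usage sketch (paths, dimensions, and label values are illustrative): packing the per-frame OpenPose snippet files of one video into a single skeleton-sequence record and saving it.

video_info = json_pack('snippets/', 'video_001', 1920, 1080, label='walking', label_index=7)
with open('video_001.json', 'w') as out_f:
    json.dump(video_info, out_f)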
Example #14
def main(unused_argv):
    LOGGER.info('load vocab')
    vocab = data.Vocab(FLAGS.vocab_path, FLAGS.vocab_max_size)

    batch_size = FLAGS.batch_size
    if FLAGS.mode == 'decode':
        batch_size = FLAGS.beam_size

    hyper_params = HYPER_PARAMS(
        mode=FLAGS.mode,
        batch_size=batch_size,
        num_hidden=FLAGS.num_hidden,
        emb_dim=FLAGS.emb_dim,
        enc_layers=FLAGS.enc_layers,
        enc_timesteps=FLAGS.enc_timesteps,
        dec_timesteps=FLAGS.dec_timesteps,
        max_grad_norm=FLAGS.max_grad_norm,
        num_softmax_samples=FLAGS.num_softmax_samples,
        min_input_len=FLAGS.min_input_len,
        min_lr=FLAGS.min_lr,
        lr=FLAGS.lr
    )

    batch_reader = BatchReader(
        data_path=FLAGS.data_path,
        vocab=vocab,
        hyper_params=hyper_params,
        article_key=FLAGS.article_key,
        abstract_key=FLAGS.abstract_key,
        max_article_sentences=FLAGS.max_article_sentences,
        max_abstract_sentences=FLAGS.max_abstract_sentences,
        bucketing=FLAGS.use_bucketing,
        truncate_input=FLAGS.truncate_input
    )

    tf.set_random_seed(FLAGS.random_seed)

    config = tf.ConfigProto(
        gpu_options={"allow_growth": True},  # 按需增长
        device_count={"GPU": 2},  # limit to 2 GPU usage
        allow_soft_placement=True,
        inter_op_parallelism_threads=1,  # Nodes that perform blocking operations are enqueued on a pool
        intra_op_parallelism_threads=2  # The execution of an individual op (for some op types)
    )

    if FLAGS.mode == 'train':
        model = Seq2SeqAttentionModel(hyper_params, vocab, num_gpus=FLAGS.num_gpus)
        _train(model, config, batch_reader)
    elif FLAGS.mode == 'eval':
        model = Seq2SeqAttentionModel(hyper_params, vocab, num_gpus=FLAGS.num_gpus)
        _eval(model, config, batch_reader, vocab)
    elif FLAGS.mode == 'decode':
        # HYPER_PARAMS is presumably a namedtuple, so use _replace rather
        # than attribute assignment (namedtuple fields are read-only)
        decode_mdl_hps = hyper_params._replace(dec_timesteps=1)

        model = Seq2SeqAttentionModel(decode_mdl_hps, vocab, num_gpus=FLAGS.num_gpus)
        decoder = BeamSearchDecoder(model, batch_reader, hyper_params, vocab)
        decoder.decode_loop()
    else:
        LOGGER.error('unsupported mode: %s', hyper_params.mode)
Example #15
 def _http_query(self, query, timeout=None):
     """
     Query Transmission through HTTP.
     """
     headers = {'x-transmission-session-id': str(self.session_id)}
     result = {}
     request_count = 0
     if timeout is None:
         timeout = self._query_timeout
     while True:
         LOGGER.debug(
             json.dumps({'url': self.url, 'headers': headers, 'query': query, 'timeout': timeout}, indent=2))
         try:
             result = self.http_handler.request(self.url, query, headers, timeout)
             break
         except HTTPHandlerError as error:
             if error.code == 409:
                 LOGGER.info('Server responded with 409, trying to set session-id.')
                 if request_count > 1:
                     raise TransmissionError('Session ID negotiation failed.', error)
                 session_id = None
                 for key in list(error.headers.keys()):
                     if key.lower() == 'x-transmission-session-id':
                         session_id = error.headers[key]
                         self.session_id = session_id
                         headers = {'x-transmission-session-id': str(self.session_id)}
                 if session_id is None:
                     debug_httperror(error)
                     raise TransmissionError('Unknown conflict.', error)
             else:
                 debug_httperror(error)
                 raise TransmissionError('Request failed.', error)
         request_count += 1
     return result
Example #16
 def handle_read(self):
     """Handle read message."""
     try:
         buff = self.recv(BUFFER_SIZE)
         self.append_recv_buffer += buff
         self.process_molo_tcp_pack()
     except Exception as e:
         LOGGER.info("recv error: %s", e)
Example #17
 def on_token_expired(self, jdata):
     """Handle on_token_expired json packet."""
     LOGGER.debug('on_token_expired %s', str(jdata))
     if 'Payload' not in jdata:
         return
     data = jdata['Payload']
     self.client_token = data['token']
     self.update_notify_state(data)
Example #18
 def handle_accept(self):
     pair = self.accept()
     if pair is not None:
         sock, addr = pair
         LOGGER.info('incoming connection from %s', repr(addr))
         handler = LocalConnection(sock)
         handler.server = self
         self.conn_list.append(handler)
Example #19
 def handle_close(self):
     """Called when the connection is closed; cleans up after itself."""
     self.clear()
     LOGGER.debug("local session closed(%d)", id(self))
     # pop() returns the paired remote session; popping first and then
     # calling get() on the same key would always yield None
     remote_session = MOLO_CLIENT_APP.remote_session_dict.pop(id(self), None)
     if remote_session:
         remote_session.handle_close()
     self.close()
Example #20
 def on_req_proxy(self, jdata):
     """Handle on_req_proxy json packet."""
     LOGGER.debug("on_req_proxy, %s, %s, %s, %s", self.host, self.port,
                  self.tunnel['lhost'], self.tunnel['lport'])
     remotesession = RemoteSession(self.client_id, self.host, self.port,
                                   self.tunnel['lhost'],
                                   self.tunnel['lport'],
                                   MOLO_CLIENT_APP.async_map)
     remotesession.sock_connect()
Example #21
 def handle_close(self):
     """Called when the connection is closed; cleans up after itself."""
     LOGGER.debug("server closed(%d)", id(self))
     self.clear()
     # pop() returns the paired local session; popping first and then
     # calling get() on the same key would always yield None
     local_session = MOLO_CLIENT_APP.local_session_dict.pop(id(self), None)
     if local_session:
         local_session.handle_close()
     self.close()
Example #22
    def __update(self):
        if not self.running:
            return
        LOGGER.logDebug("'{}' update".format(self.name))

        for sound in self.sounds:
            sound.adaptVolume()

        Timer(self.urate, self.__update).start()
Example #23
    def switch(self, ambient_id):
        if self.ambient is not None:
            self.ambient.stop()

        # switch to new ambient
        self.ambient = self.ambients[ambient_id]

        LOGGER.logInfo("Switched to ambient '{}'".format(
            self.ambient.getName()))
Example #24
 def handle_connect(self):
     """Called when the connection is established."""
     LOGGER.debug("server connected")
     self.append_connect = False
     domain = MOLO_CONFIGS.get_config_object().get('domain', '')
     self.send_dict_pack(
         MoloSocketHelper.molo_auth(CLIENT_VERSION,
                                    MOLO_CLIENT_APP.hass_context, '1.0.0',
                                    domain))
Example #25
    def on_auth_resp(self, jdata):
        """Handle on_auth_resp json packet."""
        LOGGER.debug('on_auth_resp %s', str(jdata))
        self.client_id = jdata['Payload']['ClientId']

        self.send_dict_pack(
            MoloSocketHelper.req_tunnel(self.tunnel['protocol'],
                                        self.tunnel['hostname'],
                                        self.tunnel['subdomain'],
                                        self.tunnel['rport'], self.client_id))
Example #26
 def on_bind_status(self, jdata):
     """Handle on_bind_status json packet."""
     LOGGER.debug("on_bind_status %s", str(jdata))
     jpayload = jdata['Payload']
     self.client_status = jpayload['Status']
     jpayload['token'] = self.client_token
     if self.client_status == CLIENT_STATUS_BINDED:
         self.update_notify_state(jpayload, STAGE_AUTH_BINDED)
     elif self.client_status == CLIENT_STATUS_UNBINDED:
         self.update_notify_state(jpayload, STAGE_SERVER_CONNECTED)
Example #27
def rectilinear_shape(population):
    try:
        pos = population.positions
    except Exception:
        LOGGER.warning(("Could not retrieve units positions for population "
                        "%s; assuming square shape."), population.label)
        if not is_square(population.size):
            # format the message before raising; passing the label as a
            # separate argument would not interpolate it into the message
            raise TypeError("The shape of population %s is not square and "
                            "could neither be retrieved nor guessed."
                            % population.label)
        dim1 = dim2 = int(math.sqrt(population.size))
Example #28
    def handle_close(self):
        """Called when the connection is closed; cleans up after itself."""
        LOGGER.debug("server closed")
        self.clear()
        data = {}
        self.update_notify_state(data, STAGE_SERVER_UNCONNECTED)
        self.close()

        # close all and restart
        asyncore.close_all()
Example #29
 def on_start_proxy(self, jdata):
     """Handle Start Proxy."""
     LOGGER.debug("on_start_proxy %s", str(jdata))
     localsession = LocalSession(self.lhost, self.lport,
                                 MOLO_CLIENT_APP.async_map)
     MOLO_CLIENT_APP.local_session_dict[id(self)] = localsession
     MOLO_CLIENT_APP.remote_session_dict[id(localsession)] = self
     LOGGER.debug("remote local (%d)<->(%d)", id(self), id(localsession))
     localsession.sock_connect()
     self.tranparency = True
     self.process_tranparency_pack()
Example #30
    def stop(self):
        if not self.loaded:
            return

        LOGGER.logInfo("'{}' stop".format(self.name))

        for sound in self.sounds:
            sound.stop()

        # indicate stop
        self.running = False
Example #31
    def _gen_text(self, example):
        while True:
            ex = six.next(example)
            try:
                article_text = self._get_example_feature_text(ex, self._article_key)
                abstract_text = self._get_example_feature_text(ex, self._abstract_key)
            except ValueError:
                LOGGER.error('failed to get article or abstract from example')
                continue

            yield (article_text, abstract_text)
Example #32
def main(unused_argv):
    assert FLAGS.command

    if FLAGS.command == 'build_vocab':
        build_vocab()
    elif FLAGS.command == 'build_binary':
        convert_text_to_binary()
    elif FLAGS.command == 'build_text':
        convert_binary_to_text()
    else:
        LOGGER.error('unsupported command: %s', FLAGS.command)
Example #33
def play():
    global AMB_ID
    if request.method == 'POST':
        try:
            AMB_ID = request.form['id']
            AMBC.switch(AMB_ID)
            AMBC.get().start()
        except Exception as e:
            AMB_ID = -1
            LOGGER.logError(e)
    return redirect(url_for('.index'))
Example #34
 def process_molo_tcp_pack(self):
     """Handle received TCP packet."""
     ret = True
     while ret:
         ret = self.molo_tcp_pack.recv_buffer(self.append_recv_buffer)
         if ret and self.molo_tcp_pack.error_code == MoloTcpPack.ERR_OK:
             self.process_json_pack(self.molo_tcp_pack.body_jdata)
         self.append_recv_buffer = self.molo_tcp_pack.tmp_buffer
     if self.molo_tcp_pack.error_code == MoloTcpPack.ERR_MALFORMED:
         LOGGER.error("tcp pack malformed!")
         self.handle_close()
Example #35
 def process_tranparency_pack(self):
     """Handle transparency packet."""
     localsession = MOLO_CLIENT_APP.local_session_dict.get(id(self))
     if not localsession:
         LOGGER.debug(
             "process_tranparency_pack() localsession session not found")
         self.handle_close()
         return
     if self.append_recv_buffer:
         localsession.send_raw_pack(self.append_recv_buffer)
         self.append_recv_buffer = bytes()
Example #36
 def process_new_tunnel(self, jdata):
     """Handle new tunnel."""
     jpayload = jdata['Payload']
     self.client_id = jpayload['clientid']
     self.client_token = jpayload['token']
     LOGGER.debug("Get client id:%s token:%s", self.client_id,
                  self.client_token)
     data = {}
     data['clientid'] = self.client_id
     data['token'] = self.client_token
     self.update_notify_state(data, STAGE_SERVER_CONNECTED)
Example #37
 def handle_read(self):
     """Handle read message."""
     buff = self.recv(BUFFER_SIZE)
     if not buff:
         return
     remotesession = MOLO_CLIENT_APP.remote_session_dict.get(id(self))
     if not remotesession:
         LOGGER.error("LocalSession handle_read remove session not found")
         self.handle_close()
         return
     LOGGER.debug("local session handle_read %s", buff)
     remotesession.send_raw_pack(buff)
Example #38
async def stream(client, m: Message):
    with suppress(MessageIdInvalid, MessageNotModified):
        msg = await m.reply("Checking the recived input.")
        if m.reply_to_message and m.reply_to_message.text:
            link = m.reply_to_message.text
        elif " " in m.text:
            text = m.text.split(" ", 1)
            link = text[1]
        else:
            k = await msg.edit("Provide a link to stream!")
            await delete_messages([m, k])
            return
        regex = r"^(?:https?:\/\/)?(?:www\.)?youtu\.?be(?:\.com)?\/?.*(?:watch|embed)?(?:.*v=|v\/|\/)([\w\-_]+)\&?"
        match = re.match(regex, link)
        if match:
            stream_link = await get_link(link)
            if not stream_link:
                k = await msg.edit("This is an invalid link.")
                await delete_messages([m, k])
                return
        else:
            stream_link = link
        try:
            is_audio_ = await is_audio(stream_link)
        except Exception:
            is_audio_ = False
            LOGGER.error("Unable to get Audio properties within time.")
        if not is_audio_:
            k = await msg.edit(
                "This is an invalid link; provide a direct link or a YouTube link."
            )
            await delete_messages([m, k])
            return
        try:
            dur = await get_duration(stream_link)
        except Exception:
            dur = 0
        if dur != 0:
            k = await msg.edit("This is not a live stream, Use /play command.")
            await delete_messages([m, k])
            return
        k, msg_ = await stream_from_link(stream_link)
        if k is False:
            k = await msg.edit(msg_)
            await delete_messages([m, k])
            return
        if Config.msg.get('player'):
            await Config.msg['player'].delete()
        Config.msg['player'] = await msg.edit(
            f"[Streaming]({stream_link}) Started. ㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤㅤ",
            disable_web_page_preview=True,
            reply_markup=await get_buttons())
        await delete_messages([m])
Example #39
def handle_error(e):
    """
    A catch-all error handler that catches any unhandled exception and returns
    an error message to the end user.
    """
    if isinstance(e, (AuthError, InvalidUsage)):
        response = jsonify(e.to_dict())
        response.status_code = e.code
    else:
        LOGGER.error('Internal Error: {}'.format(traceback.format_exc()))
        response = jsonify(error_response('Internal Error',
                                          'internal_error', 500))
        response.status_code = 500
    return response
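
A hedged sketch of wiring this handler into a Flask app (the jsonify calls above suggest Flask), so that every uncaught exception is funneled through it:

from flask import Flask

app = Flask(__name__)
app.register_error_handler(Exception, handle_error)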
Example #40
 def get(self, _id):
     """
     Return a single Task given an id.
     :param _id: the id of the Task to return.
     :return: a Task
     """
     try:
         response = self.service.find_by_id(_id)
         respond_success(self, response, 200)
     except MediaProError as error:
         respond_error(self, error)
     except Exception as e:
         LOGGER.error(e, exc_info=True)
         error = MediaProError(MediaProError.GENERIC)
         respond_error(self, error)
Example #41
def _load_articles(fname):
    articles = []
    with open(fname, 'rb') as fp:
        counter = 0
        for line in fp:
            if counter % 1000 == 0:
                LOGGER.debug('loading article: %d' % counter)

            counter += 1

            line = line.strip().split('\001')
            doc_id = line[0]
            doc_url = line[1]
            sen_id = line[3]
            sen = line[4]
            words = [w.strip() for w in line[5].split("(")[-1].split(")")[0].split(",")]

            articles.append([doc_id, doc_url, sen_id, sen, words])

    articles = sorted(articles, key=lambda a: a[0])

    # articles:
    # {
    #   doc_id: {
    #       url: '',
    #       sen: {
    #           sen_id: sen
    #       },
    #       words: [
    #           (sen_id, words)
    #       ]
    #   }
    # }

    grouped_articles = {}
    for (doc_id, doc_url), group in groupby(articles, lambda a: (a[0], a[1])):
        sen = {g[2]: (g[3], g[4]) for g in group}
        words = [(sen_id, _words) for sen_id, (_, _words) in sen.items()]

        cur_article = {
            'url': doc_url,
            'sen': sen,
            'words': words
        }

        grouped_articles[doc_id] = cur_article

    return grouped_articles
Example #42
 def delete(self, _id, key):
     """
     Deletes a certain first level key from the payload if it exists.
     :param _id: the id of the Task to delete its payload
     :param key: the key to delete
     :return: the modified Task
     """
     try:
         task = self.service.remove_payload_key_by_task_id(_id, key)
         respond_success(self, task)
     except MediaProError as error:
         respond_error(self, error)
     except Exception as e:
         LOGGER.error(e, exc_info=True)
         error = MediaProError(MediaProError.GENERIC)
         respond_error(self, error)
Example #43
 def post(self):
     """
     Creates a new Task.
     :return:  the Task created.
     """
     try:
         task = JSONDecoder().decode(self.request.body)
         task_id = self.service.insert(task).inserted_id
         task['_id'] = task_id
         respond_success(self, task)
     except MediaProError as error:
         respond_error(self, error)
     except Exception as e:
         LOGGER.error(e, exc_info=True)
         error = MediaProError(MediaProError.GENERIC)
         respond_error(self, error)
Example #44
 def patch(self, _id):
     """
     Updates a Task given one or more fields.
     :param _id: the id of the Task to update.
     :return: the Task updated.
     """
     try:
         task = JSONDecoder().decode(self.request.body)
         self.service.update_by_id(_id, task)
         respond_success(self, task)
     except MediaProError as error:
         respond_error(self, error)
     except Exception as e:
         LOGGER.error(e, exc_info=True)
         error = MediaProError(MediaProError.GENERIC)
         respond_error(self, error)
Example #45
 def post(self, _id):
     """
     Adds or replace payload first level key, and returns the modified task
     :param _id: the id of the Task to update its payload.
     :return: the modified Task
     """
     try:
         payload = JSONDecoder().decode(self.request.body)
         task = self.service.update_payload_by_task_id(_id, payload)
         respond_success(self, task)
     except MediaProError as error:
         respond_error(self, error)
     except Exception as e:
         LOGGER.error(e, exc_info=True)
         error = MediaProError(MediaProError.GENERIC)
         respond_error(self, error)
Example #46
    def sim_semantic_jaccard(self, s1, s2, alpha=0.6):
        assert isinstance(s1, list) and isinstance(s2, list), 'list is required for sentences'
        assert self._word2vec, 'word vec is required'

        n1 = len(s1)
        n2 = len(s2)

        if n1 < n2:
            s2, s1 = s1, s2
            n2, n1 = n1, n2

        LOGGER.debug('build word similar matrix')
        m = np.zeros((n1, n2))
        for i in xrange(n1):
            for j in xrange(n2):
                vs1i = np.array(self._word2vec.get(s1[i], self._unknown_vec))
                vs2j = np.array(self._word2vec.get(s2[j], self._unknown_vec))

                m[i, j] = self.cosine(vs1i, vs2j)

        LOGGER.debug('calculate similarity')
        numerator = 0.0
        while m.size > 0:
            max_m_i, max_m_j = np.unravel_index(m.argmax(), m.shape)
            max_m = m[max_m_i, max_m_j]

            if max_m < alpha:
                break

            numerator += max_m
            n_row, n_col = m.shape
            row = np.reshape(range(0, max_m_i) + range(max_m_i + 1, n_row), (-1, 1))
            col = range(0, max_m_j) + range(max_m_j + 1, n_col)

            if len(row) > 0 and len(col) > 0:
                m = m[row, col]
            else:
                m = np.array([[]])

        beta = m.size
        m_diff = (1 - m).sum()

        denominator = numerator + beta * m_diff
        if denominator > 0:
            return numerator / denominator
        else:
            return 1e-6
Example #47
def population_adpater_provider(pop_prov_dict,
                                provided_class,
                                population):
    """Factory function providing an adapter of the specified class
    for the population parameter. pop_prov_dict is a dictionary taking
    a (population, provided_class) tuple as key, and returning an
    instance of provided_class initialized with 3 arguments: the
    population, its size in the first dimension, and its size in the
    second dimension."""
    key = (population, provided_class)
    if key in pop_prov_dict:
        return pop_prov_dict[key]
    else:
        LOGGER.warning("No %s for population %s, creating one.",
                       provided_class.__name__, population.label)
        dim1, dim2 = rectilinear_shape(population)
        inst = provided_class(population, dim1, dim2)
    return pop_prov_dict.setdefault(key, inst)
Example #48
def _load_word2vec(fname, dim=256):
    length = dim + 1

    word2vec = {}

    with open(fname, 'rb') as fp:
        counter = 0
        for line in fp:
            if counter % 1000 == 0:
                LOGGER.debug('loading vector: %d' % counter)

            line = line.strip().split()
            if len(line) == length:
                word2vec[line[0]] = [float(v) for v in line[1:]]

            counter += 1

    return word2vec
Example #49
 def delete(self, _id):
     """
     Deletes a Task.
     :param _id: the id of the Task to delete.
     :return: a message if the deletion was successfully or not.
     """
     try:
         result = self.service.delete_by_id(_id)
         response = {
             'message': "The task with id: {0} has been deleted.".format(
                 _id)
         }
         if result.deleted_count == 0:
             response['message'] = "No task has been deleted. This could "\
                                   "due there is no task with such id or "\
                                   "the task has a RUNNING status."
         respond_success(self, response, 200)
     except Exception as e:
         LOGGER.error(e, exc_info=True)
         error = MediaProError(MediaProError.GENERIC)
         respond_error(self, error)
Example #50
def _eval(model, config, batch_reader, vocab=None):
    model.build_graph()

    saver = tf.train.Saver()
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir)
    sess = tf.Session(config=config)

    running_avg_loss = 0
    step = 0
    while True:
        time.sleep(FLAGS.eval_interval_secs)

        try:
            ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)
        except tf.errors.OutOfRangeError as e:
            LOGGER.error('cannot restore checkpoint: %s', e)
            continue

        if not (ckpt_state and ckpt_state.model_checkpoint_path):
            LOGGER.info('no model to eval yet at %s', FLAGS.train_dir)
            continue

        LOGGER.info('loading checkpoint %s', ckpt_state.model_checkpoint_path)
        saver.restore(sess, ckpt_state.model_checkpoint_path)

        (
            article_batch, abstract_batch, targets, article_lens, abstract_lens, loss_weights, _, _
        ) = batch_reader.next_batch()

        (summaries, loss, train_step) = model.eval(
            sess, config, article_batch, abstract_batch, targets, article_lens, abstract_lens, loss_weights
        )

        LOGGER.info('article:  %s', ' '.join(data.convert_ids_to_words(article_batch[0][:].tolist(), vocab)))
        LOGGER.info('abstract: %s', ' '.join(data.convert_ids_to_words(abstract_batch[0][:].tolist(), vocab)))

        summary_writer.add_summary(summaries, train_step)
        running_avg_loss = _running_avg_loss(running_avg_loss, loss, summary_writer, train_step)
        # increment step so the periodic flush below actually throttles;
        # the original never incremented it, so the flush ran every pass
        step += 1
        if step % 100 == 0:
            summary_writer.flush()
Example #51
def debug_httperror(error):
    """
    Log the Transmission RPC HTTP error.
    """
    try:
        data = json.loads(error.data)
    except ValueError:
        data = error.data
    LOGGER.debug(
        json.dumps(
            {
                'response': {
                    'url': error.url,
                    'code': error.code,
                    'msg': error.message,
                    'headers': error.headers,
                    'data': data,
                }
            },
            indent=2
        )
    )
Example #52
 def get(self):
     """
     Returns all tasks using pagination and filtering.
     Example of usage:
         /api/tasks?to=1469930541448&status=SCHEDULED&worker=task_worker_001
     Valid filter params are:
         page: int
         rows: int
         worker: str
         status: str
         from: int (timestamp)
         to: int (timestamp)
     :return: all tasks using pagination.
     """
     try:
         response = [doc for doc in self.service.find(self)]
         respond_success(self, response)
     except MediaProError as error:
         respond_error(self, error)
     except Exception as e:
         LOGGER.error(e, exc_info=True)
         error = MediaProError(MediaProError.GENERIC)
         respond_error(self, error)
Example #53
def build_vocab():
    """build vocab from raw data in text format.

    input data format:
    each line looks like:
    article=<d> <p> <s> word1 word2 ... </s> <s> ... </s> </p> ... </d>\tabstract=<d> <p> <s> ... </s> </p> ... </d>
    """

    data_path = FLAGS.in_file
    vocab_path = FLAGS.out_file

    assert data_path and vocab_path, 'filename of data and vocabulary should be provided'

    if not gfile.Exists(vocab_path):
        LOGGER.debug('build vocabulary from %s, storing it into %s', data_path, vocab_path)

        vocab = {}
        counter = 0

        reader = codecs.open(data_path, mode='rb', encoding='utf-8')

        for line in reader:
            counter += 1
            if counter % 1000 == 0:
                LOGGER.debug("processing line %d", counter)

            for feature in line.strip().split(FLAGS.feature_separator):
                (k, v) = feature.split('=')
                word_freq = {k: len(list(g)) for k, g in groupby(sorted(v.split())) if k not in SPECIAL_TOKENS}
                for word, freq in word_freq.items():
                    vocab[word] = vocab.get(word, 0) + freq

        reader.close()

        vocab = sorted(vocab.iteritems(), key=lambda kv: kv[1], reverse=True)
        vocab = [(k, v) for k, v in SPECIAL_TOKENS_FREQ.items()] + vocab

        with gfile.GFile(vocab_path, mode='wb') as vocab_file:
            for word, freq in vocab:
                # encode explicitly: word is unicode from the utf-8 reader,
                # so concatenating it with byte strings is fragile
                vocab_file.write(('%s\t%d\n' % (word, freq)).encode('utf-8'))
    else:
        LOGGER.error('vocabulary file already exists: %s', vocab_path)
Example #54
def convert_binary_to_text():
    """convert binary data to text

    output data format:
    each line looks like:
    article=<d> <p> <s> word1 word2 ... </s> <s> ... </s> </p> ... </d>\tabstract=<d> <p> <s> ... </s> </p> ... </d>
    """

    binary_data_path = FLAGS.in_file
    text_data_path = FLAGS.out_file

    assert binary_data_path and text_data_path, 'filename of binary data or text data should be provided'

    if not gfile.Exists(text_data_path):
        LOGGER.debug('convert binary to text format: %s => %s', binary_data_path, text_data_path)

        reader = open(binary_data_path, mode='rb')
        writer = codecs.open(text_data_path, mode='wb', encoding='utf-8')

        while True:
            len_bytes = reader.read(8)

            if not len_bytes:
                LOGGER.debug('done reading')
                break

            str_len = struct.unpack('q', len_bytes)[0]
            tf_example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]
            tf_example = example_pb2.Example.FromString(tf_example_str)
            examples = []
            for key in tf_example.features.feature:
                value = tf_example.features.feature[key].bytes_list.value[0]
                value = value.decode('utf-8')
                examples.append('%s=%s' % (key, value))

            writer.write('%s\n' % FLAGS.feature_separator.join(examples))

        writer.close()
        reader.close()
    else:
        LOGGER.error('text data already exists: %s', text_data_path)
Example #55
 def __init__(self, address='localhost', port=DEFAULT_PORT, user=None, password=None, http_handler=None, timeout=None):
     if isinstance(timeout, (integer_types, float)):
         self._query_timeout = float(timeout)
     else:
         self._query_timeout = DEFAULT_TIMEOUT
     urlo = urlparse(address)
     if urlo.scheme == '':
         base_url = 'http://' + address + ':' + str(port)
         self.url = base_url + '/transmission/rpc'
     else:
         if urlo.port:
             self.url = urlo.scheme + '://' + urlo.hostname + ':' + str(urlo.port) + urlo.path
         else:
             self.url = urlo.scheme + '://' + urlo.hostname + urlo.path
         LOGGER.info('Using custom URL "' + self.url + '".')
         if urlo.username and urlo.password:
             user = urlo.username
             password = urlo.password
         elif urlo.username or urlo.password:
             LOGGER.warning('Either user or password missing, not using authentication.')
     if http_handler is None:
         self.http_handler = DefaultHTTPHandler()
     else:
         if hasattr(http_handler, 'set_authentication') and hasattr(http_handler, 'request'):
             self.http_handler = http_handler
         else:
             raise ValueError('Invalid HTTP handler.')
     if user and password:
         self.http_handler.set_authentication(self.url, user, password)
     elif user or password:
         LOGGER.warning('Either user or password missing, not using authentication.')
     self._sequence = 0
     self.session = None
     self.session_id = 0
     self.server_version = None
     self.protocol_version = None
     self.get_session()
     self.torrent_get_arguments = get_arguments('torrent-get',
                                                self.rpc_version)
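
A hedged usage sketch (the class name Client is an assumption from context; host, port, and credentials are illustrative). Constructing the client connects immediately, since the constructor ends by calling get_session():

client = Client('localhost', port=9091, user='admin', password='secret')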
Example #56
    def _request(self, method, arguments=None, ids=None, require_ids=False, timeout=None):
        """
        Send json-rpc request to Transmission using http POST
        """
        if not isinstance(method, string_types):
            raise ValueError('request takes method as string')
        if arguments is None:
            arguments = {}
        if not isinstance(arguments, dict):
            raise ValueError('request takes arguments as dict')
        ids = parse_torrent_ids(ids)
        if len(ids) > 0:
            arguments['ids'] = ids
        elif require_ids:
            raise ValueError('request require ids')

        query = json.dumps({'tag': self._sequence, 'method': method,
                            'arguments': arguments})
        self._sequence += 1
        start = time.time()
        http_data = self._http_query(query, timeout)
        elapsed = time.time() - start
        LOGGER.info('http request took %.3f s' % (elapsed))

        try:
            data = json.loads(http_data)
        except ValueError as error:
            LOGGER.error('Error: ' + str(error))
            LOGGER.error('Request: \"%s\"' % (query))
            LOGGER.error('HTTP data: \"%s\"' % (http_data))
            raise

        LOGGER.debug(json.dumps(data, indent=2))
        if 'result' in data:
            if data['result'] != 'success':
                raise TransmissionError('Query failed with result \"%s\".' % (data['result']))
        else:
            raise TransmissionError('Query failed without result.')

        results = {}
        if method == 'torrent-get':
            for item in data['arguments']['torrents']:
                results[item['id']] = Torrent(self, item)
                if self.protocol_version == 2 and 'peers' not in item:
                    self.protocol_version = 1
        elif method == 'torrent-add':
            item = None
            if 'torrent-added' in data['arguments']:
                item = data['arguments']['torrent-added']
            elif 'torrent-duplicate' in data['arguments']:
                item = data['arguments']['torrent-duplicate']
            if item:
                results[item['id']] = Torrent(self, item)
            else:
                raise TransmissionError('Invalid torrent-add response.')
        elif method == 'session-get':
            self._update_session(data['arguments'])
        elif method == 'session-stats':
            # older versions of Transmission return the data in "session-stats"
            if 'session-stats' in data['arguments']:
                self._update_session(data['arguments']['session-stats'])
            else:
                self._update_session(data['arguments'])
        elif method in ('port-test', 'blocklist-update', 'free-space', 'torrent-rename-path'):
            results = data['arguments']
        else:
            return None

        return results
Example #57
    def rank(self, sentences, theta=0.5):
        LOGGER.debug('build sentences similarity matrix')
        n_sentences = len(sentences)

        graph = np.zeros((n_sentences, n_sentences))  # adj-matrix

        for i in xrange(n_sentences):
            for j in xrange(i+1, n_sentences):
                weight = self.sim_word_embedding(sentences[i][1], sentences[j][1])

                if weight >= theta:
                    graph[i, j] = weight
                    graph[j, i] = weight
        nx_graph = nx.from_numpy_matrix(graph)

        D = nx_graph
        if not D.is_directed():
            D = D.to_directed()

        W = self._right_stochastic_graph(D)
        N = W.number_of_nodes()

        # power iteration
        # x = (1-d) + d * x' * w
        x = dict.fromkeys(W, 1.0 / N)
        if self._using_matrix:
            x = x.values()
            w = np.zeros((N, N))
            for (u, v, _w) in W.out_edges(data=True):
                w[u][v] = _w['weight']

            for i in xrange(self._max_iter):
                x_last = x
                x = 1 - self._alpha + self._alpha * np.matmul(x_last, w)

                delta = x - x_last

                err = np.linalg.norm(delta)
                LOGGER.debug('iter: %d, err: %.5f' % (i, err))
                if err < N * self._tol:
                    return sorted(
                        [(x[n], sentences[n][0]) for n in xrange(len(x))], key=lambda v: v[0], reverse=True
                    )
        else:
            for i in xrange(self._max_iter):
                x_last = x

                x = dict.fromkeys(x_last.keys(), 0)
                for n in x:
                    sum_in_nbr = sum([w['weight'] * x_last.get(u, 0.0) for (u, _, w) in W.in_edges(n, data=True)])
                    x[n] = 1 - self._alpha + self._alpha * sum_in_nbr

                # check convergence
                err = sum([abs(x[n] - x_last[n]) for n in x])
                LOGGER.debug('iter: %d, err: %.5f' % (i, err))
                if err < N * self._tol:
                    return sorted(
                        [(r, sentences[n][0]) for n, r in x.items()], key=lambda v: v[0], reverse=True
                    )

        raise nx.NetworkXError('text-rank: power iteration failed to converge in %d iterations' % self._max_iter)
Example #58
    def _fill_input_queue(self):
        sentence_start_id = self._vocab.word_to_id(data.SENTENCE_START)
        sentence_end_id = self._vocab.word_to_id(data.SENTENCE_END)
        pad_id = self._vocab.word_to_id(data.PAD_TOKEN)

        input_gen = self._gen_text(data.gen_example(self._data_path))

        while True:
            (article, abstract) = six.next(input_gen)
            article_sentences = [sent.strip() for sent in data.convert_paragraph_to_sentences(article, False)]
            abstract_sentences = [sent.strip() for sent in data.convert_paragraph_to_sentences(abstract, False)]

            enc_inputs = []
            dec_inputs = [sentence_start_id]  # use the <s> as the <GO> symbol for decoder inputs

            # convert first N sentences to word ids, stripping existing <s> and </s>
            for i in xrange(min(self._max_article_sentences, len(article_sentences))):
                enc_inputs += data.convert_words_to_ids(article_sentences[i], self._vocab)

            for i in xrange(min(self._max_abstract_sentences, len(abstract_sentences))):
                dec_inputs += data.convert_words_to_ids(abstract_sentences[i], self._vocab)

            # filter out too-short input
            if len(enc_inputs) < self._min_input_len or len(dec_inputs) < self._min_input_len:
                LOGGER.warning('drop an example - too short. enc: %d, dec: %d', len(enc_inputs), len(dec_inputs))
                continue

            if not self._truncate_input:
                if len(enc_inputs) > self._enc_timesteps or len(dec_inputs) > self._dec_timesteps:
                    LOGGER.warning('drop an example - too long. enc: %d, dec: %d', len(enc_inputs), len(dec_inputs))
                    continue
            else:
                if len(enc_inputs) > self._enc_timesteps:
                    enc_inputs = enc_inputs[:self._enc_timesteps]

                if len(dec_inputs) > self._dec_timesteps:
                    dec_inputs = dec_inputs[:self._dec_timesteps]

            # targets is dec_inputs without <s> at beginning, plus </s> at end
            targets = dec_inputs[1:]
            targets.append(sentence_end_id)

            enc_input_len = len(enc_inputs)
            dec_output_len = len(targets)

            # pad if necessary
            enc_inputs += [pad_id] * (self._enc_timesteps - len(enc_inputs))
            dec_inputs += [sentence_end_id] * (self._dec_timesteps - len(dec_inputs))
            targets += [sentence_end_id] * (self._dec_timesteps - len(targets))

            # 'enc_input dec_input target enc_len dec_len origin_article origin_abstract'
            element = MODEL_INPUT(
                enc_input=enc_inputs,
                dec_input=dec_inputs,
                target=targets,
                enc_len=enc_input_len,
                dec_len=dec_output_len,
                origin_article=' '.join(article_sentences),
                origin_abstract=' '.join(abstract_sentences)
            )

            self._input_queue.put(element)