Example #1
  def do_whois(self, args):
    '''Show information about a user; the argument is the user's nick'''
    if len(args) != 1:
      self.msg.reply(u'Error: whose information do you want to look up?')
      return

    u = get_user_by_nick(args[0])
    if u is None:
      self.msg.reply(u'Sorry, no such user.')
      return

    now = datetime.datetime.now()
    status = u.avail
    addtime = (u.add_date + timezone).strftime('%Y-%m-%d %H:%M').decode('utf-8')
    allowpm = u'no' if u.reject_pm else u'yes'
    if u.snooze_before is not None and u.snooze_before > now:
      status += u' (snoozing)'
    if u.black_before is not None and u.black_before > now:
      status += u' (muted)'
    r = []
    r.append(u'Nick:\t%s' % u.nick)
    if self.sender.is_admin:
      r.append(u'JID:\t%s' % u.jid)
    r.append(u'Status:\t%s' % status)
    r.append(u'Message count:\t%d' % u.msg_count)
    r.append(u'Total message size:\t%s' % utils.filesize(u.msg_chars))
    r.append(u'Joined:\t%s' % addtime)
    r.append(u'Accepts PMs:\t%s' % allowpm)
    r.append(u'Intro:\t%s' % u.intro)
    self.msg.reply(u'\n'.join(r).encode('utf-8'))
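Example #1 looks the target up with get_user_by_nick, which is not part of this listing. Since Example #2 queries the same User model through App Engine GQL, it is presumably a thin wrapper along the following lines; the exact query is an assumption for illustration, not the project's actual code.

  def get_user_by_nick(nick):
    '''Hypothetical helper: return the first User with this nick, or None.'''
    return User.gql('WHERE nick = :1', nick).get()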
Example #2
 def do_chatty(self, args):
   '''Ranking by message count'''
   r = []
   for u in User.gql('ORDER BY msg_count ASC'):
     m = u'* %s:\t%5d messages, %s in total' % (
       u.nick, u.msg_count,
       utils.filesize(u.msg_chars))
     r.append(m)
   n = len(r)
   r.insert(0, u'Ranking by message count:')
   r.append(u'%d users in total.' % n)
   self.msg.reply(u'\n'.join(r).encode('utf-8'))
Example #3
 def do_iam(self, args):
   '''Show your own information'''
   u = self.sender
   addtime = (u.add_date + timezone).strftime('%Y-%m-%d %H:%M').decode('utf-8')
   allowpm = u'no' if u.reject_pm else u'yes'
   r = []
   r.append(u'Nick:\t%s' % u.nick)
   r.append(u'JID:\t%s' % u.jid)
   r.append(u'Resources:\t%s' % u' '.join(u.resources))
   r.append(u'Message count:\t%d' % u.msg_count)
   r.append(u'Total message size:\t%s' % utils.filesize(u.msg_chars))
   r.append(u'Joined:\t%s' % addtime)
   r.append(u'Command prefix:\t%s' % u.prefix)
   r.append(u'Accepts PMs:\t%s' % allowpm)
   r.append(u'Intro:\t%s' % u.intro)
   self.msg.reply(u'\n'.join(r).encode('utf-8'))
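In Examples #1-#3, utils.filesize turns a character count into a human-readable size string (note that Example #4 reuses the same name for a helper that returns the numeric on-disk size of a file). The formatter itself is not included in these listings; a minimal sketch of such a function, with unit labels and rounding chosen only for illustration, could be:

  def filesize(n):
    '''Hypothetical helper: render a byte/character count as a readable string.'''
    for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB'):
      if n < 1024:
        return '%.1f %s' % (n, unit)
      n /= 1024.0
    return '%.1f PiB' % n

  # e.g. filesize(5 * 1024 * 1024) -> '5.0 MiB'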
Example #4
def convert_worker(target_format, message, url, config, bot):
    """Generic process spawned every time user sends a link or a file"""
    input_filename = "".join([config["temp_path"], utils.random_string()])
    output_filename = "".join(
        [config["temp_path"],
         utils.random_string(), ".", target_format])

    # Tell user that we are working
    status_message = bot.reply_to(message, text.starting, parse_mode="HTML")

    def update_status_message(new_text):
        bot.edit_message_text(chat_id=status_message.chat.id,
                              message_id=status_message.message_id,
                              text=new_text,
                              parse_mode="HTML")

    # Try to download URL
    try:
        r = requests.get(url, stream=True)
    except requests.RequestException:
        update_status_message(text.error.downloading)
        return

    # Check file size
    if int(r.headers.get("Content-Length", "0")) >= MAXIMUM_FILESIZE_ALLOWED:
        update_status_message(text.error.huge_file)
        return

    # Download the file
    update_status_message(text.downloading)
    chunk_size = 4096
    raw_input_size = 0
    try:
        with open(input_filename, "wb") as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                f.write(chunk)
                raw_input_size += len(chunk)
                # Keep counting so the size limit also applies when Content-Length is missing
                if raw_input_size >= MAXIMUM_FILESIZE_ALLOWED:
                    update_status_message(text.error.huge_file)
                    utils.rm(input_filename)
                    return
    except Exception:
        update_status_message(text.error.downloading)
        bot.reply_to(message, f"HTTP {r.status_code}")
        return

    # Start ffmpeg
    ffmpeg_process = None
    if target_format == "mp4":
        ffmpeg_process = subprocess.Popen([
            "ffmpeg",
            "-v",
            "error",
            "-threads",
            str(config["ffmpeg_threads"]),
            "-i",
            input_filename,
            "-map",
            "V:0?",  # select video stream
            "-map",
            "0:a?",  # ignore audio if doesn't exist
            "-c:v",
            "libx264",  # specify video encoder
            "-max_muxing_queue_size",
            "9999",  # https://trac.ffmpeg.org/ticket/6375
            "-movflags",
            "+faststart",  # optimize for streaming
            "-preset",
            "veryslow",  # https://trac.ffmpeg.org/wiki/Encode/H.264#a2.Chooseapresetandtune
            "-timelimit",
            "900",  # prevent DoS (exit after 15 min)
            "-vf",
            "pad=ceil(iw/2)*2:ceil(ih/2)*2",  # https://stackoverflow.com/questions/20847674/ffmpeg-libx264-height-not-divisible-by-2#20848224
            output_filename,
        ])
    elif target_format == "png":
        ffmpeg_process = subprocess.Popen([
            "ffmpeg",
            "-v",
            "error",
            "-threads",
            str(config["ffmpeg_threads"]),
            "-thread_type",
            "slice",
            "-i",
            input_filename,
            "-timelimit",
            "60",  # prevent DoS (exit after 15 min)
            output_filename,
        ])

    # Update progress while ffmpeg is alive
    old_progress = ""
    while ffmpeg_process.poll() is None:
        try:
            raw_output_size = utils.filesize(output_filename)
        except FileNotFoundError:
            raw_output_size = 0

        if raw_output_size >= MAXIMUM_FILESIZE_ALLOWED:
            update_status_message(text.error.huge_file)
            ffmpeg_process.kill()
            utils.rm(input_filename)
            utils.rm(output_filename)
            return

        input_size = utils.bytes2human(raw_input_size)
        output_size = utils.bytes2human(raw_output_size)

        progress = f"{output_size} / {input_size}"
        # Update progress only if it changed
        if progress != old_progress:
            update_status_message(text.converting.format(progress))
            old_progress = progress
        time.sleep(2)

    # Exit in case of error with ffmpeg
    if ffmpeg_process.returncode != 0:
        update_status_message(text.error.converting)
        # Clean up temporary files
        utils.rm(input_filename)
        utils.rm(output_filename)
        return

    # Check output file size
    output_size = utils.filesize(output_filename)
    if output_size >= MAXIMUM_FILESIZE_ALLOWED:
        update_status_message(text.error.huge_file)
        # Clean up temporary files
        utils.rm(input_filename)
        utils.rm(output_filename)
        return

    # Default params for sending operation
    data = {
        "chat_id": message.chat.id,
        "reply_to_message_id": message.message_id
    }

    if target_format == "mp4":
        data.update({"supports_streaming": True})
        # 1. Get video duration in seconds
        video_duration = subprocess.run(
            [
                "ffprobe",
                "-v",
                "error",
                "-select_streams",
                "v:0",
                "-show_entries",
                "format=duration",
                "-of",
                "default=noprint_wrappers=1:nokey=1",
                output_filename,
            ],
            stdout=subprocess.PIPE,
        ).stdout.decode("utf-8").strip()

        video_duration = round(float(video_duration))
        data.update({"duration": video_duration})

        # 2. Get video height and width
        video_props = subprocess.run(
            [
                "ffprobe",
                "-v",
                "error",
                "-select_streams",
                "v:0",
                "-show_entries",
                "stream=width,height",
                "-of",
                "csv=s=x:p=0",
                output_filename,
            ],
            stdout=subprocess.PIPE,
        ).stdout.decode("utf-8").strip()

        video_width, video_height = video_props.split("x")
        data.update({"width": video_width, "height": video_height})

        # 3. Take one frame from the middle of the video
        update_status_message(text.generating_thumbnail)
        thumbnail = "".join(
            [config["temp_path"],
             utils.random_string(), ".jpg"])
        generate_thumbnail_process = subprocess.Popen([
            "ffmpeg",
            "-v",
            "error",
            "-i",
            output_filename,
            "-vcodec",
            "mjpeg",
            "-vframes",
            "1",
            "-an",
            "-f",
            "rawvideo",
            "-ss",
            str(int(video_duration / 2)),
            # keep the limit of 90px height/width (Telegram API) while preserving the aspect ratio
            "-vf",
            "scale='if(gt(iw,ih),90,trunc(oh*a/2)*2)':'if(gt(iw,ih),trunc(ow/a/2)*2,90)'",
            thumbnail,
        ])

        # While process is alive (i.e. is working)
        while generate_thumbnail_process.poll() is None:
            time.sleep(1)

        # Exit in case of error with ffmpeg
        if generate_thumbnail_process.returncode != 0:
            update_status_message(text.error.generating_thumbnail)
            utils.rm(input_filename)
            utils.rm(output_filename)
            return

        update_status_message(text.uploading)
        requests.post(
            "https://api.telegram.org/bot{}/sendVideo".format(
                config["telegram_token"]),
            data=data,
            files=[
                ("video", (utils.random_string() + ".mp4",
                           open(output_filename, "rb"), "video/mp4")),
                ("thumb", (utils.random_string() + ".jpg",
                           open(thumbnail, "rb"), "image/jpeg")),
            ],
        )
        utils.rm(input_filename)
        utils.rm(output_filename)
        utils.rm(thumbnail)

    elif target_format == "png":
        # Upload to Telegram
        update_status_message(text.uploading)
        requests.post(
            "https://api.telegram.org/bot{}/sendPhoto".format(
                config["telegram_token"]),
            data=data,
            files=[("photo", (utils.random_string() + ".png",
                              open(output_filename, "rb"), "image/png"))],
        )
        requests.post(
            "https://api.telegram.org/bot{}/sendDocument".format(
                config["telegram_token"]),
            data=data,
            files=[("document", (utils.random_string() + ".png",
                                 open(output_filename, "rb"), "image/png"))],
        )
        utils.rm(input_filename)
        utils.rm(output_filename)

    bot.delete_message(message.chat.id, status_message.message_id)
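convert_worker is described as "spawned every time user sends a link or a file", but the dispatching handler is not included in Example #4. A minimal sketch of how a pyTelegramBotAPI handler might hand a URL off to it on a background thread follows; the handler name, the regular expression, and the config object are assumptions for illustration, not the project's actual dispatch code.

import threading

@bot.message_handler(regexp=r"https?://\S+\.webm")
def handle_webm_link(message):
    # Hypothetical dispatcher: run the conversion off the polling thread
    worker = threading.Thread(
        target=convert_worker,
        args=("mp4", message, message.text.strip(), config, bot),
        daemon=True)
    worker.start()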
Example #5
def live(audio_recognizer, audio_producer, audio2video, scaler, host):
    import Oger

    me = mp.current_process()
    print me.name, 'PID', me.pid

    context = zmq.Context()
    mic = context.socket(zmq.SUB)
    mic.connect('tcp://{}:{}'.format(host, MIC))
    mic.setsockopt(zmq.SUBSCRIBE, b'')

    speaker = context.socket(zmq.PUSH)
    speaker.connect('tcp://{}:{}'.format(host, SPEAKER)) 

    camera = context.socket(zmq.SUB)
    camera.connect('tcp://{}:{}'.format(host, CAMERA))
    camera.setsockopt(zmq.SUBSCRIBE, b'')

    projector = context.socket(zmq.PUSH)
    projector.connect('tcp://{}:{}'.format(host, PROJECTOR)) 

    stateQ = context.socket(zmq.SUB)
    stateQ.connect('tcp://{}:{}'.format(host, STATE))
    stateQ.setsockopt(zmq.SUBSCRIBE, b'') 

    eventQ = context.socket(zmq.SUB)
    eventQ.connect('tcp://{}:{}'.format(host, EVENT))
    eventQ.setsockopt(zmq.SUBSCRIBE, b'') 

    snapshot = context.socket(zmq.REQ)
    snapshot.connect('tcp://{}:{}'.format(host, SNAPSHOT))
    snapshot.send(b'Send me the state, please')
    state = snapshot.recv_json()

    sender = context.socket(zmq.PUSH)
    sender.connect('tcp://{}:{}'.format(host, EXTERNAL))
    sender.send_json('register {}'.format(me.name))

    poller = zmq.Poller()
    poller.register(mic, zmq.POLLIN)
    poller.register(camera, zmq.POLLIN)
    poller.register(stateQ, zmq.POLLIN)
    poller.register(eventQ, zmq.POLLIN)

    previous_prediction = []
    # Approximately 10 seconds of audio/video
    error = deque(maxlen=3400)
    audio = deque(maxlen=3400)
    video = deque(maxlen=80)
    while True:
        events = dict(poller.poll())

        if stateQ in events:
            state = stateQ.recv_json()

        if mic in events:
            new_audio = np.atleast_2d(recv_array(mic))
            if state['record']:
                scaled_signals = scaler.transform(new_audio)
                audio.append(np.ndarray.flatten(scaled_signals))
                if len(previous_prediction):
                    error.append(scaled_signals[:,idxs].flatten() - previous_prediction.flatten())
                previous_prediction = audio_recognizer(scaled_signals[:,idxs]) # This would not be necessary in a centralized recognizer

        if camera in events:
            new_video = recv_array(camera)
            if state['record']:
                video.append(new_video)

        if eventQ in events:
            pushbutton = eventQ.recv_json()
            if 'reset' in pushbutton:
                error.clear()
                audio.clear()
                video.clear()
                previous_prediction = []

            if 'rmse' in pushbutton:
                rmse = np.sqrt((np.array(list(error)).flatten() ** 2).mean())
                sender.send_json('{} RMSE {}'.format(me.name, rmse))
                
            if 'respond' in pushbutton and pushbutton['respond'] == me.name:
                audio_data = np.array(list(audio))
                video_data = np.array(list(video))

                print '{} chosen to respond. Audio data: {} Video data: {}'.format(me.name, audio_data.shape, video_data.shape)

                if audio_data.size == 0 and video_data.size == 0:
                    print '*** Audio data and video data arrays are empty. Aborting the response. ***'
                    continue

                row_diff = audio_data.shape[0] - audio_producer.length
                if row_diff < 0:
                    audio_data = np.vstack([ audio_data, np.zeros((-row_diff, audio_data.shape[1])) ])
                else:
                    audio_data = audio_data[:audio_producer.length]

                sound = audio_producer(audio_data)
                
                stride = audio_producer.length/audio2video.length
                projection = audio2video(audio_data[audio_data.shape[0] - stride*audio2video.length::stride])

                # DREAM MODE: You can train a network with zero audio input -> video output, and use this
                # to recreate the original training sequence with scary accuracy...

                for row in projection:
                    send_array(projector, row)

                for row in scaler.inverse_transform(sound):
                    send_array(speaker, row)

            if 'save' in pushbutton:
                filename = '{}.{}'.format(pushbutton['save'], me.name)
                pickle.dump((audio_recognizer, audio_producer, audio2video, scaler, host), file(filename, 'w'))
                print '{} saved as file {} ({})'.format(me.name, filename, filesize(filename))
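Example #5 relies on recv_array and send_array helpers that are not shown. They presumably follow the standard pyzmq recipe of sending a small JSON header (dtype and shape) followed by the raw buffer; the sketch below is written under that assumption and makes no claim about the project's actual implementation.

import numpy as np
import zmq

def send_array(socket, A, flags=0, copy=True, track=False):
    '''Send a numpy array with dtype/shape metadata (assumed helper).'''
    md = dict(dtype=str(A.dtype), shape=A.shape)
    socket.send_json(md, flags | zmq.SNDMORE)
    return socket.send(A, flags, copy=copy, track=track)

def recv_array(socket, flags=0, copy=True, track=False):
    '''Receive a numpy array sent by send_array.'''
    md = socket.recv_json(flags=flags)
    msg = socket.recv(flags=flags, copy=copy, track=track)
    A = np.frombuffer(memoryview(msg), dtype=md['dtype'])
    return A.reshape(md['shape'])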