Code example #1
def encode(message):
    try:
        body = base64.b64decode(message.body)
        data = json.loads(body.decode('utf-8'))
    except Exception as e:
        logger.exception(e)
        message.delete()
        return


    # logger.debug(json.dumps(data, ensure_ascii=False, indent=2, sort_keys=True, separators=(',', ': ')))

    (ipath, opath) = ffmpeg.transform_path(data["recorded"])
    ffmpeg.ffmpeg(ipath, opath)

    message.delete()
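Note: the handler above implies a simple message contract: `message.body` is base64-encoded JSON carrying a "recorded" path, which is then handed to `ffmpeg.transform_path` / `ffmpeg.ffmpeg`. A minimal, self-contained sketch of that contract (the payload value is made up for illustration):

import base64
import json

payload = {"recorded": "/var/media/recorded/session-0001.flv"}  # example path, not from the project
body = base64.b64encode(json.dumps(payload).encode("utf-8"))

# Round-trip, mirroring what the handler does with message.body:
data = json.loads(base64.b64decode(body).decode("utf-8"))
assert data["recorded"] == payload["recorded"]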
Code example #2
    def modeswitch(self, mode, opts):
        if mode == "mp3":
            encoder = mp3(opts['lameopts'])
        elif mode in ("ogg", "vorbis"):
            encoder = vorbis(opts['oggencopts'])
        elif mode == "aacplus":
            encoder = aacplus(opts['aacplusopts'])
        elif mode == "opus":
            encoder = opus(opts['opusencopts'])
        elif mode == "flac":
            encoder = flac(opts['flacopts'])
        elif mode == "test":
            pass  # 'test' is special as it isn't a converter; it is handled below
        elif mode == "_copy":
            encoder = filecopy(opts)
        elif mode.startswith("f:"):
            encoder = ffmpeg(opts, mode[2:])  # Second argument is the codec
        else:
            return None

        if mode == "test":
            encoder = flac(opts['flacopts'])
            encf = encoder.flactest
        else:
            encf = encoder.convert
        return encf
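Note: a hedged sketch of how this dispatcher might be driven. The `opts` keys mirror the ones `modeswitch` reads above; the option values and the `converter` instance are placeholders, not taken from flac2all itself.

# Hypothetical call site; `converter` stands for an instance of the class that defines modeswitch().
opts = {
    "lameopts": "-V 0",
    "oggencopts": "quality=5",
    "aacplusopts": "",
    "opusencopts": "bitrate=128",
    "flacopts": "--best",
}
encode_func = converter.modeswitch("opus", opts)  # returns opus(...).convert
if encode_func is None:
    raise ValueError("unknown mode")  # unrecognised modes fall through to None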
Code example #3
    def TwitterImage2Video(self,
                           image_folder,
                           save_file,
                           ffmpeg_location,
                           fps=1):
        ff = ffmpeg.ffmpeg()
        ff.img2vid(image_folder, save_file, fps, ffmpeg_location)
        # The conversion failed if the output file is missing or empty.
        try:
            if os.path.getsize(save_file) == 0:
                raise ValueError("Error converting images to video")
        except OSError:
            raise ValueError("Error converting images to video")
Code example #4
    def get(self, myHandle):

        try:  # try to get Twitter keys from the keys file
            t = tw.twitter("keys")
        except Exception:  # no keys exist, run the stub functions
            resp = noKeys()
            return resp

        f = ff.ffmpeg()  # create an ffmpeg object
        numTweets = 20  # number of tweets to get

        handlesQ = queue.Queue()  # queue to hold Twitter handles in the order the API received them
        tweetsQ = queue.Queue()  # queue to hold tweets in the order the handle tweeted them
        # imagesQ = queue.Queue()  # queue to hold tweet images

        handlesQ.put(myHandle)  # add the Twitter handle to the queue

        myPic = t.get_profilePic(myHandle)  # get the handle's profile picture
        myTweets = t.get_tweets(myHandle, numTweets)  # get the handle's tweets

        # thread to get the tweets
        t1 = threading.Thread(name="producer", target=getTweets, args=(tweetsQ, myHandle, myPic, myTweets))
        t1.daemon = True
        t1.start()

        # thread to convert tweets to images
        t2 = threading.Thread(name="imageConverter", target=tweetsToPics, args=(tweetsQ, f))
        t2.daemon = True
        t2.start()

        # thread to convert the images to video
        t3 = threading.Thread(name="videoCreator", target=videoProcessor, args=(handlesQ, f))
        t3.daemon = True
        t3.start()

        handlesQ.join()  # block until the video is created

        myFile = os.path.join(os.getcwd(), myHandle + '_twitter_feed.mp4')
        return send_file(myFile)
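Note: `handlesQ.join()` only returns once the worker has called `task_done()` for every handle it pulled off the queue, which is what makes it a "block until the video is created" barrier. The original `videoProcessor` is not shown here, so the following is a generic, self-contained illustration of that contract rather than the project's actual worker.

import queue
import threading
import time

q = queue.Queue()

def worker(q):
    while True:
        handle = q.get()   # block until a handle is queued
        time.sleep(0.1)    # stand-in for building the video
        q.task_done()      # tell join() this handle is finished

t = threading.Thread(target=worker, args=(q,), daemon=True)
t.start()

q.put("some_handle")
q.join()  # returns only after task_done() has been called for the item
print("the video for some_handle would be ready at this point")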
Code example #5
    def load(self):
        if isinstance(self.data, numpy.ndarray):
            return
        temp_file_handle = None
        if self.filename.lower().endswith(".wav") and (
                self.sampleRate, self.numChannels) == (44100, 2):
            file_to_read = self.filename
        elif self.convertedfile:
            file_to_read = self.convertedfile
        else:
            temp_file_handle, self.convertedfile = tempfile.mkstemp(".wav")
            self.sampleRate, self.numChannels = ffmpeg(
                self.filename,
                self.convertedfile,
                overwrite=True,
                numChannels=self.numChannels,
                sampleRate=self.sampleRate,
                verbose=self.verbose)
            file_to_read = self.convertedfile

        w = wave.open(file_to_read, 'r')
        numFrames = w.getnframes()
        raw = w.readframes(numFrames)
        sampleSize = numFrames * self.numChannels
        data = numpy.frombuffer(raw, dtype="<h", count=sampleSize)
        ndarray = numpy.array(data, dtype=numpy.int16)
        if self.numChannels > 1:
            ndarray.resize((numFrames, self.numChannels))
        # numpy.array() never returns None, so the data can be assigned directly
        self.endindex = len(ndarray)
        self.data = ndarray
        if temp_file_handle is not None:
            os.close(temp_file_handle)
        w.close()
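Note: after `load()` returns, `self.data` is an int16 array (reshaped to `(numFrames, numChannels)` for multi-channel input) and `self.endindex` holds the frame count. A short sketch of what a caller might derive from that; `audio` is a placeholder for an instance of this class.

audio.load()
duration_seconds = audio.endindex / float(audio.sampleRate)
left_channel = audio.data[:, 0] if audio.numChannels > 1 else audio.data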
Code example #6
File: scanner.py  Project: AdamJacobMuller/udp-relay
for program in programs:
    channel = program.find("GuideNumber").text
    name = program.find("GuideName").text
    if len(want_channels) > 0:
        if channel not in want_channels:
            #print("skipping %s - %s" % (channel,name))
            continue
    print("scanning %s - %s" % (channel, name))
    current_target = libhdhr.device_get_tuner_target(device)
    if current_target != target:
        libhdhr.device_set_tuner_target(device, target)
    vchannel_result = libhdhr.device_set_tuner_vchannel(device, str(channel))
    vstatus = libhdhr.device_get_tuner_vstatus(device)
    ffprobe_prober = ffprobe.ffprobe()
    ffmpeg_screenshotter = ffmpeg.ffmpeg()
    ffmpeg_screenshotter.output = [
        '/www/hdhr.adam.gs/scanner/%s/%s-%s.png' % (
            dir_timestamp,
            vstatus[2].vchannel.replace("/", "_"),
            vstatus[2].name
        )
    ]
    ffmpeg_screenshotter.debug = False
    ffmpeg_screenshotter.start()
    need_data = True
    last = time.time()
    started = time.time()
    meta = {}
    while need_data is True:
        try:
Code example #7
File: scanner.py  Project: n9yty/udp-relay
for program in programs:
    channel = program.find("GuideNumber").text
    name = program.find("GuideName").text
    if len(want_channels) > 0:
        if channel not in want_channels:
            #print("skipping %s - %s" % (channel,name))
            continue
    print("scanning %s - %s" % (channel, name))
    current_target = libhdhr.device_get_tuner_target(device)
    if current_target != target:
        libhdhr.device_set_tuner_target(device, target)
    vchannel_result = libhdhr.device_set_tuner_vchannel(device, str(channel))
    vstatus = libhdhr.device_get_tuner_vstatus(device)
    ffprobe_prober = ffprobe.ffprobe()
    ffmpeg_screenshotter = ffmpeg.ffmpeg()
    ffmpeg_screenshotter.output = [
        '/www/hdhr.adam.gs/scanner/%s/%s-%s.png' %
        (dir_timestamp, vstatus[2].vchannel.replace("/", "_"), vstatus[2].name)
    ]
    ffmpeg_screenshotter.debug = False
    ffmpeg_screenshotter.start()
    need_data = True
    last = time.time()
    started = time.time()
    meta = {}
    while need_data is True:
        try:
            data = listener.recvfrom(2048)
        except Exception:  # ignore receive errors (e.g. timeouts) and keep polling
            continue
Code example #8
    def encode(self, filename=None, mp3=None):
        """
        Outputs an MP3 or WAVE file to `filename`.
        A WAVE file is written when `mp3` is falsy and `filename` ends with
        '.wav'; otherwise the data is encoded to MP3 via ffmpeg.
        """
        if not mp3 and filename.lower().endswith('.wav'):
            mp3 = False
        else:
            mp3 = True
        if mp3:
            foo, tempfilename = tempfile.mkstemp(".wav")
            os.close(foo)
        else:
            tempfilename = filename
        fid = open(tempfilename, 'wb')
        # Based on Scipy svn
        # http://projects.scipy.org/pipermail/scipy-svn/2007-August/001189.html
        # Chunk tags are written as bytes; sizes must be ints for struct.pack.
        fid.write(b'RIFF')
        fid.write(struct.pack(
            '<i',
            0))  # write a 0 for length now, we'll go back and add it later
        fid.write(b'WAVE')
        # fmt chunk
        fid.write(b'fmt ')
        if self.data.ndim == 1:
            noc = 1
        else:
            noc = self.data.shape[1]
        bits = self.data.dtype.itemsize * 8
        sbytes = self.sampleRate * (bits // 8) * noc
        ba = noc * (bits // 8)
        fid.write(
            struct.pack('<ihHiiHH', 16, 1, noc, self.sampleRate, sbytes, ba,
                        bits))
        # data chunk
        fid.write(b'data')
        fid.write(struct.pack('<i', self.data.nbytes))
        self.data.tofile(fid)
        # Determine file size and place it in correct
        # position at start of the file.
        size = fid.tell()
        fid.seek(4)
        fid.write(struct.pack('<i', size - 8))
        fid.close()
        if not mp3:
            return tempfilename
        # now convert it to mp3
        if not filename.lower().endswith('.mp3'):
            filename = filename + '.mp3'
        try:
            bitRate = MP3_BITRATE
        except NameError:
            bitRate = 128

        try:
            ffmpeg(tempfilename,
                   filename,
                   bitRate=bitRate,
                   verbose=self.verbose)
        except Exception:
            log.warning("Error converting from %s to %s", tempfilename,
                        filename)

        if tempfilename != filename:
            if self.verbose:
                log.warning("Deleting: %s", tempfilename)
            os.remove(tempfilename)
        return filename
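Note: a hedged usage sketch for `encode()` above; `audio` is a placeholder for an instance of the class and the file names are made up.

wav_path = audio.encode("take1.wav", mp3=False)  # WAVE written directly, no ffmpeg call
mp3_path = audio.encode("take1.mp3")             # WAVE written to a temp file, then converted via ffmpeg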
Code example #9
# -*- coding: utf-8 -*-
import sys, android, os, time
from protocol2 import Camera
from streamserver import StreamServer
from base64 import b64encode
from ffmpeg import ffmpeg

droid = android.Android()
running = True

global server, droid, init, camera, lastStop, ff
record_buffer = None
droid.makeToast("AIRcable AIRcam Loading....")
lastStop = None
ff = ffmpeg()


def exit():
    droid.bluetoothStop()
    sys.exit(0)


def disconnect():
    droid.bluetoothStop()


def connect():
    ret = droid.startActivityForResult(
        'net.aircable.aircam.DeviceListActivity')
    if ret.error:
        droid.makeToast(ret.error)
Code example #10
File: main.py  Project: aircable/AIRi
# -*- coding: utf-8 -*-
import sys, android, os, time
from protocol2 import Camera
from streamserver import StreamServer
from base64 import b64encode
from ffmpeg import ffmpeg

droid = android.Android()
running = True

global server, droid, init, camera, lastStop, ff
record_buffer = None
droid.makeToast("AIRcable AIRcam Loading....")
lastStop = None
ff = ffmpeg()


def exit():
    droid.bluetoothStop()
    sys.exit(0)


def disconnect():
    droid.bluetoothStop()


def connect():
    ret = droid.startActivityForResult("net.aircable.aircam.DeviceListActivity")
    if ret.error:
        droid.makeToast(ret.error)
        sys.exit(0)
Code example #11
# e.g. we hit Ctrl-C and want to empty the worker before a
# clean terminate
refuse_tasks = False
terminate = False

# The modetable holds all the "modes" (read: formats we can convert to) in the format
# ["codec_name", "description"]. The codec name is what the end user will pass to
# flac2all as a mode command, so it must contain no spaces or other special characters,
# and we keep it lowercase.
modetable = [["mp3", "Lame mp3 encoder"], ["vorbis", "Ogg vorbis encoder"],
             ["aacplus", "aac-enc encoder"], ["opus", "Opus Encoder"],
             ["flac", "FLAC encoder"], ["test", "FLAC testing procedure"],
             ["_copy", "Copy non flac files across"]]
# Add the ffmpeg codecs to the modetable. We prefix them with "f:" so the end user
# knows to use the ffmpeg options.
modetable.extend([["f:" + x[0], x[1]] for x in ffmpeg(None, None).codeclist()])


# functions
def signal_handler(signum, frame):
    global terminate, log
    log.info("Caught signal: %s" % signum)
    terminate = True


def print_summary(count, total, percentage_execution_rate, successes, failures,
                  percentage_fail, modes):
    out = "\n\n"
    out += ("=" * 80)
    out += "| Summary "
    out += ("-" * 80)