Exemplo n.º 1
0
    def __init__(self, stream, server):
        """Build the streaming pipeline for an already-encoded stream.

        No re-encoding is done: the data is only vorbis-parsed and fanned
        out through a tee.

        :param stream: mapping with at least "name", "description" and
            "genres" keys (presumably a DB document -- TODO confirm).
        :param server: forwarded to self.scale() to attach the first
            output server.
        """
        super(EncodedStreamer, self).__init__()

        # Serializes access to streamer state from bus callbacks.
        self.lock = BoundedSemaphore(value=1)

        # GLib main loop in its own thread so bus signals get delivered.
        self.mainloop_thread = MainLoop()
        self.mainloop_thread.start()

        self.stream = stream
        self.servers = {}     # server_id -> tail bin, filled by scale()
        self.tailbins = []
        self.playing = False  # flipped once the tee reaches STATE_PLAYING

        self.streamname = stream["name"]
        self.description = stream["description"]
        self.genre = ", ".join(stream["genres"])
        # self.default_quality = default_quality

        logging.debug("Streamer::__init__(): Constructing pipeline")
        pipe = gst.Pipeline()

        # threads
        queue = gst.element_factory_make("queue")

        # tee

        # self.oggdemux = gst.element_factory_make('oggdemux')
        self.vorbisparse = gst.element_factory_make('vorbisparse')
        # self.oggdemux.connect("pad-added", partial(self.__on_dynamic_pad, link = self.vorbisparse))

        # vorbisparse -> tee: the tee is the fan-out point for all servers.
        tee = gst.element_factory_make('tee')
        # self.typefind = gst.element_factory_make('typefind')
        pipe.add(self.vorbisparse, tee)
        gst.element_link_many(self.vorbisparse, tee)

        # fakesink = gst.element_factory_make("fakesink")
        # fakesink.set_property("sync", 1)
        # pipe.add(fakesink)
        # gst.element_link_many(tee, fakesink)

        self.tee = tee
        self.pipe = pipe

        # only one quality, because this is encoded (no reencoding is done)
        # self.tees = {default_quality:tee}

        logging.debug("Streamer::__init__(): Running distribute")

        # Attach the initial output server synchronously.
        self.scale(server, sync=1, init=True)

        # NOTE(review): `queue` is created but never added to the pipeline
        # here -- presumably used by the caller via self.queue; confirm.
        self.queue = queue

        logging.debug("Streamer::__init__(): Running EncodedChannel")

        # Watch the pipeline bus for state changes and end-of-stream.
        self.bus = pipe.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message::state-changed',
                         self.on_message_state_changed)
        self.bus.connect('message::eos', self.on_eos)
Exemplo n.º 2
0
    def __init__(self, medias, observers):
        """Index every media document into the artist tree, then shuffle."""
        self.artists = OrderedDict()
        self.history = None
        self.lock = BoundedSemaphore(value=1)

        # Feed each incoming media document into the artist/album/track tree.
        for item in medias:
            self.__add(item)

        self.shuffle()
Exemplo n.º 3
0
    def __init__(self, stream_id, history, playlist, observers):
        """Create the buffer and pre-fill it from the playlist.

        Tracks are pulled one at a time from ``playlist`` until
        ``is_full()`` reports the buffer is full.

        :param stream_id: id of the stream this buffer feeds.
        :param history: play-history object (kept for later use).
        :param playlist: source of tracks; must provide ``get_track()``.
        :param observers: update observers, stored on the instance.
        """
        super(Buffer, self).__init__()

        self.stream_id = stream_id
        self.observers = observers
        self.history = history
        self.playlist = playlist
        self.queue = OrderedDict({})
        self.lock = BoundedSemaphore(value=1)

        # Fill one track per iteration.  The lock is released in a
        # finally block so an exception inside get_track()/__add_track()
        # cannot leave it held forever (the original released it only on
        # the success path).
        while not self.is_full():
            self.lock.acquire()
            try:
                t = self.playlist.get_track()
                self.__add_track(t)
            finally:
                self.lock.release()
Exemplo n.º 4
0
 def __init__(self, maxsize = 0):
     """Initialise the multiprocessing queue.

     maxsize <= 0 means "no explicit bound": the slot semaphore is sized
     to the platform maximum (SEM_VALUE_MAX).
     """
     if maxsize <= 0:
         maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
     self._maxsize = maxsize
     # One-way pipe: consumers read from _reader, the feeder thread
     # writes to _writer.
     self._reader, self._writer = Pipe(duplex=False)
     self._rlock = Lock()
     # pid of the process that created the queue.
     self._opid = os.getpid()
     if sys.platform == 'win32':
         # NOTE(review): no writer lock is used on win32 -- presumably
         # pipe writes need no extra serialization there; confirm.
         self._wlock = None
     else:
         self._wlock = Lock()
     # Counts free slots: put() acquires one, get() releases one.
     self._sem = BoundedSemaphore(maxsize)
     self._after_fork()
     if sys.platform != 'win32':
         # Re-initialise the non-inheritable state in forked children.
         register_after_fork(self, Queue._after_fork)
     return
Exemplo n.º 5
0
 def __init__(self, maxsize = 0):
     """Initialise the multiprocessing queue.

     maxsize <= 0 means "no explicit bound": the slot semaphore is sized
     to the platform maximum (SEM_VALUE_MAX).
     """
     if maxsize <= 0:
         maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
     self._maxsize = maxsize
     # One-way pipe: consumers read from _reader, the feeder thread
     # writes to _writer.
     self._reader, self._writer = Pipe(duplex=False)
     self._rlock = Lock()
     # pid of the process that created the queue.
     self._opid = os.getpid()
     if sys.platform == 'win32':
         # NOTE(review): no writer lock is used on win32 -- presumably
         # pipe writes need no extra serialization there; confirm.
         self._wlock = None
     else:
         self._wlock = Lock()
     # Counts free slots: put() acquires one, get() releases one.
     self._sem = BoundedSemaphore(maxsize)
     self._after_fork()
     if sys.platform != 'win32':
         # Re-initialise the non-inheritable state in forked children.
         register_after_fork(self, Queue._after_fork)
Exemplo n.º 6
0
    def __init__(self, maxsize=0):
        """Initialise the multiprocessing queue.

        maxsize <= 0 means "no explicit bound"; the hard-coded
        2147483647L is 2**31 - 1, i.e. the semaphore value limit
        (Python 2 long literal).
        """
        if maxsize <= 0:
            maxsize = 2147483647L
        self._maxsize = maxsize
        # One-way pipe: consumers read from _reader, the feeder thread
        # writes to _writer.
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        # pid of the process that created the queue.
        self._opid = os.getpid()
        if sys.platform == 'win32':
            # NOTE(review): no writer lock is used on win32 -- presumably
            # pipe writes need no extra serialization there; confirm.
            self._wlock = None
        else:
            self._wlock = Lock()
        # Counts free slots: put() acquires one, get() releases one.
        self._sem = BoundedSemaphore(maxsize)

        self._after_fork()

        if sys.platform != 'win32':
            # Re-initialise the non-inheritable state in forked children.
            register_after_fork(self, Queue._after_fork)
Exemplo n.º 7
0
class Queue(object):
    """Pipe-based multiprocessing FIFO queue fed by a background thread.

    ``put()`` appends to an in-process buffer; a daemon feeder thread
    serializes buffered objects into a one-way pipe, from which ``get()``
    reads.  A bounded semaphore counts free slots so ``put()`` can block
    when the queue is full.

    This block appears to be decompiled CPython source; two methods
    (``put`` and ``join_thread``) carried decompilation artifacts that
    made parts of them unreachable -- restored here to the canonical
    logic of ``multiprocessing.queues.Queue``.
    """

    def __init__(self, maxsize=0):
        if maxsize <= 0:
            # 0 (or negative) means "unbounded": clamp to the platform limit.
            maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
        self._maxsize = maxsize
        # One-way pipe: get() reads from _reader, feeder writes to _writer.
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = Lock()
        # Free-slot counter: acquired by put(), released by get().
        self._sem = BoundedSemaphore(maxsize)
        self._after_fork()
        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)

    def __getstate__(self):
        # Only picklable while being handed to a child process.
        assert_spawning(self)
        return (self._maxsize,
                self._reader,
                self._writer,
                self._rlock,
                self._wlock,
                self._sem,
                self._opid)

    def __setstate__(self, state):
        (self._maxsize, self._reader, self._writer, self._rlock,
         self._wlock, self._sem, self._opid) = state
        self._after_fork()

    def _after_fork(self):
        """Reset the per-process (non-picklable) parts of the queue."""
        debug('Queue._after_fork()')
        self._notempty = threading.Condition(threading.Lock())
        self._buffer = collections.deque()
        self._thread = None
        self._jointhread = None
        self._joincancelled = False
        self._closed = False
        self._close = None
        self._send = self._writer.send
        self._recv = self._reader.recv
        self._poll = self._reader.poll

    def put(self, obj, block=True, timeout=None):
        """Append ``obj``; block (or raise Full) while the queue is full.

        BUG FIX: the decompiled original raised AssertionError on the
        closed check and left the semaphore-acquire line unreachable, so
        a full queue was never detected.  Restored: acquire a free slot
        first, raising Full when none is available in time.
        """
        assert not self._closed
        if not self._sem.acquire(block, timeout):
            raise Full
        self._notempty.acquire()
        try:
            if self._thread is None:
                # Feeder thread is started lazily on the first put().
                self._start_thread()
            self._buffer.append(obj)
            self._notempty.notify()
        finally:
            self._notempty.release()

    def get(self, block=True, timeout=None):
        """Pop the next item; block (or raise Empty) when none available."""
        if block and timeout is None:
            # Fast path: block indefinitely on the pipe.
            self._rlock.acquire()
            try:
                res = self._recv()
                self._sem.release()  # one slot freed
                return res
            finally:
                self._rlock.release()
        else:
            if block:
                deadline = time.time() + timeout
            if not self._rlock.acquire(block, timeout):
                raise Empty
            try:
                if block:
                    # Budget left after waiting for the reader lock.
                    timeout = deadline - time.time()
                    if timeout < 0 or not self._poll(timeout):
                        raise Empty
                elif not self._poll():
                    raise Empty
                res = self._recv()
                self._sem.release()  # one slot freed
                return res
            finally:
                self._rlock.release()

    def qsize(self):
        # Free slots subtracted from capacity gives the occupied count.
        return self._maxsize - self._sem._semlock._get_value()

    def empty(self):
        return not self._poll()

    def full(self):
        return self._sem._semlock._is_zero()

    def get_nowait(self):
        return self.get(False)

    def put_nowait(self, obj):
        return self.put(obj, False)

    def close(self):
        """Close the read end; the feeder's write end closes via finalizer."""
        self._closed = True
        self._reader.close()
        if self._close:
            self._close()

    def join_thread(self):
        """Wait for the feeder thread; only legal after close().

        BUG FIX: the decompiled original asserted the inverse condition
        and left the join call unreachable.  Restored: require the queue
        to be closed, then join if a join finalizer was registered.
        """
        debug('Queue.join_thread()')
        assert self._closed
        if self._jointhread:
            self._jointhread()

    def cancel_join_thread(self):
        debug('Queue.cancel_join_thread()')
        self._joincancelled = True
        try:
            self._jointhread.cancel()
        except AttributeError:
            # No join finalizer registered yet -- nothing to cancel.
            pass

    def _start_thread(self):
        """Start the daemon feeder thread and register its finalizers."""
        debug('Queue._start_thread()')
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send,
                  self._wlock, self._writer.close),
            name='QueueFeederThread')
        self._thread.daemon = True
        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')
        if not self._joincancelled:
            self._jointhread = Finalize(self._thread, Queue._finalize_join,
                                        [weakref.ref(self._thread)],
                                        exitpriority=-5)
        # Runs before _finalize_join (higher exitpriority fires first).
        self._close = Finalize(self, Queue._finalize_close,
                               [self._buffer, self._notempty],
                               exitpriority=10)

    @staticmethod
    def _finalize_join(twr):
        """Join the feeder thread if it is still alive (weakref arg)."""
        debug('joining queue thread')
        thread = twr()
        if thread is not None:
            thread.join()
            debug('... queue thread joined')
        else:
            debug('... queue thread already dead')

    @staticmethod
    def _finalize_close(buffer, notempty):
        """Ask the feeder thread to exit by queueing the sentinel."""
        debug('telling queue thread to quit')
        notempty.acquire()
        try:
            buffer.append(_sentinel)
            notempty.notify()
        finally:
            notempty.release()

    @staticmethod
    def _feed(buffer, notempty, send, writelock, close):
        """Feeder-thread main loop: drain the buffer into the pipe."""
        debug('starting thread to feed data to pipe')
        from .util import is_exiting
        # Bind hot-path lookups to locals.
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None
        try:
            while 1:
                # Sleep until there is something in the buffer.
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()

                try:
                    # Drain everything currently buffered.
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return
                        if wacquire is None:
                            send(obj)
                        else:
                            wacquire()
                            try:
                                send(obj)
                            finally:
                                wrelease()

                except IndexError:
                    # Buffer exhausted -- go back to waiting.
                    pass

        except Exception as e:
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass
Exemplo n.º 8
0
class EncodedStreamer(Base, Thread, ScaledStreamer):
    """Streamer for an already-encoded (vorbis) stream.

    The pipeline only parses the data and fans it out through a tee;
    no re-encoding takes place.
    """

    # NOTE(review): raw-audio caps, apparently not referenced by this
    # encoded pipeline itself -- confirm whether subclasses/mixins use it.
    caps = gst.caps_from_string(
        "audio/x-raw-int, channels=2, endianness=1234, rate=44100, width=16, depth=16, signed=true"
    )

    def __init__(self, stream, server):
        """Build the pipeline and attach the first output server.

        :param stream: mapping with at least "name", "description" and
            "genres" keys (presumably a DB document -- TODO confirm).
        :param server: forwarded to self.scale().
        """
        super(EncodedStreamer, self).__init__()

        # Serializes access to streamer state from bus callbacks.
        self.lock = BoundedSemaphore(value=1)

        # GLib main loop in its own thread so bus signals get delivered.
        self.mainloop_thread = MainLoop()
        self.mainloop_thread.start()

        self.stream = stream
        self.servers = {}     # server_id -> tail bin, filled by scale()
        self.tailbins = []
        self.playing = False  # flipped once the tee reaches STATE_PLAYING

        self.streamname = stream["name"]
        self.description = stream["description"]
        self.genre = ", ".join(stream["genres"])
        # self.default_quality = default_quality

        logging.debug("Streamer::__init__(): Constructing pipeline")
        pipe = gst.Pipeline()

        # threads
        queue = gst.element_factory_make("queue")

        # tee

        # self.oggdemux = gst.element_factory_make('oggdemux')
        self.vorbisparse = gst.element_factory_make('vorbisparse')
        # self.oggdemux.connect("pad-added", partial(self.__on_dynamic_pad, link = self.vorbisparse))

        # vorbisparse -> tee: the tee is the fan-out point for all servers.
        tee = gst.element_factory_make('tee')
        # self.typefind = gst.element_factory_make('typefind')
        pipe.add(self.vorbisparse, tee)
        gst.element_link_many(self.vorbisparse, tee)

        # fakesink = gst.element_factory_make("fakesink")
        # fakesink.set_property("sync", 1)
        # pipe.add(fakesink)
        # gst.element_link_many(tee, fakesink)

        self.tee = tee
        self.pipe = pipe

        # only one quality, because this is encoded (no reencoding is done)
        # self.tees = {default_quality:tee}

        logging.debug("Streamer::__init__(): Running distribute")

        # Attach the initial output server synchronously.
        self.scale(server, sync=1, init=True)

        # NOTE(review): `queue` is created but never added to the
        # pipeline here -- confirm who consumes self.queue.
        self.queue = queue

        logging.debug("Streamer::__init__(): Running EncodedChannel")

        # Watch the pipeline bus for state changes and end-of-stream.
        self.bus = pipe.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message::state-changed',
                         self.on_message_state_changed)
        self.bus.connect('message::eos', self.on_eos)

    def __on_dynamic_pad(self, demuxer, pad, link=None):
        """Relink a dynamically-added demuxer pad to `link`'s sink pad.

        NOTE(review): the lock is not released if link()/unlink() raises;
        consider try/finally.
        """
        self.lock.acquire()
        if pad.is_linked():
            pad.unlink(link.get_pad("sink"))
        pad.link(link.get_pad("sink"))
        self.lock.release()

    def on_message_state_changed(self, bus, message):
        """Announce 'streamer_initialized' once the tee starts playing."""
        # Only state changes of the fan-out tee are interesting.
        if message.src != self.tee:
            return

        old_state, new_state, pending = message.parse_state_changed()

        if new_state == gst.STATE_PLAYING and not self.playing:
            self.playing = True
            self.send({"signal": 'streamer_initialized', "streamer": self})

    def on_eos(self, bus, message):
        """Forward the pipeline's end-of-stream as an 'eos' signal."""

        logging.debug("Streamer::run(): End of stream")
        self.send({"signal": "eos"})

    def run(self):
        """Thread body: start the pipeline and spin a GLib main loop."""

        logging.debug("Streamer::run(): Starting pipeline")

        # The MainLoop
        self.mainloop = gobject.MainLoop()

        # And off we go!
        self.pipe.set_state(gst.STATE_PLAYING)

        self.mainloop.run()
Exemplo n.º 9
0
def BoundedSemaphore(value=1):
    """
    Returns a bounded semaphore object

    Delegates to the public ``multiprocessing.BoundedSemaphore`` factory
    instead of instantiating ``multiprocessing.synchronize.BoundedSemaphore``
    directly: the internal class requires an explicit context argument on
    modern Pythons, while the package-level factory works everywhere and
    is behaviourally identical.
    """
    import multiprocessing
    return multiprocessing.BoundedSemaphore(value)
Exemplo n.º 10
0
def copy_s3_bucket(SOURCE_BUCKET, DEST_BUCKET, prefix=None, threads=10):
    """
	Example usage: copy_s3_bucket(SOURCE_BUCKET='my-source-bucket', DEST_BUCKET='my-destination-bucket', prefix='parent/child/dir/', threads=20)
	"""
    # Init s3
    conn = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
    bucket = conn.get_bucket(SOURCE_BUCKET)
    dest_bucket = conn.get_bucket(DEST_BUCKET)

    # Filter by prefix
    rs = bucket.list()
    if prefix: rs = bucket.list(prefix)

    class CopyKey(Thread):
        def __init__(self, key_name):
            Thread.__init__(self)
            self.key_name = key_name
            self.status = False

        def run(self):
            # We must create new bucket instances for each thread, passing the key is not threadsafe
            thread_conn = S3Connection(AWS_ACCESS_KEY_ID,
                                       AWS_SECRET_ACCESS_KEY)
            thread_bucket = conn.get_bucket(SOURCE_BUCKET)
            thread_dest_bucket = conn.get_bucket(DEST_BUCKET)
            thread_key = thread_bucket.get_key(self.key_name)

            # Only copy if not exists on dest bucket
            if not thread_dest_bucket.get_key(self.key_name):
                pool_sema.acquire()
                self.status = "%s : Sempahore Acquired, Copy Next" % datetime.datetime.now(
                )
                try:
                    thread_key.copy(DEST_BUCKET, self.key_name, None, False,
                                    True)
                    self.status = "%s : Copy Success : %s" % (
                        datetime.datetime.now(), self.key_name)
                except:
                    self.status = "%s : Copy Error : %s" % (
                        datetime.datetime.now(), sys.exc_info())
                finally:
                    pool_sema.release()
            else:
                self.status = "%s : Key Already Exists, will not overwrite." % datetime.datetime.now(
                )

    key_copy_thread_list = []
    pool_sema = BoundedSemaphore(value=threads)
    total_keys = 0

    # Request threads
    for key in rs:
        total_keys += 1
        print "%s : Requesting copy thread for key %s" % (
            datetime.datetime.now(), key.name)
        current = CopyKey(key.name)
        key_copy_thread_list.append(current)
        current.start()

        # Pause if max threads reached - note that enumerate returns all threads, including this parent thread
        if len(threading.enumerate()) >= threads:
            print "%s : Max Threads (%s) Reached: Pausing until threadcount reduces." % (
                datetime.datetime.now(), threads)
            while 1:
                if len(threading.enumerate()) < threads:
                    print "%s : Continuing thread creation." % datetime.datetime.now(
                    )
                    break
                time.sleep(1)

    for key_copy_thread in key_copy_thread_list:
        key_copy_thread.join(
            30
        )  # Bring this particular thread to this current "parent" thread, blocks parent until joined or 30s timeout
        if key_copy_thread.isAlive():
            print "%s : TIMEOUT on key %s" % (datetime.datetime.now(),
                                              key_copy_thread.key_name)
            continue
        print "%s : Status Output: %s" % (datetime.datetime.now(),
                                          key_copy_thread.status)

    print "%s : Complete : %s Total Keys Requested" % (datetime.datetime.now(),
                                                       total_keys)
Exemplo n.º 11
0
class Streamer(Base, ScaledStreamer):
    """Re-encoding streamer: mixes audio through an adder and encodes the
    mix once per configured quality, fanning each quality out via a tee."""

    # Raw-audio format produced by sources and expected by the adder.
    caps = gst.caps_from_string(
        "audio/x-raw-int, channels=2, endianness=1234, rate=44100, width=16, depth=16, signed=true"
    )

    def link_quiet_source(self):
        """
        Link silet stream to adder. This works also with vorbisenc and oggmux.
        """
        source = QuietSource(Streamer.caps)
        audioconverter = gst.element_factory_make("audioconvert")
        audiofilter = gst.element_factory_make("capsfilter")
        audiofilter.set_property("caps", Streamer.caps)
        audioresample = gst.element_factory_make("audioresample")
        audiorate = gst.element_factory_make("audiorate")
        queue = gst.element_factory_make("queue")
        self.pipe.add(source, audioconverter, audioresample, audiofilter,
                      audiorate, queue)

        logging.debug("Streamer::link_quiet_source(): Linking quiet source")
        # source -> convert -> resample -> caps filter -> rate -> queue -> adder
        gst.element_link_many(source, audioconverter, audioresample,
                              audiofilter, audiorate, queue, self.adder)

    def __init__(self, stream):
        """Build the mixing/encoding pipeline for every configured quality.

        :param stream: mapping with "name", "description", "genres" and a
            "quality" list (presumably a DB document -- TODO confirm).
        """
        super(Streamer, self).__init__()

        logging.debug("Streamer::__init__(): Constructing pipeline")

        pipe = gst.Pipeline("pipeline")

        # Serializes access to streamer state from bus callbacks.
        self.lock = BoundedSemaphore(value=1)

        # GLib main loop in its own thread so bus signals get delivered.
        self.mainloop_thread = MainLoop()
        self.mainloop_thread.start()

        self.stream = stream
        self.servers = {}           # server_id -> tail bin
        self.tees = {}              # quality -> post-encode tee
        self.encodebins = {}        # quality -> EncodeBin
        self.quality_tee_srcs = {}  # quality -> request pad on quality_tee
        self.tailbins = []
        self.playing = False
        self.streamname = stream["name"]
        self.description = stream["description"]
        self.genre = ", ".join(stream["genres"])

        # mix audio
        adder = gst.element_factory_make("adder")
        self.quality_tee = gst.element_factory_make("tee")
        # queue = gst.element_factory_make("queue")

        pipe.add(adder, self.quality_tee)
        gst.element_link_many(adder, self.quality_tee)

        self.pipe = pipe
        # Watch the bus for state changes, EOS, errors and tags.
        self.bus = pipe.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message::state-changed',
                         self.on_message_state_changed)
        self.bus.connect('message::eos', self.on_eos)
        self.bus.connect('message::error', self.on_error)
        self.bus.connect("message::tag", self.on_tag)
        # self.bus.connect('message', self.on_message)
        self.adder = adder

        # A permanent silent input keeps the adder producing output even
        # when no track is linked.
        quietbin = QuietBin(self)
        sink = self.adder.get_request_pad("sink%d")
        quietbin.get_pad("src").link(sink)

        for quality in stream["quality"]:
            # this has to be to ensure double from database work as expected and key is found
            quality = float(quality)

            # if quality == min(stream["quality"]):
            eb = EncodeBin(self, quality)

            # quality_tee -> encode bin for this quality.
            quality_tee_src = self.quality_tee.get_request_pad("src%d")
            quality_tee_src.link(eb.get_pad("sink"))

            self.quality_tee_srcs[quality] = quality_tee_src

            # encode bin -> per-quality fan-out tee.
            tee = gst.element_factory_make("tee")
            pipe.add(tee)

            sink = tee.get_pad("sink")
            src = eb.get_pad("src")
            src.link(sink)

            self.tees[quality] = tee
            self.encodebins[quality] = eb

            # The lowest quality's tee doubles as self.tee.
            if quality == min(stream["quality"]):
                self.tee = tee

            self.scale(None, quality, sync=1, init=True)

    def __on_dynamic_pad(self, demuxer, pad, link=None):
        """Link a dynamically-added demuxer pad to `link`'s sink pad."""
        pad.link(link.get_pad("sink"))

    # def on_message(self, bus, message):
    #
    #     print message

    def on_tag(self, bus, message):
        """Tag messages are currently ignored (see disabled code below)."""
        pass
        # if self.playing:
        #
        #     self.lock.acquire()
        #     # self.encodebins[float(1)].on_tag()
        #     # for tailbin in self.tailbins:
        #     #     tailbin.on_tag()
        #     self.lock.release()

    def set_metadata(self, artist=" ", title=" ", album=" "):
        """Intentionally a no-op; the tag-event mechanism is disabled."""
        pass
        return
        # print "setting metadata "+artist+" "+title+" "+album
        #
        # self.tags = gst.TagList()
        #
        # self.tags[gst.TAG_ARTIST] = artist
        # self.tags[gst.TAG_TITLE] = title
        # self.tags[gst.TAG_ALBUM] = album
        #
        # event = gst.event_new_tag(self.tags)
        # self.pipe.send_event(event)

    def on_error(self, bus, message):
        """Drop the failing shout2send server from the pipeline.

        NOTE(review): the lock is not released if an exception occurs in
        the loop body; consider try/finally.
        """
        if message.src.get_name().startswith("shout2send"):
            # print "ON ERROR"
            self.lock.acquire()
            for server in self.servers:
                if self.servers[server].shout2send == message.src:
                    tailbin = self.servers[server]
                    server_id = unicode(tailbin.server["_id"])
                    res = tailbin.unlink_and_dispose()
                    if res:
                        del self.servers[server_id]
                    break

            self.lock.release()

    def on_message_state_changed(self, bus, message):
        """Announce 'streamer_initialized' once the first tee is playing."""
        old_state, new_state, pending = message.parse_state_changed()
        # print str(message.src.get_name())+": "+str(old_state)+" ---> "+str(new_state)

        # Only react to the first tee in insertion order.
        if message.src != self.tees.itervalues().next():
            return

        old_state, new_state, pending = message.parse_state_changed()

        if new_state == gst.STATE_PLAYING and not self.playing:
            self.playing = True
            self.send({"signal": 'streamer_initialized', "streamer": self})

    def on_eos(self, bus, message):
        """Forward the pipeline's end-of-stream as an 'eos' signal."""

        logging.debug("Streamer::run(): End of stream")
        self.send({"signal": "eos"})
Exemplo n.º 12
0
class BaseScheduler(Base):
    """Schedules tracks for a player: builds a playlist (shuffled or
    sequential) from the program's groups, keeps a look-ahead Buffer, and
    reacts to playlist/program updates.

    All @asynchronous methods are generator-style coroutines driven via
    ``yield task(...)`` and report their result through ``respond``.
    """

    @asynchronous
    def init(self, player, respond=None):
        """Initial setup: load groups/fade/selection, build the playlist,
        the Buffer, and the first stream bin."""
        super(BaseScheduler, self).__init__()

        self.playing = True
        self.player = player
        self.last_artist = None
        self.last_stop_unused = time.time()

        self.observers = UpdateObservers()

        # db = Db()
        # self.db = db

        self.tracks_selector = TracksSelector(self.player.stream["_id"])
        self.history = History(observers=self.observers)
        self.program_id = unicode(self.player.stream["default_program_id"])

        self.groups = yield task(self.call,
                                 self.get_default_group_ids,
                                 program_id=self.program_id)

        self.fade = yield task(self.call,
                               self.get_fade_length,
                               program_id=self.program_id)

        # Guards playlist/buffer swaps against concurrent observers.
        self.playlist_lock = BoundedSemaphore(value=1)
        self.playlist_lock.acquire()
        selection = yield task(self.call,
                               self.get_selection,
                               program_id=self.program_id)

        # Shuffle mode gets a RandomPlaylist seeded with play history;
        # anything else gets an ordered Playlist.
        if selection == "shuffle":
            tracks = yield task(self.call,
                                self.tracks_selector.select_by_groups,
                                groups=self.groups,
                                sorted=False)
            print tracks
            self.playlist = RandomPlaylist(tracks, observers=self.observers)
            self.playlist.apply_history(self.history)
        else:
            tracks = yield task(self.call,
                                self.tracks_selector.select_by_groups,
                                groups=self.groups,
                                sorted=True)
            self.playlist = Playlist(tracks, observers=self.observers)

        self.buffer = Buffer(self.player.stream["_id"],
                             self.history,
                             self.playlist,
                             observers=self.observers)

        self.playlist_lock.release()

        track = self.buffer.get_track()
        file_id = track["file_id"]

        # First track source comes straight from GridFS.
        filesrc = GridFSSource(file_id)
        if self.__class__.__name__ == "EncodedScheduler":
            self.stream_bin = EncodedStreamBin(self.player, filesrc, self)
        else:
            self.stream_bin = StreamBin(self.player, filesrc, self)

        self.stream_bin.link_and_unblock()

        respond("bitch, yo!")

    @asynchronous
    def playlist_update(self, group, respond):
        """Rebuild playlist and buffer when one of our groups changed."""
        if group in self.groups:

            self.playlist_lock.acquire()
            selection = yield task(self.call,
                                   self.get_selection,
                                   program_id=self.program_id)

            if selection == "shuffle":
                tracks = yield task(self.call,
                                    self.tracks_selector.select_by_groups,
                                    groups=self.groups,
                                    sorted=False)
                self.playlist = RandomPlaylist(tracks,
                                               observers=self.observers)
                self.playlist.apply_history(self.history)
            else:
                # Preserve the current position across the rebuild if the
                # old playlist exposes one.
                try:
                    current_track = self.playlist.current_track
                except:
                    current_track = 0
                tracks = yield task(self.call,
                                    self.tracks_selector.select_by_groups,
                                    groups=self.groups,
                                    sorted=True)
                self.playlist = Playlist(tracks, observers=self.observers)
                self.playlist.set_current_track_if_possible(current_track)
            # Carry the currently-playing track over into the new Buffer.
            if hasattr(self.buffer, 'current_track_serialized'):
                current_track_serialized = self.buffer.current_track_serialized
            else:
                current_track_serialized = None
            self.buffer = Buffer(self.player.stream["_id"],
                                 self.history,
                                 self.playlist,
                                 observers=self.observers)
            if current_track_serialized is not None:
                self.buffer.current_track_serialized = current_track_serialized
            self.playlist_lock.release()

        respond({"msg": "OK"})

    @asynchronous
    def register_updates_observer(self, handler, respond):
        """Register an observer and push it the current state snapshots."""

        self.playlist_lock.acquire()
        self.observers.register(handler)
        yield task(self.call, self.buffer.notify_current_track, unblock=True)
        yield task(self.call, self.buffer.notify_buffer_update, unblock=True)
        yield task(self.call,
                   self.history.notify_previous_update,
                   unblock=True)
        self.playlist_lock.release()

        respond({
            "msg": "Registering observer",
            "server_time": int(time.time())
        })

    @asynchronous
    def unregister_updates_observer(self, handler, respond):
        """Detach an observer previously added via register_updates_observer."""
        self.observers.unregister(handler)
        handler = None
        respond({"msg": "Unregistering observer"})

    @asynchronous
    def update_buffer(self, buffer, respond):
        """Replace the buffer contents with an explicit list of track ids."""

        self.playlist_lock.acquire()
        tracks = yield task(self.call,
                            self.tracks_selector.select_by_ids,
                            ids=buffer["buffer"])
        self.buffer.update(tracks)

        yield task(self.call, self.buffer.notify_buffer_update, unblock=True)
        yield task(self.call,
                   self.history.notify_previous_update,
                   unblock=True)

        self.playlist_lock.release()

        respond({"msg": "OK"})

    @asynchronous
    def change_selection(self, respond):
        """Toggle shuffle <-> sequence in the DB and rebuild the playlist."""

        selection = yield task(self.call,
                               self.get_selection,
                               program_id=self.program_id)

        if selection == "shuffle":
            selection = "sequence"
        else:
            selection = "shuffle"

        yield task(self.query, self.db.programs.update,
                   {"_id": ObjectId(self.program_id)},
                   {"$set": {
                       "selection": selection
                   }})

        yield task(self.call, self.playlist_update, group=self.groups[0])
        yield task(self.call, self.buffer.notify_buffer_update, unblock=True)
        respond({"msg": "OK"})

    def next(self):
        """Skip to the next track by signalling EOS on the stream bin."""
        self.__stream_eos(self.stream_bin)
        return {"msg": "OK"}

    def trigger_rescale(self):
        """Rate-limit rescaling: True at most once every `trig_mins` minutes."""
        t = time.time() - self.last_stop_unused
        trig_mins = 5
        # 1 minute
        if t > (60 * trig_mins):
            self.last_stop_unused = time.time()
            return True
        else:
            return False

    def is_stoped(self):
        # (sic) -- name kept for existing callers.
        return not self.playing

    @asynchronous
    def get_selection(self, program_id, respond):
        """Respond with the program's "selection" field, default "sequence"."""
        program = yield task(self.query, self.db.programs.find_one,
                             {"_id": ObjectId(program_id)}, {
                                 "_id": 0,
                                 "selection": 1
                             })

        if program is not None and "selection" in program:
            respond(program["selection"])
        else:
            respond("sequence")

    @asynchronous
    def get_fade_length(self, program_id, respond):
        """Respond with the program's fade_in/fade_out settings.

        NOTE(review): falls back to the string "sequence" when the program
        is missing, mirroring get_selection -- looks like a copy/paste
        leftover; confirm intended fallback.
        """
        group = yield task(self.query, self.db.programs.find_one,
                           {"_id": ObjectId(program_id)}, {
                               "_id": 0,
                               "fade_in": 1,
                               "fade_out": 1
                           })

        if group is not None:
            respond(group)
        else:
            respond("sequence")

    @asynchronous
    def get_default_group_ids(self, program_id, respond):
        """Respond with the program's list of group ids."""

        program = yield task(self.query, self.db.programs.find_one,
                             {"_id": ObjectId(program_id)}, {
                                 "_id": 0,
                                 "groups": 1
                             })

        respond(program["groups"])

    def print_playlist(self):
        """Debug helper: dump buffer and playlist contents."""

        self.buffer.show()
        self.playlist.show()

        return {"msg": "OK"}
Exemplo n.º 13
0
class RandomPlaylist(Randomizer):
    def __init__(self, medias, observers):
        self.artists = OrderedDict({})
        self.history = None
        self.lock = BoundedSemaphore(value=1)

        for media in medias:
            self.__add(media)

        # self.show()
        self.shuffle()

    def __add(self, media, played=0):
        self.lock.acquire()
        if not media["artist"] in self.artists:
            artist = Artist(media["artist"])
            artist.parent = self
            self.artists[media["artist"]] = artist
        else:
            artist = self.artists[media["artist"]]

        if not media["album"] in self.artists[media["artist"]].albums:
            album = Album(media["album"])
            album.parent = artist
            self.artists[media["artist"]].albums[media["album"]] = album
        else:
            album = self.artists[media["artist"]].albums[media["album"]]

        if not media["_id"] in self.artists[media["artist"]].albums[
                media["album"]].tracks:
            media_id = unicode(media["_id"])
            track = Track(media_id, media["file_id"], media["title"],
                          media["tags"]["duration"], media)
            track.played = played
            track.parent = album
            self.artists[media["artist"]].albums[
                media["album"]].tracks[media_id] = track

            self.artists[media["artist"]].tracks_count += 1
            self.artists[media["artist"]].played += played
            self.artists[media["artist"]].albums[
                media["album"]].tracks_count += 1
            self.artists[media["artist"]].albums[
                media["album"]].played += played

            for i in range(played):
                track.play()
        self.lock.release()

    def shuffle(self):
        self.randomize_ordered_dict(self.artists)
        for artist in self.artists:
            self.artists[artist].shuffle()

    def apply_history(self, history):
        """
        Try to find track and play it so it would not be played next time
        """
        with history.q.mutex:
            items = list(history.q.queue)

        for media in items:
            if media["artist"] in self.artists:
                artist = self.artists[media["artist"]]
            else:
                continue

            if media["album"] in artist.albums:
                album = artist.albums[media["album"]]
            else:
                continue

            if media["_id"] in album.tracks:
                track = artist.albums[media["album"]].tracks[media["_id"]]
            else:
                continue

            track.play()

        self.history = history

    def get_track(self):

        self.lock.acquire()
        artist = self.get_first(self.artists.items())
        if artist is None:
            return None
        artist = self.artists.items()[0][1]

        album = self.get_first(artist.albums.items())
        if album is None:
            return None
        album = artist.albums.items()[0][1]

        track = self.get_first(album.tracks.items())
        if track is None:
            return None
        track = album.tracks.items()[0][1]

        # print len(self.artists)
        # self.show()
        # print "choosing "+str(track.id)

        # print "artist play index "+str(track.parent.parent.play_index)+" album play index "+str(track.parent.play_index)+" track played"+str(track.played)

        play = track.play()
        self.lock.release()
        return play

    def add_track(self, media):

        artist = self.get_first(self.artists.items())
        if artist is None:
            return None
        artist = self.artists.items()[0][1]

        album = self.get_first(artist.albums.items())
        if album is None:
            return None
        album = artist.albums.items()[0][1]

        track = self.get_first(album.tracks.items())
        if track is None:
            return None
        track = album.tracks.items()[0][1]

        self.__add(media, track.played)

    def get_first(self, iterable, default=None):
        if iterable:
            for item in iterable:
                return item
        return default

    def show(self):
        for artist in self.artists:
            print "-- Artist " + unicode(
                self.artists[artist].played) + "/" + unicode(
                    self.artists[artist].tracks_count) + "=" + unicode(
                        self.artists[artist].play_index
                    ) + self.artists[artist].name
            for album in self.artists[artist].albums:
                print "---- Album " + unicode(
                    self.artists[artist].albums[album].played) + "/" + unicode(
                        self.artists[artist].albums[album].tracks_count
                    ) + "=" + unicode(self.artists[artist].albums[album].
                                      play_index) + " " + album
                for track in self.artists[artist].albums[album].tracks:
                    print "------ Track " + unicode(
                        self.artists[artist].albums[album].tracks[track].played
                    ) + " " + self.artists[artist].albums[album].tracks[
                        track].id

    def update(self):
        self.artists = OrderedDict(
            sorted(self.artists.items(), key=lambda t: t[1].play_index))
Exemplo n.º 14
0
class Buffer(Base):

    queue = OrderedDict({})

    def __init__(self, stream_id, history, playlist, observers):
        super(Buffer, self).__init__()

        self.stream_id = stream_id
        self.observers = observers
        self.history = history
        self.playlist = playlist
        self.queue = OrderedDict({})
        self.lock = BoundedSemaphore(value=1)

        while not self.is_full():
            self.lock.acquire()
            t = self.playlist.get_track()
            self.__add_track(t)

            self.lock.release()

    def update(self, medias):
        self.lock.acquire()
        del self.queue
        self.queue = OrderedDict({})
        for media in medias:
            self.__add_track({
                "artist": media["artist"],
                "album": media["album"],
                "_id": media["_id"],
                "file_id": media["file_id"],
                "title": media["title"],
                "duration": media["tags"]["duration"],
            })
        self.lock.release()

        return True

    def get_track(self):
        self.lock.acquire()

        while not self.is_full():
            self.__add_track(self.playlist.get_track())

        items = self.queue.items()
        track = self.get_first(items)
        if not track:
            return None
        key = track[0]
        track = track[1]
        self.current_track_serialized = track
        self.current_track_serialized["_id"] = unicode(
            self.current_track_serialized["_id"])
        self.current_track_serialized["started"] = time.time()
        self.current_track_serialized["position"] = 0
        # self.observers.notify({
        #     "playing": self.current_track_serialized
        # })
        del self.queue[key]

        self.history.add(track)
        self.lock.release()
        return track

    @asynchronous
    def notify_current_track(self, respond):
        if hasattr(self, 'current_track_serialized'):
            self.current_track_serialized["position"] = time.time(
            ) - self.current_track_serialized["started"]
            yield task(self.call,
                       self.observers.notify,
                       message={"playing": self.current_track_serialized},
                       unblock=True)

            # yield task(self.query, self.db.streams.update, {  "_id": ObjectId(self.stream_id) },
            #            {  "$set": {"current": self.current_track_serialized} })

        respond(None)

    def get_first(self, iterable, default=None):
        if iterable:
            for item in iterable:
                return item
        return default

    def __add_track(self, track):
        id = self.__get_id()
        self.queue[id] = track
        return id

    def is_full(self):
        return len(self.queue) >= 6

    def __get_id(self):
        # now = datetime.datetime.now()
        return uuid.uuid4()

    def __wait_until(self, some_predicate, timeout, period=0.25):
        must_end = time.time() + timeout
        while time.time() < must_end:
            if some_predicate(): return True
            time.sleep(period)
        return False

    @asynchronous
    def notify_buffer_update(self, respond):
        buffer = []
        for q in self.queue:
            track_serialized = self.queue[q]
            track_serialized["_id"] = unicode(track_serialized["_id"])
            buffer.append(self.queue[q])

        if self.playlist.__class__.__name__ == "RandomPlaylist":
            selection = "shuffle"
        else:
            selection = "sequence"

        yield task(self.call,
                   self.observers.notify,
                   message={
                       "buffer_update": buffer,
                       "selection": selection
                   },
                   unblock=True)
        respond(None)

    def show(self):
        for q in self.queue:
            print unicode(self.queue[q])
Exemplo n.º 15
0
class StreamManagement(Base, Singleton):
    """Singleton front controller for every stream on this pipeline server.

    Registers the server in the `servers` collection, maps incoming
    control signals to per-stream operations, and serialises stream
    start/stop with a semaphore so only one such command runs at a time.
    Note the cross-method lock protocol: start_streaming() acquires
    self.lock and it is released either there (on error) or later in
    __streamer_initialized() once the pipeline is up.
    """

    # Combine the metaclasses of Base and Singleton so both cooperate.
    __metaclass__ = classmaker()

    @staticmethod
    def aspects():
        """Return the AOP advice applied before the handler methods."""

        # Normalize the `stream` kwarg (ObjectId or str) to unicode so it
        # can be used as a key into self.streams.
        def stream_to_unicode(*args, **kwargs):
            if "stream" in kwargs:
                kwargs["stream"] = unicode(kwargs["stream"])
            return kwargs

        aspect1 = {"pointcut": ".*", "advise": {"before": stream_to_unicode}}

        # Reject calls that reference an unknown stream -- applies to every
        # handler except start_streaming, which is allowed to create one.
        def no_such_stream(*args, **kwargs):
            if kwargs["stream"] not in kwargs["self"].streams:
                kwargs["respond"]({"error": "No such stream"})
                return Call.stop
            else:
                return Call.proceed

        aspect2 = {
            "pointcut": "^(?!((start_streaming))$).*",
            "advise": {
                "before": no_such_stream
            }
        }

        # Conversely, refuse to start a stream that is already running.
        def stream_exists(*args, **kwargs):
            if kwargs["stream"] in kwargs["self"].streams:
                kwargs["respond"]({"error": "Stream exists"})
                return Call.stop
            else:
                return Call.proceed

        aspect3 = {
            "pointcut": "^start_streaming$",
            "advise": {
                "before": stream_exists
            }
        }

        return [aspect1, aspect2, aspect3]

    def __init__(self, port):
        """Register this pipeline server in the database and wire up the
        signal handlers for stream control commands.

        port -- TCP port this server listens on; together with the local
        IP it forms the identity used for the upsert below.
        """
        super(StreamManagement, self).__init__()
        self.streams = {}

        # one command at the time
        self.lock = BoundedSemaphore(value=1)

        local_ip = Helpers.ip.local_ip()
        public_ip = Helpers.ip.public_ip()

        # Upsert this server's registration (legacy pymongo update() with a
        # full replacement document, keyed by type/local_ip/port).
        self.db.servers.update(
            {
                "type": "pipeline",
                "local_ip": local_ip,
                "port": port
            }, {
                "level": float(0),
                "type": "pipeline",
                "local_ip": local_ip,
                "public_ip": public_ip,
                "port": port,
                "down": False
            },
            upsert=True)
        # Read the record back to learn its _id.
        server = self.db.servers.find_one(
            {
                "type": "pipeline",
                "local_ip": local_ip,
                "port": port
            }, {
                "_id": 1,
            })
        # db = Db()
        # server = db.conn.Server.find_one({"type": "pipeline", "local_ip": local_ip, "port": port})
        # if server == None:
        #     server = db.conn.Server()
        #
        # server["level"] = float(0)
        # server["type"] = "pipeline"
        # server["local_ip"] = local_ip
        # server["public_ip"] = public_ip
        # server["port"] = port
        # server["down"] = False
        # server.save()

        self.pipeline_server_id = unicode(server["_id"])

        # Start the load-reporting/balancing thread for this server.
        PipelineLoadBalancer(self.pipeline_server_id).start()

        # self.db = db

        Helpers.globals.set_id_pipeline(server["_id"])

        # Map external control signals onto the handler methods below.
        self.connect(handler=self.dump_dot_file, signal="dump_dot_file")
        self.connect(handler=self.playlist_update, signal="playlist_update")
        self.connect(handler=self.change_selection, signal="change_selection")
        self.connect(handler=self.next, signal="next")
        self.connect(handler=self.is_alive, signal="is_alive")
        self.connect(handler=self.scale_streaming, signal="scale")
        self.connect(handler=self.start_streaming, signal="start")
        self.connect(handler=self.print_playlist, signal="print_playlist")
        self.connect(handler=self.register_updates_observer,
                     signal="register_updates_observer")
        self.connect(handler=self.notify_current_track,
                     signal="notify_current_track")
        self.connect(handler=self.unregister_updates_observer,
                     signal="unregister_updates_observer")
        self.connect(handler=self.update_buffer, signal="update_buffer")
        self.connect(handler=self.start_live, signal="start_live")
        self.connect(handler=self.stop_streaming, signal="stop")
        self.connect(handler=self.rescale_streaming, signal="rescale")
        self.connect(handler=self.__streamer_initialized,
                     signal="streamer_initialized")

    @in_context([])
    def __streamer_initialized(self, streamer, respond):
        """Register a freshly initialized streamer, release the start lock
        acquired in start_streaming(), and mark the stream as playing."""
        logging.debug("__streamer_initialized(): New initialized streamer")
        self.streams[unicode(streamer.stream["_id"])] = streamer
        self.lock.release()
        yield task(self.query,
                   self.db.streams.update, {"_id": streamer.stream["_id"]},
                   {"$set": {
                       "status": "playing"
                   }},
                   upsert=False,
                   multi=False)
        respond('OK')

    @in_context(["StreamManagement"])
    def help(self, respond):
        """Respond with the '#'-prefixed help lines from README.TXT."""
        pr = ""
        with open('README.TXT', 'r') as content_file:
            lines = content_file.readlines()
        for line in lines:
            if line.startswith("#"):
                pr += line[1:]
        del lines
        print pr
        respond({"msg": pr})

    @in_context(["StreamManagement"])
    def start_streaming(self, stream, quality, respond):
        """Start a stream: load its record and run the matching start
        command (re-encoding vs. passthrough).

        The lock taken here is released below on error, otherwise in
        __streamer_initialized() once the pipeline reports ready.
        """
        self.lock.acquire()

        stream = yield task(self.query, self.db.streams.find_one,
                            {"_id": ObjectId(stream)}, {
                                "_id": 1,
                                "reencoding": 1,
                                "user_id": 1,
                                "name": 1,
                                "description": 1,
                                "genres": 1,
                                "quality": 1,
                                "default_program_id": 1
                            })

        if stream["reencoding"]:

            result = yield task(
                self.call,
                StartStreaming(self, self.pipeline_server_id, stream,
                               quality).run)

        else:
            result = yield task(
                self.call,
                StartEncodedStreaming(self, self.pipeline_server_id,
                                      stream).run)

        if "error" in result:
            self.lock.release()

        respond(result)

    @in_context(["StreamManagement"])
    def is_alive(self, stream, respond):
        """Respond with the streamer's liveness check result."""
        result = yield task(
            self.call,
            IsStreamingAlive(self, stream, streamer=self.streams[stream]).run)
        respond(result)

    @in_context(["StreamManagement"])
    def playlist_update(self, stream, group, respond):
        """Forward a playlist refresh for `group` to the stream's scheduler."""
        result = yield task(self.call,
                            self.streams[stream].scheduler.playlist_update,
                            group=group)
        respond(result)

    @in_context(["StreamManagement"])
    def change_selection(self, stream, respond):
        """Toggle the scheduler's selection mode (sequence/shuffle)."""
        result = yield task(self.call,
                            self.streams[stream].scheduler.change_selection)
        respond(result)

    @in_context(["StreamManagement"])
    def next(self, stream, respond, fade_in=None, fade_out=None):
        """Skip to the next track, optionally with fade lengths."""
        result = yield task(
            self.call,
            NextTrack(streamer=self.streams[stream],
                      fade_in=fade_in,
                      fade_out=fade_out).run)
        respond(result)

    @in_context(["StreamManagement"])
    def stop_streaming(self, stream, respond):
        """Stop a running stream; serialized via the command lock."""
        self.lock.acquire()
        result = yield task(
            self.call,
            StopStreaming(self, stream, streamer=self.streams[stream]).run)
        self.lock.release()
        respond(result)

    @in_context(["StreamManagement"])
    def scale_streaming(self, stream, respond, quality=None):
        """Add serving capacity for a stream (optionally one quality)."""
        result = yield task(
            self.call,
            ScaleStreaming(stream,
                           quality=quality,
                           streamer=self.streams[stream]).run)
        respond(result)

    @in_context(["StreamManagement"])
    def rescale_streaming(self, stream, respond, stop=True):
        """Rebalance a stream across servers based on its current count."""
        result = yield task(
            self.call,
            RescaleStreaming(stream,
                             streamer=self.streams[stream],
                             stop=stop,
                             stream_servers=len(
                                 self.streams[stream].servers.items())).run)

        respond(result)

    @in_context(["StreamManagement"])
    def start_live(self, stream, appsrc, respond, loop=None):
        """Switch the stream to a live source fed through `appsrc`."""
        result = yield task(
            self.call,
            LiveStreaming(stream,
                          streamer=self.streams[stream],
                          appsrc=appsrc,
                          loop=loop).run)
        respond(result)

    @in_context(["StreamManagement"])
    def register_updates_observer(self, stream, handler, respond):
        """Subscribe `handler` to the stream's scheduler updates."""
        result = yield task(
            self.call,
            self.streams[stream].scheduler.register_updates_observer,
            handler=handler)
        respond(result)

    @in_context(["StreamManagement"])
    def notify_current_track(self, stream, respond):
        """Ask the stream's buffer to broadcast the current track."""
        result = yield task(
            self.call,
            self.streams[stream].scheduler.buffer.notify_current_track)
        respond(result)

    @in_context(["StreamManagement"])
    def unregister_updates_observer(self, stream, handler, respond):
        """Unsubscribe `handler` from the stream's scheduler updates."""
        result = yield task(
            self.call,
            self.streams[stream].scheduler.unregister_updates_observer,
            handler=handler)
        respond(result)

    @in_context(["StreamManagement"])
    def update_buffer(self, stream, buffer, respond):
        """Replace the stream's look-ahead buffer with `buffer`."""
        result = yield task(self.call,
                            self.streams[stream].scheduler.update_buffer,
                            buffer=buffer,
                            unblock=True)
        respond(result)

    @in_context(["StreamManagement"])
    def dump_dot_file(self, stream, respond):
        """Dump the stream's GStreamer pipeline graph to a dot file."""
        result = yield task(self.call,
                            self.streams[stream].dump_dot_file,
                            unblock=True)
        respond({"msg": "OK", "result": result})

    @in_context(["StreamManagement"])
    def print_playlist(self, stream, respond):
        """Print the stream's buffer and playlist to stdout."""
        result = self.streams[stream].scheduler.print_playlist()
        respond(result)

    @in_context(["StreamManagement"])
    def run_command(self, command, respond):
        """Parse and execute a textual terminal command."""
        terminal = Terminal(self)
        result = terminal.parse_and_execute(command)
        respond(result)

    def __results_to_dict(self, results):
        # Materialize a cursor/iterable of results into a plain list.
        res_arr = []
        for result in results:
            res_arr.append(result)
        return res_arr
Exemplo n.º 16
0
    def __init__(self, port):
        """Register this pipeline server in the database and wire up the
        signal handlers for stream control commands.

        port -- TCP port this server listens on; together with the local
        IP it forms the identity used for the upsert below.
        """
        super(StreamManagement, self).__init__()
        self.streams = {}

        # one command at the time
        self.lock = BoundedSemaphore(value=1)

        local_ip = Helpers.ip.local_ip()
        public_ip = Helpers.ip.public_ip()

        # Upsert this server's registration (legacy pymongo update() with a
        # full replacement document, keyed by type/local_ip/port).
        self.db.servers.update(
            {
                "type": "pipeline",
                "local_ip": local_ip,
                "port": port
            }, {
                "level": float(0),
                "type": "pipeline",
                "local_ip": local_ip,
                "public_ip": public_ip,
                "port": port,
                "down": False
            },
            upsert=True)
        # Read the record back to learn its _id.
        server = self.db.servers.find_one(
            {
                "type": "pipeline",
                "local_ip": local_ip,
                "port": port
            }, {
                "_id": 1,
            })
        # db = Db()
        # server = db.conn.Server.find_one({"type": "pipeline", "local_ip": local_ip, "port": port})
        # if server == None:
        #     server = db.conn.Server()
        #
        # server["level"] = float(0)
        # server["type"] = "pipeline"
        # server["local_ip"] = local_ip
        # server["public_ip"] = public_ip
        # server["port"] = port
        # server["down"] = False
        # server.save()

        self.pipeline_server_id = unicode(server["_id"])

        # Start the load-reporting/balancing thread for this server.
        PipelineLoadBalancer(self.pipeline_server_id).start()

        # self.db = db

        Helpers.globals.set_id_pipeline(server["_id"])

        # Map external control signals onto the handler methods.
        self.connect(handler=self.dump_dot_file, signal="dump_dot_file")
        self.connect(handler=self.playlist_update, signal="playlist_update")
        self.connect(handler=self.change_selection, signal="change_selection")
        self.connect(handler=self.next, signal="next")
        self.connect(handler=self.is_alive, signal="is_alive")
        self.connect(handler=self.scale_streaming, signal="scale")
        self.connect(handler=self.start_streaming, signal="start")
        self.connect(handler=self.print_playlist, signal="print_playlist")
        self.connect(handler=self.register_updates_observer,
                     signal="register_updates_observer")
        self.connect(handler=self.notify_current_track,
                     signal="notify_current_track")
        self.connect(handler=self.unregister_updates_observer,
                     signal="unregister_updates_observer")
        self.connect(handler=self.update_buffer, signal="update_buffer")
        self.connect(handler=self.start_live, signal="start_live")
        self.connect(handler=self.stop_streaming, signal="stop")
        self.connect(handler=self.rescale_streaming, signal="rescale")
        self.connect(handler=self.__streamer_initialized,
                     signal="streamer_initialized")
Exemplo n.º 17
0
    def init(self, player, respond=None):
        """Asynchronously bootstrap the scheduler for `player`'s stream:
        load the default program's settings, build the playlist and the
        look-ahead buffer, then start the first track's stream bin.

        player  -- streamer that owns this scheduler (provides .stream).
        respond -- callback receiving an acknowledgement when done.
        """
        super(BaseScheduler, self).__init__()

        self.playing = True
        self.player = player
        self.last_artist = None
        self.last_stop_unused = time.time()

        self.observers = UpdateObservers()

        # db = Db()
        # self.db = db

        self.tracks_selector = TracksSelector(self.player.stream["_id"])
        self.history = History(observers=self.observers)
        self.program_id = unicode(self.player.stream["default_program_id"])

        # Resolve scheduling settings from the stream's default program.
        self.groups = yield task(self.call,
                                 self.get_default_group_ids,
                                 program_id=self.program_id)

        self.fade = yield task(self.call,
                               self.get_fade_length,
                               program_id=self.program_id)

        # Hold the playlist lock while playlist and buffer are being built.
        self.playlist_lock = BoundedSemaphore(value=1)
        self.playlist_lock.acquire()
        selection = yield task(self.call,
                               self.get_selection,
                               program_id=self.program_id)

        # "shuffle" -> weighted-random playlist seeded with play history;
        # anything else -> plain sequential playlist of sorted tracks.
        if selection == "shuffle":
            tracks = yield task(self.call,
                                self.tracks_selector.select_by_groups,
                                groups=self.groups,
                                sorted=False)
            print tracks
            self.playlist = RandomPlaylist(tracks, observers=self.observers)
            self.playlist.apply_history(self.history)
        else:
            tracks = yield task(self.call,
                                self.tracks_selector.select_by_groups,
                                groups=self.groups,
                                sorted=True)
            self.playlist = Playlist(tracks, observers=self.observers)

        self.buffer = Buffer(self.player.stream["_id"],
                             self.history,
                             self.playlist,
                             observers=self.observers)

        self.playlist_lock.release()

        # Prime playback with the first buffered track.
        track = self.buffer.get_track()
        file_id = track["file_id"]

        filesrc = GridFSSource(file_id)
        if self.__class__.__name__ == "EncodedScheduler":
            self.stream_bin = EncodedStreamBin(self.player, filesrc, self)
        else:
            self.stream_bin = StreamBin(self.player, filesrc, self)

        self.stream_bin.link_and_unblock()

        # NOTE(review): placeholder acknowledgement string; confirm callers
        # ignore the payload before cleaning it up.
        respond("bitch, yo!")
Exemplo n.º 18
0
    def __init__(self, stream):
        """Build the GStreamer pipeline for a re-encoding streamer.

        Topology: adder (mixes audio inputs) -> quality_tee -> one
        EncodeBin per configured quality -> a per-quality tee where the
        per-server tailbins attach via scale().

        stream -- stream document providing name/description/genres and
        the list of output qualities.
        """
        super(Streamer, self).__init__()

        logging.debug("Streamer::__init__(): Constructing pipeline")

        pipe = gst.Pipeline("pipeline")

        self.lock = BoundedSemaphore(value=1)

        # Dedicated GLib main loop so bus signal watches get dispatched.
        self.mainloop_thread = MainLoop()
        self.mainloop_thread.start()

        self.stream = stream
        self.servers = {}
        self.tees = {}
        self.encodebins = {}
        self.quality_tee_srcs = {}
        self.tailbins = []
        self.playing = False
        self.streamname = stream["name"]
        self.description = stream["description"]
        self.genre = ", ".join(stream["genres"])

        # mix audio
        adder = gst.element_factory_make("adder")
        self.quality_tee = gst.element_factory_make("tee")
        # queue = gst.element_factory_make("queue")

        pipe.add(adder, self.quality_tee)
        gst.element_link_many(adder, self.quality_tee)

        self.pipe = pipe
        self.bus = pipe.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message::state-changed',
                         self.on_message_state_changed)
        self.bus.connect('message::eos', self.on_eos)
        self.bus.connect('message::error', self.on_error)
        self.bus.connect("message::tag", self.on_tag)
        # self.bus.connect('message', self.on_message)
        self.adder = adder

        # QuietBin presumably feeds silence so the adder always has input
        # even with no track linked -- TODO confirm against its definition.
        quietbin = QuietBin(self)
        sink = self.adder.get_request_pad("sink%d")
        quietbin.get_pad("src").link(sink)

        for quality in stream["quality"]:
            # this has to be to ensure double from database work as expected and key is found
            quality = float(quality)

            # if quality == min(stream["quality"]):
            eb = EncodeBin(self, quality)

            quality_tee_src = self.quality_tee.get_request_pad("src%d")
            quality_tee_src.link(eb.get_pad("sink"))

            self.quality_tee_srcs[quality] = quality_tee_src

            tee = gst.element_factory_make("tee")
            pipe.add(tee)

            sink = tee.get_pad("sink")
            src = eb.get_pad("src")
            src.link(sink)

            self.tees[quality] = tee
            self.encodebins[quality] = eb

            # The lowest quality's tee doubles as the default attachment
            # point exposed as self.tee.
            if quality == min(stream["quality"]):
                self.tee = tee

            # Attach the initial server sink(s) for this quality.
            self.scale(None, quality, sync=1, init=True)
Exemplo n.º 19
0
class Queue(object):
    """FIFO queue for use across processes, backed by a pipe and a feeder
    thread (a copy of multiprocessing.queues.Queue).

    This block was a decompiled copy whose asserts were garbled: put()
    never acquired the capacity semaphore (so the queue never blocked,
    never raised Full, and qsize()/full() were wrong) and join_thread()
    could never call the finalizer.  Both are restored to the CPython
    semantics below; everything else is unchanged.
    """

    def __init__(self, maxsize = 0):
        if maxsize <= 0:
            maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
        self._maxsize = maxsize
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            # Windows pipe writes are atomic; no writer lock needed.
            self._wlock = None
        else:
            self._wlock = Lock()
        # Counts free slots; acquired by put(), released by get().
        self._sem = BoundedSemaphore(maxsize)
        self._after_fork()
        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)

    def __getstate__(self):
        assert_spawning(self)
        return (self._maxsize,
         self._reader,
         self._writer,
         self._rlock,
         self._wlock,
         self._sem,
         self._opid)

    def __setstate__(self, state):
        self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid = state
        self._after_fork()

    def _after_fork(self):
        """Reset per-process state (feeder thread, buffer, pipe methods)."""
        debug('Queue._after_fork()')
        self._notempty = threading.Condition(threading.Lock())
        self._buffer = collections.deque()
        self._thread = None
        self._jointhread = None
        self._joincancelled = False
        self._closed = False
        self._close = None
        self._send = self._writer.send
        self._recv = self._reader.recv
        self._poll = self._reader.poll

    def put(self, obj, block = True, timeout = None):
        """Queue `obj`; may block (or raise Full) when at capacity."""
        assert not self._closed
        # Reserve a slot.  In the decompiled original this acquire was
        # dead code after an inverted assert, so capacity was never
        # enforced.
        if not self._sem.acquire(block, timeout):
            raise Full
        self._notempty.acquire()
        try:
            # The feeder thread is started lazily on the first put.
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._notempty.notify()
        finally:
            self._notempty.release()

    def get(self, block = True, timeout = None):
        """Dequeue one object; may block (or raise Empty)."""
        if block and timeout is None:
            self._rlock.acquire()
            try:
                res = self._recv()
                self._sem.release()
                return res
            finally:
                self._rlock.release()

        else:
            if block:
                deadline = time.time() + timeout
            if not self._rlock.acquire(block, timeout):
                raise Empty
            try:
                if block:
                    # Reduce the timeout by the time spent waiting on the lock.
                    timeout = deadline - time.time()
                    if timeout < 0 or not self._poll(timeout):
                        raise Empty
                elif not self._poll():
                    raise Empty
                res = self._recv()
                self._sem.release()
                return res
            finally:
                self._rlock.release()

    def qsize(self):
        """Approximate item count derived from the capacity semaphore."""
        return self._maxsize - self._sem._semlock._get_value()

    def empty(self):
        return not self._poll()

    def full(self):
        return self._sem._semlock._is_zero()

    def get_nowait(self):
        return self.get(False)

    def put_nowait(self, obj):
        return self.put(obj, False)

    def close(self):
        self._closed = True
        self._reader.close()
        if self._close:
            self._close()

    def join_thread(self):
        """Wait for the feeder thread; only legal after close()."""
        debug('Queue.join_thread()')
        assert self._closed
        # In the decompiled original this call sat unreachably after a
        # raise, so the join finalizer never ran.
        if self._jointhread:
            self._jointhread()

    def cancel_join_thread(self):
        debug('Queue.cancel_join_thread()')
        self._joincancelled = True
        try:
            self._jointhread.cancel()
        except AttributeError:
            pass

    def _start_thread(self):
        """Start the daemon feeder thread that drains _buffer into the pipe."""
        debug('Queue._start_thread()')
        self._buffer.clear()
        self._thread = threading.Thread(target=Queue._feed, args=(self._buffer,
         self._notempty,
         self._send,
         self._wlock,
         self._writer.close), name='QueueFeederThread')
        self._thread.daemon = True
        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')
        if not self._joincancelled:
            self._jointhread = Finalize(self._thread, Queue._finalize_join, [weakref.ref(self._thread)], exitpriority=-5)
        self._close = Finalize(self, Queue._finalize_close, [self._buffer, self._notempty], exitpriority=10)

    @staticmethod
    def _finalize_join(twr):
        """Join the feeder thread (via weakref) at interpreter exit."""
        debug('joining queue thread')
        thread = twr()
        if thread is not None:
            thread.join()
            debug('... queue thread joined')
        else:
            debug('... queue thread already dead')

    @staticmethod
    def _finalize_close(buffer, notempty):
        """Push the sentinel so the feeder thread shuts down."""
        debug('telling queue thread to quit')
        notempty.acquire()
        try:
            buffer.append(_sentinel)
            notempty.notify()
        finally:
            notempty.release()

    @staticmethod
    def _feed(buffer, notempty, send, writelock, close):
        """Feeder loop: wait for items in `buffer` and send them down the pipe
        until the sentinel is seen."""
        debug('starting thread to feed data to pipe')
        from .util import is_exiting
        # Bind hot-path attributes to locals for speed, as CPython does.
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None
        try:
            while 1:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()

                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return
                        if wacquire is None:
                            send(obj)
                        else:
                            wacquire()
                            try:
                                send(obj)
                            finally:
                                wrelease()

                except IndexError:
                    # Buffer drained; go back to waiting.
                    pass

        except Exception as e:
            # Swallow errors during interpreter shutdown; report otherwise.
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass
Exemplo n.º 20
0
class Queue(object):
    """A FIFO queue shared between processes.

    Built on a one-way ``Pipe``: producers append to an in-process
    buffer which a background feeder thread flushes into the pipe;
    consumers read from the pipe's receiving end.  A bounded semaphore
    enforces ``maxsize``; separate read/write locks serialize access to
    the pipe ends across processes.
    """

    def __init__(self, maxsize=0):
        """Create the queue; ``maxsize <= 0`` means the platform maximum."""
        if maxsize <= 0:
            maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
        self._maxsize = maxsize
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        # Remember the creating process id (used in _start_thread).
        self._opid = os.getpid()
        # win32 pipe writes of small messages are atomic; no lock needed.
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = Lock()
        self._sem = BoundedSemaphore(maxsize)

        self._after_fork()

        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)

    def __getstate__(self):
        """Pickle only the shareable state (for passing to a child process)."""
        assert_spawning(self)
        return (self._maxsize, self._reader, self._writer,
                self._rlock, self._wlock, self._sem, self._opid)

    def __setstate__(self, state):
        """Restore shared state in the child and rebuild per-process state."""
        (self._maxsize, self._reader, self._writer,
         self._rlock, self._wlock, self._sem, self._opid) = state
        self._after_fork()

    def _after_fork(self):
        """Reset per-process state: buffer, feeder thread and finalizers."""
        debug('Queue._after_fork()')
        self._notempty = threading.Condition(threading.Lock())
        self._buffer = collections.deque()
        self._thread = None
        self._jointhread = None
        self._joincancelled = False
        self._closed = False
        self._close = None
        self._send = self._writer.send
        self._recv = self._reader.recv
        self._poll = self._reader.poll

    def put(self, obj, block=True, timeout=None):
        """Append *obj* to the buffer; raise Full if the semaphore is exhausted.

        The feeder thread is started lazily on the first put.
        """
        assert not self._closed
        if not self._sem.acquire(block, timeout):
            raise Full

        self._notempty.acquire()
        try:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._notempty.notify()
        finally:
            self._notempty.release()

    def get(self, block=True, timeout=None):
        """Receive one object from the pipe; raise Empty on timeout/non-block."""
        if block and timeout is None:
            # Fast path: block indefinitely on the read lock and pipe.
            self._rlock.acquire()
            try:
                res = self._recv()
                self._sem.release()
                return res
            finally:
                self._rlock.release()

        else:
            if block:
                deadline = time.time() + timeout
            if not self._rlock.acquire(block, timeout):
                raise Empty
            try:
                # Poll with whatever time remains after acquiring the lock.
                if not self._poll(block and (deadline-time.time()) or 0.0):
                    raise Empty
                res = self._recv()
                self._sem.release()
                return res
            finally:
                self._rlock.release()

    def qsize(self):
        # Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
        return self._maxsize - self._sem._semlock._get_value()

    def empty(self):
        """Return True if nothing is available to read from the pipe."""
        return not self._poll()

    def full(self):
        """Return True if the bounding semaphore is exhausted."""
        return self._sem._semlock._is_zero()

    def get_nowait(self):
        """Equivalent to ``get(False)``."""
        return self.get(False)

    def put_nowait(self, obj):
        """Equivalent to ``put(obj, False)``."""
        return self.put(obj, False)

    def close(self):
        """Close the reader end and schedule feeder-thread shutdown."""
        self._closed = True
        self._reader.close()
        if self._close:
            self._close()

    def join_thread(self):
        """Wait for the feeder thread to flush; only valid after close()."""
        debug('Queue.join_thread()')
        assert self._closed
        if self._jointhread:
            self._jointhread()

    def cancel_join_thread(self):
        """Prevent blocking on feeder-thread flush at process exit."""
        debug('Queue.cancel_join_thread()')
        self._joincancelled = True
        try:
            self._jointhread.cancel()
        except AttributeError:
            pass

    def _start_thread(self):
        """Start the daemon feeder thread and register exit finalizers."""
        debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send,
                  self._wlock, self._writer.close),
            name='QueueFeederThread'
            )
        self._thread.daemon = True

        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process.  Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = (self._opid == os.getpid())
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = Finalize(
                self._thread, Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5
                )

        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(
            self, Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10
            )

    @staticmethod
    def _finalize_join(twr):
        """Join the feeder thread if it is still alive (weakref target)."""
        debug('joining queue thread')
        thread = twr()
        if thread is not None:
            thread.join()
            debug('... queue thread joined')
        else:
            debug('... queue thread already dead')

    @staticmethod
    def _finalize_close(buffer, notempty):
        """Wake the feeder thread with the sentinel so it closes and exits."""
        debug('telling queue thread to quit')
        notempty.acquire()
        try:
            buffer.append(_sentinel)
            notempty.notify()
        finally:
            notempty.release()

    @staticmethod
    def _feed(buffer, notempty, send, writelock, close):
        """Feeder-thread loop: drain *buffer* into the pipe via *send*."""
        debug('starting thread to feed data to pipe')
        from .util import is_exiting

        # Local aliases for speed in the hot loop below.
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        try:
            while 1:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    # Drain until popleft raises IndexError (buffer empty).
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        if wacquire is None:
                            send(obj)
                        else:
                            wacquire()
                            try:
                                send(obj)
                            finally:
                                wrelease()
                except IndexError:
                    pass
        except Exception as e:
            # Since this runs in a daemon thread the resources it uses
            # may be become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to cleanup.
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass
Exemplo n.º 21
0
def BoundedSemaphore(value=1):
    """Factory for a process-shared bounded semaphore.

    Routes through the top-level ``multiprocessing.BoundedSemaphore``
    helper rather than instantiating
    ``multiprocessing.synchronize.BoundedSemaphore`` directly: since
    Python 3.4 the synchronize-level class takes a keyword-only ``ctx``
    (context) argument, so the old direct ``BoundedSemaphore(value)``
    call raises TypeError.  The helper supplies the default context.

    :param value: initial (and maximum) counter value; defaults to 1.
    :return: a bounded semaphore usable across processes.
    """
    import multiprocessing
    return multiprocessing.BoundedSemaphore(value)
Exemplo n.º 22
0
class Queue(object):
    """A FIFO queue shared between processes.

    Built on a one-way ``Pipe``: producers append to an in-process
    buffer which a background feeder thread flushes into the pipe;
    consumers read from the pipe's receiving end.  A bounded semaphore
    enforces ``maxsize``; separate read/write locks serialize access to
    the pipe ends across processes.

    Fix: ``except Exception, e`` in ``_feed`` was Python-2-only syntax
    (a SyntaxError on Python 3); changed to ``except Exception as e``,
    matching the rest of the file.
    """

    def __init__(self, maxsize=0):
        """Create the queue; ``maxsize <= 0`` means the platform maximum."""
        if maxsize <= 0:
            maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
        self._maxsize = maxsize
        self._reader, self._writer = Pipe(duplex=False)
        self._rlock = Lock()
        # Remember the creating process id (used in _start_thread).
        self._opid = os.getpid()
        # win32 pipe writes of small messages are atomic; no lock needed.
        if sys.platform == 'win32':
            self._wlock = None
        else:
            self._wlock = Lock()
        self._sem = BoundedSemaphore(maxsize)

        self._after_fork()

        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)

    def __getstate__(self):
        """Pickle only the shareable state (for passing to a child process)."""
        assert_spawning(self)
        return (self._maxsize, self._reader, self._writer, self._rlock,
                self._wlock, self._sem, self._opid)

    def __setstate__(self, state):
        """Restore shared state in the child and rebuild per-process state."""
        (self._maxsize, self._reader, self._writer, self._rlock, self._wlock,
         self._sem, self._opid) = state
        self._after_fork()

    def _after_fork(self):
        """Reset per-process state: buffer, feeder thread and finalizers."""
        debug('Queue._after_fork()')
        self._notempty = threading.Condition(threading.Lock())
        self._buffer = collections.deque()
        self._thread = None
        self._jointhread = None
        self._joincancelled = False
        self._closed = False
        self._close = None
        self._send = self._writer.send
        self._recv = self._reader.recv
        self._poll = self._reader.poll

    def put(self, obj, block=True, timeout=None):
        """Append *obj* to the buffer; raise Full if the semaphore is exhausted.

        The feeder thread is started lazily on the first put.
        """
        assert not self._closed
        if not self._sem.acquire(block, timeout):
            raise Full

        self._notempty.acquire()
        try:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._notempty.notify()
        finally:
            self._notempty.release()

    def get(self, block=True, timeout=None):
        """Receive one object from the pipe; raise Empty on timeout/non-block."""
        if block and timeout is None:
            # Fast path: block indefinitely on the read lock and pipe.
            self._rlock.acquire()
            try:
                res = self._recv()
                self._sem.release()
                return res
            finally:
                self._rlock.release()

        else:
            if block:
                deadline = time.time() + timeout
            if not self._rlock.acquire(block, timeout):
                raise Empty
            try:
                # Poll with whatever time remains after acquiring the lock.
                if not self._poll(block and (deadline - time.time()) or 0.0):
                    raise Empty
                res = self._recv()
                self._sem.release()
                return res
            finally:
                self._rlock.release()

    def qsize(self):
        # Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
        return self._maxsize - self._sem._semlock._get_value()

    def empty(self):
        """Return True if nothing is available to read from the pipe."""
        return not self._poll()

    def full(self):
        """Return True if the bounding semaphore is exhausted."""
        return self._sem._semlock._is_zero()

    def get_nowait(self):
        """Equivalent to ``get(False)``."""
        return self.get(False)

    def put_nowait(self, obj):
        """Equivalent to ``put(obj, False)``."""
        return self.put(obj, False)

    def close(self):
        """Close the reader end and schedule feeder-thread shutdown."""
        self._closed = True
        self._reader.close()
        if self._close:
            self._close()

    def join_thread(self):
        """Wait for the feeder thread to flush; only valid after close()."""
        debug('Queue.join_thread()')
        assert self._closed
        if self._jointhread:
            self._jointhread()

    def cancel_join_thread(self):
        """Prevent blocking on feeder-thread flush at process exit."""
        debug('Queue.cancel_join_thread()')
        self._joincancelled = True
        try:
            self._jointhread.cancel()
        except AttributeError:
            pass

    def _start_thread(self):
        """Start the daemon feeder thread and register exit finalizers."""
        debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(target=Queue._feed,
                                        args=(self._buffer, self._notempty,
                                              self._send, self._wlock,
                                              self._writer.close),
                                        name='QueueFeederThread')
        self._thread.daemon = True

        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process.  Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = (self._opid == os.getpid())
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = Finalize(self._thread,
                                        Queue._finalize_join,
                                        [weakref.ref(self._thread)],
                                        exitpriority=-5)

        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(self,
                               Queue._finalize_close,
                               [self._buffer, self._notempty],
                               exitpriority=10)

    @staticmethod
    def _finalize_join(twr):
        """Join the feeder thread if it is still alive (weakref target)."""
        debug('joining queue thread')
        thread = twr()
        if thread is not None:
            thread.join()
            debug('... queue thread joined')
        else:
            debug('... queue thread already dead')

    @staticmethod
    def _finalize_close(buffer, notempty):
        """Wake the feeder thread with the sentinel so it closes and exits."""
        debug('telling queue thread to quit')
        notempty.acquire()
        try:
            buffer.append(_sentinel)
            notempty.notify()
        finally:
            notempty.release()

    @staticmethod
    def _feed(buffer, notempty, send, writelock, close):
        """Feeder-thread loop: drain *buffer* into the pipe via *send*."""
        debug('starting thread to feed data to pipe')
        from .util import is_exiting

        # Local aliases for speed in the hot loop below.
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        try:
            while 1:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    # Drain until popleft raises IndexError (buffer empty).
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        if wacquire is None:
                            send(obj)
                        else:
                            wacquire()
                            try:
                                send(obj)
                            finally:
                                wrelease()
                except IndexError:
                    pass
        except Exception as e:
            # Since this runs in a daemon thread the resources it uses
            # may be become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to cleanup.
            try:
                if is_exiting():
                    info('error in queue thread: %s', e)
                else:
                    import traceback
                    traceback.print_exc()
            except Exception:
                pass
Exemplo n.º 23
0
        os.path.join(os.path.dirname(__file__), "static"),
        "ui_methods":
        ui_methods,
        "cookie_secret":
        str("__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__"),
        "login_url":
        "/admin/login.html",
        "debug":
        True,
        "db":
        motor.MotorClient(
            "mongodb://*****:*****@127.0.0.1:27017/pipeline"
        ).pipeline,
        "upload_processor": {
            "connection": parent_conn,
            "lock": BoundedSemaphore(value=1)
        },
        "facebook_api_key":
        "..",
        "facebook_secret":
        "..",
        "executor":
        ThreadPoolExecutor(max_workers=4),
        # websockets connection cache for receiving metadata and live
        "ws_cache": {}
    }

    application = tornado.web.Application([
        (r"/image.json", JSONImageHandler),
        (r"/image.jpg", JPGImageHandler),
        (r"/", MainHandler),