Example #1
class latest(Stream):
    """ Drop held-up data and emit the latest result

    This allows you to skip intermediate elements in the stream if there is
    some back pressure causing a slowdown.  Use this when you only care about
    the latest elements, and are willing to lose older data.

    This passes through values without modification otherwise.

    Examples
    --------
    >>> source.map(f).latest().map(g)  # doctest: +SKIP
    """
    _graphviz_shape = 'octagon'

    def __init__(self, upstream, **kwargs):
        self.condition = Condition()
        self.next = []

        Stream.__init__(self, upstream, ensure_io_loop=True, **kwargs)

        self.loop.add_callback(self.cb)

    def update(self, x, who=None):
        self.next = [x]
        self.loop.add_callback(self.condition.notify)

    @gen.coroutine
    def cb(self):
        while True:
            yield self.condition.wait()
            [x] = self.next
            yield self._emit(x)
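
A minimal usage sketch for the class above, assuming the streamz-style `Stream` API it extends (`Stream(asynchronous=True)`, `.sink()`) and a running IOLoop; the slow sink creates back pressure, so intermediate values get dropped:

from tornado import gen
from tornado.ioloop import IOLoop
from streamz import Stream  # assumption: the library these snippets extend

@gen.coroutine
def slow_sink(x):
    yield gen.sleep(0.1)  # simulate a slow downstream consumer
    print(x)

@gen.coroutine
def main():
    source = Stream(asynchronous=True)
    source.latest().sink(slow_sink)
    for i in range(10):   # emit faster than the sink can drain
        yield source.emit(i)
    yield gen.sleep(1.0)  # only the most recent values are printed

IOLoop.current().run_sync(main)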
Example #2
File: sheet.py Project: vizydrop/apps
    def get_data(cls, account, source_filter, limit=100, skip=0):
        """
        Gathers card information from Google Sheets
        GET https://spreadsheets.google.com/feeds/list/[spreadsheet]/[worksheet]/private/full
        """
        if not account or not account.enabled:
            raise ValueError('cannot gather information without an account')
        client = AsyncHTTPClient()

        if source_filter.spreadsheet is None:
            raise ValueError('required parameter spreadsheet missing')
        if source_filter.worksheet is None:
            raise ValueError('required parameter worksheet missing')
        uri = "https://docs.google.com/spreadsheets/d/{}/export?format=csv&gid={}".format(
            source_filter.spreadsheet, source_filter.worksheet
        )

        app_log.info(
            "Start retrieval of worksheet {}/{} for {}".format(source_filter.spreadsheet, source_filter.worksheet,
                                                               account._id))

        lock = Condition()
        oauth_client = account.get_client()
        uri, headers, body = oauth_client.add_token(uri)
        req = HTTPRequest(uri, headers=headers, body=body, streaming_callback=lambda c: cls.write(c))

        client.fetch(req, callback=lambda r: lock.notify())
        yield lock.wait(timeout=timedelta(seconds=MAXIMUM_REQ_TIME))

        app_log.info(
            "Finished retrieving worksheet for {}".format(account._id))
Example #3
class FlowControlWindow(object):

    __slots__ = ['condition', 'value']

    def __init__(self, initial_value=DEFAULT_WINDOW_SIZE):
        self.condition = Condition()
        self.value = initial_value

    @gen.coroutine
    def available(self, timeout=None):
        if self.value > 0:
            raise gen.Return(self.value)

        yield self.condition.wait(timeout=timeout)
        raise gen.Return(self.value)

    def consume(self, n):
        """Tries to consume n from value"""
        consumed = min(self.value, n)
        self.value -= consumed
        return consumed

    def produce(self, n):
        self.value += n
        self.condition.notify_all()
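
A hedged usage sketch for the class above (assuming `DEFAULT_WINDOW_SIZE` is defined in the same module, e.g. 65535 as in HTTP/2):

from tornado import gen
from tornado.ioloop import IOLoop

@gen.coroutine
def main():
    window = FlowControlWindow(initial_value=0)
    # Simulate a flow-control credit of 100 arriving a little later.
    IOLoop.current().call_later(0.1, window.produce, 100)
    n = yield window.available()  # parks on the Condition until produce()
    print("window is", n)         # -> 100
    print("consumed", window.consume(30))  # -> 30, leaving 70

IOLoop.current().run_sync(main)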
Example #4
File: dask.py Project: kszucs/streams
class gather(Stream):
    def __init__(self, child, limit=10, client=None):
        self.client = client or default_client()
        self.queue = Queue(maxsize=limit)
        self.condition = Condition()

        Stream.__init__(self, child)

        self.client.loop.add_callback(self.cb)

    def update(self, x, who=None):
        return self.queue.put(x)

    @gen.coroutine
    def cb(self):
        while True:
            x = yield self.queue.get()
            L = [x]
            while not self.queue.empty():
                L.append(self.queue.get_nowait())
            results = yield self.client._gather(L)
            for x in results:
                yield self.emit(x)
            if self.queue.empty():
                self.condition.notify_all()

    @gen.coroutine
    def flush(self):
        while not self.queue.empty():
            yield self.condition.wait()
Example #5
class zip(Stream):
    """ Combine streams together into a stream of tuples

    We emit a new tuple once all streams have produced a new element.

    See also
    --------
    combine_latest
    zip_latest
    """
    _graphviz_orientation = 270
    _graphviz_shape = 'triangle'

    def __init__(self, *upstreams, **kwargs):
        self.maxsize = kwargs.pop('maxsize', 10)
        self.condition = Condition()
        self.literals = [(i, val) for i, val in enumerate(upstreams)
                         if not isinstance(val, Stream)]

        self.buffers = {
            upstream: deque()
            for upstream in upstreams if isinstance(upstream, Stream)
        }

        upstreams2 = [
            upstream for upstream in upstreams if isinstance(upstream, Stream)
        ]

        Stream.__init__(self, upstreams=upstreams2, **kwargs)

    def pack_literals(self, tup):
        """ Fill buffers for literals whenever we empty them """
        inp = list(tup)[::-1]
        out = []
        for i, val in self.literals:
            while len(out) < i:
                out.append(inp.pop())
            out.append(val)

        while inp:
            out.append(inp.pop())

        return tuple(out)

    def update(self, x, who=None):
        L = self.buffers[who]  # get buffer for stream
        L.append(x)
        if len(L) == 1 and all(self.buffers.values()):
            tup = tuple(self.buffers[up][0] for up in self.upstreams)
            for buf in self.buffers.values():
                buf.popleft()
            self.condition.notify_all()
            if self.literals:
                tup = self.pack_literals(tup)
            return self._emit(tup)
        elif len(L) > self.maxsize:
            return self.condition.wait()
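
A hedged usage sketch for the literal-packing logic above (assuming the streamz-style `Stream` base class with `.sink()` and `.emit()`; note this `zip` is the class just defined, not the builtin):

from streamz import Stream  # assumption: the base class these snippets extend

a = Stream()
b = Stream()
z = zip(a, 10, b)  # the class above; the literal 10 keeps its slot
z.sink(print)

a.emit('x')
b.emit('y')  # -> ('x', 10, 'y') once every upstream has an element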
Example #6
class EventSource(object):

    def __init__(self):
        self.lock = Condition()
        self.events = None

    @tornado.gen.coroutine
    def publish(self, events):
        self.events = events
        self.lock.notify_all()

    @tornado.gen.coroutine
    def wait(self):
        yield self.lock.wait()
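
A hedged sketch of the publish/wait handshake above. `Condition.notify_all` only wakes coroutines already parked in `wait()`, so subscribers must be waiting before `publish()` fires:

from tornado import gen
from tornado.ioloop import IOLoop

source = EventSource()

@gen.coroutine
def subscriber(name):
    yield source.wait()  # park before anything is published
    print(name, "saw", source.events)

@gen.coroutine
def main():
    IOLoop.current().spawn_callback(subscriber, "a")
    IOLoop.current().spawn_callback(subscriber, "b")
    yield gen.moment  # give both subscribers a chance to reach wait()
    yield source.publish(["event-1"])
    yield gen.moment  # let the woken subscribers print

IOLoop.current().run_sync(main)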
Example #7
class Window(object):
    def __init__(self, parent, stream_id, initial_window_size):
        self.parent = parent
        self.stream_id = stream_id
        self.cond = Condition()
        self.closed = False
        self.size = initial_window_size

    def close(self):
        self.closed = True
        self.cond.notify_all()

    def _raise_error(self, code, message):
        if self.parent is None:
            raise ConnectionError(code, message)
        else:
            raise StreamError(self.stream_id, code)

    def adjust(self, amount):
        self.size += amount
        if self.size > constants.MAX_WINDOW_SIZE:
            self._raise_error(constants.ErrorCode.FLOW_CONTROL_ERROR,
                              "flow control window too large")
        self.cond.notify_all()

    def apply_window_update(self, frame):
        try:
            window_update, = struct.unpack('>I', frame.data)
        except struct.error:
            raise ConnectionError(constants.ErrorCode.FRAME_SIZE_ERROR,
                                  "WINDOW_UPDATE incorrect size")
        # strip reserved bit
        window_update = window_update & 0x7fffffff
        if window_update == 0:
            self._raise_error(constants.ErrorCode.PROTOCOL_ERROR,
                              "window update must not be zero")
        self.adjust(window_update)

    @gen.coroutine
    def consume(self, amount):
        while not self.closed and self.size <= 0:
            yield self.cond.wait()
        if self.closed:
            raise StreamClosedError()
        if self.size < amount:
            amount = self.size
        if self.parent is not None:
            amount = yield self.parent.consume(amount)
        self.size -= amount
        raise gen.Return(amount)
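
A hedged sketch of the blocking `consume` above, assuming a root window with no parent (the `constants` module and error classes come from the example's own package):

from tornado import gen
from tornado.ioloop import IOLoop

@gen.coroutine
def main():
    window = Window(parent=None, stream_id=1, initial_window_size=0)
    # Simulate a WINDOW_UPDATE of 100 arriving later.
    IOLoop.current().call_later(0.1, window.adjust, 100)
    n = yield window.consume(150)  # parks while size <= 0
    print("may send", n, "bytes")  # -> 100, clamped to the window

IOLoop.current().run_sync(main)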
Example #8
class zip(Stream):
    """ Combine streams together into a stream of tuples

    We emit a new tuple once all streams have produced a new element.

    See also
    --------
    combine_latest
    zip_latest
    """
    _graphviz_orientation = 270
    _graphviz_shape = 'triangle'

    def __init__(self, *upstreams, **kwargs):
        self.maxsize = kwargs.pop('maxsize', 10)
        self.buffers = [deque() for _ in upstreams]
        self.condition = Condition()
        self.literals = [(i, val) for i, val in enumerate(upstreams)
                         if not isinstance(val, Stream)]
        self.pack_literals()

        self.buffers_by_stream = {
            upstream: buffer
            for upstream, buffer in builtins.zip(upstreams, self.buffers)
            if isinstance(upstream, Stream)
        }

        upstreams2 = [
            upstream for upstream in upstreams if isinstance(upstream, Stream)
        ]

        Stream.__init__(self, upstreams=upstreams2, **kwargs)

    def pack_literals(self):
        """ Fill buffers for literals whenver we empty them """
        for i, val in self.literals:
            self.buffers[i].append(val)

    def update(self, x, who=None):
        L = self.buffers_by_stream[who]  # get buffer for stream
        L.append(x)
        if len(L) == 1 and all(self.buffers):
            tup = tuple(buf.popleft() for buf in self.buffers)
            self.condition.notify_all()
            if self.literals:
                self.pack_literals()
            return self._emit(tup)
        elif len(L) > self.maxsize:
            return self.condition.wait()
Example #9
File: core.py Project: zjw0358/streamz3
class zip(Stream):
    def __init__(self, *children, **kwargs):
        self.maxsize = kwargs.pop('maxsize', 10)
        self.buffers = [deque() for _ in children]
        self.condition = Condition()
        Stream.__init__(self, children=children)

    def update(self, x, who=None):
        L = self.buffers[self.children.index(who)]
        L.append(x)
        if len(L) == 1 and all(self.buffers):
            tup = tuple(buf.popleft() for buf in self.buffers)
            self.condition.notify_all()
            return self.emit(tup)
        elif len(L) > self.maxsize:
            return self.condition.wait()
Example #10
class CounterCondition(object):
    def __init__(self):
        self.condition = Condition()
        self.counter = 0

    def increment(self, value=1):
        self.counter += value
        self.condition.notify_all()

    @gen.coroutine
    def wait_until(self, value):
        while True:
            yield self.condition.wait()
            if self.counter >= value:
                self.counter -= value
                return
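
A hedged usage sketch: `wait_until` loops on the Condition until enough increments have accumulated, then deducts the requested amount:

from tornado import gen
from tornado.ioloop import IOLoop

@gen.coroutine
def main():
    cc = CounterCondition()
    for delay in (0.01, 0.02, 0.03):  # three increments of 4 over time
        IOLoop.current().call_later(delay, cc.increment, 4)
    yield cc.wait_until(10)  # resumes once the counter reaches 10
    print(cc.counter)        # -> 2 (12 accumulated, 10 consumed)

IOLoop.current().run_sync(main)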
Example #11
class PingHandler(firenado.core.TornadoHandler):

    def __init__(self, application, request, **kwargs):
        super(PingHandler, self).__init__(application, request, **kwargs)
        self.callback_queue = None
        self.condition = Condition()
        self.response = None
        self.corr_id = str(uuid.uuid4())
        self.in_channel = self.application.get_app_component().rabbitmq[
            'client'].channels['in']

    @gen.coroutine
    def post(self):
        self.in_channel.queue_declare(exclusive=True,
                                      callback=self.on_request_queue_declared)
        yield self.condition.wait()
Example #12
File: hgame.py Project: lpenz/slickbird
class GameScrapperWorker(object):

    def __init__(self, session, home):
        self.scrapper = Scrapper(session, home)
        self.session = session
        self.home = home
        self.condition = Condition()
        tornado.ioloop.IOLoop.current()\
            .spawn_callback(self.main)

    @tornado.gen.coroutine
    def main(self):
        _log().info('scrapper sleeping')
        yield self.condition.wait()
        _log().info('scrapper woke up')
        self.scrapper.scrap_missing()
        tornado.ioloop.IOLoop.current()\
            .spawn_callback(self.main)
        raise tornado.gen.Return(False)
Example #13
class PingHandler(firenado.tornadoweb.TornadoHandler):

    def __init__(self, application, request, **kwargs):
        super(PingHandler, self).__init__(application, request, **kwargs)
        self.callback_queue = None
        self.condition = Condition()
        self.response = None
        self.corr_id = str(uuid.uuid4())
        self.in_channel = self.application.get_app_component().rabbitmq[
            'client'].channels['in']

    @gen.coroutine
    def post(self):
        self.in_channel.queue_declare(exclusive=True,
                                      callback=self.on_request_queue_declared)
        yield self.condition.wait()

        self.write(self.response)

    def on_request_queue_declared(self, response):
        logger.info('Request temporary queue declared.')
        self.callback_queue = response.method.queue
        self.in_channel.basic_consume(self.on_response, no_ack=True,
                                      queue=self.callback_queue)
        self.in_channel.basic_publish(
            exchange='',
            routing_key='ping_rpc_queue',
            properties=pika.BasicProperties(
                reply_to=self.callback_queue,
                correlation_id=self.corr_id,
            ),
            body=self.request.body)

    def on_response(self, ch, method, props, body):
        if self.corr_id == props.correlation_id:
            self.response = {
                'data': body.decode("utf-8"),
                'date': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            }
            self.in_channel.queue_delete(queue=self.callback_queue)
            self.condition.notify()
Example #14
    def test_future_close_callback(self):
        # Regression test for interaction between the Future read interfaces
        # and IOStream._maybe_add_error_listener.
        rs, ws = yield self.make_iostream_pair()
        closed = [False]
        cond = Condition()

        def close_callback():
            closed[0] = True
            cond.notify()
        rs.set_close_callback(close_callback)
        try:
            ws.write(b'a')
            res = yield rs.read_bytes(1)
            self.assertEqual(res, b'a')
            self.assertFalse(closed[0])
            ws.close()
            yield cond.wait()
            self.assertTrue(closed[0])
        finally:
            rs.close()
            ws.close()
Example #15
class ImporterWorker(object):

    def __init__(self, session, home, scrapper):
        self.session = session
        self.home = home
        self.condition = Condition()
        self.scrapper = scrapper
        tornado.ioloop.IOLoop.current()\
            .spawn_callback(self.main)

    @tornado.gen.coroutine
    def main(self):
        _log().info('importer sleeping')
        yield self.condition.wait()
        _log().info('importer woke up')
        changed = True
        while changed:
            changed = yield self.work()
        tornado.ioloop.IOLoop.current()\
            .spawn_callback(self.main)

    @tornado.gen.coroutine
    def work(self):
        changed = False
        fi = slickbird.FileImporter(self.session, self.home)
        for f in self.session.query(orm.Importerfile)\
                .filter(orm.Importerfile.status == 'scanning'):
            changed = True
            r, status = fi.file_import(f.filename)
            f.status = status
            if status == 'moved':
                self.scrapper.condition.notify()
            yield tornado.gen.moment
        self.session.commit()
        self.scrapper.condition.notify()
        raise tornado.gen.Return(changed)
Example #16
class WebUpdater:
    def __init__(self, config, cmd_helper):
        self.server = cmd_helper.get_server()
        self.cmd_helper = cmd_helper
        self.repo = config.get('repo').strip().strip("/")
        self.owner, self.name = self.repo.split("/", 1)
        if hasattr(config, "get_name"):
            self.name = config.get_name().split()[-1]
        self.path = os.path.realpath(os.path.expanduser(config.get("path")))
        self.persistent_files = []
        pfiles = config.get('persistent_files', None)
        if pfiles is not None:
            self.persistent_files = [
                pf.strip().strip("/") for pf in pfiles.split("\n")
                if pf.strip()
            ]
            if ".version" in self.persistent_files:
                raise config.error(
                    "Invalid value for option 'persistent_files': "
                    "'.version' can not be persistent")

        self.version = self.remote_version = self.dl_url = "?"
        self.etag = None
        self.init_evt = Event()
        self.refresh_condition = None
        self._get_local_version()
        logging.info(f"\nInitializing Client Updater: '{self.name}',"
                     f"\nversion: {self.version}"
                     f"\npath: {self.path}")

    def _get_local_version(self):
        version_path = os.path.join(self.path, ".version")
        if os.path.isfile(version_path):
            with open(version_path, "r") as f:
                v = f.read()
            self.version = v.strip()

    async def check_initialized(self, timeout=None):
        if self.init_evt.is_set():
            return
        if timeout is not None:
            timeout = IOLoop.current().time() + timeout
        await self.init_evt.wait(timeout)

    async def refresh(self):
        if self.refresh_condition is None:
            self.refresh_condition = Condition()
        else:
            await self.refresh_condition.wait()
            return
        try:
            self._get_local_version()
            await self._get_remote_version()
        except Exception:
            logging.exception("Error Refreshing Client")
        self.init_evt.set()
        self.refresh_condition.notify_all()
        self.refresh_condition = None

    async def _get_remote_version(self):
        # Remote state
        url = f"https://api.github.com/repos/{self.repo}/releases/latest"
        try:
            result = await self.cmd_helper.github_api_request(url,
                                                              etag=self.etag)
        except Exception:
            logging.exception(f"Client {self.repo}: Github Request Error")
            result = {}
        if result is None:
            # No change, update not necessary
            return
        self.etag = result.get('etag', None)
        self.remote_version = result.get('name', "?")
        release_assets = result.get('assets', [{}])[0]
        self.dl_url = release_assets.get('browser_download_url', "?")
        logging.info(f"Github client Info Received:\nRepo: {self.name}\n"
                     f"Local Version: {self.version}\n"
                     f"Remote Version: {self.remote_version}\n"
                     f"url: {self.dl_url}")

    async def update(self, *args):
        await self.check_initialized(20.)
        if self.refresh_condition is not None:
            # wait for refresh if in progress
            await self.refresh_condition.wait()
        if self.remote_version == "?":
            await self.refresh()
            if self.remote_version == "?":
                raise self.server.error(
                    f"Client {self.repo}: Unable to locate update")
        if self.dl_url == "?":
            raise self.server.error(
                f"Client {self.repo}: Invalid download url")
        if self.version == self.remote_version:
            # Already up to date
            return
        self.cmd_helper.notify_update_response(
            f"Downloading Client: {self.name}")
        archive = await self.cmd_helper.http_download_request(self.dl_url)
        with tempfile.TemporaryDirectory(suffix=self.name,
                                         prefix="client") as tempdir:
            if os.path.isdir(self.path):
                # find and move persistent files
                for fname in os.listdir(self.path):
                    src_path = os.path.join(self.path, fname)
                    if fname in self.persistent_files:
                        dest_dir = os.path.dirname(os.path.join(
                            tempdir, fname))
                        os.makedirs(dest_dir, exist_ok=True)
                        shutil.move(src_path, dest_dir)
                shutil.rmtree(self.path)
            os.mkdir(self.path)
            with zipfile.ZipFile(io.BytesIO(archive)) as zf:
                zf.extractall(self.path)
            # Move temporary files back into
            for fname in os.listdir(tempdir):
                src_path = os.path.join(tempdir, fname)
                dest_dir = os.path.dirname(os.path.join(self.path, fname))
                os.makedirs(dest_dir, exist_ok=True)
                shutil.move(src_path, dest_dir)
        self.version = self.remote_version
        version_path = os.path.join(self.path, ".version")
        if not os.path.exists(version_path):
            with open(version_path, "w") as f:
                f.write(self.version)
        self.cmd_helper.notify_update_response(
            f"Client Update Finished: {self.name}", is_complete=True)

    def get_update_status(self):
        return {
            'name': self.name,
            'owner': self.owner,
            'version': self.version,
            'remote_version': self.remote_version
        }
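
The `refresh_condition` logic above is a single-flight guard: the first caller creates the Condition and does the work, while concurrent callers park on `wait()` until `notify_all()`. A minimal standalone sketch of the same idiom (names are illustrative):

import asyncio
from tornado.locks import Condition

class SingleFlight:
    def __init__(self):
        self._refresh_condition = None

    async def refresh(self):
        if self._refresh_condition is None:
            self._refresh_condition = Condition()  # we do the work
        else:
            await self._refresh_condition.wait()   # piggyback on it
            return
        try:
            await asyncio.sleep(0.1)  # stand-in for the real refresh
        finally:
            self._refresh_condition.notify_all()
            self._refresh_condition = None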
Example #17
class HttpChunkedRecognizeHandler(tornado.web.RequestHandler):
    """
    Provides a HTTP POST/PUT interface supporting chunked transfer requests, similar to that provided by
    http://github.com/alumae/ruby-pocketsphinx-server.
    """
    def prepare(self):
        self.id = str(uuid.uuid4())
        self.final_hyp = ""
        self.worker_done = Condition()
        self.user_id = self.request.headers.get("device-id", "none")
        self.content_id = self.request.headers.get("content-id", "none")
        logging.info("%s: OPEN: user='******', content='%s'" %
                     (self.id, self.user_id, self.content_id))
        self.worker = None
        self.error_status = 0
        self.error_message = None
        #Waiter thread for final hypothesis:
        #self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
        try:
            self.worker = self.application.available_workers.pop()
            self.application.send_status_update()
            logging.info("%s: Using worker %s" % (self.id, self.__str__()))
            self.worker.set_client_socket(self)

            content_type = self.request.headers.get("Content-Type", None)
            if content_type:
                content_type = content_type_to_caps(content_type)
                logging.info("%s: Using content type: %s" %
                             (self.id, content_type))

            self.worker.write_message(
                json.dumps(
                    dict(id=self.id,
                         content_type=content_type,
                         user_id=self.user_id,
                         content_id=self.content_id)))
        except KeyError:
            logging.warn("%s: No worker available for client request" %
                         self.id)
            self.set_status(503)
            self.finish("No workers available")

    @tornado.gen.coroutine
    def data_received(self, chunk):
        assert self.worker is not None
        logging.debug("%s: Forwarding client message of length %d to worker" %
                      (self.id, len(chunk)))
        self.worker.write_message(chunk, binary=True)

    @tornado.gen.coroutine
    def post(self, *args, **kwargs):
        yield self.end_request(args, kwargs)

    @tornado.gen.coroutine
    def put(self, *args, **kwargs):
        yield self.end_request(args, kwargs)

    @tornado.gen.coroutine
    def end_request(self, *args, **kwargs):
        logging.info("%s: Handling the end of chunked recognize request" %
                     self.id)
        assert self.worker is not None
        self.worker.write_message("EOS", binary=False)
        logging.info("%s: Waiting for worker to finish" % self.id)
        yield self.worker_done.wait()
        if self.error_status == 0:
            logging.info("%s: Final hyp: %s" % (self.id, self.final_hyp))
            response = {
                "status": 0,
                "id": self.id,
                "hypotheses": [{
                    "utterance": self.final_hyp
                }]
            }
            self.write(response)
        else:
            logging.info("%s: Error (status=%d) processing HTTP request: %s" %
                         (self.id, self.error_status, self.error_message))
            response = {
                "status": self.error_status,
                "id": self.id,
                "message": self.error_message
            }
            self.write(response)
        self.application.num_requests_processed += 1
        self.application.send_status_update()
        self.worker.set_client_socket(None)
        self.worker.close()
        self.finish()
        logging.info("Everything done")

    @tornado.gen.coroutine
    def send_event(self, event):
        event_str = str(event)
        if len(event_str) > 100:
            event_str = event_str[:97] + "..."
        logging.info("%s: Receiving event %s from worker" %
                     (self.id, event_str))
        if event["status"] == 0 and ("result" in event):
            try:
                if len(event["result"]
                       ["hypotheses"]) > 0 and event["result"]["final"]:
                    if len(self.final_hyp) > 0:
                        self.final_hyp += " "
                    self.final_hyp += event["result"]["hypotheses"][0][
                        "transcript"]
            except Exception:
                e = sys.exc_info()[0]
                logging.warn(
                    "Failed to extract hypothesis from recognition result: %s"
                    % e)
        elif event["status"] != 0:
            self.error_status = event["status"]
            self.error_message = event.get("message", "")

    @tornado.gen.coroutine
    def close(self):
        logging.info("%s: Receiving 'close' from worker" % (self.id))
        self.worker_done.notify()
Example #18
File: files.py Project: vizydrop/apps
    def get_data(cls, account, source_filter, limit=100, skip=0):
        source_filter = OneDriveFileFilter(source_filter)

        if source_filter.file is None:
            raise ValueError("required parameter file missing")

        app_log.info("Starting to retrieve file for {}".format(account._id))

        client = AsyncHTTPClient()
        uri = "https://api.onedrive.com/v1.0/drive/items/{}/content".format(source_filter.file)
        lock = Condition()

        def crawl_url(url):
            # some yummy regex
            location_header_regex = re.compile(r"^Location:\s?(?P<uri>http:/{2}\S+)")
            http_status_regex = re.compile(r"^HTTP/[\d\.]+\s(?P<status>\d+)")
            receiving_file = False

            # define our callbacks
            def header_callback(header):
                m = http_status_regex.match(header)
                if m is not None:
                    # process our HTTP status header
                    status = m.group("status")
                    if int(status) == 200:
                        # if we're 200, we're receiving the file, not just a redirect
                        app_log.info("Receiving file {} for account {}".format(source_filter.file, account._id))
                        nonlocal receiving_file  # rebind the flag defined in crawl_url
                        receiving_file = True
                m = location_header_regex.match(header)
                if m is not None:
                    # process our location header
                    uri = m.group("uri")
                    # and grab _that_ url
                    app_log.info("Following redirect for file {}".format(source_filter.file))
                    crawl_url(uri)

            def stream_callback(chunk):
                # only dump out chunks that are of the file we're looking for
                nonlocal receiving_file
                if receiving_file:
                    app_log.info("Writing chunk of {}B".format(chunk.__len__()))
                    cls.write(chunk)

            def on_completed(resp):
                if 200 <= resp.code <= 299:
                    lock.notify()

            oauth_client = account.get_client()
            uri, headers, body = oauth_client.add_token(url)
            req = HTTPRequest(
                uri, headers=headers, body=body, header_callback=header_callback, streaming_callback=stream_callback
            )
            client.fetch(req, callback=on_completed)

        crawl_url(uri)
        # wait for us to complete
        try:
            yield lock.wait(timeout=timedelta(seconds=MAXIMUM_REQ_TIME))
            app_log.info("File {} retrieved successfully".format(source_filter.file))
        except gen.TimeoutError:
            app_log.error("Request for file {} => {} timed out!".format(source_filter.file, account._id))
Example #19
class InMemStream(Stream):

    def __init__(self, buf=None, auto_close=True):
        """In-Memory based stream

        :param buf: the buffer for the in memory stream
        """
        self._stream = deque()
        if buf:
            self._stream.append(buf)
        self.state = StreamState.init
        self._condition = Condition()
        self.auto_close = auto_close

        self.exception = None

    def clone(self):
        new_stream = InMemStream()
        new_stream.state = self.state
        new_stream.auto_close = self.auto_close
        new_stream._stream = deque(self._stream)
        return new_stream

    def read(self):

        def read_chunk(future):
            if self.exception:
                future.set_exception(self.exception)
                return future

            chunk = ""

            while len(self._stream) and len(chunk) < common.MAX_PAYLOAD_SIZE:
                chunk += self._stream.popleft()

            future.set_result(chunk)
            return future

        read_future = tornado.concurrent.Future()

        # We're not ready yet
        if self.state != StreamState.completed and not len(self._stream):
            wait_future = self._condition.wait()
            wait_future.add_done_callback(
                lambda f: f.exception() or read_chunk(read_future)
            )
            return read_future

        return read_chunk(read_future)

    def write(self, chunk):
        if self.exception:
            raise self.exception

        if self.state == StreamState.completed:
            raise UnexpectedError("Stream has been closed.")

        if chunk:
            self._stream.append(chunk)
            self._condition.notify()

        # This needs to return a future to match the async interface.
        r = tornado.concurrent.Future()
        r.set_result(None)
        return r

    def set_exception(self, exception):
        self.exception = exception
        self.close()

    def close(self):
        self.state = StreamState.completed
        self._condition.notify()
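
A hedged usage sketch: a read issued before any data parks on the Condition, and the next `write` wakes it:

from tornado import gen
from tornado.ioloop import IOLoop

@gen.coroutine
def main():
    stream = InMemStream()
    pending = stream.read()      # no data yet: parks on the Condition
    yield stream.write("hello")  # notify() completes the pending read
    chunk = yield pending
    print(chunk)                 # -> hello
    stream.close()

IOLoop.current().run_sync(main)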
Example #20
class WebUpdater:
    def __init__(self, umgr, config):
        self.umgr = umgr
        self.server = umgr.server
        self.notify_update_response = umgr.notify_update_response
        self.repo = config.get('repo').strip().strip("/")
        self.name = self.repo.split("/")[-1]
        if hasattr(config, "get_name"):
            self.name = config.get_name().split()[-1]
        self.path = os.path.realpath(os.path.expanduser(config.get("path")))
        self.version = self.remote_version = self.dl_url = "?"
        self.etag = None
        self.init_evt = Event()
        self.refresh_condition = None
        self._get_local_version()
        logging.info(f"\nInitializing Client Updater: '{self.name}',"
                     f"\nversion: {self.version}"
                     f"\npath: {self.path}")

    def _get_local_version(self):
        version_path = os.path.join(self.path, ".version")
        if os.path.isfile(version_path):
            with open(version_path, "r") as f:
                v = f.read()
            self.version = v.strip()

    async def check_initialized(self, timeout=None):
        if self.init_evt.is_set():
            return
        if timeout is not None:
            timeout = IOLoop.current().time() + timeout
        await self.init_evt.wait(timeout)

    async def refresh(self):
        if self.refresh_condition is None:
            self.refresh_condition = Condition()
        else:
            await self.refresh_condition.wait()
            return
        try:
            self._get_local_version()
            await self._get_remote_version()
        except Exception:
            logging.exception("Error Refreshing Client")
        self.init_evt.set()
        self.refresh_condition.notify_all()
        self.refresh_condition = None

    async def _get_remote_version(self):
        # Remote state
        url = f"https://api.github.com/repos/{self.repo}/releases/latest"
        try:
            result = await self.umgr.github_api_request(url, etag=self.etag)
        except Exception:
            logging.exception(f"Client {self.repo}: Github Request Error")
            result = {}
        if result is None:
            # No change, update not necessary
            return
        self.etag = result.get('etag', None)
        self.remote_version = result.get('name', "?")
        release_assets = result.get('assets', [{}])[0]
        self.dl_url = release_assets.get('browser_download_url', "?")
        logging.info(f"Github client Info Received:\nRepo: {self.name}\n"
                     f"Local Version: {self.version}\n"
                     f"Remote Version: {self.remote_version}\n"
                     f"url: {self.dl_url}")

    async def update(self, *args):
        await self.check_initialized(20.)
        if self.refresh_condition is not None:
            # wait for refresh if in progress
            await self.refresh_condition.wait()
        if self.remote_version == "?":
            await self.refresh()
            if self.remote_version == "?":
                raise self.server.error(
                    f"Client {self.repo}: Unable to locate update")
        if self.dl_url == "?":
            raise self.server.error(
                f"Client {self.repo}: Invalid download url")
        if self.version == self.remote_version:
            # Already up to date
            return
        if os.path.isdir(self.path):
            shutil.rmtree(self.path)
        os.mkdir(self.path)
        self.notify_update_response(f"Downloading Client: {self.name}")
        archive = await self.umgr.http_download_request(self.dl_url)
        with zipfile.ZipFile(io.BytesIO(archive)) as zf:
            zf.extractall(self.path)
        self.version = self.remote_version
        version_path = os.path.join(self.path, ".version")
        if not os.path.exists(version_path):
            with open(version_path, "w") as f:
                f.write(self.version)
        self.notify_update_response(f"Client Update Finished: {self.name}",
                                    is_complete=True)

    def get_update_status(self):
        return {
            'name': self.name,
            'version': self.version,
            'remote_version': self.remote_version
        }
Example #21
class InMemStream(Stream):
    def __init__(self, buf=None, auto_close=True):
        """In-Memory based stream

        :param buf: the buffer for the in memory stream
        """
        self._stream = deque()
        if buf:
            self._stream.append(buf)
        self.state = StreamState.init
        self._condition = Condition()
        self.auto_close = auto_close

        self.exception = None
        self.exc_info = None

    def clone(self):
        new_stream = InMemStream()
        new_stream.state = self.state
        new_stream.auto_close = self.auto_close
        new_stream._stream = deque(self._stream)
        return new_stream

    def read(self):
        def read_chunk(future):
            if self.exception:
                if self.exc_info:
                    future.set_exc_info(self.exc_info)
                else:
                    future.set_exception(self.exception)
                return future

            chunk = b""

            while len(self._stream) and len(chunk) < common.MAX_PAYLOAD_SIZE:
                new_chunk = self._stream.popleft()
                if six.PY3 and isinstance(new_chunk, str):
                    new_chunk = new_chunk.encode('utf8')
                chunk += new_chunk

            future.set_result(chunk)
            return future

        read_future = tornado.concurrent.Future()

        # We're not ready yet
        if self.state != StreamState.completed and not len(self._stream):
            wait_future = self._condition.wait()
            tornado.ioloop.IOLoop.current().add_future(
                wait_future,
                lambda f: f.exception() or read_chunk(read_future))
            return read_future

        return read_chunk(read_future)

    def write(self, chunk):
        if self.exception:
            raise self.exception

        if self.state == StreamState.completed:
            raise UnexpectedError("Stream has been closed.")

        if chunk:
            self._stream.append(chunk)
            self._condition.notify()

        # This needs to return a future to match the async interface.
        r = tornado.concurrent.Future()
        r.set_result(None)
        return r

    def set_exception(self, exception, exc_info=None):
        self.exception = exception
        self.exc_info = exc_info
        self.close()

    def close(self):
        self.state = StreamState.completed
        self._condition.notify()
Example #22
class TornadoCoroutineExecutor(Executor):
    def __init__(self,
                 core_pool_size,
                 queue,
                 reject_handler,
                 coroutine_pool_name=None):
        self._core_pool_size = core_pool_size
        self._queue = queue
        self._reject_handler = reject_handler
        self._coroutine_pool_name = coroutine_pool_name or \
            'tornado-coroutine-pool-%s' % uuid.uuid1().hex
        self._core_coroutines_condition = Condition()
        self._core_coroutines = {}
        self._core_coroutines_wait_condition = Condition()
        self._shutting_down = False
        self._shuted_down = False
        self._initialize_core_coroutines()

    def _initialize_core_coroutines(self):
        for ind in range(self._core_pool_size):
            self._core_coroutines[ind] = self._core_coroutine_run(ind)
            LOGGER.info("core coroutine: %s is intialized" %
                        self._get_coroutine_name(ind))

    def _get_coroutine_name(self, ind):
        return '%s:%d' % (self._coroutine_pool_name, ind)

    @gen.coroutine
    def _core_coroutine_run(self, ind):
        coroutine_name = self._get_coroutine_name(ind)
        while not self._shutting_down and not self._shuted_down:
            try:
                task_item = self._queue.get_nowait()
            except QueueEmpty:
                LOGGER.debug("coroutine: %s will enter into waiting pool" %
                             coroutine_name)
                if self._shutting_down or self._shuted_down:
                    break
                yield self._core_coroutines_wait_condition.wait()
                LOGGER.debug("coroutine: %s was woken up from waiting pool" %
                             coroutine_name)
                continue

            async_result = task_item.async_result
            async_result.set_time_info("consumed_from_queue_at")
            if not async_result.set_running_or_notify_cancel():
                continue
            time_info_key = "executed_completion_at"
            try:
                result = yield task_item.function(*task_item.args,
                                                  **task_item.kwargs)
                async_result.set_time_info(time_info_key).set_result(result)
            except Exception as ex:
                async_result.set_time_info(time_info_key).set_exception(ex)

        LOGGER.info("coroutine: %s is stopped" % coroutine_name)
        self._core_coroutines.pop(ind)
        if not self._core_coroutines:
            LOGGER.info("all coroutines in %s are stopped" %
                        self._coroutine_pool_name)
            self._core_coroutines_condition.notify_all()

    def submit_task(self, function, *args, **kwargs):
        async_result = AsyncResult()
        if self._shutting_down or self._shuted_down:
            async_result.set_exception(
                ShutedDownError(self._coroutine_pool_name))
            return async_result
        if not gen.is_coroutine_function(function):
            async_result.set_exception(
                RuntimeError("function must be tornado coroutine function"))
            return async_result

        is_full = False
        task_item = TaskItem(function, args, kwargs, async_result)
        try:
            self._queue.put_nowait(task_item)
            async_result.set_time_info("submitted_to_queue_at")
        except QueueFull:
            is_full = True

        if is_full:
            return self._reject_handler(self._queue, task_item)
        else:
            self._core_coroutines_wait_condition.notify()
            return async_result

    @gen.coroutine
    def shutdown(self, wait_time=None):
        if self._shutting_down or self._shuted_down:
            raise gen.Return()

        self._shutting_down = True
        self._shuted_down = False

        LOGGER.info("begin to notify all coroutines")
        self._core_coroutines_wait_condition.notify_all()
        if self._core_coroutines:
            yield self._core_coroutines_condition.wait(wait_time)

        while True:
            try:
                task_item = self._queue.get_nowait()
            except QueueEmpty:
                break
            else:
                task_item.async_result.set_exception(
                    ShutedDownError(self._coroutine_pool_name))

        self._shutting_down = False
        self._shuted_down = True
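
A hedged usage sketch for the executor above (assuming `TaskItem`, `AsyncResult`, `ShutedDownError`, and `LOGGER` come from the example's own module; `Queue` and its `QueueEmpty`/`QueueFull` exceptions are tornado's):

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue

def reject(queue, task_item):  # illustrative rejection policy
    raise RuntimeError("task queue is full")

@gen.coroutine
def add(a, b):
    raise gen.Return(a + b)

@gen.coroutine
def main():
    executor = TornadoCoroutineExecutor(core_pool_size=2,
                                        queue=Queue(maxsize=16),
                                        reject_handler=reject)
    result = executor.submit_task(add, 1, 2)  # delivered via AsyncResult
    yield gen.moment       # let an idle core coroutine pick the task up
    yield executor.shutdown()

IOLoop.current().run_sync(main)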
Example #23
class TaskLoggerTornadoTest(AsyncHTTPTestCase):
    def setUp(self):
        self.condition = Condition()
        self.log_str = 'aaaaaa' * 100000
        self.exc = None
        super(TaskLoggerTornadoTest, self).setUp()

    def get_app(self):
        receives = []

        class LogTestHandler(web.RequestHandler):
            def post(hdlr):
                try:
                    self.assertEqual(hdlr.request.query_arguments['task_id'],
                                     ['1'])
                    self.assertIn(hdlr.request.query_arguments['seq'],
                                  [['1'], ['2']])
                    payload = json.loads(hdlr.request.body)
                    seq = payload['messages']['body'].pop('seq')
                    self.assertIn(seq, [1, 2])
                    self.assertEqual(payload['messages']['body'], {
                        'log': self.log_str + '\n',
                        'task_id': 1,
                    })
                    hdlr.write('ok')
                    receives.append('a')
                except Exception as exc:
                    self.exc = exc

        class ResultTestHandler(web.RequestHandler):
            def post(hdlr):
                try:
                    self.assertEqual(hdlr.request.headers['Content-Type'],
                                     'application/json')
                    self.assertEqual(hdlr.request.query_arguments, {
                        'task_id': ['1'],
                        'seq': ['3'],
                        'exit_code': ['0']
                    })
                    payload = json.loads(hdlr.request.body)
                    self.assertEqual(
                        payload['messages']['body'], {
                            'exit_code': 0,
                            'is_aborted': False,
                            'is_timeout': False,
                            'result': 'b\n',
                            'task_id': 1,
                            'seq': 3
                        })
                    hdlr.write('ok')
                    receives.append('b')
                    self.assertEqual(receives, ['a', 'a', 'b'])
                except Exception as exc:
                    self.exc = exc
                    print(traceback.format_exc())
                finally:
                    self.condition.notify()

        return web.Application([(r'/log', LogTestHandler),
                                (r'/result', ResultTestHandler)])

    @gen_test
    def test_tornado(self):
        task_logger = TaskLogger(task_id=1,
                                 engine=EngineType.TORNADO,
                                 task_url=self.get_url('/'),
                                 wrap=True)
        self.io_loop.spawn_callback(task_logger.log, self.log_str)
        self.io_loop.spawn_callback(task_logger.log, self.log_str)
        self.io_loop.spawn_callback(task_logger.result, 'b')
        yield self.condition.wait()
        if self.exc:
            raise self.exc
Example #24
class MockFitsWriterClient(object):
    """
    Wrapper class for a KATCP client to a EddFitsWriterServer
    """
    def __init__(self, address):
        """
        @brief      Construct new instance
        """
        self._address = address
        self._ioloop = IOLoop.current()
        self._stop_event = Event()
        self._is_stopped = Condition()
        self._socket = None

    def reset_connection(self):
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.setblocking(False)
        try:
            self._socket.connect(self._address)
        except socket.error as error:
            if error.args[0] == errno.EINPROGRESS:
                pass
            else:
                raise error

    @coroutine
    def recv_nbytes(self, nbytes):
        received_bytes = 0
        data = b''
        while received_bytes < nbytes:
            if self._stop_event.is_set():
                raise StopEvent
            try:
                log.debug("Requesting {} bytes".format(nbytes -
                                                       received_bytes))
                current_data = self._socket.recv(nbytes - received_bytes)
                received_bytes += len(current_data)
                data += current_data
                log.debug("Received {} bytes ({} of {} bytes)".format(
                    len(current_data), received_bytes, nbytes))
            except socket.error as error:
                error_id = error.args[0]
                if error_id == errno.EAGAIN or error_id == errno.EWOULDBLOCK:
                    yield sleep(0.1)
                else:
                    log.exception("Unexpected error on socket recv: {}".format(
                        str(error)))
                    raise error
        raise Return(data)

    @coroutine
    def recv_loop(self):
        try:
            header, sections = yield self.recv_packet()
        except StopEvent:
            log.debug("Notifying that recv calls have stopped")
            self._is_stopped.notify()
        except Exception:
            log.exception("Failure while receiving packet")
        else:
            self._ioloop.add_callback(self.recv_loop)

    def start(self):
        self._stop_event.clear()
        self.reset_connection()
        self._ioloop.add_callback(self.recv_loop)

    @coroutine
    def stop(self, timeout=2):
        self._stop_event.set()
        try:
            success = yield self._is_stopped.wait(timeout=self._ioloop.time() +
                                                  timeout)
            if not success:
                raise TimeoutError
        except TimeoutError:
            log.error(("Could not stop the client within "
                       "the {} second limit").format(timeout))
        except Exception:
            log.exception("Fucup")

    @coroutine
    def recv_packet(self):
        log.debug("Receiving packet header")
        raw_header = yield self.recv_nbytes(C.sizeof(FWHeader))
        log.debug("Converting packet header")
        header = FWHeader.from_buffer_copy(raw_header)
        log.info("Received header: {}".format(header))
        fw_data_type = header.channel_data_type.strip().upper()
        c_data_type, np_data_type = TYPE_MAP[fw_data_type]
        sections = []
        for section in range(header.nsections):
            log.debug("Receiving section {} of {}".format(
                section + 1, header.nsections))
            raw_section_header = yield self.recv_nbytes(
                C.sizeof(FWSectionHeader))
            section_header = FWSectionHeader.from_buffer_copy(
                raw_section_header)
            log.info("Section {} header: {}".format(section, section_header))
            log.debug("Receiving section data")
            raw_bytes = yield self.recv_nbytes(
                C.sizeof(c_data_type) * section_header.nchannels)
            data = np.frombuffer(raw_bytes, dtype=np_data_type)
            log.info("Section {} data: {}".format(section, data[:10]))
            sections.append((section_header, data))
        raise Return((header, sections))
Example #25
class GitUpdater:
    def __init__(self, umgr, config, path=None, env=None):
        self.server = umgr.server
        self.execute_cmd = umgr.execute_cmd
        self.execute_cmd_with_response = umgr.execute_cmd_with_response
        self.notify_update_response = umgr.notify_update_response
        self.name = config.get_name().split()[-1]
        self.repo_path = path
        if path is None:
            self.repo_path = config.get('path')
        self.env = config.get("env", env)
        dist_packages = None
        if self.env is not None:
            self.env = os.path.expanduser(self.env)
            dist_packages = config.get('python_dist_packages', None)
            self.python_reqs = os.path.join(self.repo_path,
                                            config.get("requirements"))
        self.origin = config.get("origin").lower()
        self.install_script = config.get('install_script', None)
        if self.install_script is not None:
            self.install_script = os.path.abspath(
                os.path.join(self.repo_path, self.install_script))
        self.venv_args = config.get('venv_args', None)
        self.python_dist_packages = None
        self.python_dist_path = None
        self.env_package_path = None
        if dist_packages is not None:
            self.python_dist_packages = [
                p.strip() for p in dist_packages.split('\n') if p.strip()
            ]
            self.python_dist_path = os.path.abspath(
                config.get('python_dist_path'))
            if not os.path.exists(self.python_dist_path):
                raise config.error(
                    "Invalid path for option 'python_dist_path'")
            self.env_package_path = os.path.abspath(
                os.path.join(os.path.dirname(self.env), "..",
                             config.get('env_package_path')))
        for opt in [
                "repo_path", "env", "python_reqs", "install_script",
                "python_dist_path", "env_package_path"
        ]:
            val = getattr(self, opt)
            if val is None:
                continue
            if not os.path.exists(val):
                raise config.error("Invalid path for option '%s': %s" %
                                   (val, opt))

        self.version = self.cur_hash = "?"
        self.remote_version = self.remote_hash = "?"
        self.init_evt = Event()
        self.refresh_condition = None
        self.debug = umgr.repo_debug
        self.remote = "origin"
        self.branch = "master"
        self.is_valid = self.is_dirty = self.detached = False

    def _get_version_info(self):
        ver_path = os.path.join(self.repo_path, "scripts/version.txt")
        vinfo = {}
        if os.path.isfile(ver_path):
            data = ""
            with open(ver_path, 'r') as f:
                data = f.read()
            try:
                entries = [e.strip() for e in data.split('\n') if e.strip()]
                vinfo = dict([i.split('=') for i in entries])
                vinfo = {
                    k: tuple(re.findall(r"\d+", v))
                    for k, v in vinfo.items()
                }
            except Exception:
                pass
            else:
                self._log_info(f"Version Info Found: {vinfo}")
        vinfo['version'] = tuple(re.findall(r"\d+", self.version))
        return vinfo

    def _log_exc(self, msg, traceback=True):
        log_msg = f"Repo {self.name}: {msg}"
        if traceback:
            logging.exception(log_msg)
        else:
            logging.info(log_msg)
        return self.server.error(msg)

    def _log_info(self, msg):
        log_msg = f"Repo {self.name}: {msg}"
        logging.info(log_msg)

    def _notify_status(self, msg, is_complete=False):
        log_msg = f"Repo {self.name}: {msg}"
        logging.debug(log_msg)
        self.notify_update_response(log_msg, is_complete)

    async def check_initialized(self, timeout=None):
        if self.init_evt.is_set():
            return
        if timeout is not None:
            timeout = IOLoop.current().time() + timeout
        await self.init_evt.wait(timeout)

    async def refresh(self):
        if self.refresh_condition is None:
            self.refresh_condition = Condition()
        else:
            await self.refresh_condition.wait()
            return
        try:
            await self._check_version()
        except Exception:
            logging.exception("Error Refreshing git state")
        self.init_evt.set()
        self.refresh_condition.notify_all()
        self.refresh_condition = None
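    # Note: refresh() is a single-flight guard. The first caller creates the
    # Condition and performs the work; callers arriving mid-refresh await the
    # same Condition and return once notify_all() fires, so concurrent
    # refreshes never run the git commands twice.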

    async def _check_version(self, need_fetch=True):
        self.is_valid = self.detached = False
        self.cur_hash = self.branch = self.remote = "?"
        self.version = self.remote_version = "?"
        try:
            blist = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} branch --list")
            if blist.startswith("fatal:"):
                self._log_info(f"Invalid git repo at path '{self.repo_path}'")
                return
            branch = None
            for b in blist.split("\n"):
                b = b.strip()
                if b[0] == "*":
                    branch = b[2:]
                    break
            if branch is None:
                self._log_info(
                    "Unable to retreive current branch from branch list\n"
                    f"{blist}")
                return
            if "HEAD detached" in branch:
                bparts = branch.split()[-1].strip("()")
                self.remote, self.branch = bparts.split("/")
                self.detached = True
            else:
                self.branch = branch.strip()
                self.remote = await self.execute_cmd_with_response(
                    f"git -C {self.repo_path} config --get"
                    f" branch.{self.branch}.remote")
            if need_fetch:
                await self.execute_cmd(
                    f"git -C {self.repo_path} fetch {self.remote} --prune -q",
                    retries=3)
            remote_url = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} remote get-url {self.remote}")
            cur_hash = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} rev-parse HEAD")
            remote_hash = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} rev-parse "
                f"{self.remote}/{self.branch}")
            repo_version = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} describe --always "
                "--tags --long --dirty")
            remote_version = await self.execute_cmd_with_response(
                f"git -C {self.repo_path} describe {self.remote}/{self.branch}"
                " --always --tags --long")
        except Exception:
            self._log_exc("Error retreiving git info")
            return

        self.is_dirty = repo_version.endswith("dirty")
        versions = []
        for ver in [repo_version, remote_version]:
            tag_version = "?"
            ver_match = re.match(r"v\d+\.\d+\.\d+-\d+", ver)
            if ver_match:
                tag_version = ver_match.group()
            versions.append(tag_version)
        self.version, self.remote_version = versions
        self.cur_hash = cur_hash.strip()
        self.remote_hash = remote_hash.strip()
        self._log_info(
            f"Repo Detected:\nPath: {self.repo_path}\nRemote: {self.remote}\n"
            f"Branch: {self.branch}\nRemote URL: {remote_url}\n"
            f"Current SHA: {self.cur_hash}\n"
            f"Remote SHA: {self.remote_hash}\nVersion: {self.version}\n"
            f"Remote Version: {self.remote_version}\n"
            f"Is Dirty: {self.is_dirty}\nIs Detached: {self.detached}")
        if self.debug:
            self.is_valid = True
            self._log_info("Debug enabled, bypassing official repo check")
        elif self.branch == "master" and self.remote == "origin":
            if self.detached:
                self._log_info("Detached HEAD detected, repo invalid")
                return
            remote_url = remote_url.lower()
            if remote_url[-4:] != ".git":
                remote_url += ".git"
            if remote_url == self.origin:
                self.is_valid = True
                self._log_info("Validity check for git repo passed")
            else:
                self._log_info(f"Invalid git origin url '{remote_url}'")
        else:
            self._log_info("Git repo not on offical remote/branch: "
                           f"{self.remote}/{self.branch}")

    async def update(self, update_deps=False):
        await self.check_initialized(20.)
        if self.refresh_condition is not None:
            await self.refresh_condition.wait()
        if not self.is_valid:
            raise self._log_exc("Update aborted, repo is not valid", False)
        if self.is_dirty:
            raise self._log_exc("Update aborted, repo is has been modified",
                                False)
        if self.remote_hash == self.cur_hash:
            # No need to update
            return
        self._notify_status("Updating Repo...")
        try:
            if self.detached:
                await self.execute_cmd(
                    f"git -C {self.repo_path} fetch {self.remote} -q",
                    retries=3)
                await self.execute_cmd(f"git -C {self.repo_path} checkout"
                                       f" {self.remote}/{self.branch} -q")
            else:
                await self.execute_cmd(f"git -C {self.repo_path} pull -q",
                                       retries=3)
        except Exception:
            raise self._log_exc("Error running 'git pull'")
        # Check Semantic Versions
        vinfo = self._get_version_info()
        cur_version = vinfo.get('version', ())
        update_deps |= cur_version < vinfo.get('deps_version', ())
        need_env_rebuild = cur_version < vinfo.get('env_version', ())
        if update_deps:
            await self._install_packages()
            await self._update_virtualenv(need_env_rebuild)
        elif need_env_rebuild:
            await self._update_virtualenv(True)
        # Refresh local repo state
        await self._check_version(need_fetch=False)
        if self.name == "moonraker":
            # Launch restart async so the request can return
            # before the server restarts
            self._notify_status("Update Finished...", is_complete=True)
            IOLoop.current().call_later(.1, self.restart_service)
        else:
            await self.restart_service()
            self._notify_status("Update Finished...", is_complete=True)

    async def _install_packages(self):
        if self.install_script is None:
            return
        # Open the install script and read its package list
        inst_path = self.install_script
        if not os.path.isfile(inst_path):
            self._log_info(f"Unable to open install script: {inst_path}")
            return
        with open(inst_path, 'r') as f:
            data = f.read()
        packages = re.findall(r'PKGLIST="(.*)"', data)
        packages = [p.replace("${PKGLIST}", "").strip() for p in packages]
        if not packages:
            self._log_info(f"No packages found in script: {inst_path}")
            return
        # TODO: Log and notify that packages will be installed
        pkgs = " ".join(packages)
        logging.debug(f"Repo {self.name}: Detected Packages: {pkgs}")
        self._notify_status("Installing system dependencies...")
        # Install packages with apt-get
        try:
            await self.execute_cmd(f"{APT_CMD} update",
                                   timeout=300.,
                                   notify=True)
            await self.execute_cmd(f"{APT_CMD} install --yes {pkgs}",
                                   timeout=3600.,
                                   notify=True)
        except Exception:
            self._log_exc("Error updating packages via apt-get")
            return
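    # For reference, the PKGLIST extraction above assumes install scripts
    # declare their dependencies like this (a hypothetical excerpt):
    #
    #     PKGLIST="python3-virtualenv python3-dev"
    #     PKGLIST="${PKGLIST} libopenjp2-7 libsodium-dev"
    #
    # re.findall collects both quoted strings, and the ${PKGLIST} prefix is
    # removed before the names are joined into one apt-get install call.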

    async def _update_virtualenv(self, rebuild_env=False):
        if self.env is None:
            return
        # Update python dependencies
        bin_dir = os.path.dirname(self.env)
        env_path = os.path.normpath(os.path.join(bin_dir, ".."))
        if rebuild_env:
            self._notify_status(f"Creating virtualenv at: {env_path}...")
            if os.path.exists(env_path):
                shutil.rmtree(env_path)
            try:
                await self.execute_cmd(
                    f"virtualenv {self.venv_args} {env_path}", timeout=300.)
            except Exception:
                self._log_exc(f"Error creating virtualenv")
                return
            if not os.path.exists(self.env):
                raise self._log_exc("Failed to create new virtualenv", False)
        reqs = self.python_reqs
        if not os.path.isfile(reqs):
            self._log_exc(f"Invalid path to requirements_file '{reqs}'")
            return
        pip = os.path.join(bin_dir, "pip")
        self._notify_status("Updating python packages...")
        try:
            await self.execute_cmd(f"{pip} install -r {reqs}",
                                   timeout=1200.,
                                   notify=True,
                                   retries=3)
        except Exception:
            self._log_exc("Error updating python requirements")
        self._install_python_dist_requirements()

    def _install_python_dist_requirements(self):
        dist_reqs = self.python_dist_packages
        if dist_reqs is None:
            return
        dist_path = self.python_dist_path
        site_path = self.env_package_path
        for pkg in dist_reqs:
            for f in os.listdir(dist_path):
                if f.startswith(pkg):
                    src = os.path.join(dist_path, f)
                    dest = os.path.join(site_path, f)
                    self._notify_status(f"Linking to dist package: {pkg}")
                    if os.path.islink(dest):
                        os.remove(dest)
                    elif os.path.exists(dest):
                        self._notify_status(
                            f"Error symlinking dist package: {pkg}, "
                            f"file already exists: {dest}")
                        continue
                    os.symlink(src, dest)
                    break

    async def restart_service(self):
        self._notify_status("Restarting Service...")
        try:
            await self.execute_cmd(f"sudo systemctl restart {self.name}")
        except Exception:
            raise self._log_exc("Error restarting service")

    def get_update_status(self):
        return {
            'remote_alias': self.remote,
            'branch': self.branch,
            'version': self.version,
            'remote_version': self.remote_version,
            'current_hash': self.cur_hash,
            'remote_hash': self.remote_hash,
            'is_dirty': self.is_dirty,
            'is_valid': self.is_valid,
            'detached': self.detached,
            'debug_enabled': self.debug
        }
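
For reference, a hypothetical payload returned by get_update_status (all values are illustrative):

{
    'remote_alias': 'origin',
    'branch': 'master',
    'version': 'v1.2.0-45',
    'remote_version': 'v1.2.0-47',
    'current_hash': 'abc123...',
    'remote_hash': 'def456...',
    'is_dirty': False,
    'is_valid': True,
    'detached': False,
    'debug_enabled': False
}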
Example #29
class MockFitsWriterClient(object):
    """
    Wrapper class for a KATCP client to an EddFitsWriterServer
    """
    def __init__(self, address, record_dest):
        """
        @brief      Construct new instance.
                    If record_dest is not empty, create a folder named
                    record_dest and record the received packets there.
        """
        self._address = address
        self.__record_dest = record_dest
        if record_dest:
            if not os.path.isdir(record_dest):
                os.makedirs(record_dest)
        self._ioloop = IOLoop.current()
        self._stop_event = Event()
        self._is_stopped = Condition()
        self._socket = None
        self.__last_package = 0

    def reset_connection(self):
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.setblocking(False)
        try:
            self._socket.connect(self._address)
        except socket.error as error:
            if error.args[0] == errno.EINPROGRESS:
                pass
            else:
                raise error

    @coroutine
    def recv_nbytes(self, nbytes):
        received_bytes = 0
        data = b''
        while received_bytes < nbytes:
            if self._stop_event.is_set():
                raise StopEvent
            try:
                log.debug("Requesting {} bytes".format(nbytes -
                                                       received_bytes))
                current_data = self._socket.recv(nbytes - received_bytes)
                received_bytes += len(current_data)
                data += current_data
                log.debug("Received {} bytes ({} of {} bytes)".format(
                    len(current_data), received_bytes, nbytes))
            except socket.error as error:
                error_id = error.args[0]
                if error_id == errno.EAGAIN or error_id == errno.EWOULDBLOCK:
                    yield sleep(0.1)
                else:
                    log.exception("Unexpected error on socket recv: {}".format(
                        str(error)))
                    raise error
        raise Return(data)

    @coroutine
    def recv_loop(self):
        while not self._stop_event.is_set():
            try:
                header, sections = yield self.recv_packet()
            except StopEvent:
                log.debug("Notifying that recv calls have stopped")
                self._is_stopped.notify()
            except Exception as E:
                log.exception("Failure while receiving packet: {}".format(E))

    def start(self):
        self._stop_event.clear()
        self.reset_connection()
        self._ioloop.add_callback(self.recv_loop)

    @coroutine
    def stop(self, timeout=2):
        self._stop_event.set()
        try:
            success = yield self._is_stopped.wait(timeout=self._ioloop.time() +
                                                  timeout)
            if not success:
                raise TimeoutError
        except TimeoutError:
            log.error(("Could not stop the client within "
                       "the {} second limit").format(timeout))
        except Exception:
            log.exception("Fucup")

    @coroutine
    def recv_packet(self):
        log.debug("Receiving packet header")
        raw_header = yield self.recv_nbytes(C.sizeof(FWHeader))
        log.debug("Converting packet header")
        header = FWHeader.from_buffer_copy(raw_header)
        log.info("Received header: {}".format(header))
        if header.timestamp < self.__last_package:
            log.error("Timestamps out of order!")
        else:
            self.__last_package = header.timestamp

        if self.__record_dest:
            filename = os.path.join(self.__record_dest,
                                    "FWP_{}.dat".format(header.timestamp))
            while os.path.isfile(filename):
                log.warning('Filename {} already exists; appending suffix _'.format(
                    filename))
                filename += '_'
            log.info('Recording to file {}'.format(filename))
            ofile = open(filename, 'wb')
            ofile.write(raw_header)

        fw_data_type = header.channel_data_type.strip().upper()
        c_data_type, np_data_type = TYPE_MAP[fw_data_type]
        sections = []
        for section in range(header.nsections):
            log.debug("Receiving section {} of {}".format(
                section + 1, header.nsections))
            raw_section_header = yield self.recv_nbytes(
                C.sizeof(FWSectionHeader))
            if self.__record_dest:
                ofile.write(raw_section_header)

            section_header = FWSectionHeader.from_buffer_copy(
                raw_section_header)
            log.info("Section {} header: {}".format(section, section_header))
            log.debug("Receiving section data")
            raw_bytes = yield self.recv_nbytes(
                C.sizeof(c_data_type) * section_header.nchannels)
            if self.__record_dest:
                ofile.write(raw_bytes)
            data = np.frombuffer(raw_bytes, dtype=np_data_type)
            log.info("Section {} data: {}".format(section, data[:10]))
            sections.append((section_header, data))

        if self.__record_dest:
            ofile.close()
        raise Return((header, sections))
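
A minimal usage sketch for the mock client; the address, port, and recording directory below are illustrative:

from tornado.ioloop import IOLoop

client = MockFitsWriterClient(("127.0.0.1", 5002), "./fw_dump")
client.start()                                  # spawns recv_loop on the IOLoop
IOLoop.current().call_later(10.0, client.stop)  # request shutdown after ~10s
IOLoop.current().start()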
Example #30
class PackageUpdater:
    def __init__(self, umgr):
        self.server = umgr.server
        self.execute_cmd = umgr.execute_cmd
        self.execute_cmd_with_response = umgr.execute_cmd_with_response
        self.notify_update_response = umgr.notify_update_response
        self.available_packages = []
        self.init_evt = Event()
        self.refresh_condition = None

    async def refresh(self, fetch_packages=True):
        # TODO: Use python-apt python lib rather than command line for updates
        if self.refresh_condition is None:
            self.refresh_condition = Condition()
        else:
            await self.refresh_condition.wait()
            return
        try:
            if fetch_packages:
                await self.execute_cmd(f"{APT_CMD} update",
                                       timeout=300.,
                                       retries=3)
            res = await self.execute_cmd_with_response("apt list --upgradable",
                                                       timeout=60.)
            pkg_list = [p.strip() for p in res.split("\n") if p.strip()]
            if pkg_list:
                pkg_list = pkg_list[2:]
                self.available_packages = [
                    p.split("/", maxsplit=1)[0] for p in pkg_list
                ]
            pkg_list = "\n".join(self.available_packages)
            logging.info(
                f"Detected {len(self.available_packages)} package updates:"
                f"\n{pkg_list}")
        except Exception:
            logging.exception("Error Refreshing System Packages")
        self.init_evt.set()
        self.refresh_condition.notify_all()
        self.refresh_condition = None

    async def check_initialized(self, timeout=None):
        if self.init_evt.is_set():
            return
        if timeout is not None:
            timeout = IOLoop.current().time() + timeout
        await self.init_evt.wait(timeout)

    async def update(self, *args):
        await self.check_initialized(20.)
        if self.refresh_condition is not None:
            await self.refresh_condition.wait()
        self.notify_update_response("Updating packages...")
        try:
            await self.execute_cmd(f"{APT_CMD} update",
                                   timeout=300.,
                                   notify=True)
            await self.execute_cmd(f"{APT_CMD} upgrade --yes",
                                   timeout=3600.,
                                   notify=True)
        except Exception:
            raise self.server.error("Error updating system packages")
        self.available_packages = []
        self.notify_update_response("Package update finished...",
                                    is_complete=True)

    def get_update_status(self):
        return {
            'package_count': len(self.available_packages),
            'package_list': self.available_packages
        }
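
For reference, the parser above assumes "apt list --upgradable" output of roughly this shape (a hypothetical listing; the [2:] slice drops what are presumably the CLI-stability warning and the "Listing..." header):

WARNING: apt does not have a stable CLI interface. Use with caution in scripts.
Listing... Done
git/stable 1:2.30.2-1 amd64 [upgradable from: 1:2.20.1-2]
curl/stable 7.74.0-1.2 amd64 [upgradable from: 7.64.0-4]

Splitting each remaining line on the first '/' leaves available_packages as ['git', 'curl'].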
Example #31
class DecodeRequestHandler(tornado.web.RequestHandler):
    SUPPORTED_METHOD = ('POST',)

    # Called at the beginning of a request, before get/post/etc.
    def prepare(self):
        self.worker = None
        self.filePath = None
        self.uuid = str(uuid.uuid4())
        self.set_status(200, "Initial statut")
        self.waitResponse = Condition()
        self.waitWorker = Condition()

        if self.request.method != 'POST':
            logging.debug("Received a non-POST request")
            self.set_status(
                403, "Wrong request, server handles only POST requests")
            self.finish()
            return
        #File Retrieval
        # TODO: Adapt input to existing controller API
        if 'wavFile' not in self.request.files.keys():
            self.set_status(
                403, "POST request must contain a 'wavFile' field.")
            self.finish()
            logging.debug(
                "POST request from %s does not contain a 'wavFile' field.",
                self.request.remote_ip)
            return
        temp_file = self.request.files['wavFile'][0]['body']
        self.temp_file = temp_file

        #Writing file
        try:
            f = open(TEMP_FILE_PATH + self.uuid + '.wav', 'wb')
        except IOError:
            logging.error("Could not write file.")
            self.set_status(
                500, "Server error: Counldn't write file on server side.")
            self.finish()
        else:
            f.write(temp_file)
            self.filePath = TEMP_FILE_PATH + self.uuid + '.wav'
            logging.debug("File correctly received from client")

    @gen.coroutine
    def post(self, *args, **kwargs):
        logging.debug("Allocating Worker to %s" % self.uuid)

        yield self.allocate_worker()
        self.worker.write_message(
            json.dumps({
                'uuid': self.uuid,
                # b64encode works on both py2 and py3 (needs `import base64`)
                'file': base64.b64encode(self.temp_file).decode('ascii')
            }))
        yield self.waitResponse.wait()
        self.finish()

    @gen.coroutine
    def allocate_worker(self):
        while self.worker is None:
            try:
                self.worker = self.application.available_workers.pop()
            except (KeyError, IndexError):  # no worker currently available
                self.worker = None
                self.application.waiting_client.add(self)
                self.application.display_server_status()
                yield self.waitWorker.wait()
            else:
                self.worker.client_handler = self
                logging.debug("Worker allocated to client %s" % self.uuid)
                self.application.display_server_status()

    @gen.coroutine
    def receive_response(self, message):
        os.remove(TEMP_FILE_PATH + self.uuid + '.wav')
        self.set_status(200, "Transcription succeded")
        self.set_header("Content-Type", "application/json")
        self.set_header("Access-Control-Allow-Origin", "*")
        self.write({'transcript': message})
        self.application.num_requests_processed += 1
        self.waitResponse.notify()

    def on_finish(self):
        #CLEANUP
        pass
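
A sketch of wiring this handler into a Tornado application; the route and port are illustrative, and the application object is assumed to carry the worker-pool attributes referenced above:

import logging
import tornado.web
from tornado.ioloop import IOLoop

class DecoderApplication(tornado.web.Application):
    # minimal stand-in for the status helper the handler calls
    def display_server_status(self):
        logging.debug("%d worker(s) available, %d client(s) waiting",
                      len(self.available_workers), len(self.waiting_client))

app = DecoderApplication([(r"/decode", DecodeRequestHandler)])
app.available_workers = set()   # worker WebSocket handlers register here
app.waiting_client = set()      # handlers parked until a worker frees up
app.num_requests_processed = 0

app.listen(8888)
IOLoop.current().start()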
Example #32
File: files.py  Project: fstfwd/apps
    def get_data(cls, account, source_filter, limit=100, skip=0):
        source_filter = OneDriveFileFilter(source_filter)

        if source_filter.file is None:
            raise ValueError('required parameter file missing')

        app_log.info("Starting to retrieve file for {}".format(account._id))

        client = AsyncHTTPClient()
        uri = "https://api.onedrive.com/v1.0/drive/items/{}/content".format(
            source_filter.file)
        lock = Condition()

        def crawl_url(url):
            # some yummy regex
            location_header_regex = re.compile(
                r'^Location:\s?(?P<uri>https?:/{2}\S+)')
            http_status_regex = re.compile(r'^HTTP/[\d\.]+\s(?P<status>\d+)')
            receiving_file = False

            # define our callbacks
            def header_callback(header):
                nonlocal receiving_file  # rebind the flag in crawl_url's scope
                m = http_status_regex.match(header)
                if m is not None:
                    # process our HTTP status header
                    status = m.group('status')
                    if int(status) == 200:
                        # if we're 200, we're receiving the file, not just a redirect
                        app_log.info("Receiving file {} for account {}".format(
                            source_filter.file, account._id))
                        receiving_file = True
                m = location_header_regex.match(header)
                if m is not None:
                    # process our location header
                    uri = m.group('uri')
                    # and grab _that_ url
                    app_log.info("Following redirect for file {}".format(
                        source_filter.file))
                    crawl_url(uri)

            def stream_callback(chunk):
                nonlocal receiving_file  # read the flag from crawl_url's scope
                # only dump out chunks that are of the file we're looking for
                if receiving_file:
                    app_log.info("Writing chunk of {}B".format(
                        chunk.__len__()))
                    cls.write(chunk)

            def on_completed(resp):
                if 200 <= resp.code <= 299:
                    lock.notify()

            oauth_client = account.get_client()
            uri, headers, body = oauth_client.add_token(url)
            req = HTTPRequest(uri,
                              headers=headers,
                              body=body,
                              header_callback=header_callback,
                              streaming_callback=stream_callback)
            client.fetch(req, callback=on_completed)

        crawl_url(uri)
        # wait for us to complete
        try:
            yield lock.wait(timeout=timedelta(seconds=MAXIMUM_REQ_TIME))
            app_log.info("File {} retrieved successfully".format(
                source_filter.file))
        except gen.TimeoutError:
            app_log.error("Request for file {} => {} timed out!".format(
                source_filter.file, account._id))
Example #33
File: peer.py  Project: oibe/tchannel
class PeerGroup(object):
    """A PeerGroup represents a collection of Peers.

    Requests routed through a PeerGroup can be sent to either a specific peer
    or a peer chosen at random.
    """

    def __init__(self, tchannel, score_threshold=None):
        """Initializes a new PeerGroup.

        :param tchannel:
            TChannel used for communication by this PeerGroup
        :param score_threshold:
            A value in the ``[0, 1]`` range. If specified, this requires that
            chosen peers have a score higher than this value when performing
            requests.
        """
        self.tchannel = tchannel

        self._score_threshold = score_threshold

        # Dictionary from hostport to Peer.
        self._peers = {}

        # Notified when a reset is performed. This allows multiple coroutines
        # to block on the same reset.
        self._resetting = False
        self._reset_condition = Condition()

    def __str__(self):
        return "<PeerGroup peers=%s>" % str(self._peers)

    @gen.coroutine
    def clear(self):
        """Reset this PeerGroup.

        This closes all connections to all known peers and forgets about
        these peers.

        :returns:
            A Future that resolves with a value of None when the operation
            has finished
        """
        if self._resetting:
            # If someone else is already resetting the PeerGroup, just block
            # on them to be finished.
            yield self._reset_condition.wait()
            raise gen.Return(None)

        self._resetting = True
        try:
            yield [peer.close() for peer in self._peers.values()]
        finally:
            self._peers = {}
            self._resetting = False
            self._reset_condition.notify_all()

    def get(self, hostport):
        """Get a Peer for the given destination.

        A new Peer is added and returned if one does not already exist for the
        given host-port. Otherwise, the existing Peer is returned.
        """
        assert hostport, "hostport is required"
        if hostport not in self._peers:
            self._peers[hostport] = Peer(self.tchannel, hostport)
        return self._peers[hostport]

    def lookup(self, hostport):
        """Look up a Peer for the given host and port.

        Returns None if a Peer for the given host-port does not exist.
        """
        assert hostport, "hostport is required"
        return self._peers.get(hostport, None)

    def remove(self, hostport):
        """Delete the Peer for the given host port.

        Does nothing if a matching Peer does not exist.

        :returns: The removed Peer
        """
        assert hostport, "hostport is required"
        return self._peers.pop(hostport, None)

    def add(self, peer):
        """Add an existing Peer to this group.

        A peer for the given host-port must not already exist in the group.
        """
        assert peer, "peer is required"

        if isinstance(peer, basestring):
            # Assume strings are host-ports
            peer = Peer(self.tchannel, peer)

        assert peer.hostport not in self._peers, (
            "%s already has a peer" % peer.hostport
        )

        self._peers[peer.hostport] = peer

    @property
    def hosts(self):
        """Get all host-ports managed by this PeerGroup."""
        return self._peers.keys()

    @property
    def peers(self):
        """Get all Peers managed by this PeerGroup."""
        return self._peers.values()

    def request(self, **kwargs):
        """Initiate a new request through this PeerGroup.

        :param hostport:
            If specified, requests will be sent to the specific host.
            Otherwise, a known peer will be picked at random.
        :param service:
            Name of the service being called. Defaults to an empty string.
        :param service_threshold:
            If ``hostport`` was not specified, this specifies the score
            threshold at or below which peers will be ignored.
        :param blacklist:
            Peers on the blacklist won't be chosen.
        """
        peer = self.choose(
            hostport=kwargs.get('hostport', None),
            score_threshold=kwargs.get('score_threshold', None),
            blacklist=kwargs.get('blacklist', None),
        )
        if peer:
            return peer.request(**kwargs)
        else:
            raise NoAvailablePeerError("Can't find available peer.")

    def choose(self, hostport=None, score_threshold=None, blacklist=None):
        """Choose a Peer that matches the given criteria.

        The Peer with the highest score will be chosen.

        :param hostport:
            Specifies that the returned Peer must be for the given host-port.
            Without this, all peers managed by this PeerGroup are
            candidates. If this is present, ``score_threshold`` is ignored.
        :param score_threshold:
            If specified, Peers with a score equal to or below this will be
            ignored. Defaults to the value specified when the PeerGroup was
            initialized.
        :param blacklist:
            Peers on the blacklist won't be chosen.
        :returns:
            A Peer that matches all the requested criteria or None if no such
            Peer was found.
        """

        blacklist = blacklist or set()
        if hostport:
            return self.get(hostport)

        score_threshold = score_threshold or self._score_threshold or 0
        chosen_peer = None
        chosen_score = 0
        hosts = self._peers.viewkeys() - blacklist

        for host in hosts:
            peer = self.get(host)
            score = peer.state.score()

            if score <= score_threshold:
                continue

            if score > chosen_score:
                chosen_peer = peer
                chosen_score = score

        return chosen_peer
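
A usage sketch; the TChannel instance and host-ports below are illustrative:

peers = PeerGroup(my_tchannel, score_threshold=0.5)
peers.add("192.168.1.10:4040")          # strings are treated as host-ports
peer = peers.choose(blacklist={"192.168.1.11:4040"})
if peer is not None:
    response_future = peer.request(service="hello-service")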
Example #34
class InMemStream(Stream):
    def __init__(self, buf=None, auto_close=True):
        """In-Memory based stream

        :param buf: the buffer for the in memory stream
        """
        self._stream = deque()
        if buf:
            self._stream.append(buf)
        self.state = StreamState.init
        self._condition = Condition()
        self.auto_close = auto_close

        self.exception = None

    def clone(self):
        new_stream = InMemStream()
        new_stream.state = self.state
        new_stream.auto_close = self.auto_close
        new_stream._stream = deque(self._stream)
        return new_stream

    def read(self):
        def read_chunk(future):
            if self.exception:
                future.set_exception(self.exception)
                return future

            chunk = ""

            while len(self._stream) and len(chunk) < common.MAX_PAYLOAD_SIZE:
                chunk += self._stream.popleft()

            future.set_result(chunk)
            return future

        read_future = tornado.concurrent.Future()

        # We're not ready yet
        if self.state != StreamState.completed and not len(self._stream):
            wait_future = self._condition.wait()
            wait_future.add_done_callback(
                lambda f: f.exception() or read_chunk(read_future))
            return read_future

        return read_chunk(read_future)

    def write(self, chunk):
        if self.exception:
            raise self.exception

        if self.state == StreamState.completed:
            raise StreamingError("Stream has been closed.")

        if chunk:
            self._stream.append(chunk)
            self._condition.notify()

        # This needs to return a future to match the async interface.
        r = tornado.concurrent.Future()
        r.set_result(None)
        return r

    def set_exception(self, exception):
        self.exception = exception
        self.close()

    def close(self):
        self.state = StreamState.completed
        self._condition.notify()
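
A minimal read/write sketch (Stream, StreamState, and common.MAX_PAYLOAD_SIZE come from the surrounding package; the demo drives the IOLoop itself):

from tornado import gen
from tornado.ioloop import IOLoop

@gen.coroutine
def demo():
    stream = InMemStream()
    yield stream.write("hello ")
    yield stream.write("world")
    chunk = yield stream.read()   # drains buffered chunks into one payload
    print(chunk)                  # -> "hello world"
    stream.close()

IOLoop.current().run_sync(demo)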
Example #35
class foreach_zip(Pipeline):

    _graphviz_orientation = 270
    _graphviz_shape = 'triangle'

    def __init__(self, *upstreams, **kwargs):
        self.maxsize = kwargs.pop('maxsize', 10)
        self.condition = Condition()
        self.literals = [(i, val) for i, val in enumerate(upstreams)
                         if not isinstance(val, Pipeline)]

        self.buffers = {
            upstream: deque()
            for upstream in upstreams if isinstance(upstream, Pipeline)
        }

        upstreams2 = [
            upstream for upstream in upstreams
            if isinstance(upstream, Pipeline)
        ]

        Pipeline.__init__(self, upstreams=upstreams2, **kwargs)
        _global_sinks.add(self)

    def _add_upstream(self, upstream):
        # Override method to handle setup of buffer for new stream
        self.buffers[upstream] = deque()
        super(foreach_zip, self)._add_upstream(upstream)

    def _remove_upstream(self, upstream):
        # Override method to handle removal of buffer for stream
        self.buffers.pop(upstream)
        super(foreach_zip, self)._remove_upstream(upstream)

    def pack_literals(self, tup):
        """ Fill buffers for literals whenever we empty them """
        inp = list(tup)[::-1]
        out = []
        for i, val in self.literals:
            while len(out) < i:
                out.append(inp.pop())
            out.append(val)

        while inp:
            out.append(inp.pop())

        return out

    def update(self, x, who=None):
        L = self.buffers[who]  # get buffer for stream
        L.append(x)
        if len(L) == 1 and all(self.buffers.values()):
            tup = tuple(self.buffers[up][0] for up in self.upstreams)
            for buf in self.buffers.values():
                buf.popleft()
            self.condition.notify_all()
            if self.literals:
                tup = self.pack_literals(tup)

            tup = tuple(zipping(*tup))
            return self._emit(tup)
        elif len(L) > self.maxsize:
            return self.condition.wait()
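
The maxsize check above is the backpressure valve: once a stream's buffer outgrows maxsize, update returns self.condition.wait(), so that upstream is not acknowledged until notify_all() fires when a complete tuple is emitted. A hypothetical pipeline (source_a and source_b are assumed Pipeline sources; zipping is the combiner this module imports):

zipped = foreach_zip(source_a, source_b, maxsize=10)

Literal (non-Pipeline) arguments are spliced back into each emitted tuple by pack_literals, so foreach_zip(source_a, 42) would pair every element of source_a with 42.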