class GConnection(Async):
    def __init__(self, *args, **kwargs):
        """
        A gevent-optimized subclass of the asynchronous libcouchbase
        connection.  It plugs the library's IOPS structures into gevent's
        event primitives so blocking-looking calls become efficient
        coroutine switches.
        """
        # Guard: this integration is experimental and must be explicitly enabled.
        experimental.enabled_or_raise()
        super(GConnection, self).__init__(IOPS(), *args, **kwargs)

    def _do_ctor_connect(self):
        # Constructor-time connect: block only the current greenlet (via an
        # AsyncResult) until _on_connected resolves it.
        if self.connected:
            return

        self._connect()
        self._evconn = AsyncResult()
        self._conncb = self._on_connected
        self._evconn.get()  # blocks this greenlet; the hub keeps running
        self._evconn = None

    def _on_connected(self, err):
        # Connection callback: resolve the AsyncResult created in
        # _do_ctor_connect with either the error or success (None).
        if err:
            self._evconn.set_exception(err)
        else:
            self._evconn.set(None)

    def _waitwrap(self, cbasync):
        # Suspend the calling greenlet until the async operation completes:
        # the success callback resumes us via greenlet.switch, the errback
        # re-raises the error inside this greenlet via throw().
        cur_thread = getcurrent()
        cbasync.callback = cur_thread.switch
        cbasync.errback = lambda r, x, y, z: cur_thread.throw(x, y, z)

        return get_hub().switch()

    def _meth_factory(meth, name):
        # Factory consumed by _gen_memd_wrappers below: wraps each async
        # memcached method so callers get a synchronous-looking API that
        # blocks only the current greenlet.
        def ret(self, *args, **kwargs):
            return self._waitwrap(meth(self, *args, **kwargs))
        return ret

    def _http_request(self, **kwargs):
        # Synchronous-looking HTTP request: wait on an Event set by the
        # response callback, then surface any HTTP-level error.
        res = super(GConnection, self)._http_request(**kwargs)
        if kwargs.get('chunked', False):
            return res  # chunked (view) responses are consumed incrementally

        e = Event()
        res._callback = lambda x, y: e.set()

        e.wait()

        res._maybe_raise()
        return res

    def query(self, *args, **kwargs):
        # Run a view query using the gevent-aware iterator class.
        kwargs['itercls'] = GView
        ret = super(GConnection, self).query(*args, **kwargs)
        ret.start()
        return ret

    # Generate the public memcached operation wrappers directly into the
    # class namespace using the factory above.
    locals().update(Async._gen_memd_wrappers(_meth_factory))
Beispiel #2
0
class DataMonitor(object):
    """Watch a ZooKeeper node and invoke a callback whenever its data changes.

    Fix: the original used Python 2 ``except X, err`` syntax, which is a
    SyntaxError on Python 3; converted to ``except X as err``.
    """

    # Sentinel pushed onto the queue to terminate the monitoring loop.
    _STOP_REQUEST = object()

    def __init__(self, client, path, callback, args, kwargs):
        self.client = client          # ZooKeeper client
        self.path = path              # node path to watch
        self.callback = callback      # invoked as callback(data, *args, **kwargs)
        self.args = args
        self.kwargs = kwargs
        self.started = AsyncResult()  # resolved once the first read settles
        self.queue = Queue()          # watcher events + stop requests
        self._delay = 1.343           # current retry backoff (seconds)
        self.max_delay = 180          # backoff ceiling (seconds)

    def _monitor(self):
        """Run the monitoring loop.

        Reads the node, fires the callback, then blocks until the watcher
        reports a change (or a stop is requested).  Transient connection
        errors are retried with randomized exponential backoff.
        """
        def watcher(event):
            self.queue.put(event)

        while True:
            try:
                data, stat = self.client.get(self.path, watcher)
            except zookeeper.NoNodeException:
                # Node does not exist yet: report started and poll until it does.
                if not self.started.ready():
                    self.started.set(None)
                gevent.sleep(1)
                continue
            except (zookeeper.ConnectionLossException,
                    zookeeper.SessionExpiredException,
                    zookeeper.InvalidStateException) as err:
                if not self.started.ready():
                    # Startup failure: propagate to whoever waits on `started`.
                    self.started.set_exception(err)
                    break
                logging.error("got %r while monitoring %s", str(err),
                              self.path)
                # Randomized exponential backoff, capped at max_delay.
                gevent.sleep(self._delay)
                self._delay += self._delay * random.random()
                self._delay = min(self._delay, self.max_delay)
                continue
            except Exception as err:
                if not self.started.ready():
                    self.started.set_exception(err)
                    break
                raise

            self.callback(data, *self.args, **self.kwargs)

            if not self.started.ready():
                self.started.set(None)

            # Block until the watcher fires or a stop is requested.
            event = self.queue.get()
            if event is self._STOP_REQUEST:
                break
Beispiel #3
0
class Task(object):
    """A unit of work whose outcome is delivered through an AsyncResult.

    Fix: the original used Python 2 ``except Exception, e`` syntax, which is
    a SyntaxError on Python 3; converted to ``except Exception as e``.
    """

    def __init__(self, _id, func, args, kwargs):
        self.id = _id
        self.result = AsyncResult()  # resolved (value or exception) by execute()
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def execute(self):
        """Run the task, storing the return value or exception in `result`."""
        try:
            self.result.set(self.func(*self.args, **self.kwargs))
        except Exception as e:
            log.exception('Task.execute: execution failed.')
            self.result.set_exception(e)
Beispiel #4
0
    def test_set_exc(self):
        """set_exception() must deliver the exception to a blocked get()."""
        events = []
        result = AsyncResult()

        def consumer():
            try:
                value = result.get()
            except Exception as exc:
                events.append(('catched', exc))
            else:
                events.append(('received', value))

        gevent.spawn(consumer)
        error = Exception()
        result.set_exception(error)
        gevent.sleep(0)
        assert events == [('catched', error)], events
Beispiel #5
0
    def test_set_exc(self):
        """An exception set on an AsyncResult is raised from a waiting get()."""
        caught_log = []
        result = AsyncResult()
        # A fresh AsyncResult carries no exception information yet.
        self.assertEqual(result.exc_info, ())
        self.assertEqual(result.exception, None)

        def consumer():
            with self.assertRaises(MyException) as ctx:
                result.get()
            caught_log.append(('caught', ctx.exception))

        gevent.spawn(consumer)
        error = MyException()
        result.set_exception(error)
        gevent.sleep(0)
        self.assertEqual(caught_log, [('caught', error)])
Beispiel #6
0
class _SyncCall(object):
    def __init__(self, name, *args, **kwargs):
        self.name = name
        self._args = args
        self._kwargs = kwargs
        self._result = AsyncResult()

    def wait(self, timeout):
        return self._result.get(timeout=timeout)

    def execute(self, target):
        try:
            function = getattr(target, self.name)
            self._result.set(function(*self._args, **self._kwargs))
        except Exception as error:
            self._result.set_exception(error)
Beispiel #7
0
class FutureResult(object):
    """
    Future results for asynchronous operations.

    Thin wrapper around gevent's AsyncResult that also records when the
    future was created.
    """
    def __init__(self):
        self._result = AsyncResult()
        # Creation timestamp, e.g. for expiring stale futures.
        self.created_at = time.time()

    def get(self, timeout=None):
        # Block until a value or exception is set (optionally time-bounded).
        return self._result.get(block=True, timeout=timeout)

    def set(self, value):
        self._result.set(value)

    def set_exception(self, exception):
        self._result.set_exception(exception)

    # NOTE(review): this function takes `executor`, not `self`, yet sits at
    # method indentation -- it looks like a standalone middleware hook that
    # was pasted into the class body; confirm intended scope.
    def execution_result(executor):
        result = AsyncResult()
        deferred = executor()
        assert isinstance(deferred, Deferred), 'Another middleware has converted the execution result ' \
                                               'away from a Deferred.'

        # Bridge the Deferred into the AsyncResult; the errback forwards the
        # failure's exception plus full exc_info so the traceback survives.
        deferred.add_callbacks(result.set, lambda e: result.set_exception(e.value, (e.type, e.value, e.traceback)))
        return result.get()
Beispiel #9
0
    def test_set_exc(self):
        """A waiter blocked on get() receives the exception set afterwards."""
        events = []
        result = AsyncResult()

        def consumer():
            try:
                value = result.get()
            except Exception as exc:
                events.append(("catched", exc))
                if PY3:
                    # Drop the traceback reference on Python 3 to avoid a cycle.
                    exc.__traceback__ = None
            else:
                events.append(("received", value))

        gevent.spawn(consumer)
        error = Exception()
        result.set_exception(error)
        gevent.sleep(0)
        assert events == [("catched", error)], events
Beispiel #10
0
class FileRequest(object):
    """A pending file download from a remote node.

    Completion (or failure) is signalled through gevent's AsyncResult, so a
    caller can block in wait() until the transfer settles.
    """
    __slots__ = ('node_address', 'filename', 'result')

    def __init__(self, node_address, filename):
        self.node_address = node_address
        self.filename = filename
        self.result = AsyncResult()        

    def wait(self):
        """Block the calling greenlet until the request is resolved."""
        self.result.get()

    def complete(self):
        """Mark the download as finished, releasing any greenlets in wait()."""
        self.result.set()

    def exception(self, exception):
        """Record a failure; greenlets blocked in wait() will re-raise it."""
        self.result.set_exception(exception)
Beispiel #11
0
class mock_server(object):
    """Context manager running a one-shot gevent StreamServer around `handler`.

    The handler's return value (or exception) is captured in an AsyncResult;
    on a clean ``with``-body exit the captured outcome is re-raised/consumed.
    """

    def __init__(self, handler):
        self.handler = handler
        self.result = AsyncResult()
        # Bind to an ephemeral loopback port; this object is the connection
        # callback (see __call__).
        self.server = StreamServer(('127.0.0.1', 0), self)

    def __call__(self, socket, address):
        # Connection callback: run the handler, record its outcome, and
        # always close the client socket.
        try:
            outcome = self.handler(socket, address)
        except Exception as error:
            self.result.set_exception(error)
        else:
            self.result.set(outcome)
        finally:
            socket.close()

    def __enter__(self):
        self.server.start()
        return self.server

    def __exit__(self, exc_type, exc_value, traceback):
        # Only surface the handler's outcome when the with-body itself
        # succeeded; otherwise let the body's exception propagate.
        if exc_type is None:
            self.result.get()
        self.server.stop()
Beispiel #12
0
class DataMonitor(object):
    """Watch a ZooKeeper node and invoke a callback whenever its data changes.

    Fixes: (1) Python 2 ``except Exception, err`` syntax is a SyntaxError on
    Python 3; converted to ``as err``.  (2) The generic handler fell through
    without re-raising when ``started`` was already ready, so the loop would
    call the callback with stale (or unbound) ``data``; it now re-raises.
    """

    # Sentinel pushed onto the queue to terminate the monitoring loop.
    _STOP_REQUEST = object()

    def __init__(self, client, path, callback, args, kwargs):
        self.client = client          # ZooKeeper client
        self.path = path              # node path to watch
        self.callback = callback      # invoked as callback(data, *args, **kwargs)
        self.args = args
        self.kwargs = kwargs
        self.started = AsyncResult()  # resolved once the first read settles
        self.queue = Queue()          # watcher events + stop requests

    def _monitor(self):
        """Run the monitoring loop.

        Reads the node, fires the callback, then blocks until the watcher
        reports a change (or a stop is requested).
        """
        def watcher(event):
            self.queue.put(event)

        while True:
            try:
                data, stat = self.client.get(self.path, watcher)
            except zookeeper.NoNodeException:
                # Node does not exist yet: report started and poll until it does.
                if not self.started.ready():
                    self.started.set(None)
                gevent.sleep(1)
                continue
            except Exception as err:
                if not self.started.ready():
                    # Startup failure: propagate to whoever waits on `started`.
                    self.started.set_exception(err)
                    break
                # Re-raise: without this the loop would fall through and
                # invoke the callback with stale (or unbound) `data`.
                raise

            self.callback(data, *self.args, **self.kwargs)

            if not self.started.ready():
                self.started.set(None)

            # Block until the watcher fires or a stop is requested.
            event = self.queue.get()
            if event is self._STOP_REQUEST:
                break
Beispiel #13
0
    def register_secret_batch(self, secrets: List[typing.Secret]):
        """Register multiple secrets on-chain in a single transaction.

        Secrets already registered on-chain, or already carrying an in-flight
        registration, are filtered out.  All remaining secrets share one
        AsyncResult so concurrent callers can wait on the same transaction.
        Returns None; failures of the underlying transaction are both stored
        in the shared AsyncResult and re-raised.
        """
        secret_batch = list()
        secret_registry_transaction = AsyncResult()

        for secret in secrets:
            secrethash = sha3(secret)
            if not self.check_registered(secrethash):
                # Skip secrets with a pending registration; otherwise record
                # the shared transaction handle for this secret.
                if secret not in self.open_secret_transactions:
                    secret_batch.append(secret)
                    self.open_secret_transactions[secret] = secret_registry_transaction
            else:
                log.info(
                    'secret already registered',
                    node=pex(self.node_address),
                    contract=pex(self.address),
                    secrethash=encode_hex(secrethash),
                )

        # Nothing new to register; the unused AsyncResult is simply dropped.
        if not secret_batch:
            return

        log.info(
            'registerSecretBatch called',
            node=pex(self.node_address),
            contract=pex(self.address),
        )

        try:
            transaction_hash = self._register_secret_batch(secret_batch)
        except Exception as e:
            # Propagate the failure to every caller waiting on this batch.
            secret_registry_transaction.set_exception(e)
            raise
        else:
            secret_registry_transaction.set(transaction_hash)
        finally:
            # Settled either way: these secrets are no longer pending.
            for secret in secret_batch:
                self.open_secret_transactions.pop(secret, None)
Beispiel #14
0
class _Socket(_original_Socket):
    """Green version of :class:`zmq.core.socket.Socket`

    The following methods are overridden:

        * send
        * recv

    To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
    is deferred to the hub if a ``zmq.EAGAIN`` (retry) error is raised.

    The `__state_changed` method is triggered when the zmq.FD for the socket is
    marked as readable and triggers the necessary read and write events (which
    are waited for in the recv and send methods).

    Some double underscore prefixes are used to minimize pollution of
    :class:`zmq.core.socket.Socket`'s namespace.

    Fix: ``__init__`` did not call the superclass constructor, so the zmq
    socket was never actually created before ``__setup_events`` queried
    ``self.getsockopt(FD)``.
    """

    def __init__(self, context, socket_type):
        # Construct the underlying zmq socket first; __setup_events needs a
        # live socket to read the FD option from.
        super(_Socket, self).__init__(context, socket_type)
        self.__setup_events()

    def close(self, linger=None):
        # close the _state_event event, keeps the number of active file descriptors down
        if not self._closed and getattr(self, '_state_event', None):
            try:
                self._state_event.stop()
            except AttributeError as e:
                # gevent<1.0 compat
                self._state_event.cancel()
        super(_Socket, self).close(linger)

    def __setup_events(self):
        # AsyncResults that send/recv paths block on until the FD watcher
        # signals readability/writability.
        self.__readable = AsyncResult()
        self.__writable = AsyncResult()
        try:
            self._state_event = get_hub().loop.io(self.getsockopt(FD), 1)  # read state watcher
            self._state_event.start(self.__state_changed)
        except AttributeError:
            # for gevent<1.0 compatibility
            from gevent.core import read_event
            self._state_event = read_event(self.getsockopt(FD), self.__state_changed, persist=True)

    def __state_changed(self, event=None, _evtype=None):
        # FD watcher callback: wake greenlets blocked in _wait_read/_wait_write.
        try:
            if self.closed:
                # if the socket has entered a close state resume any waiting greenlets
                self.__writable.set()
                self.__readable.set()
                return
            events = self.getsockopt(zmq.EVENTS)
        except ZMQError as exc:
            # Propagate the error to both waiters.
            self.__writable.set_exception(exc)
            self.__readable.set_exception(exc)
        else:
            if events & zmq.POLLOUT:
                self.__writable.set()
            if events & zmq.POLLIN:
                self.__readable.set()

    def _wait_write(self):
        # Fresh AsyncResult per wait: a previous result stays 'ready' forever.
        self.__writable = AsyncResult()
        self.__writable.get()

    def _wait_read(self):
        self.__readable = AsyncResult()
        self.__readable.get()

    def send(self, data, flags=0, copy=True, track=False):
        """Send, yielding to the hub on EAGAIN until the socket is writable."""
        # if we're given the NOBLOCK flag act as normal and let the EAGAIN get raised
        if flags & zmq.NOBLOCK:
            return super(_Socket, self).send(data, flags, copy, track)
        # ensure the zmq.NOBLOCK flag is part of flags
        flags |= zmq.NOBLOCK
        while True:  # Attempt to complete this operation indefinitely, blocking the current greenlet
            try:
                # attempt the actual call
                return super(_Socket, self).send(data, flags, copy, track)
            except zmq.ZMQError as e:
                # if the raised ZMQError is not EAGAIN, reraise
                if e.errno != zmq.EAGAIN:
                    raise
            # defer to the event loop until we're notified the socket is writable
            self._wait_write()

    def recv(self, flags=0, copy=True, track=False):
        """Receive, yielding to the hub on EAGAIN until data is available."""
        if flags & zmq.NOBLOCK:
            return super(_Socket, self).recv(flags, copy, track)
        flags |= zmq.NOBLOCK
        while True:
            try:
                return super(_Socket, self).recv(flags, copy, track)
            except zmq.ZMQError as e:
                if e.errno != zmq.EAGAIN:
                    raise
            self._wait_read()
class TaskExecutor(object):
    """Runs tasks in a dedicated worker subprocess, relaying status and
    results between the balancer and the out-of-process task proxy.

    A background greenlet (`executor`) keeps respawning the worker process;
    `run` dispatches one task at a time over the RPC connection and blocks
    on `result` until the worker reports completion via `put_status`.
    """

    def __init__(self, balancer, index):
        self.balancer = balancer
        self.index = index                # worker slot number (for logging)
        self.task = None                  # currently executing task, if any
        self.proc = None                  # worker subprocess handle
        self.pid = None                   # worker PID (kept for rusage lookups)
        self.conn = None                  # RPC connection to the worker
        self.state = None                 # WorkerState value
        self.key = str(uuid.uuid4())      # secret the worker presents on check-in
        self.checked_in = Event()         # set once the worker connects back
        self.result = AsyncResult()       # outcome of the current task
        self.exiting = False              # signals `executor` to stop respawning
        self.thread = gevent.spawn(self.executor)

    def checkin(self, conn):
        # Called when the spawned worker connects back and identifies itself.
        self.balancer.logger.debug('Check-in of worker #{0} (key {1})'.format(self.index, self.key))
        self.conn = conn
        self.state = WorkerState.IDLE
        self.checked_in.set()

    def get_status(self):
        """Query the worker for the current TaskStatus; None if not connected.

        For MasterProgressTask subclasses, progress is aggregated from the
        master's own progress info plus the weighted progress of its
        subtasks.  On RPC failure the worker process is terminated.
        """
        if not self.conn:
            return None

        try:
            st = TaskStatus(0)
            if issubclass(self.task.clazz, MasterProgressTask):
                progress_subtask_info = self.conn.call_client_sync(
                    'taskproxy.get_master_progress_info'
                )
                if progress_subtask_info['increment_progress'] != 0:
                    # Fold any pending increment into the absolute progress,
                    # then reset it on the worker side.
                    progress_subtask_info['progress'] += progress_subtask_info['increment_progress']
                    progress_subtask_info['increment_progress'] = 0
                    self.conn.call_client_sync(
                        'taskproxy.set_master_progress_detail',
                        {
                            'progress': progress_subtask_info['progress'],
                            'increment_progress': progress_subtask_info['increment_progress']
                        }
                    )
                if progress_subtask_info['active_tids']:
                    progress_to_increment = 0
                    # NOTE: 'concurent' spelling matches the task proxy's
                    # wire-protocol keys; do not "fix" it here.
                    concurent_weight = progress_subtask_info['concurent_subtask_detail']['average_weight']
                    # Concurrent subtasks contribute average-weighted progress.
                    for tid in progress_subtask_info['concurent_subtask_detail']['tids']:
                        subtask_status = self.balancer.get_task(tid).executor.get_status()
                        progress_to_increment += subtask_status.percentage * concurent_weight * \
                            progress_subtask_info['subtask_weights'][str(tid)]
                    # Remaining active subtasks contribute directly weighted progress.
                    for tid in set(progress_subtask_info['active_tids']).symmetric_difference(
                        set(progress_subtask_info['concurent_subtask_detail']['tids'])
                    ):
                        subtask_status = self.balancer.get_task(tid).executor.get_status()
                        progress_to_increment += subtask_status.percentage * \
                            progress_subtask_info['subtask_weights'][str(tid)]
                    progress_subtask_info['progress'] += int(progress_to_increment)
                    if progress_subtask_info['pass_subtask_details']:
                        # Surface the last-polled subtask's message as the master's.
                        progress_subtask_info['message'] = subtask_status.message
                st = TaskStatus(
                    progress_subtask_info['progress'], progress_subtask_info['message']
                )
            else:
                st.__setstate__(self.conn.call_client_sync('taskproxy.get_status'))
            return st

        except RpcException as err:
            self.balancer.logger.error(
                "Cannot obtain status from task #{0}: {1}".format(self.task.id, str(err))
            )
            self.proc.terminate()

    def put_status(self, status):
        """Apply a status report ('ROLLBACK'/'FINISHED'/'FAILED') from the worker."""
        # Try to collect rusage at this point, when process is still alive
        try:
            kinfo = bsd.kinfo_getproc(self.pid)
            self.task.rusage = kinfo.rusage
        except LookupError:
            pass

        if status['status'] == 'ROLLBACK':
            self.task.set_state(TaskState.ROLLBACK)

        if status['status'] == 'FINISHED':
            # Resolving `result` unblocks the greenlet waiting in run().
            self.result.set(status['result'])

        if status['status'] == 'FAILED':
            error = status['error']
            cls = TaskException

            # Map well-known remote exception types back to local classes.
            if error['type'] == 'task.TaskAbortException':
                cls = TaskAbortException

            if error['type'] == 'ValidationException':
                cls = ValidationException

            self.result.set_exception(cls(
                code=error['code'],
                message=error['message'],
                stacktrace=error['stacktrace'],
                extra=error.get('extra')
            ))

    def put_warning(self, warning):
        # Forward a non-fatal warning from the worker to the task record.
        self.task.add_warning(warning)

    def run(self, task):
        """Dispatch `task` to the worker and block until it finishes,
        updating the task's state, result and lifecycle events."""
        self.result = AsyncResult()
        self.task = task
        self.task.set_state(TaskState.EXECUTING)

        # Locate the plugin file defining the task class so the worker can
        # import it by path.
        filename = None
        module_name = inspect.getmodule(task.clazz).__name__
        for dir in self.balancer.dispatcher.plugin_dirs:
            found = False
            try:
                for root, _, files in os.walk(dir):
                    file = first_or_default(lambda f: module_name in f, files)
                    if file:
                        filename = os.path.join(root, file)
                        found = True
                        break

                if found:
                    break
            except FileNotFoundError:
                continue

        self.conn.call_client_sync('taskproxy.run', {
            'id': task.id,
            'class': task.clazz.__name__,
            'filename': filename,
            'args': task.args,
            'debugger': task.debugger,
            'environment': task.environment
        })

        try:
            # Resolved by put_status (or by executor() on worker death).
            self.result.get()
        except BaseException as e:
            # Anything other than TaskException is unexpected; report it.
            if not isinstance(e, TaskException):
                self.balancer.dispatcher.report_error(
                    'Task {0} raised exception other than TaskException'.format(self.task.name),
                    e
                )

            if isinstance(e, TaskAbortException):
                self.task.set_state(TaskState.ABORTED, TaskStatus(0, 'aborted'))
            else:
                self.task.error = serialize_error(e)
                self.task.set_state(TaskState.FAILED, TaskStatus(0, str(e), extra={
                    "stacktrace": traceback.format_exc()
                }))

            self.task.ended.set()
            self.balancer.task_exited(self.task)
            self.state = WorkerState.IDLE
            return

        self.task.result = self.result.value
        self.task.set_state(TaskState.FINISHED, TaskStatus(100, ''))
        self.task.ended.set()
        self.balancer.task_exited(self.task)
        self.state = WorkerState.IDLE

    def abort(self):
        """Abort the running task via RPC, killing the worker on failure."""
        self.balancer.logger.info("Trying to abort task #{0}".format(self.task.id))
        # Try to abort via RPC. If this fails, kill process
        try:
            self.conn.call_client_sync('taskproxy.abort')
        except RpcException as err:
            self.balancer.logger.warning("Failed to abort task #{0} gracefully: {1}".format(self.task.id, str(err)))
            self.balancer.logger.warning("Killing process {0}".format(self.pid))
            self.proc.terminate()

    def executor(self):
        """Greenlet body: spawn the worker process, stream its output, and
        respawn it (failing the current task) whenever it dies."""
        while not self.exiting:
            try:
                self.proc = Popen(
                    [TASKWORKER_PATH, self.key],
                    close_fds=True,
                    preexec_fn=os.setpgrp,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT)

                self.pid = self.proc.pid
                self.balancer.logger.debug('Started executor #{0} as PID {1}'.format(self.index, self.pid))
            except OSError:
                self.result.set_exception(TaskException(errno.EFAULT, 'Cannot spawn task executor'))
                self.balancer.logger.error('Cannot spawn task executor #{0}'.format(self.index))
                return

            # Stream combined stdout/stderr into the log and the task output.
            for line in self.proc.stdout:
                line = line.decode('utf8')
                self.balancer.logger.debug('Executor #{0}: {1}'.format(self.index, line.strip()))
                if self.task:
                    self.task.output += line

            self.proc.wait()

            if self.proc.returncode == -signal.SIGTERM:
                self.balancer.logger.info(
                    'Executor process with PID {0} was terminated gracefully'.format(
                        self.proc.pid
                    )
                )
            else:
                self.balancer.logger.error('Executor process with PID {0} died abruptly with exit code {1}'.format(
                    self.proc.pid,
                    self.proc.returncode)
                )

            # Fail whatever was waiting on this worker, then respawn.
            self.result.set_exception(TaskException(errno.EFAULT, 'Task executor died'))
            gevent.sleep(1)

    def die(self):
        """Stop the respawn loop and terminate the worker process."""
        self.exiting = True
        if self.proc:
            try:
                self.proc.terminate()
            except ProcessLookupError:
                self.balancer.logger.warning('Executor process with PID {0} already dead'.format(self.proc.pid))
Beispiel #16
0
class GeventLoop(object):
    """Event-loop adapter mapping alarms, file watches and idle callbacks
    onto gevent greenlets (urwid-style main-loop interface)."""

    def __init__(self):
        super(GeventLoop, self).__init__()
        self._greenlets = set()        # greenlets spawned through this loop
        self._idle_handle = 0          # monotonically increasing idle-callback id
        self._idle_callbacks = {}      # handle -> callback
        self._exit = AsyncResult()     # resolved with the first greenlet failure

    def _greenlet_spawned(self, greenlet):
        """Track a spawned greenlet and hook up completion/failure links."""
        greenlet.link(self._greenlet_completed)
        greenlet.link_exception(self._greenlet_failed)
        self._greenlets.add(greenlet)
        return greenlet

    def _greenlet_completed(self, greenlet):
        # Drop finished greenlets and give idle callbacks a chance to run.
        self._greenlets.discard(greenlet)
        self._entering_idle()

    def _greenlet_failed(self, greenlet):
        # Record the failure; run() will re-raise it from joinall.
        self._exit.set_exception(greenlet.exception)

    # alarm

    def alarm(self, seconds, callback):
        """Schedule `callback` to run once after `seconds`; returns a handle."""
        return self._greenlet_spawned(gevent.spawn_later(seconds, callback))

    def remove_alarm(self, handle):
        """Cancel a pending alarm; True if it had not fired yet."""
        if not handle._start_event.active:
            return False
        handle._start_event.stop()
        return True

    # file

    def _watch_file(self, fd, callback):
        # Loop forever: block until fd is readable, then fire the callback
        # in its own tracked greenlet.
        while True:
            select.select([fd], [], [])
            self._greenlet_spawned(gevent.spawn(callback))

    def watch_file(self, fd, callback):
        """Invoke `callback` whenever `fd` becomes readable; returns a handle."""
        return self._greenlet_spawned(gevent.spawn(self._watch_file, fd, callback))

    def remove_watch_file(self, handle):
        """Stop watching the fd associated with `handle`."""
        handle.kill()
        return True

    # idle

    def _entering_idle(self):
        # Fire every registered idle callback.
        for callback in self._idle_callbacks.values():
            callback()

    def enter_idle(self, callback):
        """Register an idle callback; returns a handle for removal."""
        self._idle_handle += 1
        self._idle_callbacks[self._idle_handle] = callback
        return self._idle_handle

    def remove_enter_idle(self, handle):
        """Forget an idle callback; True if it was registered."""
        if handle not in self._idle_callbacks:
            return False
        del self._idle_callbacks[handle]
        return True

    def run(self):
        """Loop forever, waking about once per second to fire idle callbacks
        and re-raising the first failure recorded in `_exit`."""
        while True:
            pending = [self._exit] + list(self._greenlets)
            try:
                gevent.joinall(pending, timeout=1, raise_error=True)
            except gevent.Timeout:
                pass
            self._entering_idle()
Beispiel #17
0
class GeventLoop(object):
    """An urwid-style event loop built on gevent greenlets.

    Tracks every spawned greenlet, funnels the first greenlet failure into
    ``self._exit`` so ``run()`` re-raises it, and invokes registered idle
    callbacks whenever work completes (or about once per second while the
    loop is otherwise blocked).
    """

    def __init__(self):
        super(GeventLoop, self).__init__()
        self._greenlets = set()        # greenlets still considered alive
        self._idle_handle = 0          # monotonically increasing handle id
        self._idle_callbacks = {}      # handle -> idle callback
        self._exit = AsyncResult()     # receives the first greenlet failure

    def _greenlet_spawned(self, greenlet):
        """Track *greenlet* and return it as the caller-visible handle."""
        greenlet.link(self._greenlet_completed)
        greenlet.link_exception(self._greenlet_failed)
        self._greenlets.add(greenlet)
        return greenlet

    def _greenlet_completed(self, greenlet):
        """link() callback: drop the finished greenlet and fire idle hooks."""
        self._greenlets.discard(greenlet)
        self._entering_idle()

    def _greenlet_failed(self, greenlet):
        """link_exception() callback: make run() re-raise the failure."""
        self._exit.set_exception(greenlet.exception)

    # alarm

    def alarm(self, seconds, callback):
        """Run *callback* once, *seconds* from now; returns its greenlet."""
        return self._greenlet_spawned(gevent.spawn_later(seconds, callback))

    def remove_alarm(self, handle):
        """Cancel a pending alarm; True if it had not started yet."""
        start_event = handle._start_event
        if not start_event.active:
            return False
        start_event.stop()
        return True

    # file

    def _watch_file(self, fd, callback):
        """Loop forever, spawning *callback* each time *fd* is readable."""
        while True:
            select.select([fd], [], [])
            self._greenlet_spawned(gevent.spawn(callback))

    def watch_file(self, fd, callback):
        """Invoke *callback* whenever *fd* becomes readable; returns the watcher."""
        return self._greenlet_spawned(gevent.spawn(self._watch_file, fd, callback))

    def remove_watch_file(self, handle):
        """Stop a watcher returned by watch_file(); always returns True."""
        handle.kill()
        return True

    # idle

    def _entering_idle(self):
        """Invoke every registered idle callback."""
        for callback in self._idle_callbacks.values():
            callback()

    def enter_idle(self, callback):
        """Register *callback* to run on idle; returns a handle for removal."""
        handle = self._idle_handle + 1
        self._idle_handle = handle
        self._idle_callbacks[handle] = callback
        return handle

    def remove_enter_idle(self, handle):
        """Unregister an idle callback; True if the handle was known."""
        if handle not in self._idle_callbacks:
            return False
        del self._idle_callbacks[handle]
        return True

    def run(self):
        """Block forever servicing greenlets; idle hooks run at least once/sec."""
        while True:
            pending = [self._exit]
            pending.extend(self._greenlets)
            try:
                gevent.joinall(pending, timeout=1, raise_error=True)
            except gevent.Timeout:
                pass
            self._entering_idle()
Beispiel #18
0
    def register_secret_batch(self, secrets: List[Secret]):
        """Register a batch of secrets. Check if they are already registered at
        the given block identifier.

        Secrets already registered (or with an in-flight registration) are
        skipped; the remaining ones are sent in a single registerSecretBatch
        transaction.  Raises RaidenUnrecoverableError/RaidenRecoverableError
        on failure.
        """
        secrets_to_register = list()
        secrethashes_to_register = list()
        secrethashes_not_sent = list()
        transaction_result = AsyncResult()
        # AsyncResults of concurrent callers already registering some of the
        # same secrets; we join on these at the end.
        wait_for = set()

        # secret registration has no preconditions:
        #
        # - The action does not depend on any state, it's always valid to call
        #   it.
        # - This action is always susceptible to race conditions.
        #
        # Therefore this proxy only needs to detect if the secret is already
        # registered, to avoid sending obviously unnecessary transactions, and
        # it has to handle race conditions.

        with self._open_secret_transactions_lock:
            verification_block_hash = self.client.get_confirmed_blockhash()

            for secret in secrets:
                secrethash = sha3(secret)
                secrethash_hex = encode_hex(secrethash)

                # Do the local test on `open_secret_transactions` first, then
                # if necessary do an RPC call.
                #
                # The call to `is_secret_registered` has two conflicting
                # requirements:
                #
                # - Avoid sending duplicated transactions for the same lock
                # - Operating on a consistent/confirmed view of the blockchain
                #   (if a secret has been registered in a block that is not
                #   confirmed it doesn't count yet, an optimization would be to
                #   *not* send the transaction and wait for the confirmation)
                #
                # The code below respects the consistent blockchain view,
                # meaning that if this proxy method is called with an old
                # blockhash an unnecessary transaction will be sent, and the
                # error will be treated as a race-condition.
                other_result = self.open_secret_transactions.get(secret)

                if other_result is not None:
                    wait_for.add(other_result)
                    secrethashes_not_sent.append(secrethash_hex)
                elif not self.is_secret_registered(secrethash,
                                                   verification_block_hash):
                    secrets_to_register.append(secret)
                    secrethashes_to_register.append(secrethash_hex)
                    self.open_secret_transactions[secret] = transaction_result

        # From here on the lock is not required. Context-switches will happen
        # for the gas estimation and the transaction, however the
        # synchronization data is limited to the open_secret_transactions
        log_details = {
            "node": pex(self.node_address),
            "contract": pex(self.address),
            "secrethashes": secrethashes_to_register,
            "secrethashes_not_sent": secrethashes_not_sent,
        }

        if not secrets_to_register:
            log.debug("registerSecretBatch skipped, waiting for transactions",
                      **log_details)

            gevent.joinall(wait_for, raise_error=True)

            log.info("registerSecretBatch successful", **log_details)
            return

        checking_block = self.client.get_checking_block()
        gas_limit = self.proxy.estimate_gas(checking_block,
                                            "registerSecretBatch",
                                            secrets_to_register)
        receipt = None
        transaction_hash = None
        msg = None

        if gas_limit:
            gas_limit = safe_gas_limit(
                gas_limit,
                len(secrets_to_register) * GAS_REQUIRED_PER_SECRET_IN_BATCH)

            log.debug("registerSecretBatch called", **log_details)

            try:
                transaction_hash = self.proxy.transact("registerSecretBatch",
                                                       gas_limit,
                                                       secrets_to_register)
                self.client.poll(transaction_hash)
                receipt = self.client.get_transaction_receipt(transaction_hash)
            except Exception as e:  # pylint: disable=broad-except
                msg = f"Unexpected exception {e} at sending registerSecretBatch transaction."

        # Clear `open_secret_transactions` regardless of the transaction being
        # successfully executed or not.
        with self._open_secret_transactions_lock:
            for secret in secrets_to_register:
                self.open_secret_transactions.pop(secret)

        # As of version `0.4.0` of the contract has *no* asserts or requires.
        # Therefore the only reason for the transaction to fail is if there is
        # a bug.
        unrecoverable_error = (gas_limit is None or receipt is None
                               or receipt["status"] == RECEIPT_FAILURE_CODE)

        exception: Union[RaidenRecoverableError, RaidenUnrecoverableError]
        if unrecoverable_error:
            # If the transaction was sent it must not fail. If this happened
            # one of our assumptions is broken therefore the error is
            # unrecoverable
            if receipt is not None:
                if receipt["gasUsed"] == gas_limit:
                    # The transaction failed and all gas was used. This can
                    # happen because of:
                    #
                    # - A compiler bug if an invalid opcode was executed.
                    # - A configuration bug if an assert was executed,
                    # because version 0.4.0 of the secret registry does not have an
                    # assert.
                    # - An ethereum client bug if the gas_limit was
                    # underestimated.
                    #
                    # Safety cannot be guaranteed under any of these cases,
                    # this error is unrecoverable.
                    error = (
                        "Secret registration failed because of a bug in either "
                        "the solidity compiler, the running ethereum client, or "
                        "a configuration error in Raiden.")
                else:
                    # The transaction failed and *not* all gas was used. This
                    # can happen because of:
                    #
                    # - A compiler bug if a revert was introduced.
                    # - A configuration bug, because for 0.4.0 the secret
                    # registry does not have a revert.
                    error = (
                        "Secret registration failed because of a configuration "
                        "bug or compiler bug. Please double check the secret "
                        "smart contract is at version 0.4.0, if it is then a "
                        "compiler bug was hit.")

                log.critical(error, **log_details)
                exception = RaidenUnrecoverableError(error)
                transaction_result.set_exception(exception)
                raise exception

            # If gas_limit is set and there is no receipt then an exception was
            # raised while sending the transaction. This should only happen if
            # the account is being used concurrently, which is not supported.
            # This can happen because:
            #
            # - The nonce of the transaction was already used
            # - The nonce was reused *and* the account didn't have enough ether
            # to pay for the gas
            #
            # Safety cannot be guaranteed under any of these cases, this error
            # is unrecoverable. *Note*: This assumes the ethereum client
            # takes into account the current transactions in the pool.
            if gas_limit:
                assert msg, "Unexpected control flow, an exception should have been raised."
                error = (
                    f"Sending the the transaction for registerSecretBatch failed with: `{msg}`. "
                    f"This happens if the same ethereum account is being used by more than one "
                    f"program which is not supported.")

                log.critical(error, **log_details)
                exception = RaidenUnrecoverableError(error)
                transaction_result.set_exception(exception)
                raise exception

            # gas_limit can fail because:
            #
            # - The Ethereum client detected the transaction could not
            # successfully execute, this happens if an assert/revert is hit.
            # - The account is lacking funds to pay for the gas.
            #
            # Either of these is a bug. The contract does not use
            # assert/revert, and the account should always be funded
            self.proxy.jsonrpc_client.check_for_insufficient_eth(
                transaction_name="registerSecretBatch",
                address=self.node_address,
                transaction_executed=True,
                required_gas=gas_limit,
                block_identifier=checking_block,
            )
            error = "Call to registerSecretBatch couldn't be done"

            log.critical(error, **log_details)
            exception = RaidenRecoverableError(error)
            transaction_result.set_exception(exception)
            raise exception

        # The local transaction_result **MUST** be set before waiting for the
        # other results, otherwise we have a dead-lock
        transaction_result.set(transaction_hash)

        if wait_for:
            log.info("registerSecretBatch waiting for pending", **log_details)
            gevent.joinall(wait_for, raise_error=True)

        log.info("registerSecretBatch successful", **log_details)
Beispiel #19
0
    def register_secret_batch(self, secrets: List[Secret]):
        """Register on-chain the secrets from *secrets* that are not yet known.

        Secrets that are already registered, or that already have a pending
        registration transaction, are skipped.

        Raises:
            RaidenUnrecoverableError: if the transaction could not be sent or
                was mined but threw.
        """
        secrets_to_register = list()
        secrethashes_to_register = list()
        secrethashes_not_sent = list()
        secret_registry_transaction = AsyncResult()

        # Fix: initialize these before any conditional branch. Previously
        # `msg` and `receipt_or_none` were only assigned inside branches, so
        # the failure path below raised NameError (instead of the intended
        # RaidenUnrecoverableError) when gas estimation returned a falsy
        # value or when transact()/poll() raised.
        receipt_or_none = None
        msg = ''

        for secret in secrets:
            secrethash = sha3(secret)
            secrethash_hex = encode_hex(secrethash)

            is_register_needed = (
                not self.check_registered(secrethash) and
                secret not in self.open_secret_transactions
            )
            if is_register_needed:
                secrets_to_register.append(secret)
                secrethashes_to_register.append(secrethash_hex)
                self.open_secret_transactions[secret] = secret_registry_transaction
            else:
                secrethashes_not_sent.append(secrethash_hex)

        log_details = {
            'node': pex(self.node_address),
            'contract': pex(self.address),
            'secrethashes': secrethashes_to_register,
            'secrethashes_not_sent': secrethashes_not_sent,
        }

        if not secrets_to_register:
            log.debug('registerSecretBatch skipped', **log_details)
            return

        error_prefix = 'Call to registerSecretBatch will fail'
        # NOTE(review): gas is estimated and the transaction is sent with the
        # full *secrets* list rather than the filtered secrets_to_register —
        # confirm whether re-registering already-known secrets is intended.
        gas_limit = self.proxy.estimate_gas('pending', 'registerSecretBatch', secrets)
        if gas_limit:
            error_prefix = 'Call to registerSecretBatch failed'
            try:
                gas_limit = safe_gas_limit(
                    gas_limit,
                    len(secrets) * GAS_REQUIRED_PER_SECRET_IN_BATCH,
                )
                transaction_hash = self.proxy.transact('registerSecretBatch', gas_limit, secrets)
                self.client.poll(transaction_hash)
                receipt_or_none = check_transaction_threw(self.client, transaction_hash)
            except Exception as e:
                secret_registry_transaction.set_exception(e)
                msg = 'Unexpected exception at sending registerSecretBatch transaction'
            else:
                secret_registry_transaction.set(transaction_hash)
            finally:
                # Always clear the in-flight markers so later calls can retry.
                for secret in secrets_to_register:
                    self.open_secret_transactions.pop(secret, None)

        transaction_executed = gas_limit is not None
        if not transaction_executed or receipt_or_none:
            if transaction_executed:
                block = receipt_or_none['blockNumber']
            else:
                block = 'pending'

            self.proxy.jsonrpc_client.check_for_insufficient_eth(
                transaction_name='registerSecretBatch',
                transaction_executed=transaction_executed,
                required_gas=len(secrets) * GAS_REQUIRED_PER_SECRET_IN_BATCH,
                block_identifier=block,
            )
            error_msg = f'{error_prefix}. {msg}'
            log.critical(error_msg, **log_details)
            raise RaidenUnrecoverableError(error_msg)

        log.info('registerSecretBatch successful', **log_details)
Beispiel #20
0
class StreamingExtract(object):
    """Streaming multi-part rar extraction driver (Python 2 code).

    Feeds rar volumes to an ``unrar`` subprocess as their downloads finish,
    brute-forces the archive password when required, and mirrors extracted
    files into a "library" package.  Instances register themselves in the
    module-level ``extractors`` dict under their ``id``.
    """

    def __init__(self, id, hddsem, threadpool):
        self.id = id
        self.hddsem = hddsem
        self.threadpool = threadpool

        self.password = None

        self.killed = False
        self.parts = dict()              # path.path -> (FilePath, file)
        self.first = None                # (path, file) of the first volume
        self.current = None              # (path, file) being extracted now
        self.next = None                 # path of the next expected volume
        self.next_part_event = AsyncResult()
        self.rar = None                  # running unrar subprocess, if any
        self.library = None              # package receiving extracted files
        self._library_added = set()
        self._deleted_library = None
        extractors[id] = self

    def feed_part(self, path, file):
        """Queue a downloaded part; non-first parts block until extraction ends."""
        path.finished = AsyncResult()
        self.parts[path.path] = path, file
        log.debug('fed new part {}: {}'.format(path, path))

        if file.state != 'rarextract':
            with transaction:
                file.state = 'rarextract'

        if self.first is None:
            self.first = self.current = path, file
            self.add_library_files()
            self.run(path, file)
        else:
            if path.path == self.next:
                self.next_part_event.set(path)
            path.finished.get()

    def run(self, path, file):
        """Start extraction at *path*; returns False when archives get deleted."""
        try:
            self.first = self.current
            with transaction:
                file.greenlet = gevent.getcurrent()
                file.on_greenlet_started()
            try:
                result = self.bruteforce(path, file)
            except rarfile.NeedFirstVolume:
                # Fed a middle volume first: compute the .part1 name and wait.
                self.next = os.path.join(path.dir, "{}.part{}.rar".format(path.basename, "1".zfill(len(path.part))))
                self.find_next()
                if core.config.delete_extracted_archives:
                    return False
                return

            if result and result is not True:
                raise result

            if self.password:
                rarpw = "-p"+self.password
            else:
                rarpw = "-p-"

            cmd = [rarfile.UNRAR_TOOL, "x", "-y", rarpw, "-idq", "-vp", path, file.get_extract_path() + os.sep]
            file.log.info("starting extraction of {} with params {}".format(path[1:], cmd))
            self.rar = rarfile.custom_popen(cmd)

            self.wait_data()
            if not path.finished.ready():
                path.finished.set()
            if core.config.delete_extracted_archives:
                return False
        except BaseException as e:
            traceback.print_exc()
            self.kill(e)
            raise

    def bruteforce(self, path, file):
        """Determine the archive password (crack known ones, then ask the user)."""
        rar = rarfile.RarFile(path, ignore_next_part_missing=True)
        if rar.not_first_volume:
            raise rarfile.NeedFirstVolume("First Volume for extraction")

        if not rar.needs_password():
            self.password = None
            return
        # Candidate passwords: package-specific first, then global history.
        passwords = []
        for i in itertools.chain(file.package.extract_passwords, core.config.bruteforce_passwords):
            if not i in passwords:
                passwords.append(i)
        if rar.needs_password() and rar.infolist():
            pw = bruteforce_by_content(rar, passwords)
            if not pw:
                print "could not find password, asking user"
                for pw in file.solve_password(
                        message="Rarfile {} password cannot be cracked. Enter correct password: #".format(path.name),
                        retries=5):
                    pw = bruteforce_by_content(rar, [pw])
                    if pw:
                        break
                else:
                    return self.kill('extract password not entered')
            else:
                print "Found password by content:", pw
            self.password = pw
            return
        print "testing", passwords
        if not self.threadpool.apply(bruteforce, (rar, passwords, self.hddsem, file.log)):
            # ask user for password
            for pw in file.solve_password(
                    message="Enter the extract password for file: {} #".format(path.name),
                    retries=5):
                if self.threadpool.apply(bruteforce, (rar, [pw], self.hddsem, file.log)):
                    break
            else:
                return self.kill('extract password not entered')

        self.password = rar._password
        # Remember newly discovered passwords for future archives.
        if self.password and self.password not in core.config.bruteforce_passwords:
            with transaction:
                core.config.bruteforce_passwords.append(self.password)

    def wait_data(self):
        """Consume unrar stdout byte-wise, dispatching each complete line."""
        bytes = ''
        while True:
            data = self.rar.stdout.read(1)
            if not data:
                break

            bytes += data
            for i in bytes.splitlines():
                if i:
                    result = self.new_data(i)
                    if result is True:
                        bytes = ''
                    if result and result is not True:
                        raise result
        self.close()

    def finish_file(self, path, file):
        """Mark a part's file as completely extracted and fire the part event."""
        if file is not None:
            with core.transaction:
                #if not 'rarextract' in file.completed_plugins:
                #    file.completed_plugins.append('rarextract')
                #file.greenlet = None
                #file.on_greenlet_finish()
                #file.on_greenlet_stopped()
                file.state = 'rarextract_complete'
                file.init_progress(1)
                file.set_progress(1)
                #file.stop()
        #path.finished.set()
        event.fire('rarextract:part_complete', path, file)

    def new_data(self, data):
        """called when new data or new line"""
        if "packed data CRC failed in volume" in data:
            return self.kill('checksum error in rar archive')

        if data.startswith("CRC failed in the encrypted file"):  # corrupt file or download not complete
            return self.kill('checksum error in rar archive. wrong password?')

        if "bad archive" in data.lower():
            return self.kill('Bad archive')

        # unrar prompts for the next volume with this message.
        m = re.search(r"Insert disk with (.*?) \[C\]ontinue\, \[Q\]uit", data)
        if not m:
            return

        if self.current is not None:
            self.finish_file(*self.current)

        self.next = m.group(1)
        print "setting self.next", self.next
        return self.find_next()

    def find_next(self):
        """Locate (or wait for) the volume named in ``self.next``, then continue."""
        print "finding next", self.next
        next = self.next
        if next not in self.parts:
            # check if file is in core.files()
            found = False
            name = os.path.basename(next)
            for f in core.files():
                if f.name == name and f.get_complete_file() == next:
                    if not f.working and 'download' in f.completed_plugins:
                        found = True
                        current = fileplugin.FilePath(next), f
                        current[0].finished = AsyncResult()
                        self.parts[next] = current
                        print('got next part from idle {}: {}'.format(next, self.current[0]))
                        break
                    if f.state == "download":
                        found = True
                        break
                    print "found path but not valid", f.state, f.working

            if not found:
                # file is not in system, check if it exists on hdd
                if os.path.exists(next):
                    current = fileplugin.FilePath(next), self.first[1]
                    current[0].finished = AsyncResult()
                    self.parts[next] = current
                    print('got next part from hdd {}: {}'.format(next, self.current[0]))
                else:
                    # part does not exists. fail this extract
                    return self.kill('missing part {}'.format(next))

            if next not in self.parts:
                print('waiting for part {}'.format(next))
                event.fire('rarextract:waiting_for_part', next)

                @event.register("file:last_error")
                def killit(e, f):
                    if not f.name == name and f.get_complete_file() == next:
                        return
                    if all(f.last_error for f in core.files() if f.name == name and f.get_complete_file() == next):
                        event.remove("file:last_error", killit)
                        self.kill('all of the next parts are broken.')

                while next not in self.parts:
                    self.next_part_event.get()
                    self.next_part_event = AsyncResult()

                log.debug('got next part from wait {}: {}'.format(next, self.current[0]))

        self.current = self.parts[next]
        self.add_library_files()
        return self.go_on()

    def add_library_files(self):
        """Add extracted files into the library"""
        path = fileplugin.FilePath(self.current[0])
        f = self.first[1]

        with transaction:
            if not self.library:
                print "Creating package for", path.basename
                name = "{} {}".format("Extracted files from", os.path.basename(path.basename))
                for p in core.packages():
                    if p.name == name:
                        self.library = p
                        self._library_added = set(f.name for f in p.files)
                        print "\treused package", p.id
                        print "package", p.id, p.tab
                if not self.library:
                    self.library = f.package.clone_empty(
                        name=name,
                        tab="complete",
                        state="download_complete",
                    )

                @event.register("package:deleted")
                @event.register("file:deleted")
                def _deleted_library(event, package):
                    if event.startswith("file:"):
                        package = package.package

                    if package.id == self.library.id:
                        event.remove(_deleted_library)
                        for f in self.library.files:
                            f.delete_local_files()
                        self.kill("Extracted files have been deleted.", False)

                self._deleted_library = _deleted_library

            rar = rarfile.RarFile(path, ignore_next_part_missing=True)
            print "password is", self.password
            try:
                if not rar.infolist():
                    rar.setpassword(self.password)
            except rarfile.BadRarFile:
                if not rar.infolist():
                    self.library.delete()
                    return
            links = []
            for item in rar.infolist():
                name = item.filename
                print "From new infolist:", name
                if name in self._library_added:
                    print "\t already added"
                    continue
                elif item.isdir():
                    print "\t is dir"
                    continue
                else:
                    self._library_added.add(name)
                print "creating file for", repr(name), self.library

                links.append(dict(
                    name=name,
                    size=item.file_size,
                    url=u'file://' + os.path.join(
                        f.get_extract_path().decode(sys.getfilesystemencoding()),
                        name),
                ))
        if links:
            core.add_links(links, package_id=self.library.id)

    def go_on(self):
        """Tell the running unrar process to continue with the next volume."""
        if self.rar is None:
            return self.run(*self.current)
        if not os.path.exists(self.next):
            return
        self.rar.stdin.write("C\n")
        self.rar.stdin.flush()

        if self.current[1] is not None:
            with core.transaction:
                self.current[1].greenlet = gevent.getcurrent()
                self.current[1].greenlet.link(self.current[0].finished)
                self.current[1].on_greenlet_started()
            self.current[1].log.info("extract go on: {}".format(self.current[1].name))
        return True

    def kill(self, exc="", _del_lib=True):
        """Abort extraction, failing all waiting parts with *exc*."""
        blacklist.add(self.first[0].basename)  # no autoextract for failed archives
        if _del_lib:
            self.library.delete()
        print "killing rarextract", self.first[0].basename
        if isinstance(exc, basestring):
            exc = ValueError(exc)

        self.current = None
        self.killed = True

        if self.rar is not None:
            self.rar.terminate()
            self.rar = None

        try:
            del extractors[self.id]
        except KeyError:
            pass

        self.next_part_event.set_exception(exc)
        for path, file in self.parts.values():
            if not path.finished.ready():
                path.finished.set_exception(exc)

        with transaction:
            for path, file in self.parts.values():
                if file is not None:
                    file.stop()
                    if file.state == 'rarextract_complete':
                        file.state = 'rarextract'
                        file.enabled = False
                    if 'rarextract' in file.completed_plugins:
                        file.completed_plugins.remove('rarextract')

        self.first[1].fatal('rarextract: {}'.format(exc))

        return exc

    def close(self):
        """called when process is closed"""
        if not self.library:
            self.add_library_files()
        try:
            del extractors[self.id]
        except KeyError:
            pass

        if self._deleted_library:
            event.remove("package:deleted", self._deleted_library)

        if not self.killed:
            if self.current is not None:
                self.finish_file(*self.current)

            if core.config.delete_extracted_archives:
                with transaction:
                    for path, file in self.parts.values():
                        if file:
                            file.delete_local_files()
                            file.fatal('extracted and deleted', type='info', abort_greenlet=False)
                        else:
                            os.remove(path)
            else:
                for path, file in self.parts.values():
                    if file:
                        file.log.info('extract complete')
Beispiel #21
0
    def new_netting_channel(
        self,
        partner: typing.Address,
        settle_timeout: int,
    ) -> typing.ChannelID:
        """ Creates a new channel in the TokenNetwork contract.

        Args:
            partner: The peer to open the channel with.
            settle_timeout: The settle timeout to use for this channel.

        Returns:
            The channel identifier of the new netting channel.

        Raises:
            InvalidAddress: if *partner* is not a binary address.
            InvalidSettleTimeout: if *settle_timeout* is out of range.
            SamePeerAddress: if *partner* equals our own address.
            RuntimeError: if the channel does not exist after the transaction.
        """
        if not isaddress(partner):
            raise InvalidAddress(
                'Expected binary address format for channel partner')

        invalid_timeout = (settle_timeout < NETTINGCHANNEL_SETTLE_TIMEOUT_MIN
                           or
                           settle_timeout > NETTINGCHANNEL_SETTLE_TIMEOUT_MAX)
        if invalid_timeout:
            raise InvalidSettleTimeout(
                'settle_timeout must be in range [{}, {}]'.format(
                    NETTINGCHANNEL_SETTLE_TIMEOUT_MIN,
                    NETTINGCHANNEL_SETTLE_TIMEOUT_MAX))

        if self.node_address == partner:
            raise SamePeerAddress(
                'The other peer must not have the same address as the client.')

        # Prevent concurrent attempts to open a channel with the same token and
        # partner address.
        # NOTE(review): the membership test and the dict insertion are not
        # atomic under preemptive threading; presumably this runs under
        # gevent's cooperative scheduling where no switch can occur between
        # them — confirm.
        if partner not in self.open_channel_transactions:
            new_open_channel_transaction = AsyncResult()
            self.open_channel_transactions[
                partner] = new_open_channel_transaction

            try:
                transaction_hash = self._new_netting_channel(
                    partner, settle_timeout)
            except Exception as e:
                new_open_channel_transaction.set_exception(e)
                raise
            else:
                new_open_channel_transaction.set(transaction_hash)
            finally:
                # Waiters already hold a reference to the AsyncResult, so
                # popping here does not lose the result for them.
                self.open_channel_transactions.pop(partner, None)
        else:
            # All other concurrent threads should block on the result of opening this channel
            transaction_hash = self.open_channel_transactions[partner].get()

        channel_created = self.channel_exists(partner)
        if channel_created is False:
            log.error('creating new channel failed',
                      peer1=pex(self.node_address),
                      peer2=pex(partner))
            raise RuntimeError('creating new channel failed')

        channel_identifier = self.detail_channel(partner)['channel_identifier']

        log.info(
            'new_netting_channel called',
            peer1=pex(self.node_address),
            peer2=pex(partner),
            channel_identifier=channel_identifier,
        )

        return channel_identifier
Beispiel #22
0
class TaskExecutor(object):
    """Drives a single out-of-process task worker.

    An `executor` greenlet (re)spawns the worker subprocess; the worker
    checks back in over RPC (`checkin`) and reports progress/status, which
    is relayed into the balancer's task bookkeeping. A gevent Condition
    (`self.cv`) guards the WorkerState transitions, and an AsyncResult
    (`self.result`) carries the outcome of the currently running task.
    """

    def __init__(self, balancer, index):
        self.balancer = balancer
        self.index = index
        # Task currently assigned to this executor (None when idle).
        self.task = None
        # Worker subprocess handle and its PID.
        self.proc = None
        self.pid = None
        # RPC connection registered by the worker on check-in.
        self.conn = None
        self.state = WorkerState.STARTING
        # Random key the spawned worker uses to identify itself.
        self.key = str(uuid.uuid4())
        # Completion future for the currently running task.
        self.result = AsyncResult()
        self.exiting = False
        self.killed = False
        # Greenlet that keeps the worker process alive (see executor()).
        self.thread = gevent.spawn(self.executor)
        self.cv = Condition()
        self.status_lock = RLock()

    def checkin(self, conn):
        """Called by the worker to register its RPC connection; marks IDLE."""
        with self.cv:
            self.balancer.logger.debug(
                'Check-in of worker #{0} (key {1})'.format(
                    self.index, self.key))
            self.conn = conn
            self.state = WorkerState.IDLE
            self.cv.notify_all()

    def put_progress(self, progress):
        """Apply a serialized TaskStatus progress update to the task."""
        st = TaskStatus(None)
        st.__setstate__(progress)
        self.task.set_state(progress=st)

    def put_status(self, status):
        """Handle a status message from the worker.

        Recognized ``status['status']`` values: ROLLBACK, FINISHED, FAILED.
        FINISHED/FAILED resolve ``self.result`` and thereby unblock run().
        """
        with self.cv:
            # Try to collect rusage at this point, when process is still alive
            try:
                kinfo = self.balancer.dispatcher.threaded(
                    bsd.kinfo_getproc, self.pid)
                self.task.rusage = kinfo.rusage
            except LookupError:
                pass

            if status['status'] == 'ROLLBACK':
                self.task.set_state(TaskState.ROLLBACK)

            if status['status'] == 'FINISHED':
                self.result.set(status['result'])

            if status['status'] == 'FAILED':
                error = status['error']

                # Reconstruct the worker-side exception from its serialized
                # form; unknown types fall back to OtherException.
                if error['type'] in ERROR_TYPES:
                    cls = ERROR_TYPES[error['type']]
                    exc = cls(code=error['code'],
                              message=error['message'],
                              stacktrace=error['stacktrace'],
                              extra=error.get('extra'))
                else:
                    exc = OtherException(
                        code=error['code'],
                        message=error['message'],
                        stacktrace=error['stacktrace'],
                        type=error['type'],
                        extra=error.get('extra'),
                    )

                self.result.set_exception(exc)

    def put_warning(self, warning):
        """Attach a warning emitted by the worker to the current task."""
        self.task.add_warning(warning)

    def update_env(self, env):
        """Push a new environment mapping to the worker over RPC."""
        self.conn.call_sync('taskproxy.update_env', env)

    def run(self, task):
        """Execute *task* on the worker and block until it finishes.

        Waits for this executor to reach ASSIGNED state, locates the plugin
        file defining the task class, asks the worker to run it, then waits
        on ``self.result`` and records the final task state (FINISHED,
        ABORTED or FAILED).
        """
        def match_file(module, f):
            # A plugin file matches when its basename equals the module name.
            name, ext = os.path.splitext(f)
            return module == name and ext in ['.py', '.pyc', '.so']

        with self.cv:
            self.cv.wait_for(lambda: self.state == WorkerState.ASSIGNED)
            # Fresh result future for this task run.
            self.result = AsyncResult()
            self.task = task
            self.task.set_state(TaskState.EXECUTING)
            self.state = WorkerState.EXECUTING
            self.cv.notify_all()

        self.balancer.logger.debug('Actually starting task {0}'.format(
            task.id))

        # Find the source file of the task class among the plugin dirs so
        # the worker process can import it.
        filename = None
        module_name = inspect.getmodule(task.clazz).__name__
        for dir in self.balancer.dispatcher.plugin_dirs:
            found = False
            try:
                for root, _, files in os.walk(dir):
                    file = first_or_default(
                        lambda f: match_file(module_name, f), files)
                    if file:
                        filename = os.path.join(root, file)
                        found = True
                        break

                if found:
                    break
            except OSError:
                continue

        try:
            self.conn.call_sync(
                'taskproxy.run', {
                    'id': task.id,
                    'user': task.user,
                    'class': task.clazz.__name__,
                    'filename': filename,
                    'args': task.args,
                    'debugger': task.debugger,
                    'environment': task.environment,
                    'hooks': task.hooks,
                })
        except RpcException as e:
            self.balancer.logger.warning(
                'Cannot start task {0} on executor #{1}: {2}'.format(
                    task.id, self.index, str(e)))

            self.balancer.logger.warning(
                'Killing unresponsive task executor #{0} (pid {1})'.format(
                    self.index, self.proc.pid))

            self.terminate()

        try:
            # Blocks until put_status() or executor() resolves the result.
            self.result.get()
        except BaseException as e:
            if isinstance(e, OtherException):
                self.balancer.dispatcher.report_error(
                    'Task {0} raised invalid exception'.format(self.task.name),
                    e)

            if isinstance(e, TaskAbortException):
                self.task.set_state(TaskState.ABORTED,
                                    TaskStatus(0, 'aborted'))
            else:
                self.task.error = serialize_error(e)
                self.task.set_state(
                    TaskState.FAILED,
                    TaskStatus(0,
                               str(e),
                               extra={"stacktrace": traceback.format_exc()}))

            with self.cv:
                self.task.ended.set()

                if self.state == WorkerState.EXECUTING:
                    self.state = WorkerState.IDLE
                    self.cv.notify_all()

            self.balancer.task_exited(self.task)
            return

        with self.cv:
            self.task.result = self.result.value
            self.task.set_state(TaskState.FINISHED, TaskStatus(100, ''))
            self.task.ended.set()
            if self.state == WorkerState.EXECUTING:
                self.state = WorkerState.IDLE
                self.cv.notify_all()

        self.balancer.task_exited(self.task)

    def abort(self):
        """Abort the running task gracefully; kill the worker on RPC failure."""
        self.balancer.logger.info("Trying to abort task #{0}".format(
            self.task.id))
        # Try to abort via RPC. If this fails, kill process
        try:
            # If task supports abort protocol we don't need to worry about subtasks - it's task
            # responsibility to kill them
            self.conn.call_sync('taskproxy.abort')
        except RpcException as err:
            self.balancer.logger.warning(
                "Failed to abort task #{0} gracefully: {1}".format(
                    self.task.id, str(err)))
            self.balancer.logger.warning("Killing process {0}".format(
                self.pid))
            self.killed = True
            self.terminate()

            # Now kill all the subtasks
            for subtask in filter(lambda t: t.parent is self.task,
                                  self.balancer.task_list):
                self.balancer.logger.warning(
                    "Aborting subtask {0} because parent task {1} died".format(
                        subtask.id, self.task.id))
                self.balancer.abort(subtask.id)

    def terminate(self):
        """SIGTERM the worker process, tolerating an already-dead process."""
        try:
            self.proc.terminate()
        except OSError:
            self.balancer.logger.warning(
                'Executor process with PID {0} already dead'.format(
                    self.proc.pid))

    def executor(self):
        """Greenlet body: keep (re)spawning the worker until `die()` is called.

        Pumps the worker's combined stdout/stderr into the log (and into the
        current task's output), waits for the process to exit, then fails
        the pending result so any waiter in run() is released.
        """
        while not self.exiting:
            try:
                self.proc = Popen([TASKWORKER_PATH, self.key,
                                   str(self.index)],
                                  close_fds=True,
                                  preexec_fn=os.setpgrp,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT)

                self.pid = self.proc.pid
                self.balancer.logger.debug(
                    'Started executor #{0} as PID {1}'.format(
                        self.index, self.pid))
            except OSError:
                self.result.set_exception(
                    TaskException(errno.EFAULT, 'Cannot spawn task executor'))
                self.balancer.logger.error(
                    'Cannot spawn task executor #{0}'.format(self.index))
                return

            for line in self.proc.stdout:
                line = line.decode('utf8')
                self.balancer.logger.debug('Executor #{0}: {1}'.format(
                    self.index, line.strip()))
                if self.task:
                    self.task.output += line

            self.proc.wait()

            with self.cv:
                self.state = WorkerState.STARTING
                self.cv.notify_all()

            if self.proc.returncode == -signal.SIGTERM:
                self.balancer.logger.info(
                    'Executor process with PID {0} was terminated gracefully'.
                    format(self.proc.pid))
            else:
                self.balancer.logger.error(
                    'Executor process with PID {0} died abruptly with exit code {1}'
                    .format(self.proc.pid, self.proc.returncode))

            # Release anyone blocked on the current task's result.
            if self.killed:
                self.result.set_exception(
                    TaskException(errno.EFAULT, 'Task killed'))
            else:
                self.result.set_exception(
                    TaskException(errno.EFAULT, 'Task executor died'))
            gevent.sleep(1)

    def die(self):
        """Stop the respawn loop and terminate the worker process."""
        self.exiting = True
        if self.proc:
            self.terminate()
Beispiel #23
0
class InputTable(scheme.Table):
    """A pending user-input request persisted in the 'input' scheme table.

    Wraps a gevent AsyncResult so callers can block until the input is
    answered (`set_result`), failed (`set_error`) or times out
    (`_timed_out` fires `InputTimeout`).
    """
    _table_name = 'input'
    _table_collection = input_tables

    id = scheme.Column('api')
    type = scheme.Column('api')
    parent = scheme.Column(
        'api',
        lambda self, value: value and [value._table_name, value.id] or None)
    timeout = scheme.Column(
        'api',
        lambda self, timeout: timeout and int(timeout.eta * 1000) or None)
    elements = scheme.Column('api')
    result = scheme.Column('api')
    close_aborts = scheme.Column('api')

    ignore_api = False

    def __init__(self,
                 type,
                 parent,
                 timeout,
                 elements,
                 close_aborts,
                 ignore_api=False):
        self.type = type
        self.parent = parent
        self.timeout = None
        # Normalize every element to a list. The previous
        # `isinstance(e, list) and e or [e]` idiom wrongly turned an empty
        # list element into [[]] (an empty list is falsy, so the `or` arm
        # fired); the conditional expression keeps [] as [].
        self.elements = [e if isinstance(e, list) else [e] for e in elements]
        self.close_aborts = close_aborts
        self.ignore_api = ignore_api

        if parent:
            parent.input = self

        # Future the answering side fulfills; consumers block on it.
        self._result = AsyncResult()
        self.reset_timeout(timeout)

    def set_result(self, value):
        """Fulfill the input with *value*; no-op if already answered."""
        if self._result.ready():
            #raise RuntimeError('result of input already set')
            return
        with scheme.transaction:
            self.result = value
            self.reset_timeout(None)
        self._result.set(value)
        event.fire("input:result", self)

    def set_error(self, value):
        """Fail the input with exception *value*; no-op if already answered."""
        if self._result.ready():
            #raise RuntimeError('result of input already set')
            return
        with scheme.transaction:
            self.result = str(value)
            self.reset_timeout(None)
        self._result.set_exception(value)
        event.fire("input:error", self)

    def reset_timeout(self, timeout):
        """Replace the timeout timer.

        *timeout* is a number of seconds, or a falsy value to disable the
        timer. Any previously scheduled timer greenlet is killed first.
        """
        with scheme.transaction:
            if self.timeout:
                self.timeout.kill()
            if timeout:
                self.timeout = gevent.spawn_later(timeout, self._timed_out)
                self.timeout.eta = time.time() + timeout
            elif self.timeout:
                self.timeout = None

    def _timed_out(self):
        """Timer callback: clear the timer and fail the input with InputTimeout."""
        with scheme.transaction:
            self.timeout = None
            self.set_error(InputTimeout())
Beispiel #24
0
    def new_netting_channel(self, other_peer: Address,
                            settle_timeout: int) -> Address:
        """ Creates and deploys a new netting channel contract.

        Args:
            other_peer: The peer to open the channel with.
            settle_timeout: The settle timeout to use for this channel.

        Returns:
            The address of the new netting channel.

        Raises:
            ValueError: If `other_peer` is not a valid binary address.
            InvalidSettleTimeout: If `settle_timeout` is out of range.
            SamePeerAddress: If `other_peer` equals the local address.
            RuntimeError: If the deployed channel address cannot be read back.
        """
        if not is_binary_address(other_peer):
            raise ValueError('The other_peer must be a valid address')

        invalid_timeout = (settle_timeout < NETTINGCHANNEL_SETTLE_TIMEOUT_MIN
                           or
                           settle_timeout > NETTINGCHANNEL_SETTLE_TIMEOUT_MAX)
        if invalid_timeout:
            raise InvalidSettleTimeout(
                'settle_timeout must be in range [{}, {}]'.format(
                    NETTINGCHANNEL_SETTLE_TIMEOUT_MIN,
                    NETTINGCHANNEL_SETTLE_TIMEOUT_MAX,
                ))

        local_address = privatekey_to_address(self.client.privkey)
        if local_address == other_peer:
            raise SamePeerAddress(
                'The other peer must not have the same address as the client.')

        # Prevent concurrent attempts to open a channel with the same token and
        # partner address.
        if other_peer not in self.open_channel_transactions:
            # First caller: publish an AsyncResult so concurrent callers wait
            # on this transaction instead of sending a duplicate one.
            new_open_channel_transaction = AsyncResult()
            self.open_channel_transactions[
                other_peer] = new_open_channel_transaction

            try:
                transaction_hash = self._new_netting_channel(
                    other_peer, settle_timeout)
            except Exception as e:
                # Propagate the failure to every waiter, then re-raise here.
                new_open_channel_transaction.set_exception(e)
                raise
            else:
                new_open_channel_transaction.set(transaction_hash)
            finally:
                # Always deregister so a later call may retry after a failure.
                self.open_channel_transactions.pop(other_peer, None)
        else:
            # All other concurrent threads should block on the result of opening this channel
            # (AsyncResult.get() re-raises the first caller's exception, if
            # any; the transaction hash itself is not needed here).
            self.open_channel_transactions[other_peer].get()

        netting_channel_results_encoded = self.proxy.contract.functions.getChannelWith(
            to_checksum_address(other_peer), ).call(
                {'from': to_checksum_address(self.client.sender)})

        # address is at index 0
        netting_channel_address_encoded = netting_channel_results_encoded

        if not netting_channel_address_encoded:
            log.error(
                'netting_channel_address failed',
                peer1=pex(local_address),
                peer2=pex(other_peer),
            )
            raise RuntimeError('netting_channel_address failed')

        netting_channel_address_bin = to_canonical_address(
            netting_channel_address_encoded)

        log.info(
            'new_netting_channel called',
            peer1=pex(local_address),
            peer2=pex(other_peer),
            netting_channel=pex(netting_channel_address_bin),
        )

        return netting_channel_address_bin
Beispiel #25
0
class AceClient(object):
    """Telnet-protocol client for a local AceStream engine.

    Opens a telnet connection to the engine, speaks the line-based
    AceMessage protocol from a background greenlet (`_recvData`) and
    exposes blocking command methods (START/STOP/LOADASYNC/...) whose
    answers arrive via gevent AsyncResult/Event objects.

    NOTE(review): this module appears to target Python 2 (`dict.has_key`
    is used below) — confirm before porting or restyling.
    """

    def __init__(self, acehost, aceAPIport, aceHTTPport, acehostslist, connect_timeout=5, result_timeout=10):
        # Receive buffer
        self._recvbuffer = None
        # Stream URL
        self._url = None
        # Ace stream socket
        self._socket = None
        # Result timeout
        self._resulttimeout = result_timeout
        # Shutting down flag
        self._shuttingDown = Event()
        # Product key
        self._product_key = None
        # Current STATUS
        self._status = None
        # Current STATE
        self._state = None
        # Current video position
        self._position = None
        # Available video position (loaded data)
        self._position_last = None
        # Buffered video pieces
        self._position_buf = None
        # Current AUTH
        self._auth = None
        self._gender = None
        self._age = None
        # Result (Created with AsyncResult() on call)
        self._result = AsyncResult()
        self._authevent = Event()
        # Result for getURL()
        self._urlresult = AsyncResult()
        # Result for GETCID()
        self._cidresult = AsyncResult()
        # Event for resuming from PAUSE
        self._resumeevent = Event()
        # Seekback seconds.
        self._seekback = AceConfig.videoseekback
        # Did we get START command again? For seekback.
        self._started_again = False

        self._idleSince = time.time()
        self._lock = threading.Condition(threading.Lock())
        self._streamReaderConnection = None
        self._streamReaderState = None
        self._streamReaderQueue = deque()
        self._engine_version_code = 0;

        # Logger
        # NOTE(review): logger name looks garbled ('AceClientimport
        # tracebacknt_init') — probably meant 'AceClient_init'; confirm
        # before renaming, as it only affects log routing.
        logger = logging.getLogger('AceClientimport tracebacknt_init')

        # Try to connect AceStream engine
        try:
            self._socket = telnetlib.Telnet(acehost, aceAPIport, connect_timeout)
            AceConfig.acehost, AceConfig.aceAPIport, AceConfig.aceHTTPport = acehost, aceAPIport, aceHTTPport
            logger.debug("Successfully connected to AceStream on %s:%d" % (acehost, aceAPIport))
        except:
            # Primary host unreachable: probe the fallback host list in order
            # and use the first engine that accepts the connection.
            for AceEngine in acehostslist:
               try:
                   self._socket = telnetlib.Telnet(AceEngine[0], AceEngine[1], connect_timeout)
                   AceConfig.acehost, AceConfig.aceAPIport, AceConfig.aceHTTPport = AceEngine[0], AceEngine[1], AceEngine[2]
                   logger.debug("Successfully connected to AceStream on %s:%d" % (AceEngine[0], AceEngine[1]))
                   break
               except:
                   logger.debug("The are no alive AceStream on %s:%d" % (AceEngine[0], AceEngine[1]))
                   pass

        # Spawning recvData greenlet
        gevent.spawn(self._recvData)
        gevent.sleep()

    def __del__(self):
        # Destructor just calls destroy() method
        self.destroy()

    def destroy(self):
        '''
        AceClient Destructor
        '''
        if self._shuttingDown.isSet():
        # Already in the middle of destroying
            return

        # Logger
        logger = logging.getLogger("AceClient_destroy")
        # We should resume video to prevent read greenlet deadlock
        self._resumeevent.set()
        # And to prevent getUrl deadlock
        self._urlresult.set()

        # Trying to disconnect
        try:
            logger.debug("Destroying client...")
            self._shuttingDown.set()
            self._write(AceMessage.request.SHUTDOWN)
        except:
            # Ignore exceptions on destroy
            pass
        finally:
            self._shuttingDown.set()

    def reset(self):
        '''Reset per-stream state between playbacks.'''
        self._started_again = False
        self._idleSince = time.time()
        self._streamReaderState = None

    def _write(self, message):
        '''Send one protocol line to the engine, appending CRLF.'''
        try:
            logger = logging.getLogger("AceClient_write")
            logger.debug(message)
            self._socket.write(message + "\r\n")
        except EOFError as e:
            raise AceException("Write error! " + repr(e))

    def aceInit(self, gender=AceConst.SEX_MALE, age=AceConst.AGE_18_24, product_key=None, seekback=0):
        '''Perform the HELLO/READY/AUTH handshake with the engine.'''
        self._product_key = product_key
        self._gender = gender
        self._age = age
        # Seekback seconds
        self._seekback = seekback

        # Logger
        logger = logging.getLogger("AceClient_aceInit")

        # Sending HELLO
        self._write(AceMessage.request.HELLO)
        if not self._authevent.wait(self._resulttimeout):
            errmsg = "Authentication timeout. Wrong key?"
            logger.error(errmsg)
            raise AceException(errmsg)
            # NOTE(review): unreachable — the raise above exits first.
            return

        if not self._auth:
            errmsg = "Authentication error. Wrong key?"
            logger.error(errmsg)
            raise AceException(errmsg)
            # NOTE(review): unreachable — the raise above exits first.
            return

        logger.debug("aceInit ended")

    def _getResult(self):
        '''Block on self._result and return its value.

        Raises AceException on a falsy result or on gevent.Timeout.
        '''
        # Logger
        try:
            result = self._result.get(timeout=self._resulttimeout)
            if not result:
                raise AceException("Result not received")
        except gevent.Timeout:
            raise AceException("Timeout")

        return result

    def START(self, datatype, value):
        '''
        Start video method
        '''
        # HLS output needs engine >= 3.1.5 and VLC mode; otherwise plain HTTP.
        if self._engine_version_code >= 3010500 and AceConfig.vlcuse:
           stream_type = 'output_format=hls' + ' transcode_audio=' + str(AceConfig.transcode_audio) \
                                             + ' transcode_mp3=' + str(AceConfig.transcode_mp3) \
                                             + ' transcode_ac3=' + str(AceConfig.transcode_ac3) \
                                             + ' preferred_audio_language=' + AceConfig.preferred_audio_language
        else:
           stream_type = 'output_format=http'

        self._urlresult = AsyncResult()
        self._write(AceMessage.request.START(datatype.upper(), value, stream_type))
        self._getResult()

    def STOP(self):
        '''
        Stop video method
        '''
        if self._state is not None and self._state != '0':
            self._result = AsyncResult()
            self._write(AceMessage.request.STOP)
            self._getResult()

    def LOADASYNC(self, datatype, value):
        '''Issue LOADASYNC and return the parsed content info.'''
        self._result = AsyncResult()
        self._write(AceMessage.request.LOADASYNC(datatype.upper(), random.randint(1, AceConfig.maxconns * 10000), value))
        return self._getResult()

    def GETCONTENTINFO(self, datatype, value):
        '''Build the LOADASYNC parameter dict for *datatype* and run it.'''
        # NOTE(review): local name `dict` shadows the builtin — safe here
        # but worth renaming when this block is next touched.
        dict = {'torrent': 'url', 'infohash':'infohash', 'raw':'data', 'pid':'content_id'}
        self.paramsdict={dict[datatype]:value,'developer_id':'0','affiliate_id':'0','zone_id':'0'}
        contentinfo = self.LOADASYNC(datatype, self.paramsdict)
        return contentinfo

    def GETCID(self, datatype, url):
        '''Resolve the content id (CID) for *url*; '' when unavailable.'''
        contentinfo = self.GETCONTENTINFO(datatype, url)
        self._cidresult = AsyncResult()
        self._write(AceMessage.request.GETCID(contentinfo.get('checksum'), contentinfo.get('infohash'), 0, 0, 0))
        cid = self._cidresult.get(True, 5)
        # The engine prefixes the CID line with '##'; strip it.
        return '' if not cid or cid == '' else cid[2:]

    def getUrl(self, timeout=30):
        '''Block until the engine announces the playback URL (via START).'''
        # Logger
        logger = logging.getLogger("AceClient_getURL")

        try:
            res = self._urlresult.get(timeout=timeout)
            return res
        except gevent.Timeout:
            errmsg = "Engine response time exceeded. GetURL timeout!"
            logger.error(errmsg)
            raise AceException(errmsg)

    def startStreamReader(self, url, cid, counter, req_headers=None):
        '''Pump the engine's HTTP stream at *url* to all clients of *cid*.

        Runs until the stream ends, all clients disconnect, or an error
        occurs; always transitions _streamReaderState 1 -> 2 -> 3.
        '''
        logger = logging.getLogger("StreamReader")
        logger.debug("Opening video stream: %s" % url)
        logger.debug("Get headers from client: %s" % req_headers)
        self._streamReaderState = 1

        try:
           if url.endswith('.m3u8'):
              logger.debug("Can't stream HLS in non VLC mode: %s" % url)
              return None

           # Sending client headers to AceEngine
           # NOTE(review): has_key() is Python 2 only.
           if req_headers.has_key('range'):
               del req_headers['range']

           connection = self._streamReaderConnection = requests.get(url, headers=req_headers, stream=True)

           if connection.status_code not in (200, 206):
               logger.error("Failed to open video stream %s" % url)
               return None

           with self._lock:
               self._streamReaderState = 2
               self._lock.notifyAll()

           while True:
                 data = None
                 clients = counter.getClients(cid)

                 try:
                      data = connection.raw.read(AceConfig.readchunksize)
                 except:
                     break;

                 if data and clients:
                     # Keep a bounded backlog of recent chunks.
                     with self._lock:
                         if len(self._streamReaderQueue) == AceConfig.readchunksize:
                             self._streamReaderQueue.popleft()
                         self._streamReaderQueue.append(data)

                     for c in clients:
                         try:
                             c.addChunk(data, 5.0)
                         except Queue.Full:
                             # Drop a slow client only if others remain.
                             if len(clients) > 1:
                                 logger.debug("Disconnecting client: %s" % str(c))
                                 c.destroy()
                 elif not clients:
                     logger.debug("All clients disconnected - closing video stream")
                     break
                 else:
                     logger.warning("No data received")
                     break

        except requests.exceptions.ConnectionError:
            logger.error("Failed to open video stream")
            logger.error(traceback.format_exc())
        except:
            logger.error(traceback.format_exc())
            if counter.getClients(cid):
                logger.error("Failed to read video stream")
        finally:
            self.closeStreamReader()
            with self._lock:
                self._streamReaderState = 3
                self._lock.notifyAll()
            counter.deleteAll(cid)

    def closeStreamReader(self):
        '''Close the engine HTTP connection and drop buffered chunks.'''
        logger = logging.getLogger("StreamReader")
        c = self._streamReaderConnection

        if c:
            self._streamReaderConnection = None
            c.close()
            logger.debug("Video stream closed")

        self._streamReaderQueue.clear()

    def getPlayEvent(self, timeout=None):
        '''
        Blocking while in PAUSE, non-blocking while in RESUME
        '''
        return self._resumeevent.wait(timeout=timeout)

    def pause(self):
        '''Ask the engine to pause playback.'''
        self._write(AceMessage.request.PAUSE)

    def play(self):
        '''Ask the engine to resume playback.'''
        self._write(AceMessage.request.PLAY)

    def _recvData(self):
        '''
        Data receiver method for greenlet
        '''
        # Reads engine lines forever, dispatching on the message keyword and
        # resolving the AsyncResult/Event objects the command methods wait on.
        logger = logging.getLogger('AceClient_recvdata')

        while True:
            gevent.sleep()
            try:
                self._recvbuffer = self._socket.read_until("\r\n")
                self._recvbuffer = self._recvbuffer.strip()
                logger.debug('<<< ' + self._recvbuffer)
            except:
                # If something happened during read, abandon reader.
                if not self._shuttingDown.isSet():
                    logger.error("Exception at socket read")
                    self._shuttingDown.set()
                return

            if self._recvbuffer:
                # Parsing everything only if the string is not empty
                if self._recvbuffer.startswith(AceMessage.response.HELLO):
                    # Parse HELLO
                    if 'version_code=' in self._recvbuffer:
                        v = self._recvbuffer.find('version_code=')
                        self._engine_version_code = int(self._recvbuffer[v+13:v+20])

                    if 'key=' in self._recvbuffer:
                        self._request_key_begin = self._recvbuffer.find('key=')
                        self._request_key = \
                            self._recvbuffer[self._request_key_begin + 4:self._request_key_begin + 14]
                        try:
                            self._write(AceMessage.request.READY_key(
                                self._request_key, self._product_key))
                        except requests.exceptions.ConnectionError as e:
                            logger.error("Can't connect to keygen server! " + \
                                repr(e))
                            self._auth = False
                            self._authevent.set()
                        self._request_key = None
                    else:
                        self._write(AceMessage.request.READY_nokey)

                elif self._recvbuffer.startswith(AceMessage.response.NOTREADY):
                    # NOTREADY
                    logger.error("Ace is not ready. Wrong auth?")
                    self._auth = False
                    self._authevent.set()

                elif self._recvbuffer.startswith(AceMessage.response.LOADRESP):
                    # LOADRESP
                    _contentinfo_raw = self._recvbuffer.split()[2:]
                    _contentinfo_raw = ' '.join(_contentinfo_raw)
                    _contentinfo = json.loads(_contentinfo_raw)
                    if _contentinfo.get('status') == 100:
                        logger.error("LOADASYNC returned error with message: %s"
                            % _contentinfo.get('message'))
                        self._result.set(False)
                    else:
                        logger.debug("Content info: %s", _contentinfo)
                        self._result.set(_contentinfo)

                elif self._recvbuffer.startswith(AceMessage.response.START):
                    # START
                    if not self._seekback or self._started_again or not self._recvbuffer.endswith(' stream=1'):
                        # If seekback is disabled, we use link in first START command.
                        # If seekback is enabled, we wait for first START command and
                        # ignore it, then do seekback in first EVENT position command
                        # AceStream sends us STOP and START again with new link.
                        # We use only second link then.
                        try:
                            self._url = self._recvbuffer.split()[1]
                            self._urlresult.set(self._url)
                            self._resumeevent.set()
                        except IndexError as e:
                            self._url = None
                    else:
                        logger.debug("START received. Waiting for %s." % AceMessage.response.LIVEPOS)
                # STOP
                elif self._recvbuffer.startswith(AceMessage.response.STOP):
                    pass
                # SHUTDOWN
                elif self._recvbuffer.startswith(AceMessage.response.SHUTDOWN):
                    logger.debug("Got SHUTDOWN from engine")
                    self._socket.close()
                    return
                # AUTH
                elif self._recvbuffer.startswith(AceMessage.response.AUTH):
                    try:
                        self._auth = self._recvbuffer.split()[1]
                        # Send USERDATA here
                        self._write(
                            AceMessage.request.USERDATA(self._gender, self._age))
                    except:
                        pass
                    self._authevent.set()
                # GETUSERDATA
                elif self._recvbuffer.startswith(AceMessage.response.GETUSERDATA):
                    raise AceException("You should init me first!")
                # LIVEPOS
                elif self._recvbuffer.startswith(AceMessage.response.LIVEPOS):
                    self._position = self._recvbuffer.split()
                    self._position_last = self._position[2].split('=')[1]
                    self._position_buf = self._position[9].split('=')[1]
                    self._position = self._position[4].split('=')[1]

                    if self._seekback and not self._started_again:
                        self._write(AceMessage.request.LIVESEEK(str(int(self._position_last) - self._seekback)))
                        logger.debug('Seeking back')
                        self._started_again = True
                # DOWNLOADSTOP
                elif self._recvbuffer.startswith(AceMessage.response.DOWNLOADSTOP):
                    self._state = self._recvbuffer.split()[1]
                # STATE
                elif self._recvbuffer.startswith(AceMessage.response.STATE):
                    self._state = self._recvbuffer.split()[1]
                # STATUS
                elif self._recvbuffer.startswith(AceMessage.response.STATUS):
                    self._tempstatus = self._recvbuffer.split()[1].split(';')[0]
                    if self._tempstatus != self._status:
                        self._status = self._tempstatus
                        logger.debug("STATUS changed to " + self._status)

                    if self._status == 'main:err':
                        logger.error(
                            self._status + ' with message ' + self._recvbuffer.split(';')[2])
                        self._result.set_exception(
                            AceException(self._status + ' with message ' + self._recvbuffer.split(';')[2]))
                        self._urlresult.set_exception(
                            AceException(self._status + ' with message ' + self._recvbuffer.split(';')[2]))
                    elif self._status == 'main:starting':
                        self._result.set(True)
                    elif self._status == 'main:idle':
                        self._result.set(True)
                # PAUSE
                elif self._recvbuffer.startswith(AceMessage.response.PAUSE):
                    logger.debug("PAUSE event")
                    self._resumeevent.clear()
                # RESUME
                elif self._recvbuffer.startswith(AceMessage.response.RESUME):
                    logger.debug("RESUME event")
                    gevent.sleep()    # PAUSE/RESUME delay
                    self._resumeevent.set()
                # CID
                elif self._recvbuffer.startswith('##') or len(self._recvbuffer) == 0:
                    self._cidresult.set(self._recvbuffer)
                    logger.debug("CID: %s" %self._recvbuffer)
Beispiel #26
0
class AceClient(object):
    """Client for the Ace Stream engine's telnet control protocol.

    On construction it connects to the engine and spawns a reader
    greenlet (``_recvData``) that parses engine responses and completes
    the gevent primitives (`AsyncResult`/`Event`) that the blocking
    public methods (``aceInit``, ``START``, ``getUrl``,
    ``getPlayEvent``) wait on.
    """

    def __init__(self, host, port, connect_timeout=5, result_timeout=10):
        """Connect to the engine at ``host:port`` and start the reader.

        :param host: engine host name or address
        :param port: engine telnet API port
        :param connect_timeout: socket connect timeout in seconds
        :param result_timeout: how long blocking calls wait for a reply
        :raises AceException: if the telnet connection cannot be opened
        """
        # Receive buffer (last line read from the engine)
        self._recvbuffer = None
        # Stream URL reported by the START response
        self._url = None
        # Ace stream telnet socket
        self._socket = None
        # Result timeout for blocking calls
        self._resulttimeout = result_timeout
        # Shutting down flag
        self._shuttingDown = Event()
        # Product key
        self._product_key = None
        # Current STATUS
        self._status = None
        # Current STATE
        self._state = None
        # Current AUTH
        self._auth = None
        self._gender = None
        self._age = None
        # Result (re-created with AsyncResult() on each START call)
        self._result = AsyncResult()
        self._authevent = Event()
        # Result for getUrl()
        self._urlresult = AsyncResult()
        # Event for resuming from PAUSE
        self._resumeevent = Event()

        # Logger
        logger = logging.getLogger('AceClient_init')

        try:
            self._socket = telnetlib.Telnet(host, port, connect_timeout)
            logger.info("Successfully connected with Ace!")
        except Exception as e:
            raise AceException(
                "Socket creation error! Ace is not running? " + repr(e))

        # Spawning recvData greenlet; the sleep yields so the reader
        # starts before the caller proceeds.
        gevent.spawn(self._recvData)
        gevent.sleep()

    def __del__(self):
        # Destructor just calls destroy() method
        self.destroy()

    def destroy(self):
        '''
        AceClient Destructor
        '''
        if self._shuttingDown.isSet():
            # Already in the middle of destroying
            return

        # Logger
        logger = logging.getLogger("AceClient_destroy")
        # We should resume video to prevent read greenlet deadlock
        self._resumeevent.set()
        # And to prevent getUrl deadlock
        self._urlresult.set()

        # Mark shutdown before writing so the reader greenlet knows a
        # read failure from here on is expected.
        logger.debug("Destroying client...")
        self._shuttingDown.set()
        try:
            self._write(AceMessage.request.SHUTDOWN)
        except Exception:
            # Best effort: the engine may already be gone.
            pass

    def _write(self, message):
        """Send one protocol line to the engine.

        :raises AceException: if the socket write fails (EOF)
        """
        try:
            self._socket.write(message + "\r\n")
        except EOFError as e:
            raise AceException("Write error! " + repr(e))

    def aceInit(self, gender=AceConst.SEX_MALE, age=AceConst.AGE_18_24, product_key=None, pause_delay=0):
        """Perform the HELLO/READY/AUTH handshake with the engine.

        :param gender: user gender constant sent with USERDATA
        :param age: user age constant sent with USERDATA
        :param product_key: key used by READY_key; None for READY_nokey
        :param pause_delay: delay in seconds applied on RESUME events
        :raises AceException: on authentication timeout or failure
        """
        self._product_key = product_key
        self._gender = gender
        self._age = age
        # PAUSE/RESUME delay
        self._pausedelay = pause_delay

        # Logger
        logger = logging.getLogger("AceClient_aceInit")

        # Sending HELLO; the reader greenlet sets _authevent once the
        # AUTH (or NOTREADY) response arrives.
        self._write(AceMessage.request.HELLO)
        if not self._authevent.wait(self._resulttimeout):
            errmsg = "Authentication timeout. Wrong key?"
            logger.error(errmsg)
            raise AceException(errmsg)

        if not self._auth:
            errmsg = "Authentication error. Wrong key?"
            logger.error(errmsg)
            raise AceException(errmsg)

        logger.debug("aceInit ended")

    def _getResult(self):
        """Wait for the pending command result set by the reader.

        :returns: the value placed into ``self._result``
        :raises AceException: if the result is falsy or times out
        """
        # Logger
        logger = logging.getLogger("AceClient_START")

        try:
            result = self._result.get(timeout=self._resulttimeout)
            if not result:
                errmsg = "START error!"
                logger.error(errmsg)
                raise AceException(errmsg)
        except gevent.Timeout:
            errmsg = "START timeout!"
            logger.error(errmsg)
            raise AceException(errmsg)

        return result

    def START(self, datatype, value):
        '''
        Start video method
        '''
        # Fresh results for this playback request; the reader greenlet
        # fulfils them from LOADRESP/START responses.
        self._result = AsyncResult()
        self._urlresult = AsyncResult()

        self._write(AceMessage.request.LOADASYNC(datatype.upper(), 0, value))
        contentinfo = self._getResult()

        self._write(AceMessage.request.START(datatype.upper(), value))
        self._getResult()

        return contentinfo

    def getUrl(self, timeout=40):
        """Block until the engine reports the playback URL.

        :raises AceException: if no URL arrives within ``timeout`` seconds
        """
        # Logger
        logger = logging.getLogger("AceClient_getURL")

        try:
            res = self._urlresult.get(timeout=timeout)
            return res
        except gevent.Timeout:
            errmsg = "getURL timeout!"
            logger.error(errmsg)
            raise AceException(errmsg)

    def getPlayEvent(self, timeout=None):
        '''
        Blocking while in PAUSE, non-blocking while in RESUME
        '''
        self._resumeevent.wait(timeout=timeout)
        return

    def _recvData(self):
        '''
        Data receiver method for greenlet
        '''
        logger = logging.getLogger('AceClient_recvdata')

        while True:
            gevent.sleep()
            try:
                self._recvbuffer = self._socket.read_until("\r\n")
                self._recvbuffer = self._recvbuffer.strip()
            except Exception:
                # If something happened during read, abandon reader.
                # (Exception, not bare except: a greenlet kill must
                # still propagate GreenletExit.)
                if not self._shuttingDown.isSet():
                    logger.error("Exception at socket read")
                    self._shuttingDown.set()
                return

            if self._recvbuffer:
                # Parsing everything only if the string is not empty
                if self._recvbuffer.startswith(AceMessage.response.HELLO):
                    # Parse HELLO
                    if 'key=' in self._recvbuffer:
                        self._request_key_begin = self._recvbuffer.find('key=')
                        self._request_key = \
                            self._recvbuffer[self._request_key_begin+4:self._request_key_begin+14]
                        try:
                            self._write(AceMessage.request.READY_key(
                                self._request_key, self._product_key))
                        except urllib2.URLError as e:
                            logger.error("Can't connect to keygen server! " + \
                                repr(e))
                            self._auth = False
                            self._authevent.set()
                        self._request_key = None
                    else:
                        self._write(AceMessage.request.READY_nokey)

                elif self._recvbuffer.startswith(AceMessage.response.NOTREADY):
                    # NOTREADY
                    logger.error("Ace is not ready. Wrong auth?")
                    self._auth = False
                    self._authevent.set()

                elif self._recvbuffer.startswith(AceMessage.response.LOADRESP):
                    # LOADRESP: payload after the two header tokens is JSON
                    _contentinfo_raw = self._recvbuffer.split()[2:]
                    _contentinfo_raw = ' '.join(_contentinfo_raw)
                    _contentinfo = json.loads(_contentinfo_raw)
                    if _contentinfo.get('status') == 100:
                        logger.error("LOADASYNC returned error with message: %s"
                            % _contentinfo.get('message'))
                        self._result.set(False)
                    else:
                        logger.debug("Content info: %s", _contentinfo)
                        _filename = urllib2.unquote(_contentinfo.get('files')[0][0])
                        self._result.set(_filename)

                elif self._recvbuffer.startswith(AceMessage.response.START):
                    # START: second token is the playback URL
                    try:
                        self._url = self._recvbuffer.split()[1]
                        self._urlresult.set(self._url)
                        self._resumeevent.set()
                    except IndexError as e:
                        self._url = None

                elif self._recvbuffer.startswith(AceMessage.response.STOP):
                    pass

                elif self._recvbuffer.startswith(AceMessage.response.SHUTDOWN):
                    logger.debug("Got SHUTDOWN from engine")
                    self._socket.close()
                    return

                elif self._recvbuffer.startswith(AceMessage.response.AUTH):
                    try:
                        self._auth = self._recvbuffer.split()[1]
                        # Send USERDATA here
                        self._write(
                            AceMessage.request.USERDATA(self._gender, self._age))
                    except Exception:
                        # Malformed AUTH line or write failure; still
                        # release waiters below.
                        pass
                    self._authevent.set()

                elif self._recvbuffer.startswith(AceMessage.response.GETUSERDATA):
                    raise AceException("You should init me first!")

                elif self._recvbuffer.startswith(AceMessage.response.STATE):
                    self._state = self._recvbuffer.split()[1]

                elif self._recvbuffer.startswith(AceMessage.response.STATUS):
                    self._tempstatus = self._recvbuffer.split()[1].split(';')[0]
                    if self._tempstatus != self._status:
                        self._status = self._tempstatus
                        logger.debug("STATUS changed to " + self._status)

                    if self._status == 'main:err':
                        logger.error(
                            self._status + ' with message ' + self._recvbuffer.split(';')[2])
                        self._result.set_exception(
                            AceException(self._status + ' with message ' + self._recvbuffer.split(';')[2]))
                        self._urlresult.set_exception(
                            AceException(self._status + ' with message ' + self._recvbuffer.split(';')[2]))
                    elif self._status == 'main:starting':
                        self._result.set(True)

                elif self._recvbuffer.startswith(AceMessage.response.PAUSE):
                    logger.debug("PAUSE event")
                    self._resumeevent.clear()

                elif self._recvbuffer.startswith(AceMessage.response.RESUME):
                    logger.debug("RESUME event")
                    gevent.sleep(self._pausedelay)
                    self._resumeevent.set()
Beispiel #27
0
class StreamingExtract(object):
    """Streams extraction of a multi-part rar archive while later parts
    may still be downloading.

    Drives an external ``unrar`` process in ``-vp`` (pause-at-volume)
    mode: when unrar asks for the next volume, ``find_next`` locates or
    waits for it (via ``next_part_event``), then ``go_on`` answers the
    prompt. Extracted files are published into a "library" package.
    """

    def __init__(self, id, hddsem, threadpool):
        # id: key under which this extractor registers itself in the
        # module-level `extractors` dict.
        self.id = id
        # semaphore limiting concurrent disk-heavy bruteforce work
        self.hddsem = hddsem
        # threadpool used to run blocking password bruteforce off-greenlet
        self.threadpool = threadpool

        # archive password once known (None = no password)
        self.password = None

        self.killed = False
        # path.path -> (path, file) for every part fed so far
        self.parts = dict()
        # (path, file) tuples: first part fed, part currently extracting
        self.first = None
        self.current = None
        # path string of the volume unrar is waiting for
        self.next = None
        # signalled by feed_part when the awaited volume arrives
        self.next_part_event = AsyncResult()
        # the running unrar Popen object (None until run() starts it)
        self.rar = None
        # package that receives the extracted files
        self.library = None
        self._library_added = set()
        self._deleted_library = None
        extractors[id] = self

    def feed_part(self, path, file):
        """Register a newly available archive part; the first part
        starts extraction, later parts block until they are consumed."""
        path.finished = AsyncResult()
        self.parts[path.path] = path, file
        # NOTE(review): both placeholders format `path`; presumably one
        # was meant to be `file` — confirm intent.
        log.debug('fed new part {}: {}'.format(path, path))

        if file.state != 'rarextract':
            with transaction:
                file.state = 'rarextract'

        if self.first is None:
            self.first = self.current = path, file
            self.add_library_files()
            self.run(path, file)
        else:
            if path.path == self.next:
                # extractor is waiting on this exact volume; wake it
                self.next_part_event.set(path)
            # block the caller until this part is fully extracted
            path.finished.get()

    def run(self, path, file):
        """Main extraction loop entry for the first part: bruteforce the
        password if needed, spawn unrar, then pump its output."""
        try:
            self.first = self.current
            with transaction:
                file.greenlet = gevent.getcurrent()
                file.on_greenlet_started()
            try:
                result = self.bruteforce(path, file)
            except rarfile.NeedFirstVolume:
                # fed a middle volume; compute part1's filename and
                # restart from there
                self.next = os.path.join(path.dir, "{}.part{}.rar".format(path.basename, "1".zfill(len(path.part))))
                self.find_next()
                if core.config.delete_extracted_archives:
                    return False
                return
            
            # bruteforce returns an exception object on failure
            if result and result is not True:
                raise result

            if self.password:
                rarpw = "-p"+self.password
            else:
                # "-p-" tells unrar to never prompt for a password
                rarpw = "-p-"

            # -vp makes unrar pause and prompt before each next volume
            cmd = [rarfile.UNRAR_TOOL, "x", "-y", rarpw, "-idq", "-vp", path, file.get_extract_path() + os.sep]
            file.log.info("starting extraction of {} with params {}".format(path[1:], cmd))
            self.rar = rarfile.custom_popen(cmd)

            self.wait_data()
            if not path.finished.ready():
                path.finished.set()
            if core.config.delete_extracted_archives:
                return False
        except BaseException as e:
            traceback.print_exc()
            self.kill(e)
            raise

    def bruteforce(self, path, file):
        """Determine the archive password (by content, by stored password
        lists, or by asking the user); stores it in ``self.password``."""
        rar = rarfile.RarFile(path, ignore_next_part_missing=True)
        if rar.not_first_volume:
            raise rarfile.NeedFirstVolume("First Volume for extraction")

        if not rar.needs_password():
            self.password = None
            return
        # candidate passwords: package-specific first, then global list,
        # de-duplicated preserving order
        passwords = []
        for i in itertools.chain(file.package.extract_passwords, core.config.bruteforce_passwords):
            if not i in passwords:
                passwords.append(i)
        if rar.needs_password() and rar.infolist():
            # file list is readable: passwords can be tested per-content
            pw = bruteforce_by_content(rar, passwords)
            if not pw:
                print "could not find password, asking user"
                for pw in file.solve_password(
                        message="Rarfile {} password cannot be cracked. Enter correct password: #".format(path.name),
                        retries=5):
                    pw = bruteforce_by_content(rar, [pw])
                    if pw:
                        break
                else:
                    return self.kill('extract password not entered')
            else:
                print "Found password by content:", pw
            self.password = pw
            return
        print "testing", passwords
        if not self.threadpool.apply(bruteforce, (rar, passwords, self.hddsem, file.log)):
            # ask user for password
            for pw in file.solve_password(
                    message="Enter the extract password for file: {} #".format(path.name),
                    retries=5):
                if self.threadpool.apply(bruteforce, (rar, [pw], self.hddsem, file.log)):
                    break
            else:
                return self.kill('extract password not entered')

        self.password = rar._password
        # remember newly discovered passwords for future archives
        if self.password and self.password not in core.config.bruteforce_passwords:
            with transaction:
                core.config.bruteforce_passwords.append(self.password)

    def wait_data(self):
        """Pump unrar's stdout byte by byte, dispatching complete lines
        to new_data(); returns when the process closes its stdout."""
        bytes = ''
        while True:
            # single-byte reads: unrar's volume prompt does not end with
            # a newline, so line-buffered reading would deadlock
            data = self.rar.stdout.read(1)
            if not data:
                break

            bytes += data
            for i in bytes.splitlines():
                if i:
                    result = self.new_data(i)
                    if result is True:
                        # line consumed; reset the accumulator
                        bytes = ''
                    if result and result is not True:
                        raise result
        self.close()

    def finish_file(self, path, file):
        """Mark one archive part as completely extracted."""
        if file is not None:
            with core.transaction:
                #if not 'rarextract' in file.completed_plugins:
                #    file.completed_plugins.append('rarextract')
                #file.greenlet = None
                #file.on_greenlet_finish()
                #file.on_greenlet_stopped()
                file.state = 'rarextract_complete'
                file.init_progress(1)
                file.set_progress(1)
                #file.stop()
        #path.finished.set()
        event.fire('rarextract:part_complete', path, file)
    
    def new_data(self, data):
        """called when new data or new line"""
        if "packed data CRC failed in volume" in data:
            return self.kill('checksum error in rar archive')

        if data.startswith("CRC failed in the encrypted file"):  # corrupt file or download not complete
            return self.kill('checksum error in rar archive. wrong password?')

        if "bad archive" in data.lower():
            return self.kill('Bad archive')

        # unrar's interactive prompt asking for the next volume
        m = re.search(r"Insert disk with (.*?((\.part\d+)?\.r..)) \[C\]ontinue\, \[Q\]uit", data)
        if not m:
            return

        if self.current is not None:
            self.finish_file(*self.current)

        self.next = self.first[0].basename + m.group(2)
        print "setting self.next", self.next
        return self.find_next()

    def find_next(self):
        """Locate the next volume (already fed, known to core, or on
        disk) or wait for it; then resume extraction via go_on()."""
        print "finding next", self.next
        next = self.next
        if next not in self.parts:
            # check if file is in core.files()
            found = False
            name = os.path.basename(next)
            for f in core.files():
                if f.name == name and f.get_complete_file() == next:
                    if not f.working and 'download' in f.completed_plugins:
                        # fully downloaded and idle: adopt it directly
                        found = True
                        current = fileplugin.FilePath(next), f
                        current[0].finished = AsyncResult()
                        self.parts[next] = current
                        print('got next part from idle {}: {}'.format(next, self.current[0]))
                        break
                    if f.state == "download":
                        # still downloading: it will be fed later
                        found = True
                        break
                    print "found path but not valid", f.state, f.working
 
            if not found:
                # file is not in system, check if it exists on hdd
                if os.path.exists(next):
                    current = fileplugin.FilePath(next), self.first[1]
                    current[0].finished = AsyncResult()
                    self.parts[next] = current
                    print('got next part from hdd {}: {}'.format(next, self.current[0]))
                else:
                    # part does not exists. fail this extract
                    return self.kill('missing part {}'.format(next))

            if next not in self.parts:
                print('waiting for part {}'.format(next))
                event.fire('rarextract:waiting_for_part', next)

                # abort the whole extraction if every candidate download
                # of the awaited part ends in an error
                @event.register("file:last_error")
                def killit(e, f):
                    # NOTE(review): `not f.name == name and ...` binds as
                    # `(not f.name == name) and ...` — verify the intended
                    # precedence.
                    if not f.name == name and f.get_complete_file() == next:
                        return
                    if all(f.last_error for f in core.files() if f.name == name and f.get_complete_file() == next):
                        event.remove("file:last_error", killit)
                        self.kill('all of the next parts are broken.')
                
                while next not in self.parts:
                    self.next_part_event.get()
                    # re-arm for subsequent volumes
                    self.next_part_event = AsyncResult()

                log.debug('got next part from wait {}: {}'.format(next, self.current[0]))

        self.current = self.parts[next]
        self.add_library_files()
        return self.go_on()

    def add_library_files(self):
        """Add extracted files into the library"""
        path = fileplugin.FilePath(self.current[0])
        f = self.first[1]

        with transaction:
            if not self.library:
                print "Creating package for", path.basename
                name = "{} {}".format("Extracted files from", os.path.basename(path.basename))
                # reuse an existing package with the same name if present
                for p in core.packages():
                    if p.name == name:
                        self.library = p
                        self._library_added = set(f.name for f in p.files)
                        print "\treused package", p.id
                        print "package", p.id, p.tab
                if not self.library:
                    self.library = f.package.clone_empty(
                        name=name,
                        tab="complete",
                        state="download_complete",
                    )

                # if the library package (or one of its files) is deleted
                # by the user, remove the extracted files and abort
                @event.register("package:deleted")
                @event.register("file:deleted")
                def _deleted_library(e, package):
                    import traceback
                    print traceback.print_stack()
                    print "---------", e
                    if e.startswith("file:"):
                        package = package.package

                    if package.id == self.library.id:
                        event.remove("package:deleted", _deleted_library)
                        event.remove("file:deleted", _deleted_library)
                        for f in self.library.files:
                            f.delete_local_files()
                        self.kill("Extracted files have been deleted.", False)

                self._deleted_library = _deleted_library

            rar = rarfile.RarFile(path, ignore_next_part_missing=True)
            print "password is", self.password
            try:
                if not rar.infolist():
                    rar.setpassword(self.password)
            except rarfile.BadRarFile:
                if not rar.infolist():
                    self.library.delete()
                    return
            links = []
            for item in rar.infolist():
                name = item.filename
                print "From new infolist:", name
                if name in self._library_added:
                    print "\t already added"
                    continue
                elif item.isdir():
                    print "\t is dir"
                    continue
                else:
                    self._library_added.add(name)
                print "creating file for", repr(name), self.library
                
                links.append(dict(
                    name=name,
                    size=item.file_size,
                    url=u'file://' + os.path.join(
                        f.get_extract_path().decode(sys.getfilesystemencoding()),
                        name),
                ))
        if links:
            core.add_links(links, package_id=self.library.id)

    def go_on(self):
        """Answer unrar's volume prompt with [C]ontinue once the next
        volume exists on disk; returns True when extraction resumes."""
        if self.rar is None:
            # no process yet (restarted from NeedFirstVolume): start over
            return self.run(*self.current)
        if not os.path.exists(self.next):
            return
        self.rar.stdin.write("C\n")
        self.rar.stdin.flush()

        if self.current[1] is not None:
            with core.transaction:
                self.current[1].greenlet = gevent.getcurrent()
                self.current[1].greenlet.link(self.current[0].finished)
                self.current[1].on_greenlet_started()
            self.current[1].log.info("extract go on: {}".format(self.current[1].name))
        return True
        
    def kill(self, exc="", _del_lib=True):
        """Abort extraction: terminate unrar, propagate `exc` to all
        waiters, roll back file states, and deregister the extractor."""
        if self.killed:
            return exc
        self.killed = True

        blacklist.add(self.first[0].basename)  # no autoextract for failed archives
        if _del_lib:
            self.library.delete()
        print "killing rarextract", self.first[0].basename, exc
        if isinstance(exc, basestring):
            exc = ValueError(exc)

        self.current = None

        if self.rar is not None:
            self.rar.terminate()
            self.rar = None

        try:
            del extractors[self.id]
        except KeyError:
            pass
        
        # wake everything blocked in feed_part/find_next with the error
        self.next_part_event.set_exception(exc)
        for path, file in self.parts.values():
            if not path.finished.ready():
                path.finished.set_exception(exc)

        with transaction:
            for path, file in self.parts.values():
                if file is not None:
                    file.stop()
                    if file.state == 'rarextract_complete':
                        file.state = 'rarextract'
                        file.enabled = False
                    if 'rarextract' in file.completed_plugins:
                        file.completed_plugins.remove('rarextract')

        self.first[1].fatal('rarextract: {}'.format(exc))

        return exc

    def close(self):
        """called when process is closed"""
        if not self.library:
            self.add_library_files()
        try:
            del extractors[self.id]
        except KeyError:
            pass

        if self._deleted_library:
            event.remove("package:deleted", self._deleted_library)

        if not self.killed:
            if self.current is not None:
                self.finish_file(*self.current)

            if core.config.delete_extracted_archives:
                with transaction:
                    for path, file in self.parts.values():
                        if file:
                            file.delete_local_files()
                            file.fatal('extracted and deleted', type='info', abort_greenlet=False)
                        else:
                            os.remove(path)
            else:
                for path, file in self.parts.values():
                    if file:
                        file.log.info('extract complete')
Beispiel #28
0
class AceClient(object):

    def __init__(self, host, port, connect_timeout=5, result_timeout=10):
        # Receive buffer
        self._recvbuffer = None
        # Stream URL
        self._url = None
        # Ace stream socket
        self._socket = None
        # Result timeout
        self._resulttimeout = result_timeout
        # Shutting down flag
        self._shuttingDown = Event()
        # Product key
        self._product_key = None
        # Current STATUS
        self._status = None
        # Current STATE
        self._state = None
        # Current video position
        self._position = None
        # Available video position (loaded data)
        self._position_last = None
        # Buffered video pieces
        self._position_buf = None
        # Current AUTH
        self._auth = None
        self._gender = None
        self._age = None
        # Result (Created with AsyncResult() on call)
        self._result = AsyncResult()
        self._authevent = Event()
        # Result for getURL()
        self._urlresult = AsyncResult()
        # Event for resuming from PAUSE
        self._resumeevent = Event()
        # Seekback seconds.
        self._seekback = 0
        # Did we get START command again? For seekback.
        self._started_again = False

        # Logger
        logger = logging.getLogger('AceClient_init')

        try:
            self._socket = telnetlib.Telnet(host, port, connect_timeout)
            logger.info("Successfully connected with Ace!")
        except Exception as e:
            raise AceException(
                "Socket creation error! Ace is not running? " + repr(e))

        # Spawning recvData greenlet
        gevent.spawn(self._recvData)
        gevent.sleep()

    def __del__(self):
        # Destructor just calls destroy() method
        self.destroy()

    def destroy(self):
        '''
        AceClient Destructor
        '''
        if self._shuttingDown.isSet():
        # Already in the middle of destroying
            return

        # Logger
        logger = logging.getLogger("AceClient_destroy")
        # We should resume video to prevent read greenlet deadlock
        self._resumeevent.set()
        # And to prevent getUrl deadlock
        self._urlresult.set()

        # Trying to disconnect
        try:
            logger.debug("Destroying client...")
            self._shuttingDown.set()
            self._write(AceMessage.request.SHUTDOWN)
        except:
            # Ignore exceptions on destroy
            pass
        finally:
            self._shuttingDown.set()

    def _write(self, message):
        try:
            self._socket.write(message + "\r\n")
        except EOFError as e:
            raise AceException("Write error! " + repr(e))

    def aceInit(self, gender=AceConst.SEX_MALE, age=AceConst.AGE_18_24, product_key=None, pause_delay=0, seekback=0):
        self._product_key = product_key
        self._gender = gender
        self._age = age
        # PAUSE/RESUME delay
        self._pausedelay = pause_delay
        # Seekback seconds
        self._seekback = seekback

        # Logger
        logger = logging.getLogger("AceClient_aceInit")

        # Sending HELLO
        self._write(AceMessage.request.HELLO)
        if not self._authevent.wait(self._resulttimeout):
            errmsg = "Authentication timeout. Wrong key?"
            logger.error(errmsg)
            raise AceException(errmsg)
            return

        if not self._auth:
            errmsg = "Authentication error. Wrong key?"
            logger.error(errmsg)
            raise AceException(errmsg)
            return

        logger.debug("aceInit ended")

    def _getResult(self):
        # Logger
        logger = logging.getLogger("AceClient_START")

        try:
            result = self._result.get(timeout=self._resulttimeout)
            if not result:
                errmsg = "START error!"
                logger.error(errmsg)
                raise AceException(errmsg)
        except gevent.Timeout:
            errmsg = "START timeout!"
            logger.error(errmsg)
            raise AceException(errmsg)

        return result

    def START(self, datatype, value):
        '''
        Start video method
        '''
        self._result = AsyncResult()
        self._urlresult = AsyncResult()

        self._write(AceMessage.request.LOADASYNC(datatype.upper(), 0, value))
        contentinfo = self._getResult()

        self._write(AceMessage.request.START(datatype.upper(), value))
        self._getResult()

        return contentinfo

    def getUrl(self, timeout=40):
        # Logger
        logger = logging.getLogger("AceClient_getURL")

        try:
            res = self._urlresult.get(timeout=timeout)
            return res
        except gevent.Timeout:
            errmsg = "getURL timeout!"
            logger.error(errmsg)
            raise AceException(errmsg)

    def getPlayEvent(self, timeout=None):
        '''
        Blocking while in PAUSE, non-blocking while in RESUME
        '''
        return self._resumeevent.wait(timeout=timeout)

    def pause(self):
        self._write(AceMessage.request.PAUSE)

    def play(self):
        self._write(AceMessage.request.PLAY)

    def _recvData(self):
        '''
        Data receiver method for greenlet.

        Reads CRLF-terminated response lines from the engine socket forever,
        dispatching on the message prefix and resolving the events/results
        (_authevent, _result, _urlresult, _resumeevent) that the request
        methods block on. Returns when the socket dies or SHUTDOWN arrives.
        '''
        logger = logging.getLogger('AceClient_recvdata')

        while True:
            gevent.sleep()
            try:
                self._recvbuffer = self._socket.read_until("\r\n")
                self._recvbuffer = self._recvbuffer.strip()
                #logger.debug('<<< ' + self._recvbuffer)
            except:
                # If something happened during read, abandon reader.
                if not self._shuttingDown.isSet():
                    logger.error("Exception at socket read")
                    self._shuttingDown.set()
                return

            if self._recvbuffer:
                # Parsing everything only if the string is not empty
                if self._recvbuffer.startswith(AceMessage.response.HELLO):
                    # Parse HELLO
                    if 'key=' in self._recvbuffer:
                        self._request_key_begin = self._recvbuffer.find('key=')
                        # NOTE(review): assumes the request key is exactly 10
                        # characters long -- confirm against the protocol.
                        self._request_key = \
                            self._recvbuffer[self._request_key_begin+4:self._request_key_begin+14]
                        try:
                            self._write(AceMessage.request.READY_key(
                                self._request_key, self._product_key))
                        except:
                            # Could not answer the challenge: report auth
                            # failure so waiters on _authevent are released.
                            self._auth = False
                            self._authevent.set()
                        self._request_key = None
                    else:
                        self._write(AceMessage.request.READY_nokey)

                elif self._recvbuffer.startswith(AceMessage.response.NOTREADY):
                    # NOTREADY
                    logger.error("Ace is not ready. Wrong auth?")
                    self._auth = False
                    self._authevent.set()

                elif self._recvbuffer.startswith(AceMessage.response.LOADRESP):
                    # LOADRESP: json payload starts at the third token
                    _contentinfo_raw = self._recvbuffer.split()[2:]
                    _contentinfo_raw = ' '.join(_contentinfo_raw)
                    _contentinfo = json.loads(_contentinfo_raw)
                    if _contentinfo.get('status') == 100:
                        logger.error("LOADASYNC returned error with message: %s"
                            % _contentinfo.get('message'))
                        self._result.set(False)
                    else:
                        logger.debug("Content info: %s", _contentinfo)
                        _filename = urllib2.unquote(_contentinfo.get('files')[0][0])
                        self._result.set(_filename)

                elif self._recvbuffer.startswith(AceMessage.response.START):
                    # START
                    if not self._seekback or (self._seekback and self._started_again):
                        # If seekback is disabled, we use link in first START command.
                        # If seekback is enabled, we wait for first START command and
                        # ignore it, then do seekback in first EVENT position command.
                        # AceStream sends us STOP and START again with new link.
                        # We use only second link then.
                        try:
                            self._url = self._recvbuffer.split()[1]
                            self._urlresult.set(self._url)
                            self._resumeevent.set()
                        except IndexError as e:
                            # START line arrived without a url token.
                            self._url = None

                elif self._recvbuffer.startswith(AceMessage.response.STOP):
                    pass

                elif self._recvbuffer.startswith(AceMessage.response.SHUTDOWN):
                    logger.debug("Got SHUTDOWN from engine")
                    self._socket.close()
                    return

                elif self._recvbuffer.startswith(AceMessage.response.AUTH):
                    try:
                        self._auth = self._recvbuffer.split()[1]
                        # Send USERDATA here
                        self._write(
                            AceMessage.request.USERDATA(self._gender, self._age))
                    except:
                        pass
                    self._authevent.set()

                elif self._recvbuffer.startswith(AceMessage.response.GETUSERDATA):
                    # NOTE(review): raising here terminates the reader
                    # greenlet; presumably meant as a "client was never
                    # initialized" guard -- verify against callers.
                    raise AceException("You should init me first!")

                elif self._recvbuffer.startswith(AceMessage.response.LIVEPOS):
                    # LIVEPOS: fields parsed by fixed token position.
                    self._position = self._recvbuffer.split()
                    self._position_last = self._position[2].split('=')[1]
                    self._position_buf = self._position[9].split('=')[1]
                    self._position = self._position[4].split('=')[1]
                    # logger.debug('Current position/last/buf: %s/%s/%s' % (self._position, self._position_last, self._position_buf))
                    if self._seekback and not self._started_again:
                        self._write(AceMessage.request.SEEK(str(int(self._position_last) - \
                            self._seekback)))
                        logger.debug('Seeking back')
                        self._started_again = True

                elif self._recvbuffer.startswith(AceMessage.response.STATE):
                    self._state = self._recvbuffer.split()[1]

                elif self._recvbuffer.startswith(AceMessage.response.STATUS):
                    self._tempstatus = self._recvbuffer.split()[1].split(';')[0]
                    if self._tempstatus != self._status:
                        self._status = self._tempstatus
                        logger.debug("STATUS changed to {0} data {1}".format(self._status, repr(self._recvbuffer)))

                    if self._status == 'main:dl':
                        logger.debug("progress - speed {3}kb/s peers {6}".format(*self._recvbuffer.split(';')))
                    elif self._status == 'main:prebuf' or self._status == 'main:buf':
                        logger.debug("progress {1}% speed {5}kb/s peers {8}".format(*self._recvbuffer.split(';')))
                    elif self._status == 'main:err':
                        # Fail both pending results so waiters see the error.
                        logger.error(
                            self._status + ' with message ' + self._recvbuffer.split(';')[2])
                        self._result.set_exception(
                            AceException(self._status + ' with message ' + self._recvbuffer.split(';')[2]))
                        self._urlresult.set_exception(
                            AceException(self._status + ' with message ' + self._recvbuffer.split(';')[2]))
                    elif self._status == 'main:starting':
                        self._result.set(True)

                elif self._recvbuffer.startswith(AceMessage.response.PAUSE):
                    logger.debug("PAUSE event")
                    self._resumeevent.clear()

                elif self._recvbuffer.startswith(AceMessage.response.RESUME):
                    logger.debug("RESUME event")
                    # Small grace period before releasing paused readers.
                    gevent.sleep(self._pausedelay)
                    self._resumeevent.set()
# Beispiel #29
# 0
class ChildrenMonitor(object):
    """Simple monitor that monitors the children of a node and their
    content.

    Each child is materialized into the ``into`` mapping via ``factory``
    and create/modify/delete notifications are forwarded to ``listener``.
    """
    # Sentinel pushed onto self.queue to make _monitor() terminate.
    _STOP_REQUEST = object()

    def __init__(self, client, path, into, factory, args, listener):
        self.client = client
        self.path = path
        # Mapping that receives the materialized children (child name -> value).
        self.into = into if into is not None else {}
        # Callable turning raw node data into a value for `into`.
        self.factory = factory if factory is not None else str
        self.args = args
        self.listener = listener or MonitorListener()
        # Resolved (or failed) once the first complete sync has happened.
        self.started = AsyncResult()
        self.queue = Queue()
        # Last seen zookeeper stat per child name.
        self.stats = {}
        # Retry backoff: grows randomly, capped at max_delay seconds.
        self._delay = 1.343
        self.max_delay = 180

    def _get_child(self, child):
        """Fetch (data, stat) for a child node, or None if it vanished.

        A child can disappear between get_children() and get(); treat
        that race as a soft failure.
        """
        full_path = os.path.join(self.path, child)
        try:
            return self.client.get(full_path)
        except zookeeper.NoNodeException:
            print("race condition while getting", full_path)
            return None

    def _monitor(self):
        """Run the monitoring loop."""
        def watcher(event):
            self.queue.put(event)

        while True:
            try:
                children = self.client.get_children(self.path, watcher)
            except zookeeper.NoNodeException:
                # Parent node does not exist (yet): report "started" and poll.
                if not self.started.ready():
                    self.started.set(None)
                gevent.sleep(1)
                continue
            except (zookeeper.ConnectionLossException,
                    zookeeper.SessionExpiredException,
                    zookeeper.InvalidStateException) as err:
                # Connection trouble before the first sync is fatal;
                # afterwards retry with a randomized, capped backoff.
                if not self.started.ready():
                    self.started.set_exception(err)
                    break
                logging.error("got %r while monitoring %s", str(err),
                              self.path)
                gevent.sleep(self._delay)
                self._delay += self._delay * random.random()
                self._delay = min(self._delay, self.max_delay)
                continue
            except Exception as err:
                # Unknown errors fail `started` before the first sync;
                # afterwards they propagate to the caller.
                if not self.started.ready():
                    self.started.set_exception(err)
                    break
                raise

            for child in children:
                result = self._get_child(child)
                if result is None:
                    continue
                data, stat = result
                if child not in self.stats:
                    self.into[child] = self.factory(data, *self.args)
                    self.listener.created(child, self.into[child])
                    self.stats[child] = stat
                else:
                    if stat['version'] != self.stats[child]['version']:
                        self.into[child] = self.factory(data, *self.args)
                        self.listener.modified(child, self.into[child])
                    self.stats[child] = stat

            # Copy the keys: we delete from self.into while iterating.
            for child in list(self.into.keys()):
                if child not in children:
                    del self.into[child]
                    del self.stats[child]
                    self.listener.deleted(child)

            if not self.started.ready():
                self.started.set(None)

            self.listener.commit()

            # Block until the watcher fires (or a stop is requested).
            event = self.queue.get()
            if event is self._STOP_REQUEST:
                break
# Beispiel #30
# 0
class AceClient(object):
    """Client for the AceStream engine telnet API (gevent based).

    A reader greenlet (_recvData) parses engine responses and resolves
    the AsyncResult/Event attributes that the request methods below
    block on with a timeout.
    """

    def __init__(self, ace, connect_timeout=5, result_timeout=10):
        """Connect to the engine described by ace ({'aceHostIP', 'aceAPIport'}).

        Raises AceException when the telnet connection cannot be created.
        """
        # Telnet response buffer
        self._recvbuffer = None
        # AceEngine socket
        self._socket = None
        # AceEngine read result timeout
        self._resulttimeout = float(result_timeout)
        # Shutting down flag
        self._shuttingDown = Event()
        # AceEngine product key
        self._product_key = None
        # Result (recreated with AsyncResult() per request)
        self._result = AsyncResult()
        # Result for START URL
        self._urlresult = AsyncResult()
        # URL response timeout from AceEngine (set in aceInit)
        self._videotimeout = None
        # Result for CID
        self._cidresult = AsyncResult()
        # Current STATUS
        self._status = AsyncResult()
        # Current EVENT
        self._event = AsyncResult()
        # Current STATE: tuple of (state_id, time of appearance)
        self._state = AsyncResult()
        # Current AUTH
        self._gender = None
        self._age = None
        # Seekback seconds.
        self._seekback = None
        # Did we get START command again? For seekback.
        self._started_again = Event()
        # AceEngine Streamreader ring buffer with max number of chunks in queue
        self._streamReaderQueue = gevent.queue.Queue(maxsize=1000)

        # Logger
        logger = logging.getLogger('AceClient')
        # Try to connect AceStream engine
        try:
            self._socket = Telnet(ace['aceHostIP'], ace['aceAPIport'], connect_timeout)
            logger.debug('Successfully connected to AceStream on %s:%s' % (ace['aceHostIP'], ace['aceAPIport']))
        except Exception:
            errmsg = 'There are no alive AceStream Engines found!'
            raise AceException(errmsg)
        else:
            # Spawning telnet data reader greenlet
            gevent.spawn(self._recvData)

    def destroy(self):
        '''
        AceClient Destructor: asks the engine to shut down and marks this
        client as destroyed. Safe to call more than once.
        '''
        logger = logging.getLogger('AceClient_destroy') # Logger
        if self._shuttingDown.ready(): return   # Already in the middle of destroying
        self._result.set()  # Unblock any pending request waiter
        # Trying to disconnect
        try:
            logger.debug('Destroying AceStream client.....')
            self._shuttingDown.set()
            self._write(AceMessage.request.SHUTDOWN)
        except Exception: pass # Ignore exceptions on destroy
        finally: self._shuttingDown.set()

    def reset(self):
        '''Prepare the client for a new playback session.'''
        self._started_again.clear()
        # Unblock anything still waiting on stale results.
        self._result.set()
        self._urlresult.set()

    def _write(self, message):
        '''Send one CRLF-terminated command line to the engine.'''
        try:
            logger = logging.getLogger('AceClient_write')
            logger.debug('>>> %s' % message)
            self._socket.write('%s\r\n' % message)
        except EOFError as e: raise AceException('Write error! %s' % repr(e))

    def aceInit(self, gender=AceConst.SEX_MALE, age=AceConst.AGE_25_34, product_key=None, videoseekback=0, videotimeout=0):
        '''Perform the HELLO/READY handshake and apply engine options.

        Raises AceException on handshake timeout or key rejection.
        '''
        self._gender = gender
        self._age = age
        self._product_key = product_key
        self._seekback = videoseekback
        self._videotimeout = float(videotimeout)
        self._started_again.clear()

        logger = logging.getLogger('AceClient_aceInit')

        self._result = AsyncResult()
        self._write(AceMessage.request.HELLO) # Sending HELLOBG
        try: params = self._result.get(timeout=self._resulttimeout)
        except gevent.Timeout:
            errmsg = 'Engine response time %ssec exceeded. HELLOTS not received!' % self._resulttimeout
            raise AceException(errmsg)

        self._result = AsyncResult()
        self._write(AceMessage.request.READY(params.get('key',''), self._product_key))
        try:
            if self._result.get(timeout=self._resulttimeout) == 'NOTREADY': # Got NOTREADY instead of AUTH user_auth_level
                errmsg = 'NOTREADY received from AceEngine! Wrong acekey?'
                raise AceException(errmsg)
        except gevent.Timeout:
            errmsg = 'Engine response time %ssec exceeded. AUTH not received!' % self._resulttimeout
            raise AceException(errmsg)

        if int(params.get('version_code', 0)) >= 3003600: # Display download_stopped message
            params_dict = {'use_stop_notifications': '1'}
            self._write(AceMessage.request.SETOPTIONS(params_dict))

    def START(self, command, paramsdict, acestreamtype):
        '''
        Start video method.
        Returns the url provided by AceEngine.
        '''
        paramsdict['stream_type'] = ' '.join(['{}={}'.format(k,v) for k,v in acestreamtype.items()])
        self._urlresult = AsyncResult()
        self._write(AceMessage.request.START(command.upper(), paramsdict))
        try: return self._urlresult.get(timeout=self._videotimeout) # Get url for play from AceEngine
        except gevent.Timeout:
            errmsg = 'Engine response time %ssec exceeded. START URL not received!' % self._videotimeout
            raise AceException(errmsg)

    def STOP(self):
        '''
        Stop video method. Waits for the engine to acknowledge with a STATE.
        '''
        self._state = AsyncResult()
        self._write(AceMessage.request.STOP)
        try: self._state.get(timeout=self._resulttimeout); self._started_again.clear()
        except gevent.Timeout:
            errmsg = 'Engine response time %ssec exceeded. STATE 0 (IDLE) not received!' % self._resulttimeout
            raise AceException(errmsg)

    def LOADASYNC(self, command, params):
        '''Ask the engine for content info; returns the parsed LOADRESP json.'''
        self._result = AsyncResult()
        self._write(AceMessage.request.LOADASYNC(command.upper(), random.randint(1, 100000), params))
        try: return self._result.get(timeout=self._resulttimeout) # Get _contentinfo json
        except gevent.Timeout:
            errmsg = 'Engine response time %ssec exceeded. LOADRESP not received!' % self._resulttimeout
            raise AceException(errmsg)

    def GETCONTENTINFO(self, command, value):
        '''Content info for a single content identifier (infohash/url/...).'''
        paramsdict = { command:value, 'developer_id':'0', 'affiliate_id':'0', 'zone_id':'0' }
        return self.LOADASYNC(command, paramsdict)

    def GETCID(self, command, value):
        '''Resolve the engine content id (CID) for the given content.'''
        contentinfo = self.GETCONTENTINFO(command, value)
        if contentinfo['status'] in (1, 2):
            paramsdict = {'checksum':contentinfo['checksum'], 'infohash':contentinfo['infohash'], 'developer_id':'0', 'affiliate_id':'0', 'zone_id':'0'}
            self._cidresult = AsyncResult()
            self._write(AceMessage.request.GETCID(paramsdict))
            try:
                cid = self._cidresult.get(timeout=self._resulttimeout)
                # Engine answers '##<cid>'; strip the leading marker.
                return '' if cid is None or cid == '' else cid[2:]
            except gevent.Timeout:
                errmsg = 'Engine response time %ssec exceeded. CID not received!' % self._resulttimeout
                raise AceException(errmsg)
        else:
            errmsg = 'LOADASYNC returned error with message: %s' % contentinfo['message']
            raise AceException(errmsg)

    def GETINFOHASH(self, command, value, idx=0):
        '''Return (infohash, filename) of file number *idx* for the content.'''
        contentinfo = self.GETCONTENTINFO(command, value)
        if contentinfo['status'] in (1, 2):
            return contentinfo['infohash'], [x[0] for x in contentinfo['files'] if x[1] == int(idx)][0]
        elif contentinfo['status'] == 0:
            errmsg = 'LOADASYNC returned status 0: The transport file does not contain audio/video files'
            raise AceException(errmsg)
        else:
            errmsg = 'LOADASYNC returned error with message: %s' % contentinfo['message']
            raise AceException(errmsg)

    def StreamReader(self, url, cid, counter, req_headers=None):
        '''Pump broadcast data from the engine HTTP/HLS endpoint to clients.'''
        logger = logging.getLogger('StreamReader')
        logger.debug('Start StreamReader for url: %s' % url)

        self._write(AceMessage.request.EVENT('play'))

        with requests.Session() as session:
            if req_headers:
                logger.debug('Sending headers from client to AceEngine: %s' % req_headers)
                session.headers.update(req_headers)
            try:
                # AceEngine return link for HLS stream
                if url.endswith('.m3u8'):
                    _used_chunks = []
                    while self._state.get(timeout=self._resulttimeout)[0] in ('2', '3'):
                        for line in session.get(url, stream=True, timeout=(5, None)).iter_lines():
                            if self._state.get(timeout=self._resulttimeout)[0] not in ('2', '3'): return
                            if line.startswith(b'http://') and line not in _used_chunks:
                                self.RAWDataReader(session.get(line, stream=True, timeout=(5, None)).raw, cid, counter)
                                _used_chunks.append(line)
                                # Remember only the 15 most recent chunk urls.
                                if len(_used_chunks) > 15: _used_chunks.pop(0)
                        gevent.sleep(4)
                # AceStream return link for HTTP stream
                else: self.RAWDataReader(session.get(url, stream=True, timeout=(5, None)).raw, cid, counter)

            except requests.exceptions.HTTPError as err:
                logger.error('An http error occurred while connecting to aceengine: %s' % repr(err))
            except requests.exceptions.RequestException:
                logger.error('There was an ambiguous exception that occurred while handling request')
            except Exception as err:
                logger.error('Unexpected error in streamreader %s' % repr(err))
            finally:
                _used_chunks = None
                self._streamReaderQueue.queue.clear()
                counter.deleteAll(cid)

    def RAWDataReader(self, stream, cid, counter):
        '''Read raw stream chunks and fan them out to connected clients.'''
        logger = logging.getLogger('RAWDataReader')

        while self._state.get(timeout=self._resulttimeout)[0] in ('2', '3'):
            gevent.sleep()
            if self._state.get(timeout=self._resulttimeout)[0] == '2': # Read data from AceEngine only if STATE 2 (DOWNLOADING)
                data = stream.read(requests.models.CONTENT_CHUNK_SIZE)
                if not data: return
                # Ring-buffer semantics: drop the oldest chunk when full.
                try: self._streamReaderQueue.put_nowait(data)
                except gevent.queue.Full: self._streamReaderQueue.get_nowait(); self._streamReaderQueue.put_nowait(data)
                clients = counter.getClients(cid)
                if not clients: return
                for c in clients:
                    try: c.queue.put(data, timeout=5)
                    except gevent.queue.Full:
                        if len(clients) > 1:
                            logger.warning('Client %s does not read data from buffer until 5sec - disconnect it' % c.handler.clientip)
                            c.destroy()
            elif (time.time() - self._state.get(timeout=self._resulttimeout)[1]) >= self._videotimeout: # STATE 3 (BUFFERING)
                logger.warning('No data received from AceEngine for %ssec - broadcast stopped' % self._videotimeout); return

    def _recvData(self):
        '''
        Data receiver method for greenlet: parses engine response lines and
        resolves the matching AsyncResult/Event attributes.
        '''
        logger = logging.getLogger('AceClient_recvdata')

        while True:
            gevent.sleep()
            try:
                self._recvbuffer = self._socket.read_until('\r\n').strip()
                logger.debug('<<< %s' % requests.compat.unquote(self._recvbuffer))
            except gevent.GreenletExit: break
            # Deliberately bare: must also catch BaseException-derived
            # failures (e.g. gevent.Timeout) so the reader shuts down cleanly.
            except:
                # If something happened during read, abandon reader.
                logger.error('Exception at socket read. AceClient destroyed')
                if not self._shuttingDown.ready(): self._shuttingDown.set()
                return
            else: # Parse the response line (an empty line matches no prefix below)
                # HELLOTS
                if self._recvbuffer.startswith('HELLOTS'):
                    # version=engine_version version_code=version_code key=request_key http_port=http_port
                    self._result.set({ k:v for k,v in (x.split('=') for x in self._recvbuffer.split() if '=' in x) })
                # NOTREADY
                elif self._recvbuffer.startswith('NOTREADY'): self._result.set('NOTREADY')
                # AUTH
                elif self._recvbuffer.startswith('AUTH'): self._result.set(self._recvbuffer.split()[1]) # user_auth_level
                # START
                elif self._recvbuffer.startswith('START'):
                    # url [ad=1 [interruptable=1]] [stream=1] [pos=position]
                    params = { k:v for k,v in (x.split('=') for x in self._recvbuffer.split() if '=' in x) }
                    # BUGFIX: was `is not '1'` -- identity comparison on a
                    # str literal; use equality instead.
                    if not self._seekback or self._started_again.ready() or params.get('stream','') != '1':
                        # If seekback is disabled, we use link in first START command.
                        # If seekback is enabled, we wait for first START command and
                        # ignore it, then do seekback in first EVENT position command
                        # AceStream sends us STOP and START again with new link.
                        # We use only second link then.
                        self._urlresult.set(self._recvbuffer.split()[1]) # url for play
                # LOADRESP
                elif self._recvbuffer.startswith('LOADRESP'):
                    self._result.set(requests.compat.json.loads(requests.compat.unquote(''.join(self._recvbuffer.split()[2:]))))
                # STATE
                elif self._recvbuffer.startswith('STATE'): # tuple of (state_id, time of appearance)
                    self._state.set((self._recvbuffer.split()[1], time.time()))
                # STATUS
                elif self._recvbuffer.startswith('STATUS'):
                    self._tempstatus = self._recvbuffer.split()[1]
                    if self._tempstatus.startswith('main:idle'): pass
                    elif self._tempstatus.startswith('main:loading'): pass
                    elif self._tempstatus.startswith('main:starting'): pass
                    elif self._tempstatus.startswith('main:check'): pass
                    elif self._tempstatus.startswith('main:wait'): pass
                    elif self._tempstatus.startswith(('main:prebuf','main:buf')): pass #progress;time
                        #values = list(map(int, self._tempstatus.split(';')[3:]))
                        #self._status.set({k: v for k, v in zip(AceConst.STATUS, values)})
                    elif self._tempstatus.startswith('main:dl'): pass
                        #values = list(map(int, self._tempstatus.split(';')[1:]))
                        #self._status.set({k: v for k, v in zip(AceConst.STATUS, values)})
                    elif self._tempstatus.startswith('main:err'): # err;error_id;error_message
                        self._status.set_exception(AceException('%s with message %s' % (self._tempstatus.split(';')[0],self._tempstatus.split(';')[2])))
                # CID
                elif self._recvbuffer.startswith('##'): self._cidresult.set(self._recvbuffer)
                # INFO
                elif self._recvbuffer.startswith('INFO'): pass
                # EVENT
                elif self._recvbuffer.startswith('EVENT'):
                    self._tempevent = self._recvbuffer.split()
                    if self._seekback and not self._started_again.ready() and 'livepos' in self._tempevent:
                        params = { k:v for k,v in (x.split('=') for x in self._tempevent if '=' in x) }
                        self._write(AceMessage.request.LIVESEEK(int(params['last']) - self._seekback))
                        self._started_again.set()
                    elif 'getuserdata' in self._tempevent: self._write(AceMessage.request.USERDATA(self._gender, self._age))
                    elif 'cansave' in self._tempevent: pass
                    elif 'showurl' in self._tempevent: pass
                    elif 'download_stopped' in self._tempevent: pass
                # PAUSE
                elif self._recvbuffer.startswith('PAUSE'): self._write(AceMessage.request.EVENT('pause'))
                # RESUME
                elif self._recvbuffer.startswith('RESUME'): self._write(AceMessage.request.EVENT('play'))
                # STOP
                elif self._recvbuffer.startswith('STOP'): pass
                # SHUTDOWN
                elif self._recvbuffer.startswith('SHUTDOWN'):
                    self._socket.close()
                    logger.debug('AceClient destroyed')
                    break
# Beispiel #31
# 0
class GClient(object):
	"""
	A generic gevent-based network client, that implements common send and receive functionality.
	Useful members:
		group: A gevent.pool.Group() tied to the lifetime of the client. When stopping, all greenlets
		       in the group will be killed.
		started: True if the client has been started
		stopped: True if the client has been stopped
		running: True if the client has been started but not stopped
	"""

	def __init__(self, logger=None):
		"""Initialize client state; no connection is made until start().

		logger: optional logging.Logger; defaults to a 'gclient' child
		logger named after the concrete subclass.
		"""
		self.group = Group()
		self.started = False
		self._send_queue = Queue()
		self._stopping = False
		self._stopped = AsyncResult()
		if not hasattr(self, 'logger'): # let subclass override if they want
			if not logger:
				logger = logging.getLogger('gclient').getChild(type(self).__name__)
			self.logger = logger

	def start(self):
		"""Start the client, performing some connection step and beginning processing.

		Raises if already started. Calls _start() for the subclass-specific
		connection step, then spawns the send/receive loops.
		"""
		if self.started:
			raise Exception("Already started")
		self.started = True
		self.logger.debug("{} starting".format(self))
		self._start()
		# Loop greenlets live in self.group so stop() can kill them as a unit.
		self._send_loop_worker = self.group.spawn(self._send_loop)
		self._recv_loop_worker = self.group.spawn(self._recv_loop)
		self.logger.info("{} started".format(self))

	def _start(self):
		"""Override this with code that creates and initializes a connection.

		Called exactly once from start(), before the loop greenlets spawn.
		"""

	def stop(self, ex=None):
		"""Stop the client, optionally referencing some exception.
		This will kill all greenlets in group and do any specific stop handling.
		Anyone waiting on the client stopping will have the exception raised, if any.
		"""
		if self._stopping:
			# Another greenlet is already stopping us; just wait for it.
			self.wait_for_stop()
			return
		if not self.started:
			self.started = True
		self._stopping = True

		if ex:
			self.logger.info("{} stopping with error".format(self), exc_info=True)
		else:
			self.logger.info("{} stopping".format(self))

		# since the greenlet calling stop() might be in self.group, we make a new greenlet to do the work
		@gevent.spawn
		def stop_worker():
			self.group.kill(block=True)
			# Wake anyone blocked in send(..., block=True): their message
			# will never be sent now.
			while not self._send_queue.empty():
				msg, event = self._send_queue.get(block=False)
				event.set()
			self._stop(ex)
			# _stopped releases wait_for_stop() callers, with ex if given.
			if ex:
				self._stopped.set_exception(ex)
			else:
				self._stopped.set(None)
			self.logger.debug("{} fully stopped".format(self))

		stop_worker.get()

	def _stop(self, ex=None):
		"""Optionally override this with specific cleanup code for stopping the client,
		such as closing the connection. Called from the stop worker greenlet."""
		pass

	def wait_for_stop(self):
		"""Block until the client has stopped, re-raising the exception it was stopped with, if any."""
		# Blocks until stop()'s worker resolves _stopped.
		self._stopped.get()

	@property
	def stopped(self):
		"""True once stop() has fully completed (its worker resolved _stopped)."""
		return self._stopped.ready()

	@property
	def running(self):
		"""Whether the client has been started and has not yet stopped."""
		if not self.started:
			return False
		return not self.stopped

	def send(self, msg, block=False):
		"""Queue *msg* for delivery by the send loop.

		With block=True, waits until the message has actually been written
		(or the client stops). With block=False, immediately returns a
		gevent Event that is set at that same point. Messages are always
		sent in queue order, so waiting is rarely necessary.
		"""
		if self._stopping:
			raise Exception("Can't send to stopped client")
		sent = Event()
		self._send_queue.put((msg, sent))
		if not block:
			return sent
		sent.wait()

	def _send_loop(self):
		"""Drain the send queue forever, setting each message's event once sent.

		Any error from _send() stops the whole client with that exception.
		"""
		try:
			for msg, event in self._send_queue:
				self._send(msg)
				event.set()
		except Exception as ex:
			self.stop(ex)

	def _send(self, msg):
		"""Override this with specific code for sending a message. It may raise to indicate a failure
		that will stop the client. Called serially from the send loop."""

	def _recv_loop(self):
		"""Run _receive(); translate its termination into a client stop.

		stop(ex) is called inside the except block so the active exception
		info is still available to stop()'s exc_info=True logging.
		"""
		try:
			self._receive()
		except Exception as ex:
			self.stop(ex)
		else:
			self.stop()

	def _receive(self):
		"""Override this with code that receives data. It may return to indicate a graceful close,
# Beispiel #32
# 0
class Operation(BaseOperation):
    """A monitored unit of work executed in its own greenlet.

    Wraps ``callable(*args)`` in a Yaylet and exposes its outcome through
    an AsyncResult, running consistency ("paradox") checks before the
    waiting greenlets are released.
    """

    def __init__(self, monitor, callable, *args):
        super(Operation, self).__init__(monitor)

        # Identity used to refer to this operation: target plus arguments.
        self.id = (callable, args)

        self.callable = callable
        self.args = args
        # Bound-method introspection; both are None for plain functions.
        self.node = getattr(callable, "__self__", None)
        self.method = getattr(callable, "__name__", None)

        self.result = AsyncResult()

        # Register this operation as a dependency of whatever operation
        # is currently running.
        self.primary_parent = self.monitor.get_current()
        self.primary_parent.add_dependency(self)

        self.greenlet = Yaylet(callable, *args)
        self.greenlet.operation = self
        self.greenlet.link(self._operation_finish)

    def start(self):
        # Begin execution of the wrapped callable.
        self.greenlet.start()

    def ready(self):
        # True once _operation_finish has published a value or exception.
        return self.result.ready()

    def get(self):
        # Block until the operation finishes; re-raises its exception.
        return self.result.get()

    def _operation_finish(self, source):
        # WARNING: This method will be called in its own greenlet.
        # Using self.monitor.execute from here will cause work to be owned by Root

        # Cycle breaking
        source.operation = None

        # Setup the AsyncResult so *new* calls will return immediately
        # But let's not notify the existing blocked greenlets until we
        # have run the paradox detector
        # NOTE(review): writes AsyncResult internals (.value/._exception)
        # directly, deliberately bypassing set()/set_exception() notification.
        if source.successful():
            self.result.value = source.value
            self.result._exception = None
        else:
            self.result.value = None
            self.result._exception = source.exception

        # Purge any operations that were cached during a peek operation
        # (self.peeks is presumably populated by BaseOperation -- not
        # visible here.)
        checks = []

        for p in self.peeks:
            for c, op in p.walk_children():
                if op.method.startswith("as_"):
                    checks.append(op)
                op.purge_one()

            if p.method.startswith("as_"):
                checks.append(p)
            p.purge_one()

        getcurrent().operation = self

        # Re-evaluate every "as_*" accessor seen during peeking; a changed
        # value means the configuration is inconsistent (a "paradox").
        for op in checks:
            try:
                current_val = op.get()
                new_val = self.monitor.wait(getattr(op.node, op.method))
            except Exception as e:
                self.result.set_exception(e)
                return

            if new_val != current_val:
                self.result.set_exception(
                    errors.ParadoxError(
                        "Inconsistent configuration detected - changed from %r to %r" % (current_val, new_val),
                        anchor=op.node.anchor,
                    )
                )
                getcurrent().operation = None
                return

        getcurrent().operation = None

        # Now notify all the other greenlets waiting for us that it is safe to continue
        if source.successful():
            self.result.set(source.value)
        else:
            self.result.set_exception(source.exception)

    def __repr__(self):
        return "%s<%s>.%s(%r)" % (self.node.__class__.__name__, id(self), self.method, self.args)
# Beispiel #33
# 0
    def new_netting_channel(
            self,
            partner: typing.Address,
            settle_timeout: int,
    ) -> typing.ChannelID:
        """ Creates a new channel in the TokenNetwork contract.

        Args:
            partner: The peer to open the channel with.
            settle_timeout: The settle timeout to use for this channel.

        Returns:
            The channel identifier of the newly created channel.

        Raises:
            InvalidAddress: If *partner* is not a binary address.
            InvalidSettleTimeout: If *settle_timeout* is out of range.
            SamePeerAddress: If *partner* equals our own address.
            RuntimeError: If the channel does not exist after the
                transaction completed.
        """
        if not is_binary_address(partner):
            raise InvalidAddress('Expected binary address format for channel partner')

        invalid_timeout = (
            settle_timeout < self.settlement_timeout_min() or
            settle_timeout > self.settlement_timeout_max()
        )
        if invalid_timeout:
            raise InvalidSettleTimeout('settle_timeout must be in range [{}, {}], is {}'.format(
                self.settlement_timeout_min(),
                self.settlement_timeout_max(),
                settle_timeout,
            ))

        if self.node_address == partner:
            raise SamePeerAddress('The other peer must not have the same address as the client.')

        # Prevent concurrent attempts to open a channel with the same token and
        # partner address.
        if partner not in self.open_channel_transactions:
            new_open_channel_transaction = AsyncResult()
            self.open_channel_transactions[partner] = new_open_channel_transaction

            try:
                transaction_hash = self._new_netting_channel(partner, settle_timeout)
            except Exception as e:
                # Propagate the failure to every waiter, then to our caller.
                new_open_channel_transaction.set_exception(e)
                raise
            else:
                new_open_channel_transaction.set(transaction_hash)
            finally:
                # Safe to drop: waiters already hold a reference to the result.
                self.open_channel_transactions.pop(partner, None)
        else:
            # All other concurrent threads should block on the result of opening this channel
            self.open_channel_transactions[partner].get()

        channel_created = self.channel_exists(self.node_address, partner)
        if channel_created is False:
            log.error(
                'creating new channel failed',
                peer1=pex(self.node_address),
                peer2=pex(partner),
            )
            raise RuntimeError('creating new channel failed')

        channel_identifier = self.detail_channel(self.node_address, partner)['channel_identifier']

        log.info(
            'new_netting_channel called',
            peer1=pex(self.node_address),
            peer2=pex(partner),
            channel_identifier=encode_hex(channel_identifier),
        )

        return channel_identifier
# Beispiel #34
# 0
class AceClient:
  '''
  Blocking client for the AceStream engine Telnet API.

  A background greenlet (_recvData) reads engine responses and feeds the
  gevent Events/AsyncResults that the public methods (aceInit, START,
  getUrl, getPlayEvent) block on.
  '''

  def __init__(self, host, port, connect_timeout = 5, result_timeout = 5, debug = logging.ERROR):
    # Receive buffer (last line read from the engine)
    self._recvbuffer = None
    # Stream URL reported by the engine
    self._url = None
    # Ace stream socket
    self._socket = None
    # How long to wait for engine replies (seconds)
    self._resulttimeout = result_timeout
    # Shutting down flag
    self._shuttingDown = Event()
    # Product key
    self._product_key = None
    # Debug level
    self._debug = debug
    # Current STATUS
    self._status = None
    # Current STATE
    self._state = None
    # Current AUTH
    self._auth = None
    self._gender = None
    self._age = None
    # Result of the last START command
    self._result = AsyncResult()
    self._authevent = Event()
    # Result for getUrl()
    self._urlresult = AsyncResult()
    # Event for resuming from PAUSE
    self._resumeevent = Event()

    # Logger
    logger = logging.getLogger('AceClient_init')

    try:
      self._socket = telnetlib.Telnet(host, port, connect_timeout)
      logger.info("Successfully connected with Ace!")
    except Exception as e:
      raise AceException("Socket creation error! Ace is not running? " + str(e))

    # Spawning recvData greenlet
    gevent.spawn(self._recvData)
    gevent.sleep()


  def __del__(self):
    # Destructor just calls destroy() method
    self.destroy()


  def destroy(self):
    '''
    Shut the client down: unblock any waiters, then ask the engine to
    shut down.  Safe to call more than once.
    '''
    if self._shuttingDown.is_set():
      # Already in the middle of destroying
      return

    # Logger
    logger = logging.getLogger("AceClient_destroy")
    # We should resume video to prevent read greenlet deadlock
    self._resumeevent.set()
    # And to prevent getUrl deadlock (getUrl will receive None)
    self._urlresult.set()

    # Mark shutdown before writing so the reader greenlet does not treat
    # the closing socket as an error, then try to disconnect.
    self._shuttingDown.set()
    try:
      logger.debug("Destroying client...")
      self._write(AceMessage.request.SHUTDOWN)
    except:
      # Ignore exceptions on destroy; bare except kept deliberately so
      # nothing can escape the destructor path (including GreenletExit).
      pass

  def _write(self, message):
    '''Send one command line to the engine.  Raises AceException on EOF.'''
    try:
      self._socket.write(message + "\r\n")
    except EOFError as e:
      raise AceException("Write error! " + str(e))


  def aceInit(self, gender = AceConst.SEX_MALE, age = AceConst.AGE_18_24, product_key = None, pause_delay = 0):
    '''
    Perform the HELLO/AUTH handshake with the engine.

    Raises AceException on authentication timeout or failure.
    '''
    self._product_key = product_key
    self._gender = gender
    self._age = age
    # PAUSE/RESUME delay
    self._pausedelay = pause_delay

    # Logger
    logger = logging.getLogger("AceClient_aceInit")

    # Sending HELLO; _recvData sets _authevent once AUTH arrives
    self._write(AceMessage.request.HELLO)
    if not self._authevent.wait(self._resulttimeout):
      errmsg = "Authentication timeout. Wrong key?"
      logger.error(errmsg)
      raise AceException(errmsg)

    if not self._auth:
      errmsg = "Authentication error. Wrong key?"
      logger.error(errmsg)
      raise AceException(errmsg)

    logger.debug("aceInit ended")


  def START(self, datatype, value):
    '''
    Start video method
    '''

    # Logger
    logger = logging.getLogger("AceClient_START")

    # Fresh results for this START cycle
    self._result = AsyncResult()
    self._urlresult = AsyncResult()

    self._write(AceMessage.request.START(datatype.upper(), value))

    try:
      if not self._result.get(timeout = self._resulttimeout):
        errmsg = "START error!"
        logger.error(errmsg)
        raise AceException(errmsg)
    except gevent.Timeout:
      errmsg = "START timeout!"
      logger.error(errmsg)
      raise AceException(errmsg)


  def getUrl(self, timeout = 40):
    '''Block until the engine reports the playback URL (or timeout).'''
    # Logger
    logger = logging.getLogger("AceClient_getURL")

    try:
      res = self._urlresult.get(timeout = timeout)
      return res
    except gevent.Timeout:
      errmsg = "getURL timeout!"
      logger.error(errmsg)
      raise AceException(errmsg)


  def getPlayEvent(self, timeout = None):
    '''
    Blocking while in PAUSE, non-blocking while in RESUME
    '''
    self._resumeevent.wait(timeout = timeout)
    return


  def _recvData(self):
    '''
    Data receiver method for greenlet
    '''
    logger = logging.getLogger('AceClient_recvdata')

    while True:
      gevent.sleep()
      try:
        self._recvbuffer = self._socket.read_until("\r\n", 1)
        self._recvbuffer = self._recvbuffer.strip()
      except:
        # If something happened during read, abandon reader
        # Should not ever happen
        logger.error("Exception at socket read")
        return

      # Parsing everything
      if self._recvbuffer.startswith(AceMessage.response.HELLO):
        # Parse HELLO; answer READY with or without a request key
        if 'key=' in self._recvbuffer:
          self._request_key = self._recvbuffer.split()[2].split('=')[1]
          self._write(AceMessage.request.READY_key(self._request_key, self._product_key))
          self._request_key = None
        else:
          self._write(AceMessage.request.READY_nokey)

      elif self._recvbuffer.startswith(AceMessage.response.NOTREADY):
        # NOTREADY
        logger.error("Ace is not ready. Wrong auth?")
        return

      elif self._recvbuffer.startswith(AceMessage.response.START):
        # START <url>: publish the stream URL and unblock getUrl()
        try:
          self._url = self._recvbuffer.split()[1]
          self._urlresult.set(self._url)
          self._resumeevent.set()
        except IndexError:
          self._url = None

      elif self._recvbuffer.startswith(AceMessage.response.STOP):
        pass

      elif self._recvbuffer.startswith(AceMessage.response.SHUTDOWN):
        logger.debug("Got SHUTDOWN from engine")
        self._socket.close()
        return

      elif self._recvbuffer.startswith(AceMessage.response.AUTH):
        try:
          self._auth = self._recvbuffer.split()[1]
          # Send USERDATA here
          self._write(AceMessage.request.USERDATA(self._gender, self._age))
        except:
          pass
        self._authevent.set()

      elif self._recvbuffer.startswith(AceMessage.response.GETUSERDATA):
        raise AceException("You should init me first!")

      elif self._recvbuffer.startswith(AceMessage.response.STATE):
        self._state = self._recvbuffer.split()[1]

      elif self._recvbuffer.startswith(AceMessage.response.STATUS):
        self._tempstatus = self._recvbuffer.split()[1].split(';')[0]
        if self._tempstatus != self._status:
          self._status = self._tempstatus
          logger.debug("STATUS changed to "+self._status)

        if self._status == 'main:err':
          # Engine error: fail both pending results with the message
          logger.warning(self._status + ' with message ' + self._recvbuffer.split(';')[2])
          self._result.set_exception(AceException(self._status + ' with message ' + self._recvbuffer.split(';')[2]))
          self._urlresult.set_exception(AceException(self._status + ' with message ' + self._recvbuffer.split(';')[2]))
        elif self._status == 'main:starting':
          self._result.set(True)

      elif self._recvbuffer.startswith(AceMessage.response.PAUSE):
        logger.debug("PAUSE event")
        self._resumeevent.clear()

      elif self._recvbuffer.startswith(AceMessage.response.RESUME):
        logger.debug("RESUME event")
        gevent.sleep(self._pausedelay)
        self._resumeevent.set()
Beispiel #35
0
class AceClient(object):
    '''
    Client for the AceStream engine Telnet API with HTTP stream-reader
    support.

    A background greenlet (_recvData) parses engine responses and feeds
    the gevent Events/AsyncResults that the blocking public methods
    (aceInit, START, getUrl, LOADASYNC, GETCID, ...) wait on.
    '''

    def __init__(self, host, port, connect_timeout=5, result_timeout=10):
        # Receive buffer
        self._recvbuffer = None
        # Stream URL
        self._url = None
        # Ace stream socket
        self._socket = None
        # Result timeout
        self._resulttimeout = result_timeout
        # Shutting down flag
        self._shuttingDown = Event()
        # Product key
        self._product_key = None
        # Current STATUS
        self._status = None
        # Current STATE
        self._state = None
        # Current video position
        self._position = None
        # Available video position (loaded data)
        self._position_last = None
        # Buffered video pieces
        self._position_buf = None
        # Current AUTH
        self._auth = None
        self._gender = None
        self._age = None
        # Result (Created with AsyncResult() on call)
        self._result = AsyncResult()
        self._authevent = Event()
        # Result for getURL()
        self._urlresult = AsyncResult()
        # Result for GETCID()
        self._cidresult = AsyncResult()
        # Event for resuming from PAUSE
        self._resumeevent = Event()
        # Seekback seconds.
        self._seekback = 0
        # Did we get START command again? For seekback.
        self._started_again = False

        self._idleSince = time.time()
        self._lock = threading.Condition(threading.Lock())
        self._streamReaderConnection = None
        self._streamReaderState = None
        self._streamReaderQueue = deque()
        self._engine_version_code = 0
        # Last request headers seen by startStreamReaderPT().  Must be
        # initialized here, otherwise the first header comparison in
        # startStreamReaderPT() raises AttributeError.
        self.req_headers = None

        # Logger (name was previously corrupted by a stray
        # "import traceback" pasted into the string literal)
        logger = logging.getLogger('AceClient_init')

        try:
            self._socket = telnetlib.Telnet(host, port, connect_timeout)
            logger.info("Successfully connected with Ace!")
        except Exception as e:
            raise AceException("Socket creation error! Ace is not running? " +
                               repr(e))

        # Spawning recvData greenlet
        gevent.spawn(self._recvData)
        gevent.sleep()

    def __del__(self):
        # Destructor just calls destroy() method
        self.destroy()

    def destroy(self):
        '''
        Shut the client down: unblock any waiters, then ask the engine
        to shut down.  Safe to call more than once.
        '''
        if self._shuttingDown.is_set():
            # Already in the middle of destroying
            return

        # Logger
        logger = logging.getLogger("AceClient_destroy")
        # We should resume video to prevent read greenlet deadlock
        self._resumeevent.set()
        # And to prevent getUrl deadlock (getUrl will receive None)
        self._urlresult.set()

        # Mark shutdown before writing so the reader greenlet does not
        # treat the closing socket as an error, then try to disconnect.
        self._shuttingDown.set()
        try:
            logger.debug("Destroying client...")
            self._write(AceMessage.request.SHUTDOWN)
        except:
            # Ignore exceptions on destroy; bare except kept deliberately
            # so nothing can escape the destructor path.
            pass

    def reset(self):
        '''Reset per-stream state before reusing the client.'''
        self._started_again = False
        self._idleSince = time.time()
        self._streamReaderState = None

    def _write(self, message):
        '''Send one command line to the engine.  Raises AceException on EOF.'''
        try:
            logger = logging.getLogger("AceClient_write")
            logger.debug(message)
            self._socket.write(message + "\r\n")
        except EOFError as e:
            raise AceException("Write error! " + repr(e))

    def aceInit(self,
                gender=AceConst.SEX_MALE,
                age=AceConst.AGE_18_24,
                product_key=None,
                pause_delay=0,
                seekback=0):
        '''
        Perform the HELLO/AUTH handshake with the engine.

        Raises AceException on authentication timeout or failure.
        '''
        self._product_key = product_key
        self._gender = gender
        self._age = age
        # PAUSE/RESUME delay
        self._pausedelay = pause_delay
        # Seekback seconds
        self._seekback = seekback

        # Logger
        logger = logging.getLogger("AceClient_aceInit")

        # Sending HELLO; _recvData sets _authevent once AUTH arrives
        self._write(AceMessage.request.HELLO)
        if not self._authevent.wait(self._resulttimeout):
            errmsg = "Authentication timeout. Wrong key?"
            logger.error(errmsg)
            raise AceException(errmsg)

        if not self._auth:
            errmsg = "Authentication error. Wrong key?"
            logger.error(errmsg)
            raise AceException(errmsg)

        logger.debug("aceInit ended")

    def _getResult(self):
        '''Wait for and return the pending command result.

        Raises AceException when the result is falsy or the wait times out.
        '''
        try:
            result = self._result.get(timeout=self._resulttimeout)
            if not result:
                raise AceException("Result not received")
        except gevent.Timeout:
            raise AceException("Timeout")

        return result

    def START(self, datatype, value):
        '''
        Start video method
        '''
        stream_type = 'output_format=http' if self._engine_version_code >= 3010500 and not AceConfig.vlcuse else ''
        # NOTE(review): only _urlresult is re-armed here; _result may still
        # hold a value from a previous command — confirm intended.
        self._urlresult = AsyncResult()
        self._write(
            AceMessage.request.START(datatype.upper(), value, stream_type))
        self._getResult()

    def STOP(self):
        '''
        Stop video method
        '''
        if self._state is not None and self._state != '0':
            self._result = AsyncResult()
            self._write(AceMessage.request.STOP)
            self._getResult()

    def LOADASYNC(self, datatype, url):
        '''Ask the engine for content info; returns the parsed LOADRESP dict.'''
        self._result = AsyncResult()
        self._write(
            AceMessage.request.LOADASYNC(datatype.upper(), 0, {'url': url}))
        return self._getResult()

    def GETCID(self, datatype, url):
        '''Return the engine content id (CID) for the given URL.'''
        contentinfo = self.LOADASYNC(datatype, url)
        self._cidresult = AsyncResult()
        self._write(
            AceMessage.request.GETCID(contentinfo.get('checksum'),
                                      contentinfo.get('infohash'), 0, 0, 0))
        cid = self._cidresult.get(True, 5)
        # Engine answers '##<cid>'; strip the marker
        return '' if not cid or cid == '' else cid[2:]

    def GETCONTENTINFO(self, datatype, url):
        '''Return the raw content info dict for the given URL.'''
        contentinfo = self.LOADASYNC(datatype, url)
        return contentinfo

    def getUrl(self, timeout=40):
        '''Block until the engine reports the playback URL (or timeout).'''
        # Logger
        logger = logging.getLogger("AceClient_getURL")

        try:
            res = self._urlresult.get(timeout=timeout)
            return res
        except gevent.Timeout:
            errmsg = "getURL timeout!"
            logger.error(errmsg)
            raise AceException(errmsg)

    def startStreamReader(self, url, cid, counter):
        '''Read the engine's HTTP stream and fan chunks out to clients.'''
        logger = logging.getLogger("StreamReader")
        self._streamReaderState = 1
        logger.debug("Opening video stream: %s" % url)

        try:
            connection = self._streamReaderConnection = urllib2.urlopen(url)

            if url.endswith('.m3u8'):
                logger.debug("Can't stream HLS in non VLC mode: %s" % url)
                return

            if connection.getcode() != 200:
                logger.error("Failed to open video stream %s" % connection)
                return

            # Signal waiters that the stream is open
            with self._lock:
                self._streamReaderState = 2
                self._lock.notifyAll()

            while True:
                data = None
                clients = counter.getClients(cid)

                try:
                    data = connection.read(AceConfig.readchunksize)
                except:
                    break

                if data and clients:
                    # Keep a bounded backlog of recent chunks
                    with self._lock:
                        if len(self._streamReaderQueue
                               ) == AceConfig.readcachesize:
                            self._streamReaderQueue.popleft()
                        self._streamReaderQueue.append(data)

                    for c in clients:
                        try:
                            c.addChunk(data, 5.0)
                        except Queue.Full:
                            # Drop slow clients only while others remain
                            if len(clients) > 1:
                                logger.debug("Disconnecting client: %s" %
                                             str(c))
                                c.destroy()
                elif not clients:
                    logger.debug(
                        "All clients disconnected - closing video stream")
                    break
                else:
                    logger.warning("No data received")
                    break
        except urllib2.URLError:
            logger.error("Failed to open video stream")
            logger.error(traceback.format_exc())
        except:
            logger.error(traceback.format_exc())
            if counter.getClients(cid):
                logger.error("Failed to read video stream")
        finally:
            self.closeStreamReader()
            with self._lock:
                self._streamReaderState = 3
                self._lock.notifyAll()
            counter.deleteAll(cid)

    def closeStreamReader(self):
        '''Close the reader HTTP connection and drop the chunk backlog.'''
        logger = logging.getLogger("StreamReader")
        c = self._streamReaderConnection

        if c:
            self._streamReaderConnection = None
            c.close()
            logger.debug("Video stream closed")

        self._streamReaderQueue.clear()

    def getPlayEvent(self, timeout=None):
        '''
        Blocking while in PAUSE, non-blocking while in RESUME
        '''
        return self._resumeevent.wait(timeout=timeout)

    def pause(self):
        '''Ask the engine to pause playback.'''
        self._write(AceMessage.request.PAUSE)

    def play(self):
        '''Ask the engine to resume playback.'''
        self._write(AceMessage.request.PLAY)

    def _recvData(self):
        '''
        Data receiver method for greenlet
        '''
        logger = logging.getLogger('AceClient_recvdata')

        while True:
            gevent.sleep()
            try:
                self._recvbuffer = self._socket.read_until("\r\n")
                self._recvbuffer = self._recvbuffer.strip()
                # logger.debug('<<< ' + self._recvbuffer)
            except:
                # If something happened during read, abandon reader.
                if not self._shuttingDown.is_set():
                    logger.error("Exception at socket read")
                    self._shuttingDown.set()
                return

            if self._recvbuffer:
                # Parsing everything only if the string is not empty
                if self._recvbuffer.startswith(AceMessage.response.HELLO):
                    # Parse HELLO
                    if 'version_code=' in self._recvbuffer:
                        v = self._recvbuffer.find('version_code=')
                        self._engine_version_code = int(
                            self._recvbuffer[v + 13:v + 20])

                    if 'key=' in self._recvbuffer:
                        self._request_key_begin = self._recvbuffer.find('key=')
                        self._request_key = \
                            self._recvbuffer[self._request_key_begin + 4:self._request_key_begin + 14]
                        try:
                            self._write(
                                AceMessage.request.READY_key(
                                    self._request_key, self._product_key))
                        except urllib2.URLError as e:
                            logger.error("Can't connect to keygen server! " + \
                                repr(e))
                            self._auth = False
                            self._authevent.set()
                        self._request_key = None
                    else:
                        self._write(AceMessage.request.READY_nokey)

                elif self._recvbuffer.startswith(AceMessage.response.NOTREADY):
                    # NOTREADY
                    logger.error("Ace is not ready. Wrong auth?")
                    self._auth = False
                    self._authevent.set()

                elif self._recvbuffer.startswith(AceMessage.response.LOADRESP):
                    # LOADRESP: JSON content info after the first two tokens
                    _contentinfo_raw = self._recvbuffer.split()[2:]
                    _contentinfo_raw = ' '.join(_contentinfo_raw)
                    _contentinfo = json.loads(_contentinfo_raw)
                    if _contentinfo.get('status') == 100:
                        logger.error(
                            "LOADASYNC returned error with message: %s" %
                            _contentinfo.get('message'))
                        self._result.set(False)
                    else:
                        logger.debug("Content info: %s", _contentinfo)
                        self._result.set(_contentinfo)

                elif self._recvbuffer.startswith(AceMessage.response.START):
                    # START
                    if not self._seekback or self._started_again or not self._recvbuffer.endswith(
                            ' stream=1'):
                        # If seekback is disabled, we use link in first START command.
                        # If seekback is enabled, we wait for first START command and
                        # ignore it, then do seeback in first EVENT position command
                        # AceStream sends us STOP and START again with new link.
                        # We use only second link then.
                        try:
                            self._url = self._recvbuffer.split()[1]
                            self._urlresult.set(self._url)
                            self._resumeevent.set()
                        except IndexError:
                            self._url = None
                    else:
                        logger.debug("START received. Waiting for %s." %
                                     AceMessage.response.LIVEPOS)

                elif self._recvbuffer.startswith(AceMessage.response.STOP):
                    pass

                elif self._recvbuffer.startswith(AceMessage.response.SHUTDOWN):
                    logger.debug("Got SHUTDOWN from engine")
                    self._socket.close()
                    return

                elif self._recvbuffer.startswith(AceMessage.response.AUTH):
                    try:
                        self._auth = self._recvbuffer.split()[1]
                        # Send USERDATA here
                        self._write(
                            AceMessage.request.USERDATA(
                                self._gender, self._age))
                    except:
                        pass
                    self._authevent.set()

                elif self._recvbuffer.startswith(
                        AceMessage.response.GETUSERDATA):
                    raise AceException("You should init me first!")

                elif self._recvbuffer.startswith(AceMessage.response.LIVEPOS):
                    # LIVEPOS: parse current/last/buffered positions
                    self._position = self._recvbuffer.split()
                    self._position_last = self._position[2].split('=')[1]
                    self._position_buf = self._position[9].split('=')[1]
                    self._position = self._position[4].split('=')[1]

                    if self._seekback and not self._started_again:
                        self._write(AceMessage.request.SEEK(str(int(self._position_last) - \
                            self._seekback)))
                        logger.debug('Seeking back')
                        self._started_again = True

                elif self._recvbuffer.startswith(AceMessage.response.STATE):
                    self._state = self._recvbuffer.split()[1]

                elif self._recvbuffer.startswith(AceMessage.response.STATUS):
                    self._tempstatus = self._recvbuffer.split()[1].split(
                        ';')[0]
                    if self._tempstatus != self._status:
                        self._status = self._tempstatus
                        logger.debug("STATUS changed to " + self._status)

                    if self._status == 'main:err':
                        # Engine error: fail both pending results
                        logger.error(self._status + ' with message ' +
                                     self._recvbuffer.split(';')[2])
                        self._result.set_exception(
                            AceException(self._status + ' with message ' +
                                         self._recvbuffer.split(';')[2]))
                        self._urlresult.set_exception(
                            AceException(self._status + ' with message ' +
                                         self._recvbuffer.split(';')[2]))
                    elif self._status == 'main:starting':
                        self._result.set(True)
                    elif self._status == 'main:idle':
                        self._result.set(True)

                elif self._recvbuffer.startswith(AceMessage.response.PAUSE):
                    logger.debug("PAUSE event")
                    self._resumeevent.clear()

                elif self._recvbuffer.startswith(AceMessage.response.RESUME):
                    logger.debug("RESUME event")
                    gevent.sleep(self._pausedelay)
                    self._resumeevent.set()

                elif self._recvbuffer.startswith('##'):
                    # '##<cid>' reply for GETCID.  (The old
                    # `len(self._recvbuffer) == 0` alternative was dead
                    # code: this branch is only reached when the buffer
                    # is non-empty.)
                    self._cidresult.set(self._recvbuffer)
                    logger.debug("CID: %s" % self._recvbuffer)

    def sendHeadersPT(self, client, code, headers):
        '''Forward an HTTP status line and headers to a proxy client.'''
        client.handler.send_response(code)
        for key, value in headers.items():
            client.handler.send_header(key, value)
        client.handler.end_headers()

    def openStreamReaderPT(self, url, req_headers):
        '''Open the engine stream with pass-through request headers.

        Returns (connection, http_code, response_headers) or
        (None, None, None) on failure.
        '''
        logger = logging.getLogger("openStreamReaderPT")
        logger.debug("Opening video stream: %s" % url)
        logger.debug("headers: %s" % req_headers)

        if url.endswith('.m3u8'):
            logger.debug("Can't stream HLS in non VLC mode: %s" % url)
            return None, None, None

        request = urllib2.Request(url, headers=req_headers)
        connection = self._streamReaderConnection = urllib2.urlopen(
            request, timeout=120)
        code = connection.getcode()

        if code not in (200, 206):
            logger.error("Failed to open video stream %s" % connection)
            return None, None, None

        FORWARD_HEADERS = [
            'Content-Range',
            'Connection',
            'Keep-Alive',
            'Content-Type',
            'Accept-Ranges',
            'X-Content-Duration',
            'Content-Length',
        ]
        SKIP_HEADERS = ['Server', 'Date']
        response_headers = {}
        for k in connection.info().headers:
            if k.split(':')[0] not in (FORWARD_HEADERS + SKIP_HEADERS):
                logger.debug('NEW HEADERS: %s' % k.split(':')[0])
        for h in FORWARD_HEADERS:
            if connection.info().getheader(h):
                response_headers[h] = connection.info().getheader(h)
                # self.connection.send_header(h, connection.info().getheader(h))
                logger.debug('key=%s value=%s' %
                             (h, connection.info().getheader(h)))

        # Signal waiters that the stream is open
        with self._lock:
            self._streamReaderState = 2
            self._lock.notifyAll()

        return connection, code, response_headers

    def startStreamReaderPT(self, url, cid, counter, req_headers=None):
        '''Pass-through stream reader: reopen on header change, fan out chunks.'''
        logger = logging.getLogger("StreamReaderPT")
        self._streamReaderState = 1

        # No connection until the first header change opens one; a read on
        # None is caught below and terminates the loop cleanly.
        connection = None

        try:
            while True:
                data = None
                clients = counter.getClientsPT(cid)

                # Reopen the upstream connection whenever headers change
                if req_headers != self.req_headers:
                    self.req_headers = req_headers
                    connection, code, resp_headers = self.openStreamReaderPT(
                        url, req_headers)
                    if not connection:
                        return

                    for c in clients:
                        try:
                            c.headers_sent
                        except:
                            self.sendHeadersPT(c, code, resp_headers)
                            c.headers_sent = True

                try:
                    data = connection.read(AceConfig.readchunksize)
                except:
                    break

                if data and clients:
                    # Keep a bounded backlog of recent chunks
                    with self._lock:
                        if len(self._streamReaderQueue
                               ) == AceConfig.readcachesize:
                            self._streamReaderQueue.popleft()
                        self._streamReaderQueue.append(data)

                    for c in clients:
                        try:
                            c.addChunk(data, 5.0)
                        except Queue.Full:
                            # Drop slow clients only while others remain
                            if len(clients) > 1:
                                logger.debug("Disconnecting client: %s" %
                                             str(c))
                                c.destroy()
                elif not clients:
                    logger.debug(
                        "All clients disconnected - closing video stream")
                    break
                else:
                    logger.warning("No data received")
                    break
        except urllib2.URLError:
            logger.error("Failed to open video stream")
            logger.error(traceback.format_exc())
        except:
            logger.error(traceback.format_exc())
            if counter.getClientsPT(cid):
                logger.error("Failed to read video stream")
        finally:
            self.closeStreamReader()
            with self._lock:
                self._streamReaderState = 3
                self._lock.notifyAll()
            counter.deleteAll(cid)
Beispiel #36
0
class Bucket(AsyncBucket):
    def __init__(self, *args, **kwargs):
        """
        This class is a 'GEvent'-optimized subclass of libcouchbase
        which utilizes the underlying IOPS structures and the gevent
        event primitives to efficiently utilize couroutine switching.
        """
        super(Bucket, self).__init__(IOPS(), *args, **kwargs)

    def _do_ctor_connect(self):
        # Nothing to do once the bucket is already connected.
        if self.connected:
            return

        self._connect()
        pending = AsyncResult()
        self._evconn = pending
        self._conncb = self._on_connected
        # Block this greenlet until _on_connected resolves the result.
        pending.get()
        self._evconn = None

    def _on_connected(self, err):
        # Resolve the pending connect result: error wins, otherwise None.
        if not err:
            self._evconn.set(None)
        else:
            self._evconn.set_exception(err)

    def _waitwrap(self, cbasync):
        me = getcurrent()

        def on_error(res, exc_type, exc_val, exc_tb):
            me.throw(exc_type, exc_val, exc_tb)

        cbasync.set_callbacks(me.switch, on_error)
        try:
            return get_hub().switch()
        finally:
            # Deregister callbacks to prevent another request on the same
            # greenlet to get the result from this context.
            cbasync.set_callbacks(dummy_callback, dummy_callback)

    def _meth_factory(meth, name):
        # Wrap an async memcached operation so callers block for its result.
        def wrapped(self, *args, **kwargs):
            return self._waitwrap(meth(self, *args, **kwargs))

        return wrapped

    def _http_request(self, **kwargs):
        htres = super(Bucket, self)._http_request(**kwargs)

        # Park on a Waiter until the HTTP result (or error) arrives.
        waiter = Waiter()
        htres.callback = waiter.switch
        htres.errback = lambda x, c, o, b: waiter.throw(c, o, b)
        return waiter.get()

    def query(self, *args, **kwargs):
        # Iterate view rows through the gevent-aware GView class.
        kwargs['itercls'] = GView
        return super(Bucket, self).query(*args, **kwargs)

    def n1ql_query(self, query, *args, **kwargs):
        # Iterate N1QL rows through the gevent-aware GN1QLRequest class.
        kwargs['itercls'] = GN1QLRequest
        return super(Bucket, self).n1ql_query(query, *args, **kwargs)

    def _get_close_future(self):
        # Event that fires from the destructor callback once teardown ends.
        closed = Event()

        def _dtor_cb(*args):
            closed.set()

        self._dtorcb = _dtor_cb
        return closed

    locals().update(AsyncBucket._gen_memd_wrappers(_meth_factory))
Beispiel #37
0
class StreamingExtract(object):
    """Streaming extraction of a multi-part rar archive (Python 2 code).

    Parts are fed in as their downloads finish.  A single ``unrar``
    process is started in volume-prompt mode (``-vp``) and is told to
    continue each time the next expected volume becomes available.
    """

    def __init__(self, id, hddsem, threadpool):
        # Registry key; the instance registers itself in `extractors` below.
        self.id = id
        # Semaphore limiting concurrent disk work (used while bruteforcing).
        self.hddsem = hddsem
        # Pool used to run blocking password checks off the event loop.
        self.threadpool = threadpool

        self.password = None

        self.killed = False
        # Maps part path -> (path, file) for every part fed so far.
        self.parts = dict()
        # (path, file) of the first volume.
        self.first = None
        # (path, file) currently being extracted.
        self.current = None
        # Path of the next volume unrar asked for.
        self.next = None
        # Fired when a part someone is waiting on finally arrives.
        self.next_part_event = AsyncResult()
        # Handle of the running unrar subprocess, if any.
        self.rar = None
        extractors[id] = self
        
    def feed_part(self, path, file):
        """Register a freshly downloaded part; blocks until it is processed.

        The first part starts the extraction on this greenlet; later
        parts wake the extractor if it is waiting for them.
        """
        path.finished = AsyncResult()
        self.parts[path.path] = path, file
        # NOTE(review): both placeholders format `path`; the second one was
        # possibly meant to be `file` -- log-only, behavior unaffected.
        log.debug('fed new part {}: {}'.format(path, path))

        if file.state != 'rarextract':
            with transaction:
                file.state = 'rarextract'

        if self.first is None:
            self.current = path, file
            self.run(path, file)
        else:
            if path.path == self.next:
                self.next_part_event.set(path)
            # Block the feeding greenlet until this part has been handled.
            path.finished.get()

    def run(self, path, file):
        """Determine the password, start unrar for *path*, and pump output."""
        try:
            self.first = self.current
            with transaction:
                file.greenlet = gevent.getcurrent()
                file.on_greenlet_started()
            try:
                result = self.bruteforce(path, file)
            except rarfile.NeedFirstVolume:
                # We were fed a later volume; derive the ".part1" name and
                # wait for it instead.
                self.next = os.path.join(path.dir, "{}.part{}.rar".format(path.basename, "1".zfill(len(path.part))))
                return self.find_next()
            
            # bruteforce() returns the exception produced by kill() on
            # failure; anything truthy that is not True is re-raised here.
            if result and result is not True:
                raise result

            if self.password:
                rarpw = "-p"+self.password
            else:
                # "-p-" tells unrar to never prompt for a password.
                rarpw = "-p-"

            # "-vp" makes unrar pause and prompt at each volume boundary,
            # which wait_data()/new_data() parse to drive the next part.
            cmd = [rarfile.UNRAR_TOOL, "x", "-y", rarpw, "-idq", "-vp", path, file.get_extract_path() + os.sep]
            file.log.info("starting extraction of {} with params {}".format(path[1:], cmd))
            self.rar = rarfile.custom_popen(cmd)

            self.wait_data()
            if not path.finished.ready():
                path.finished.set()
        except BaseException as e:
            traceback.print_exc()
            self.kill(e)
            raise
        
    def bruteforce(self, path, file):
        """Figure out the archive password, asking the user if needed.

        Leaves the result in ``self.password`` (None for unencrypted
        archives).  Returns the value of kill() on unrecoverable failure.
        """
        try:
            rar = rarfile.RarFile(path, ignore_next_part_missing=True)
        except rarfile.NeedFirstVolume:
            # Propagated to run(), which locates the first volume.
            raise
        if not rar.needs_password():
            self.password = None
            return
        if rar.needs_password() and rar.infolist():
            # unencrypted headers. use file password or ask user.
            pw = None
            if len(file.package.extract_passwords) == 1:
                pw = file.package.extract_passwords[0]
            if not pw:
                # for/else: take the first answer, or give up if none came.
                for pw in file.solve_password(message="Rarfile {} password cannot be cracked. Enter correct password: #".format(path.name), retries=1):
                    break
                else:
                    return self.kill('extract password not entered')
            self.password = pw
            return
        # Encrypted headers: try known passwords first, preserving order
        # while de-duplicating.
        passwords = []
        for i in itertools.chain(file.package.extract_passwords, core.config.bruteforce_passwords):
            if not i in passwords:
                passwords.append(i)
        print "testing", passwords
        if not self.threadpool.apply(bruteforce, (rar, passwords, self.hddsem, file.log)):
            # ask user for password
            for pw in file.solve_password(message="Enter the extract password for file: {} #".format(path.name), retries=5):
                if self.threadpool.apply(bruteforce, (rar, [pw], self.hddsem, file.log)):
                    break
            else:
                return self.kill('extract password not entered')

        self.password = rar._password
        # Remember newly discovered passwords for future archives.
        if self.password and self.password not in core.config.bruteforce_passwords:
            with transaction:
                core.config.bruteforce_passwords.append(self.password)

    def wait_data(self):
        """Pump unrar's stdout byte-by-byte, feeding lines to new_data()."""
        bytes = ''
        while True:
            # Single-byte reads so volume prompts (which end without a
            # newline) are seen as soon as unrar emits them.
            data = self.rar.stdout.read(1)
            if not data:
                break

            bytes += data
            for i in bytes.splitlines():
                if i:
                    result = self.new_data(i)
                    if result is True:
                        # Line fully handled; reset the accumulator.
                        bytes = ''
                    if result and result is not True:
                        raise result
        self.close()

    def finish_file(self, path, file):
        """Mark one archive part as completely extracted."""
        if file is not None:
            with core.transaction:
                #if not 'rarextract' in file.completed_plugins:
                #    file.completed_plugins.append('rarextract')
                #file.greenlet = None
                #file.on_greenlet_finish()
                #file.on_greenlet_stopped()
                file.state = 'rarextract_complete'
                file.init_progress(1)
                file.set_progress(1)
                #file.stop()
        #path.finished.set()
        event.fire('rarextract:part_complete', path, file)
    
    def new_data(self, data):
        """called when new data or new line

        Returns True when the line was consumed, an exception object on
        fatal errors (via kill()), or None when the line is ignored.
        """
        #print "got new data from unrar:", data
        if "packed data CRC failed in volume" in data:
            return self.kill('checksum error in rar archive')
            
        if data.startswith("CRC failed in the encrypted file"): # corrupt file or download not complete
            return self.kill('checksum error in rar archive. wrong password?')

        # unrar's "-vp" prompt asking for the next volume.
        m = re.search(r"Insert disk with (.*?) \[C\]ontinue\, \[Q\]uit", data)
        if not m:
            return

        if self.current is not None:
            self.finish_file(*self.current)

        self.next = m.group(1)
        print "setting self.next", self.next
        return self.find_next()
        
    def find_next(self):
        """Locate ``self.next`` (in fed parts, the core, or on disk) and resume.

        Blocks on next_part_event when the part has not arrived yet.
        """
        print "finding next", self.next
        next = self.next
        if next not in self.parts:
            # check if file is in core.files()
            found = False
            name = os.path.basename(next)
            for f in core.files():
                if f.name == name and f.get_complete_file() == next:
                    found = True
                    # Only adopt files that are idle and fully downloaded.
                    if not f.working and 'download' in f.completed_plugins:
                        current = fileplugin.FilePath(next), f
                        current[0].finished = AsyncResult()
                        self.parts[next] = current
                        log.debug('got next part from idle {}: {}'.format(next, self.current[0]))
                        break
            if not found:
                # file is not in system, check if it exists on hdd
                if os.path.exists(next):
                    current = fileplugin.FilePath(next), self.first[1]
                    current[0].finished = AsyncResult()
                    self.parts[next] = current
                    log.debug('got next part from hdd {}: {}'.format(next, self.current[0]))
                else:
                    # part does not exists. fail this extract
                    return self.kill('missing part {}'.format(next))

            if next not in self.parts:
                log.debug('waiting for part {}'.format(next))
                event.fire('rarextract:waiting_for_part', next)
                
                # Re-arm the event each wakeup; another part than the one
                # we want may have arrived.
                while next not in self.parts:
                    self.next_part_event.get()
                    self.next_part_event = AsyncResult()

                log.debug('got next part from wait {}: {}'.format(next, self.current[0]))

        self.current = self.parts[next]
        return self.go_on()
                
    def go_on(self):
        """Tell the running unrar to continue with the next volume."""
        if self.rar is None:
            # No process yet (we were started on a non-first volume).
            return self.run(*self.current)
        if not os.path.exists(self.next):
            return
        # Answer unrar's "[C]ontinue, [Q]uit" prompt.
        self.rar.stdin.write("C\n")
        self.rar.stdin.flush()

        if self.current[1] is not None:
            with core.transaction:
                self.current[1].greenlet = gevent.getcurrent()
                self.current[1].greenlet.link(self.current[0].finished)
                self.current[1].on_greenlet_started()
            self.current[1].log.info("extract go on: {}".format(self.current[1].name))
        return True
        
    def kill(self, exc=""):
        """Abort the extraction, waking all waiters with *exc*.

        Returns the exception object so callers can propagate it.
        """
        #blacklist.add(self.first[0].basename) # no autoextract for failed archives
        print "killing rarextract", self.first[0].basename
        if isinstance(exc, basestring):
            exc = ValueError(exc)

        self.current = None
        self.killed = True

        if self.rar is not None:
            self.rar.terminate()
            self.rar = None

        try:
            del extractors[self.id]
        except KeyError:
            pass
        
        # Wake every greenlet blocked in feed_part()/find_next().
        self.next_part_event.set_exception(exc)
        for path, file in self.parts.values():
            if not path.finished.ready():
                path.finished.set_exception(exc)

        # Roll back the extraction state of all involved files.
        with transaction:
            for path, file in self.parts.values():
                if file is not None:
                    file.stop()
                    if file.state == 'rarextract_complete':
                        file.state = 'rarextract'
                        file.enabled = False
                    if 'rarextract' in file.completed_plugins:
                        file.completed_plugins.remove('rarextract')

        self.first[1].fatal('rarextract: {}'.format(exc))

        return exc

    def close(self):
        """called when process is closed"""
        try:
            del extractors[self.id]
        except KeyError:
            pass
        
        if not self.killed:
            if self.current is not None:
                self.finish_file(*self.current)

            # Optionally delete the source archives after a clean extract.
            if core.config.delete_extracted_archives:
                for path, file in self.parts.values():
                    os.remove(file.get_complete_file())
Beispiel #38
0
    def register_secret_batch(self, secrets: List[Secret]):
        """Register previously unseen secrets on-chain in a single batch.

        Secrets that are already registered, or that have a registration
        transaction in flight, are skipped.  All newly sent secrets share
        one AsyncResult so concurrent callers can wait on the same
        transaction.

        Raises:
            RaidenUnrecoverableError: if the transaction could not be sent
                or the on-chain call failed.
        """
        secrets_to_register = list()
        secrethashes_to_register = list()
        secrethashes_not_sent = list()
        secret_registry_transaction = AsyncResult()

        for secret in secrets:
            secrethash = sha3(secret)
            secrethash_hex = encode_hex(secrethash)

            # Skip secrets already on-chain or currently being registered.
            is_register_needed = (not self.check_registered(secrethash) and
                                  secret not in self.open_secret_transactions)
            if is_register_needed:
                secrets_to_register.append(secret)
                secrethashes_to_register.append(secrethash_hex)
                self.open_secret_transactions[
                    secret] = secret_registry_transaction
            else:
                secrethashes_not_sent.append(secrethash_hex)

        log_details = {
            'node': pex(self.node_address),
            'contract': pex(self.address),
            'secrethashes': secrethashes_to_register,
            'secrethashes_not_sent': secrethashes_not_sent,
        }

        if not secrets_to_register:
            log.debug('registerSecretBatch skipped', **log_details)
            return

        # Fix: pre-initialize values referenced after the try/except so the
        # failure paths below cannot die with a NameError (previously
        # `receipt_or_none` and `msg` were unbound when the transact raised
        # or when the receipt showed a failure).
        msg = ''
        receipt_or_none = None
        transaction_sent = False

        checking_block = self.client.get_checking_block()
        error_prefix = 'Call to registerSecretBatch will fail'
        gas_limit = self.proxy.estimate_gas(checking_block,
                                            'registerSecretBatch', secrets)
        if gas_limit:
            error_prefix = 'Call to registerSecretBatch failed'
            try:
                gas_limit = safe_gas_limit(
                    gas_limit,
                    len(secrets) * GAS_REQUIRED_PER_SECRET_IN_BATCH,
                )
                transaction_hash = self.proxy.transact('registerSecretBatch',
                                                       gas_limit, secrets)
                self.client.poll(transaction_hash)
                receipt_or_none = check_transaction_threw(
                    self.client, transaction_hash)
            except Exception as e:
                secret_registry_transaction.set_exception(e)
                msg = 'Unexpected exception at sending registerSecretBatch transaction'
            else:
                secret_registry_transaction.set(transaction_hash)
                transaction_sent = True
            finally:
                # Always clear the in-flight markers, success or failure.
                for secret in secrets_to_register:
                    self.open_secret_transactions.pop(secret, None)

        transaction_executed = gas_limit is not None
        # Fail when gas estimation failed, the send raised, or the receipt
        # shows the on-chain call threw.
        if not transaction_executed or not transaction_sent or receipt_or_none:
            if transaction_executed and receipt_or_none:
                block = receipt_or_none['blockNumber']
            else:
                block = checking_block

            self.proxy.jsonrpc_client.check_for_insufficient_eth(
                transaction_name='registerSecretBatch',
                transaction_executed=transaction_executed,
                required_gas=len(secrets) * GAS_REQUIRED_PER_SECRET_IN_BATCH,
                block_identifier=block,
            )
            error_msg = f'{error_prefix}. {msg}'
            log.critical(error_msg, **log_details)
            raise RaidenUnrecoverableError(error_msg)

        log.info('registerSecretBatch successful', **log_details)
Beispiel #39
0
class AceClient(object):
    """Client for a local Ace Stream engine.

    Speaks the engine's line-based telnet protocol: requests are written
    with _write(), and responses are parsed by the _recvData greenlet,
    which resolves the AsyncResult/Event objects that the blocking API
    methods (START/STOP/LOADASYNC/getUrl/...) wait on.
    """

    def __init__(self, host, port, connect_timeout=5, result_timeout=10):
        # Receive buffer
        self._recvbuffer = None
        # Stream URL
        self._url = None
        # Ace stream socket
        self._socket = None
        # Result timeout
        self._resulttimeout = result_timeout
        # Shutting down flag
        self._shuttingDown = Event()
        # Product key
        self._product_key = None
        # Current STATUS
        self._status = None
        # Current STATE
        self._state = None
        # Current video position
        self._position = None
        # Available video position (loaded data)
        self._position_last = None
        # Buffered video pieces
        self._position_buf = None
        # Current AUTH
        self._auth = None
        self._gender = None
        self._age = None
        # Result (Created with AsyncResult() on call)
        self._result = AsyncResult()
        self._authevent = Event()
        # Result for getURL()
        self._urlresult = AsyncResult()
        # Result for GETCID()
        self._cidresult = AsyncResult()
        # Event for resuming from PAUSE
        self._resumeevent = Event()
        # Seekback seconds.
        self._seekback = 0
        # Did we get START command again? For seekback.
        self._started_again = False

        self._idleSince = time.time()
        self._lock = threading.Condition(threading.Lock())
        self._streamReaderConnection = None
        self._streamReaderState = None
        self._streamReaderQueue = deque()
        self._engine_version_code = 0
        # Fix: startStreamReaderPT() compares against self.req_headers on
        # its first call; without this initialization that raised an
        # AttributeError (silently swallowed by its bare except).
        self.req_headers = None

        # Logger.  Fix: the name was garbled by a stray "import traceback"
        # pasted into the middle of the string literal.
        logger = logging.getLogger('AceClient_init')

        try:
            self._socket = telnetlib.Telnet(host, port, connect_timeout)
            logger.info("Successfully connected with Ace!")
        except Exception as e:
            raise AceException(
                "Socket creation error! Ace is not running? " + repr(e))

        # Spawning recvData greenlet
        gevent.spawn(self._recvData)
        gevent.sleep()

    def __del__(self):
        # Destructor just calls destroy() method
        self.destroy()

    def destroy(self):
        '''
        AceClient Destructor
        '''
        # Already in the middle of destroying?
        if self._shuttingDown.isSet():
            return

        # Logger
        logger = logging.getLogger("AceClient_destroy")
        # We should resume video to prevent read greenlet deadlock
        self._resumeevent.set()
        # And to prevent getUrl deadlock
        self._urlresult.set()

        # Trying to disconnect
        try:
            logger.debug("Destroying client...")
            self._shuttingDown.set()
            self._write(AceMessage.request.SHUTDOWN)
        except:
            # Ignore exceptions on destroy
            pass
        finally:
            self._shuttingDown.set()

    def reset(self):
        '''Reset per-stream state before starting a new playback.'''
        self._started_again = False
        self._idleSince = time.time()
        self._streamReaderState = None

    def _write(self, message):
        '''Send one protocol line to the engine; raises AceException on EOF.'''
        try:
            logger = logging.getLogger("AceClient_write")
            logger.debug(message)
            self._socket.write(message + "\r\n")
        except EOFError as e:
            raise AceException("Write error! " + repr(e))

    def aceInit(self, gender=AceConst.SEX_MALE, age=AceConst.AGE_18_24, product_key=None, pause_delay=0, seekback=0):
        '''Perform the HELLO/READY handshake and wait for authentication.'''
        self._product_key = product_key
        self._gender = gender
        self._age = age
        # PAUSE/RESUME delay
        self._pausedelay = pause_delay
        # Seekback seconds
        self._seekback = seekback

        # Logger
        logger = logging.getLogger("AceClient_aceInit")

        # Sending HELLO
        self._write(AceMessage.request.HELLO)
        if not self._authevent.wait(self._resulttimeout):
            errmsg = "Authentication timeout. Wrong key?"
            logger.error(errmsg)
            # Fix: unreachable `return` after raise removed.
            raise AceException(errmsg)

        if not self._auth:
            errmsg = "Authentication error. Wrong key?"
            logger.error(errmsg)
            # Fix: unreachable `return` after raise removed.
            raise AceException(errmsg)

        logger.debug("aceInit ended")

    def _getResult(self):
        '''Wait for the current command's result, enforcing the timeout.'''
        try:
            result = self._result.get(timeout=self._resulttimeout)
            if not result:
                raise AceException("Result not received")
        except gevent.Timeout:
            raise AceException("Timeout")

        return result

    def START(self, datatype, value):
        '''
        Start video method
        '''
        stream_type = 'output_format=http' if self._engine_version_code >= 3010500 and not AceConfig.vlcuse else ''
        self._urlresult = AsyncResult()
        self._write(AceMessage.request.START(datatype.upper(), value, stream_type))
        self._getResult()

    def STOP(self):
        '''
        Stop video method
        '''
        if self._state is not None and self._state != '0':
            self._result = AsyncResult()
            self._write(AceMessage.request.STOP)
            self._getResult()

    def LOADASYNC(self, datatype, url):
        '''Request content info for *url*; returns the parsed LOADRESP.'''
        self._result = AsyncResult()
        self._write(AceMessage.request.LOADASYNC(datatype.upper(), 0, {'url': url}))
        return self._getResult()

    def GETCID(self, datatype, url):
        '''Return the content id for *url* (empty string when unavailable).'''
        contentinfo = self.LOADASYNC(datatype, url)
        self._cidresult = AsyncResult()
        self._write(AceMessage.request.GETCID(contentinfo.get('checksum'), contentinfo.get('infohash'), 0, 0, 0))
        cid = self._cidresult.get(True, 5)
        # The engine prefixes the cid with two characters that are stripped.
        return '' if not cid or cid == '' else cid[2:]

    def GETCONTENTINFO(self, datatype, url):
        '''Return the content info dictionary for *url*.'''
        contentinfo = self.LOADASYNC(datatype, url)
        return contentinfo

    def getUrl(self, timeout=40):
        '''Block until the engine announces the playback URL.'''
        # Logger
        logger = logging.getLogger("AceClient_getURL")

        try:
            res = self._urlresult.get(timeout=timeout)
            return res
        except gevent.Timeout:
            errmsg = "getURL timeout!"
            logger.error(errmsg)
            raise AceException(errmsg)

    def startStreamReader(self, url, cid, counter):
        '''Read the video stream and fan chunks out to all clients of *cid*.'''
        logger = logging.getLogger("StreamReader")
        self._streamReaderState = 1
        logger.debug("Opening video stream: %s" % url)

        try:
            connection = self._streamReaderConnection = urllib2.urlopen(url)

            if url.endswith('.m3u8'):
                logger.debug("Can't stream HLS in non VLC mode: %s" % url)
                return

            if connection.getcode() != 200:
                logger.error("Failed to open video stream %s" % connection)
                return

            # State 2 = stream open; wake anyone waiting on the condition.
            with self._lock:
                self._streamReaderState = 2
                self._lock.notifyAll()

            while True:
                data = None
                clients = counter.getClients(cid)

                try:
                    data = connection.read(AceConfig.readchunksize)
                except:
                    break

                if data and clients:
                    # Keep a bounded backlog of recent chunks.
                    with self._lock:
                        if len(self._streamReaderQueue) == AceConfig.readcachesize:
                            self._streamReaderQueue.popleft()
                        self._streamReaderQueue.append(data)

                    for c in clients:
                        try:
                            c.addChunk(data, 5.0)
                        except Queue.Full:
                            # Drop only slow clients when others remain.
                            if len(clients) > 1:
                                logger.debug("Disconnecting client: %s" % str(c))
                                c.destroy()
                elif not clients:
                    logger.debug("All clients disconnected - closing video stream")
                    break
                else:
                    logger.warning("No data received")
                    break
        except urllib2.URLError:
            logger.error("Failed to open video stream")
            logger.error(traceback.format_exc())
        except:
            logger.error(traceback.format_exc())
            if counter.getClients(cid):
                logger.error("Failed to read video stream")
        finally:
            self.closeStreamReader()
            with self._lock:
                self._streamReaderState = 3
                self._lock.notifyAll()
            counter.deleteAll(cid)

    def closeStreamReader(self):
        '''Close the open stream connection and drop the chunk backlog.'''
        logger = logging.getLogger("StreamReader")
        c = self._streamReaderConnection

        if c:
            self._streamReaderConnection = None
            c.close()
            logger.debug("Video stream closed")

        self._streamReaderQueue.clear()

    def getPlayEvent(self, timeout=None):
        '''
        Blocking while in PAUSE, non-blocking while in RESUME
        '''
        return self._resumeevent.wait(timeout=timeout)

    def pause(self):
        self._write(AceMessage.request.PAUSE)

    def play(self):
        self._write(AceMessage.request.PLAY)

    def _recvData(self):
        '''
        Data receiver method for greenlet
        '''
        logger = logging.getLogger('AceClient_recvdata')

        while True:
            gevent.sleep()
            try:
                self._recvbuffer = self._socket.read_until("\r\n")
                self._recvbuffer = self._recvbuffer.strip()
                # logger.debug('<<< ' + self._recvbuffer)
            except:
                # If something happened during read, abandon reader.
                if not self._shuttingDown.isSet():
                    logger.error("Exception at socket read")
                    self._shuttingDown.set()
                return

            if self._recvbuffer:
                # Parsing everything only if the string is not empty
                if self._recvbuffer.startswith(AceMessage.response.HELLO):
                    # Parse HELLO
                    if 'version_code=' in self._recvbuffer:
                        v = self._recvbuffer.find('version_code=')
                        self._engine_version_code = int(self._recvbuffer[v + 13:v + 20])

                    if 'key=' in self._recvbuffer:
                        self._request_key_begin = self._recvbuffer.find('key=')
                        self._request_key = \
                            self._recvbuffer[self._request_key_begin + 4:self._request_key_begin + 14]
                        try:
                            self._write(AceMessage.request.READY_key(
                                self._request_key, self._product_key))
                        except urllib2.URLError as e:
                            logger.error("Can't connect to keygen server! " + \
                                repr(e))
                            self._auth = False
                            self._authevent.set()
                        self._request_key = None
                    else:
                        self._write(AceMessage.request.READY_nokey)

                elif self._recvbuffer.startswith(AceMessage.response.NOTREADY):
                    # NOTREADY
                    logger.error("Ace is not ready. Wrong auth?")
                    self._auth = False
                    self._authevent.set()

                elif self._recvbuffer.startswith(AceMessage.response.LOADRESP):
                    # LOADRESP
                    _contentinfo_raw = self._recvbuffer.split()[2:]
                    _contentinfo_raw = ' '.join(_contentinfo_raw)
                    _contentinfo = json.loads(_contentinfo_raw)
                    if _contentinfo.get('status') == 100:
                        logger.error("LOADASYNC returned error with message: %s"
                            % _contentinfo.get('message'))
                        self._result.set(False)
                    else:
                        logger.debug("Content info: %s", _contentinfo)
                        self._result.set(_contentinfo)

                elif self._recvbuffer.startswith(AceMessage.response.START):
                    # START
                    if not self._seekback or self._started_again or not self._recvbuffer.endswith(' stream=1'):
                        # If seekback is disabled, we use link in first START command.
                        # If seekback is enabled, we wait for first START command and
                        # ignore it, then do seeback in first EVENT position command
                        # AceStream sends us STOP and START again with new link.
                        # We use only second link then.
                        try:
                            self._url = self._recvbuffer.split()[1]
                            self._urlresult.set(self._url)
                            self._resumeevent.set()
                        except IndexError as e:
                            self._url = None
                    else:
                        logger.debug("START received. Waiting for %s." % AceMessage.response.LIVEPOS)

                elif self._recvbuffer.startswith(AceMessage.response.STOP):
                    pass

                elif self._recvbuffer.startswith(AceMessage.response.SHUTDOWN):
                    logger.debug("Got SHUTDOWN from engine")
                    self._socket.close()
                    return

                elif self._recvbuffer.startswith(AceMessage.response.AUTH):
                    try:
                        self._auth = self._recvbuffer.split()[1]
                        # Send USERDATA here
                        self._write(
                            AceMessage.request.USERDATA(self._gender, self._age))
                    except:
                        pass
                    self._authevent.set()

                elif self._recvbuffer.startswith(AceMessage.response.GETUSERDATA):
                    raise AceException("You should init me first!")

                elif self._recvbuffer.startswith(AceMessage.response.LIVEPOS):
                    self._position = self._recvbuffer.split()
                    self._position_last = self._position[2].split('=')[1]
                    self._position_buf = self._position[9].split('=')[1]
                    self._position = self._position[4].split('=')[1]

                    if self._seekback and not self._started_again:
                        self._write(AceMessage.request.SEEK(str(int(self._position_last) - \
                            self._seekback)))
                        logger.debug('Seeking back')
                        self._started_again = True

                elif self._recvbuffer.startswith(AceMessage.response.STATE):
                    self._state = self._recvbuffer.split()[1]

                elif self._recvbuffer.startswith(AceMessage.response.STATUS):
                    self._tempstatus = self._recvbuffer.split()[1].split(';')[0]
                    if self._tempstatus != self._status:
                        self._status = self._tempstatus
                        logger.debug("STATUS changed to " + self._status)

                    if self._status == 'main:err':
                        logger.error(
                            self._status + ' with message ' + self._recvbuffer.split(';')[2])
                        self._result.set_exception(
                            AceException(self._status + ' with message ' + self._recvbuffer.split(';')[2]))
                        self._urlresult.set_exception(
                            AceException(self._status + ' with message ' + self._recvbuffer.split(';')[2]))
                    elif self._status == 'main:starting':
                        self._result.set(True)
                    elif self._status == 'main:idle':
                        self._result.set(True)

                elif self._recvbuffer.startswith(AceMessage.response.PAUSE):
                    logger.debug("PAUSE event")
                    self._resumeevent.clear()

                elif self._recvbuffer.startswith(AceMessage.response.RESUME):
                    logger.debug("RESUME event")
                    gevent.sleep(self._pausedelay)
                    self._resumeevent.set()

                elif self._recvbuffer.startswith('##') or len(self._recvbuffer) == 0:
                    self._cidresult.set(self._recvbuffer)
                    logger.debug("CID: %s" % self._recvbuffer)


    def sendHeadersPT(self, client, code, headers):
        '''Forward the upstream response status and headers to *client*.'''
        client.handler.send_response(code)
        for key, value in headers.items():
            client.handler.send_header(key, value)
        client.handler.end_headers()

    def openStreamReaderPT(self, url, req_headers):
        '''Open the stream with pass-through headers.

        Returns (connection, http_code, response_headers), or a triple of
        Nones when the stream cannot be used.
        '''
        logger = logging.getLogger("openStreamReaderPT")
        logger.debug("Opening video stream: %s" % url)
        logger.debug("headers: %s" % req_headers)

        if url.endswith('.m3u8'):
            logger.debug("Can't stream HLS in non VLC mode: %s" % url)
            return None, None, None

        request = urllib2.Request(url, headers=req_headers)
        connection = self._streamReaderConnection = urllib2.urlopen(request, timeout=120)
        code = connection.getcode()

        if code not in (200, 206):
            logger.error("Failed to open video stream %s" % connection)
            return None, None, None

        FORWARD_HEADERS = ['Content-Range',
                           'Connection',
                           'Keep-Alive',
                           'Content-Type',
                           'Accept-Ranges',
                           'X-Content-Duration',
                           'Content-Length',
                           ]
        SKIP_HEADERS = ['Server', 'Date']
        response_headers = {}
        # Log any header we neither forward nor deliberately skip.
        for k in connection.info().headers:
            if k.split(':')[0] not in (FORWARD_HEADERS + SKIP_HEADERS):
                logger.debug('NEW HEADERS: %s' % k.split(':')[0])
        for h in FORWARD_HEADERS:
            if connection.info().getheader(h):
                response_headers[h] = connection.info().getheader(h)
                # self.connection.send_header(h, connection.info().getheader(h))
                logger.debug('key=%s value=%s' % (h, connection.info().getheader(h)))

        with self._lock:
            self._streamReaderState = 2
            self._lock.notifyAll()

        return connection, code, response_headers

    def startStreamReaderPT(self, url, cid, counter, req_headers=None):
        '''Pass-through variant of startStreamReader, reopening the stream
        whenever the requested headers change.'''
        logger = logging.getLogger("StreamReaderPT")
        self._streamReaderState = 1
        # current_req_headers = req_headers

        try:
            while True:
                data = None
                clients = counter.getClientsPT(cid)

                # Reopen the upstream connection when headers change.
                if not req_headers == self.req_headers:
                    self.req_headers = req_headers
                    connection, code, resp_headers = self.openStreamReaderPT(url, req_headers)
                    if not connection:
                        return

                    for c in clients:
                        try:
                            c.headers_sent
                        except:
                            self.sendHeadersPT(c, code, resp_headers)
                            c.headers_sent = True

                # logger.debug("i")
                try:
                    data = connection.read(AceConfig.readchunksize)
                except:
                    break
                # logger.debug("d: %s c:%s" % (data, clients))
                if data and clients:
                    with self._lock:
                        if len(self._streamReaderQueue) == AceConfig.readcachesize:
                            self._streamReaderQueue.popleft()
                        self._streamReaderQueue.append(data)

                    for c in clients:
                        try:
                            c.addChunk(data, 5.0)
                        except Queue.Full:
                            if len(clients) > 1:
                                logger.debug("Disconnecting client: %s" % str(c))
                                c.destroy()
                elif not clients:
                    logger.debug("All clients disconnected - closing video stream")
                    break
                else:
                    logger.warning("No data received")
                    break
        except urllib2.URLError:
            logger.error("Failed to open video stream")
            logger.error(traceback.format_exc())
        except:
            logger.error(traceback.format_exc())
            if counter.getClientsPT(cid):
                logger.error("Failed to read video stream")
        finally:
            self.closeStreamReader()
            with self._lock:
                self._streamReaderState = 3
                self._lock.notifyAll()
            counter.deleteAll(cid)
Beispiel #40
0
class AioFile(object):
    """Read/write a whole file through POSIX AIO without blocking gevent.

    Each chunk-sized aio_read/aio_write request parks the caller on an
    AsyncResult until the completion callback delivers a result or error.
    NOTE(review): relies on external ``aio_read``/``aio_write`` helpers and
    a gevent hub; semantics of the callbacks inferred from usage here.
    """

    # Busy-wake greenlet shared by all instances; see _keep_awake().
    _keep_awake_thread = None
    _keep_awake_refs = 0

    # Size of each individual AIO request (16 KiB).
    chunk_size = (16 << 10)

    def __init__(self, path, tmp_dir=None):
        self.path = path
        self.tmp_dir = tmp_dir
        self.event = AsyncResult()

    @classmethod
    def _start_keep_awake_thread(cls):
        """Reference-counted start of the shared wake-up greenlet."""
        if not cls._keep_awake_thread:
            cls._keep_awake_thread = gevent.spawn(cls._keep_awake)
        cls._keep_awake_refs += 1

    @classmethod
    def _stop_keep_awake_thread(cls):
        """Reference-counted stop; kills the greenlet when no user is left."""
        cls._keep_awake_refs -= 1
        if cls._keep_awake_refs <= 0:
            cls._keep_awake_thread.kill()
            # Reset class state so a later start spawns a fresh greenlet and
            # the reference counter cannot drift below zero.
            cls._keep_awake_thread = None
            cls._keep_awake_refs = 0

    @classmethod
    def _keep_awake(cls):
        # Force the gevent loop to iterate frequently so that the AIO
        # completion callbacks get serviced promptly.
        while True:
            gevent.sleep(0.001)

    def _write_callback(self, ret, errno):
        """aio_write completion: deliver bytes written, or an IOError."""
        if ret > 0:
            self.event.set(ret)
        else:
            exc = IOError(errno, os.strerror(errno))
            self.event.set_exception(exc)

    def _write_piece(self, fd, data, data_len, offset):
        """Write one chunk at *offset*; return the number of bytes written."""
        remaining = data_len - offset
        if remaining > self.chunk_size:
            remaining = self.chunk_size
        piece = data[offset:offset + remaining]
        # BUG FIX: an AsyncResult stays "ready" once set, so reusing the
        # instance created in __init__ made every wait after the first
        # return the previous, stale result immediately.  Arm a fresh one
        # before each AIO request.
        self.event = AsyncResult()
        aio_write(fd, piece, offset, self._write_callback)
        return self.event.get()

    def dump(self, data):
        """Atomically write *data* to self.path via a temp file + rename."""
        data_view = memoryview(data)
        data_len = len(data)
        offset = 0
        self._start_keep_awake_thread()
        fd, filename = mkstemp(dir=self.tmp_dir)
        try:
            while True:
                ret = self._write_piece(fd, data_view, data_len, offset)
                offset += ret
                if offset >= data_len:
                    break
            os.rename(filename, self.path)
            filename = None  # moved into place; nothing left to clean up
        finally:
            os.close(fd)
            if filename is not None:
                # A write failed before the rename: don't leak the temp file.
                try:
                    os.unlink(filename)
                except OSError:
                    pass
            self._stop_keep_awake_thread()

    def pickle_dump(self, obj):
        """Pickle *obj* and write it atomically to self.path."""
        return self.dump(cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL))

    def _read_callback(self, buf, ret, errno):
        """aio_read completion: deliver the buffer, EOF, or an IOError."""
        if ret > 0:
            self.event.set(buf)
        elif ret == 0:
            # ret == 0 means end-of-file; surfaced as EOFError to load().
            exc = EOFError()
            self.event.set_exception(exc)
        else:
            exc = IOError(errno, os.strerror(errno))
            self.event.set_exception(exc)

    def _read_piece(self, fd, offset):
        """Read one chunk starting at *offset*; return the bytes read."""
        # Fresh AsyncResult per request -- see _write_piece.
        self.event = AsyncResult()
        aio_read(fd, offset, self.chunk_size, self._read_callback)
        return self.event.get()

    def load(self):
        """Read the whole file at self.path and return it as a byte string."""
        data = bytearray()
        offset = 0
        self._start_keep_awake_thread()
        fd = os.open(self.path, os.O_RDONLY)
        try:
            while True:
                buf = self._read_piece(fd, offset)
                offset += len(buf)
                data.extend(buf)
        except EOFError:
            # EOF is the normal loop exit delivered by _read_callback.
            return str(data)
        finally:
            os.close(fd)
            self._stop_keep_awake_thread()

    def pickle_load(self):
        """Load self.path and unpickle its contents."""
        return cPickle.loads(self.load())
Beispiel #41
0
class TaskExecutor(object):
    """Owns one out-of-process task executor (worker).

    Spawns the worker process (see executor()), hands tasks to it over the
    ``taskproxy`` RPC interface and relays status/progress/warnings back
    into the balancer's bookkeeping.  Worker state transitions are
    coordinated through the condition variable ``self.cv``.
    """

    def __init__(self, balancer, index):
        self.balancer = balancer
        self.index = index
        self.task = None
        self.proc = None
        self.pid = None
        self.conn = None
        self.state = WorkerState.STARTING
        # Random key the worker presents when checking in (passed on its
        # command line in executor()).
        self.key = str(uuid.uuid4())
        self.result = AsyncResult()
        self.exiting = False
        self.killed = False
        self.thread = gevent.spawn(self.executor)
        self.cv = Condition()
        self.status_lock = RLock()

    def checkin(self, conn):
        """Record the worker's RPC connection and mark it IDLE."""
        with self.cv:
            self.balancer.logger.debug("Check-in of worker #{0} (key {1})".format(self.index, self.key))
            self.conn = conn
            self.state = WorkerState.IDLE
            self.cv.notify_all()

    def put_progress(self, progress):
        """Relay a progress update from the worker into the task state."""
        st = TaskStatus(None)
        st.__setstate__(progress)
        self.task.set_state(progress=st)

    def put_status(self, status):
        """Handle a ROLLBACK/FINISHED/FAILED status message from the worker.

        Terminal outcomes are delivered through ``self.result`` so that
        run() unblocks.
        """
        with self.cv:
            # Try to collect rusage at this point, when process is still alive
            try:
                kinfo = self.balancer.dispatcher.threaded(bsd.kinfo_getproc, self.pid)
                self.task.rusage = kinfo.rusage
            except LookupError:
                pass

            if status["status"] == "ROLLBACK":
                self.task.set_state(TaskState.ROLLBACK)

            if status["status"] == "FINISHED":
                self.result.set(status["result"])

            if status["status"] == "FAILED":
                error = status["error"]

                # Reconstruct the original exception type when it is known,
                # otherwise wrap it in OtherException.
                if error["type"] in ERROR_TYPES:
                    cls = ERROR_TYPES[error["type"]]
                    exc = cls(
                        code=error["code"],
                        message=error["message"],
                        stacktrace=error["stacktrace"],
                        extra=error.get("extra"),
                    )
                else:
                    exc = OtherException(
                        code=error["code"],
                        message=error["message"],
                        stacktrace=error["stacktrace"],
                        type=error["type"],
                        extra=error.get("extra"),
                    )

                self.result.set_exception(exc)

    def put_warning(self, warning):
        """Relay a warning emitted by the running task."""
        self.task.add_warning(warning)

    def run(self, task):
        """Execute *task* on this worker and block until it finishes.

        Waits until the worker is ASSIGNED, locates the plugin file that
        defines the task class, starts it via ``taskproxy.run`` and then
        blocks on ``self.result`` for the outcome.
        """
        def match_file(module, f):
            # A plugin file matches when its basename equals the module name.
            name, ext = os.path.splitext(f)
            return module == name and ext in [".py", ".pyc", ".so"]

        with self.cv:
            self.cv.wait_for(lambda: self.state == WorkerState.ASSIGNED)
            self.result = AsyncResult()
            self.task = task
            self.task.set_state(TaskState.EXECUTING)
            self.state = WorkerState.EXECUTING
            self.cv.notify_all()

        self.balancer.logger.debug("Actually starting task {0}".format(task.id))

        # Locate the file implementing the task's module among plugin dirs.
        filename = None
        module_name = inspect.getmodule(task.clazz).__name__
        for dir in self.balancer.dispatcher.plugin_dirs:
            found = False
            try:
                for root, _, files in os.walk(dir):
                    file = first_or_default(lambda f: match_file(module_name, f), files)
                    if file:
                        filename = os.path.join(root, file)
                        found = True
                        break

                if found:
                    break
            except OSError:
                continue

        try:
            self.conn.call_sync(
                "taskproxy.run",
                {
                    "id": task.id,
                    "user": task.user,
                    "class": task.clazz.__name__,
                    "filename": filename,
                    "args": task.args,
                    "debugger": task.debugger,
                    "environment": task.environment,
                    "hooks": task.hooks,
                },
            )
        except RpcException as e:
            self.balancer.logger.warning(
                "Cannot start task {0} on executor #{1}: {2}".format(task.id, self.index, str(e))
            )

            self.balancer.logger.warning(
                "Killing unresponsive task executor #{0} (pid {1})".format(self.index, self.proc.pid)
            )

            self.terminate()

        try:
            self.result.get()
        except BaseException as e:
            if isinstance(e, OtherException):
                self.balancer.dispatcher.report_error("Task {0} raised invalid exception".format(self.task.name), e)

            if isinstance(e, TaskAbortException):
                self.task.set_state(TaskState.ABORTED, TaskStatus(0, "aborted"))
            else:
                self.task.error = serialize_error(e)
                self.task.set_state(
                    TaskState.FAILED, TaskStatus(0, str(e), extra={"stacktrace": traceback.format_exc()})
                )

            with self.cv:
                self.task.ended.set()

                if self.state == WorkerState.EXECUTING:
                    self.state = WorkerState.IDLE
                    self.cv.notify_all()

            self.balancer.task_exited(self.task)
            return

        with self.cv:
            self.task.result = self.result.value
            self.task.set_state(TaskState.FINISHED, TaskStatus(100, ""))
            self.task.ended.set()
            if self.state == WorkerState.EXECUTING:
                self.state = WorkerState.IDLE
                self.cv.notify_all()

        self.balancer.task_exited(self.task)

    def abort(self):
        """Abort the running task, killing the worker if RPC abort fails."""
        self.balancer.logger.info("Trying to abort task #{0}".format(self.task.id))
        # Try to abort via RPC. If this fails, kill process
        try:
            # If task supports abort protocol we don't need to worry about subtasks - it's task
            # responsibility to kill them
            self.conn.call_sync("taskproxy.abort")
        except RpcException as err:
            self.balancer.logger.warning("Failed to abort task #{0} gracefully: {1}".format(self.task.id, str(err)))
            self.balancer.logger.warning("Killing process {0}".format(self.pid))
            self.killed = True
            self.terminate()

            # Now kill all the subtasks
            for subtask in filter(lambda t: t.parent is self.task, self.balancer.task_list):
                self.balancer.logger.warning(
                    "Aborting subtask {0} because parent task {1} died".format(subtask.id, self.task.id)
                )
                self.balancer.abort(subtask.id)

    def terminate(self):
        """SIGTERM the worker process, tolerating an already-dead process."""
        try:
            self.proc.terminate()
        except OSError:
            self.balancer.logger.warning("Executor process with PID {0} already dead".format(self.proc.pid))

    def executor(self):
        """Supervisor loop: (re)spawn the worker process until exiting.

        Forwards the worker's stdout to the logger (and into the current
        task's output) and fails the pending result when the worker dies.
        """
        while not self.exiting:
            try:
                self.proc = Popen(
                    [TASKWORKER_PATH, self.key],
                    close_fds=True,
                    preexec_fn=os.setpgrp,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                )

                self.pid = self.proc.pid
                self.balancer.logger.debug("Started executor #{0} as PID {1}".format(self.index, self.pid))
            except OSError:
                self.result.set_exception(TaskException(errno.EFAULT, "Cannot spawn task executor"))
                self.balancer.logger.error("Cannot spawn task executor #{0}".format(self.index))
                return

            # Reading stdout also serves as the "process is alive" wait.
            for line in self.proc.stdout:
                line = line.decode("utf8")
                self.balancer.logger.debug("Executor #{0}: {1}".format(self.index, line.strip()))
                if self.task:
                    self.task.output += line

            self.proc.wait()

            with self.cv:
                self.state = WorkerState.STARTING
                self.cv.notify_all()

            if self.proc.returncode == -signal.SIGTERM:
                self.balancer.logger.info(
                    "Executor process with PID {0} was terminated gracefully".format(self.proc.pid)
                )
            else:
                self.balancer.logger.error(
                    "Executor process with PID {0} died abruptly with exit code {1}".format(
                        self.proc.pid, self.proc.returncode
                    )
                )

            if self.killed:
                self.result.set_exception(TaskException(errno.EFAULT, "Task killed"))
            else:
                self.result.set_exception(TaskException(errno.EFAULT, "Task executor died"))
            gevent.sleep(1)

    def die(self):
        """Request shutdown of the supervisor loop and kill the worker."""
        self.exiting = True
        if self.proc:
            self.terminate()
Beispiel #42
0
    def _register_secret_batch(
        self,
        secrets_to_register: List[Secret],
        transaction_result: AsyncResult,
        log_details: Dict[str, Any],
    ) -> None:
        """Send one registerSecretBatch transaction for *secrets_to_register*.

        The outcome is propagated through *transaction_result* (the
        transaction hash on success, an exception otherwise) so that other
        greenlets waiting on these secrets are released; on any failure the
        exception is also raised in this greenlet.
        """

        estimated_transaction = self.client.estimate_gas(
            self.proxy, "registerSecretBatch", log_details, secrets_to_register
        )
        msg = None
        transaction_mined = None

        if estimated_transaction is not None:
            # Use our own, deterministic gas limit rather than the node's
            # estimate (which must never exceed it -- see assert below).
            gas_limit = safe_gas_limit(
                GAS_REQUIRED_REGISTER_SECRET_BATCH_BASE
                + len(secrets_to_register) * GAS_REQUIRED_PER_SECRET_IN_BATCH,
            )
            assert estimated_transaction.estimated_gas <= gas_limit, (
                f"Our safe gas calculation must be larger than the gas cost estimated by the "
                f"ethereum node, but {estimated_transaction.estimated_gas} > {gas_limit}."
            )
            estimated_transaction.estimated_gas = gas_limit

            try:
                transaction_sent = self.client.transact(estimated_transaction)
                transaction_mined = self.client.poll_transaction(transaction_sent)
            except Exception as e:  # pylint: disable=broad-except
                msg = f"Unexpected exception {e} at sending registerSecretBatch transaction."

        # Clear `open_secret_transactions` regardless of the transaction being
        # successfully executed or not.
        with self._open_secret_transactions_lock:
            for secret in secrets_to_register:
                self.open_secret_transactions.pop(secret)

        # As of version `0.4.0` of the contract has *no* asserts or requires.
        # Therefore the only reason for the transaction to fail is if there is
        # a bug.
        unrecoverable_error = transaction_mined is None or not was_transaction_successfully_mined(
            transaction_mined
        )

        exception: Union[RaidenRecoverableError, RaidenUnrecoverableError]
        if unrecoverable_error:
            # If the transaction was sent it must not fail. If this happened
            # some of our assumptions is broken therefore the error is
            # unrecoverable
            if transaction_mined is not None:
                receipt = transaction_mined.receipt

                if receipt["gasUsed"] == transaction_mined.startgas:
                    # The transaction failed and all gas was used. This can
                    # happen because of:
                    #
                    # - A compiler bug if an invalid opcode was executed.
                    # - A configuration bug if an assert was executed,
                    # because version 0.4.0 of the secret registry does not have an
                    # assert.
                    # - An ethereum client bug if the gas_limit was
                    # underestimated.
                    #
                    # Safety cannot be guaranteed under any of these cases,
                    # this error is unrecoverable.
                    error = (
                        "Secret registration failed because of a bug in either "
                        "the solidity compiler, the running ethereum client, or "
                        "a configuration error in Raiden."
                    )
                else:
                    # The transaction failed and *not* all gas was used. This
                    # can happen because of:
                    #
                    # - A compiler bug if a revert was introduced.
                    # - A configuration bug, because for 0.4.0 the secret
                    # registry does not have a revert.
                    error = (
                        "Secret registration failed because of a configuration "
                        "bug or compiler bug. Please double check the secret "
                        "smart contract is at version 0.4.0, if it is then a "
                        "compiler bug was hit."
                    )

                exception = RaidenUnrecoverableError(error)
                transaction_result.set_exception(exception)
                raise exception

            # If gas_limit is set and there is no receipt then an exception was
            # raised while sending the transaction. This should only happen if
            # the account is being used concurrently, which is not supported.
            # This can happen because:
            #
            # - The nonce of the transaction was already used
            # - The nonce was reused *and* the account didn't have enough ether
            # to pay for the gas
            #
            # Safety cannot be guaranteed under any of these cases, this error
            # is unrecoverable. *Note*: This assumes the ethereum client
            # takes into account the current transactions in the pool.
            if estimated_transaction is not None:
                assert msg, "Unexpected control flow, an exception should have been raised."
                error = (
                    f"Sending the transaction for registerSecretBatch "
                    f"failed with: `{msg}`.  This happens if the same ethereum "
                    f"account is being used by more than one program which is not "
                    f"supported."
                )

                exception = RaidenUnrecoverableError(error)
                transaction_result.set_exception(exception)
                raise exception

            # gas_limit can fail because:
            #
            # - The Ethereum client detected the transaction could not
            # successfully execute, this happens if an assert/revert is hit.
            # - The account is lacking funds to pay for the gas.
            #
            # Either of these is a bug. The contract does not use
            # assert/revert, and the account should always be funded
            self.client.check_for_insufficient_eth(
                transaction_name="registerSecretBatch",
                transaction_executed=True,
                required_gas=GAS_REQUIRED_PER_SECRET_IN_BATCH * len(secrets_to_register),
                block_identifier=self.client.get_checking_block(),
            )
            error = "Call to registerSecretBatch couldn't be done"

            exception = RaidenRecoverableError(error)
            transaction_result.set_exception(exception)
            raise exception

        # The local transaction_result **MUST** be set before waiting for the
        # other results, otherwise we have a dead-lock
        assert transaction_mined is not None, MYPY_ANNOTATION
        transaction_result.set(transaction_mined.transaction_hash)
class Bucket(AsyncBucket):
    def __init__(self, *args, **kwargs):
        """Gevent-friendly couchbase bucket.

        Couples the asynchronous couchbase core with the gevent IOPS
        implementation so that operations which look blocking merely switch
        greenlets while the event loop does the I/O.
        """
        super(Bucket, self).__init__(IOPS(), *args, **kwargs)

    def _do_ctor_connect(self):
        """Finish the constructor-time connect synchronously."""
        if self.connected:
            return

        # Park this greenlet on an AsyncResult until the connect callback
        # (below) delivers success or failure.
        self._connect()
        self._evconn = AsyncResult()
        self._conncb = self._on_connected
        self._evconn.get()
        self._evconn = None

    def _on_connected(self, err):
        """Connect callback: wake the greenlet parked in _do_ctor_connect."""
        if not err:
            self._evconn.set(None)
        else:
            self._evconn.set_exception(err)

    def _waitwrap(self, cbasync):
        """Suspend the caller until *cbasync* completes; return its result."""
        me = getcurrent()

        def _on_error(mres, exc_type, exc_value, exc_tb):
            me.throw(exc_type, exc_value, exc_tb)

        cbasync.set_callbacks(me.switch, _on_error)
        try:
            return get_hub().switch()
        finally:
            # Detach the callbacks so that a later request running on this
            # same greenlet cannot pick up this (stale) result context.
            cbasync.set_callbacks(dummy_callback, dummy_callback)

    def _meth_factory(meth, name):
        # Wrap each raw async memcached operation in the wait helper above.
        def wrapped(self, *args, **kwargs):
            return self._waitwrap(meth(self, *args, **kwargs))
        return wrapped

    def _http_request(self, **kwargs):
        """Issue an HTTP request and wait for its completion on a Waiter."""
        request = super(Bucket, self)._http_request(**kwargs)

        waiter = Waiter()

        def _done(mres):
            waiter.switch(mres)

        def _failed(mres, exc_type, exc_value, exc_tb):
            waiter.throw(exc_type, exc_value, exc_tb)

        request.callback = _done
        request.errback = _failed
        return waiter.get()

    def query(self, *args, **kwargs):
        """Run a view query iterated through the gevent-aware GView."""
        kwargs['itercls'] = GView
        return super(Bucket, self).query(*args, **kwargs)

    def n1ql_query(self, query, *args, **kwargs):
        """Run a N1QL query iterated through the gevent-aware class."""
        kwargs['itercls'] = GN1QLRequest
        return super(Bucket, self).n1ql_query(query, *args, **kwargs)

    def _get_close_future(self):
        """Return an Event that is set once the underlying handle is torn down."""
        closed = Event()

        def _dtor_cb(*args):
            closed.set()

        self._dtorcb = _dtor_cb
        return closed

    locals().update(AsyncBucket._gen_memd_wrappers(_meth_factory))
Beispiel #44
0
    def new_netting_channel(
            self,
            partner: typing.Address,
            settle_timeout: int,
    ) -> typing.ChannelID:
        """Open a new channel with *partner* in the TokenNetwork contract.

        Args:
            partner: The peer to open the channel with.
            settle_timeout: The settle timeout to use for this channel.

        Returns:
            The ChannelID of the new netting channel.
        """
        if not is_binary_address(partner):
            raise InvalidAddress(
                'Expected binary address format for channel partner')

        # Short-circuit: the max is only queried when the min check passes.
        if (settle_timeout < self.settlement_timeout_min()
                or settle_timeout > self.settlement_timeout_max()):
            raise InvalidSettleTimeout(
                'settle_timeout must be in range [{}, {}], is {}'.format(
                    self.settlement_timeout_min(),
                    self.settlement_timeout_max(),
                    settle_timeout,
                ))

        if self.node_address == partner:
            raise SamePeerAddress(
                'The other peer must not have the same address as the client.')

        log_details = {
            'peer1': pex(self.node_address),
            'peer2': pex(partner),
        }
        log.debug('new_netting_channel called', **log_details)

        # Prevent concurrent attempts to open a channel with the same token
        # and partner address.
        if partner in self.open_channel_transactions:
            # Another greenlet is already opening; block on its outcome.
            self.open_channel_transactions[partner].get()
        else:
            pending = AsyncResult()
            self.open_channel_transactions[partner] = pending

            try:
                transaction_hash = self._new_netting_channel(
                    partner, settle_timeout)
            except Exception as e:
                log.critical('new_netting_channel failed', **log_details)
                pending.set_exception(e)
                raise
            else:
                pending.set(transaction_hash)
            finally:
                self.open_channel_transactions.pop(partner, None)

        if self.channel_exists_and_not_settled(
                self.node_address, partner) is False:
            log.critical('new_netting_channel failed', **log_details)
            raise RaidenUnrecoverableError('creating new channel failed')

        channel_identifier = self.detail_channel(
            self.node_address, partner).channel_identifier
        log_details['channel_identifier'] = channel_identifier
        log.info('new_netting_channel successful', **log_details)

        return channel_identifier
Beispiel #45
0
class _Socket(_original_Socket):
    """Green version of :class:`zmq.core.socket.Socket`

    The following methods are overridden:

        * send
        * recv

    To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
    is deferred to the hub if a ``zmq.EAGAIN`` (retry) error is raised.

    The `__state_changed` method is triggered when the zmq.FD for the socket is
    marked as readable and triggers the necessary read and write events (which
    are waited for in the recv and send methods).

    Some double underscore prefixes are used to minimize pollution of
    :class:`zmq.core.socket.Socket`'s namespace.
    """
    __in_send_multipart = False
    __in_recv_multipart = False
    __writable = None
    __readable = None
    _state_event = None
    _gevent_bug_timeout = 11.6 # timeout for not trusting gevent
    _debug_gevent = False # turn on if you think gevent is missing events
    _poller_class = _Poller

    def __init__(self, context, socket_type):
        _original_Socket.__init__(self, context, socket_type)
        self.__in_send_multipart = False
        self.__in_recv_multipart = False
        self.__setup_events()


    def __del__(self):
        # Ensure the FD watcher is stopped when the socket is GC'd.
        self.close()

    def close(self, linger=None):
        """Close the socket and release the gevent state watcher."""
        super(_Socket, self).close(linger)
        self.__cleanup_events()

    def __cleanup_events(self):
        # close the _state_event event, keeps the number of active file descriptors down
        if getattr(self, '_state_event', None):
            _stop(self._state_event)
            self._state_event = None
        # if the socket has entered a close state resume any waiting greenlets
        self.__writable.set()
        self.__readable.set()

    def __setup_events(self):
        """Create the readable/writable AsyncResults and the FD watcher."""
        self.__readable = AsyncResult()
        self.__writable = AsyncResult()
        # Start in the "ready" state; _wait_read/_wait_write re-arm them.
        self.__readable.set()
        self.__writable.set()

        try:
            self._state_event = get_hub().loop.io(self.getsockopt(zmq.FD), 1) # read state watcher
            self._state_event.start(self.__state_changed)
        except AttributeError:
            # for gevent<1.0 compatibility
            from gevent.core import read_event
            self._state_event = read_event(self.getsockopt(zmq.FD), self.__state_changed, persist=True)

    def __state_changed(self, event=None, _evtype=None):
        """FD watcher callback: translate zmq.EVENTS into read/write wakeups."""
        if self.closed:
            self.__cleanup_events()
            return
        try:
            # avoid triggering __state_changed from inside __state_changed
            events = super(_Socket, self).getsockopt(zmq.EVENTS)
        except zmq.ZMQError as exc:
            self.__writable.set_exception(exc)
            self.__readable.set_exception(exc)
        else:
            if events & zmq.POLLOUT:
                self.__writable.set()
            if events & zmq.POLLIN:
                self.__readable.set()

    def _wait_write(self):
        """Block the current greenlet until the socket signals writability."""
        assert self.__writable.ready(), "Only one greenlet can be waiting on this event"
        self.__writable = AsyncResult()
        # timeout is because libzmq cannot be trusted to properly signal a new send event:
        # this is effectively a maximum poll interval of 1s
        tic = time.time()
        dt = self._gevent_bug_timeout
        if dt:
            timeout = gevent.Timeout(seconds=dt)
        else:
            timeout = None
        try:
            if timeout:
                timeout.start()
            self.__writable.get(block=True)
        except gevent.Timeout as t:
            if t is not timeout:
                raise
            toc = time.time()
            # gevent bug: get can raise timeout even on clean return
            # don't display zmq bug warning for gevent bug (this is getting ridiculous)
            if self._debug_gevent and timeout and toc-tic > dt and \
                    self.getsockopt(zmq.EVENTS) & zmq.POLLOUT:
                print("BUG: gevent may have missed a libzmq send event on %i!" % self.FD, file=sys.stderr)
        finally:
            if timeout:
                timeout.cancel()
            self.__writable.set()

    def _wait_read(self):
        """Block the current greenlet until the socket signals readability."""
        assert self.__readable.ready(), "Only one greenlet can be waiting on this event"
        self.__readable = AsyncResult()
        # timeout is because libzmq cannot always be trusted to play nice with libevent.
        # I can only confirm that this actually happens for send, but lets be symmetrical
        # with our dirty hacks.
        # this is effectively a maximum poll interval of 1s
        tic = time.time()
        dt = self._gevent_bug_timeout
        if dt:
            timeout = gevent.Timeout(seconds=dt)
        else:
            timeout = None
        try:
            if timeout:
                timeout.start()
            self.__readable.get(block=True)
        except gevent.Timeout as t:
            if t is not timeout:
                raise
            toc = time.time()
            # gevent bug: get can raise timeout even on clean return
            # don't display zmq bug warning for gevent bug (this is getting ridiculous)
            if self._debug_gevent and timeout and toc-tic > dt and \
                    self.getsockopt(zmq.EVENTS) & zmq.POLLIN:
                print("BUG: gevent may have missed a libzmq recv event on %i!" % self.FD, file=sys.stderr)
        finally:
            if timeout:
                timeout.cancel()
            self.__readable.set()

    def send(self, data, flags=0, copy=True, track=False):
        """send, which will only block current greenlet

        state_changed always fires exactly once (success or fail) at the
        end of this method.
        """

        # if we're given the NOBLOCK flag act as normal and let the EAGAIN get raised
        if flags & zmq.NOBLOCK:
            try:
                msg = super(_Socket, self).send(data, flags, copy, track)
            finally:
                if not self.__in_send_multipart:
                    self.__state_changed()
            return msg
        # ensure the zmq.NOBLOCK flag is part of flags
        flags |= zmq.NOBLOCK
        while True: # Attempt to complete this operation indefinitely, blocking the current greenlet
            try:
                # attempt the actual call
                msg = super(_Socket, self).send(data, flags, copy, track)
            except zmq.ZMQError as e:
                # if the raised ZMQError is not EAGAIN, reraise
                if e.errno != zmq.EAGAIN:
                    if not self.__in_send_multipart:
                        self.__state_changed()
                    raise
            else:
                if not self.__in_send_multipart:
                    self.__state_changed()
                return msg
            # defer to the event loop until we're notified the socket is writable
            self._wait_write()

    def recv(self, flags=0, copy=True, track=False):
        """recv, which will only block current greenlet

        state_changed always fires exactly once (success or fail) at the
        end of this method.
        """
        if flags & zmq.NOBLOCK:
            try:
                msg = super(_Socket, self).recv(flags, copy, track)
            finally:
                if not self.__in_recv_multipart:
                    self.__state_changed()
            return msg

        flags |= zmq.NOBLOCK
        while True:
            try:
                msg = super(_Socket, self).recv(flags, copy, track)
            except zmq.ZMQError as e:
                if e.errno != zmq.EAGAIN:
                    if not self.__in_recv_multipart:
                        self.__state_changed()
                    raise
            else:
                if not self.__in_recv_multipart:
                    self.__state_changed()
                return msg
            # defer to the event loop until the socket becomes readable
            self._wait_read()

    def send_multipart(self, *args, **kwargs):
        """wrap send_multipart to prevent state_changed on each partial send"""
        self.__in_send_multipart = True
        try:
            msg = super(_Socket, self).send_multipart(*args, **kwargs)
        finally:
            self.__in_send_multipart = False
            self.__state_changed()
        return msg

    def recv_multipart(self, *args, **kwargs):
        """wrap recv_multipart to prevent state_changed on each partial recv"""
        self.__in_recv_multipart = True
        try:
            msg = super(_Socket, self).recv_multipart(*args, **kwargs)
        finally:
            self.__in_recv_multipart = False
            self.__state_changed()
        return msg

    def get(self, opt):
        """trigger state_changed on getsockopt(EVENTS)"""
        optval = super(_Socket, self).get(opt)
        if opt == zmq.EVENTS:
            self.__state_changed()
        return optval
Beispiel #46
0
class _Socket(_original_Socket):
    """Green version of :class:`zmq.Socket`

    The following methods are overridden:

        * send
        * recv

    To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
    is deferred to the hub if a ``zmq.EAGAIN`` (retry) error is raised.

    The `__state_changed` method is triggered when the zmq.FD for the socket is
    marked as readable and triggers the necessary read and write events (which
    are waited for in the recv and send methods).

    Some double underscore prefixes are used to minimize pollution of
    :class:`zmq.Socket`'s namespace.
    """

    # True while a send_multipart is in progress; suppresses per-part
    # state_changed notifications
    __in_send_multipart = False
    # True while a recv_multipart is in progress; same suppression for recv
    __in_recv_multipart = False
    # AsyncResults signalled when the socket becomes writable/readable;
    # None until __setup_events has run
    __writable = None
    __readable = None
    # hub io watcher on the socket's zmq.FD; None until __setup_events
    _state_event = None
    _gevent_bug_timeout = 11.6  # timeout for not trusting gevent
    _debug_gevent = False  # turn on if you think gevent is missing events
    _poller_class = _Poller
    _repr_cls = "zmq.green.Socket"

    def __init__(self, *args, **kwargs):
        """Initialise the underlying socket, reset the multipart guard
        flags, and start watching the socket's FD for events."""
        super(_Socket, self).__init__(*args, **kwargs)
        # Per-instance flags shadow the class-level defaults.
        self.__in_recv_multipart = False
        self.__in_send_multipart = False
        self.__setup_events()

    def __del__(self):
        # Ensure the FD watcher is stopped and any waiting greenlets are
        # released when the socket is garbage collected.
        self.close()

    def close(self, linger=None):
        """Close the socket, then tear down the gevent state watcher and
        wake any greenlets blocked in send/recv."""
        super(_Socket, self).close(linger)
        self.__cleanup_events()

    def __cleanup_events(self):
        """Stop the FD watcher and release any waiting greenlets.

        Safe to call before ``__setup_events`` has run — e.g. when
        construction fails part-way and ``__del__`` triggers ``close()``
        — in which case the event attributes are still the class-level
        ``None`` defaults.
        """
        # close the _state_event event, keeps the number of active file
        # descriptors down
        if getattr(self, '_state_event', None):
            _stop(self._state_event)
            self._state_event = None
        # if the socket has entered a close state resume any waiting
        # greenlets; guard against the pre-__setup_events None defaults,
        # which would otherwise raise AttributeError here
        if self.__writable is not None:
            self.__writable.set()
        if self.__readable is not None:
            self.__readable.set()

    def __setup_events(self):
        """Create the readable/writable events and start the FD watcher."""
        # Both events begin "set" so the first send/recv attempts the
        # operation immediately instead of waiting.
        self.__readable = AsyncResult()
        self.__writable = AsyncResult()
        self.__readable.set()
        self.__writable.set()

        fd = self.getsockopt(zmq.FD)
        try:
            # gevent >= 1.0: read-state io watcher on the zmq FD
            self._state_event = get_hub().loop.io(fd, 1)
            self._state_event.start(self.__state_changed)
        except AttributeError:
            # for gevent<1.0 compatibility
            from gevent.core import read_event

            self._state_event = read_event(fd, self.__state_changed,
                                           persist=True)

    def __state_changed(self, event=None, _evtype=None):
        """FD watcher callback: propagate the socket's event mask onto
        the readable/writable AsyncResults."""
        if self.closed:
            # Socket is gone: tear down and wake any waiters.
            self.__cleanup_events()
            return
        try:
            # Call the plain superclass getsockopt so we do not recurse
            # back into this callback via the overridden get().
            events = super(_Socket, self).getsockopt(zmq.EVENTS)
        except zmq.ZMQError as exc:
            # Surface the error to any greenlet blocked in send/recv.
            self.__readable.set_exception(exc)
            self.__writable.set_exception(exc)
        else:
            if events & zmq.POLLOUT:
                self.__writable.set()
            if events & zmq.POLLIN:
                self.__readable.set()

    def _wait_write(self):
        """Block the current greenlet until the socket is writable.

        Only one greenlet may wait on the writable event at a time.
        The wait is bounded by ``_gevent_bug_timeout`` because libzmq
        cannot always be trusted to signal the edge-triggered FD event.
        """
        assert self.__writable.ready(
        ), "Only one greenlet can be waiting on this event"
        self.__writable = AsyncResult()
        # timeout is because libzmq cannot be trusted to properly signal a new send event:
        # this is effectively a maximum poll interval of 1s
        tic = time.time()
        dt = self._gevent_bug_timeout
        if dt:
            timeout = gevent.Timeout(seconds=dt)
        else:
            timeout = None
        try:
            if timeout:
                timeout.start()
            self.__writable.get(block=True)
        except gevent.Timeout as t:
            if t is not timeout:
                # a Timeout raised by someone else — not ours to swallow
                raise
            toc = time.time()
            # gevent bug: get can raise timeout even on clean return
            # don't display zmq bug warning for gevent bug (this is getting ridiculous)
            if (self._debug_gevent and timeout and toc - tic > dt
                    and self.getsockopt(zmq.EVENTS) & zmq.POLLOUT):
                print(
                    "BUG: gevent may have missed a libzmq send event on %i!" %
                    self.FD,
                    file=sys.stderr,
                )
        finally:
            if timeout:
                timeout.close()
            # leave the event set so the next waiter's assert holds
            self.__writable.set()

    def _wait_read(self):
        """Block the current greenlet until the socket is readable.

        Only one greenlet may wait on the readable event at a time.
        The wait is bounded by ``_gevent_bug_timeout``, mirroring
        ``_wait_write`` (see the inline notes below).
        """
        assert self.__readable.ready(
        ), "Only one greenlet can be waiting on this event"
        self.__readable = AsyncResult()
        # timeout is because libzmq cannot always be trusted to play nice with libevent.
        # I can only confirm that this actually happens for send, but lets be symmetrical
        # with our dirty hacks.
        # this is effectively a maximum poll interval of 1s
        tic = time.time()
        dt = self._gevent_bug_timeout
        if dt:
            timeout = gevent.Timeout(seconds=dt)
        else:
            timeout = None
        try:
            if timeout:
                timeout.start()
            self.__readable.get(block=True)
        except gevent.Timeout as t:
            if t is not timeout:
                # a Timeout raised by someone else — not ours to swallow
                raise
            toc = time.time()
            # gevent bug: get can raise timeout even on clean return
            # don't display zmq bug warning for gevent bug (this is getting ridiculous)
            if (self._debug_gevent and timeout and toc - tic > dt
                    and self.getsockopt(zmq.EVENTS) & zmq.POLLIN):
                print(
                    "BUG: gevent may have missed a libzmq recv event on %i!" %
                    self.FD,
                    file=sys.stderr,
                )
        finally:
            if timeout:
                timeout.close()
            # leave the event set so the next waiter's assert holds
            self.__readable.set()

    def send(self, data, flags=0, copy=True, track=False, **kwargs):
        """send, which will only block current greenlet

        state_changed always fires exactly once (success or fail) at the
        end of this method.
        """

        # if we're given the NOBLOCK flag act as normal and let the EAGAIN get raised
        if flags & zmq.NOBLOCK:
            try:
                msg = super(_Socket, self).send(data, flags, copy, track,
                                                **kwargs)
            finally:
                if not self.__in_send_multipart:
                    self.__state_changed()
            return msg
        # ensure the zmq.NOBLOCK flag is part of flags
        flags |= zmq.NOBLOCK
        # Attempt to complete this operation indefinitely, blocking only
        # the current greenlet between attempts.
        while True:
            try:
                # attempt the actual call; forward **kwargs here too
                # (e.g. routing_id) — previously they were silently
                # dropped on the blocking path, unlike the NOBLOCK path
                msg = super(_Socket, self).send(data, flags, copy, track,
                                                **kwargs)
            except zmq.ZMQError as e:
                # if the raised ZMQError is not EAGAIN, reraise
                if e.errno != zmq.EAGAIN:
                    if not self.__in_send_multipart:
                        self.__state_changed()
                    raise
            else:
                if not self.__in_send_multipart:
                    self.__state_changed()
                return msg
            # defer to the event loop until we're notified the socket is writable
            self._wait_write()

    def recv(self, flags=0, copy=True, track=False):
        """Receive a message while blocking only the current greenlet.

        ``__state_changed`` is fired exactly once — whether the receive
        succeeds or fails — before this method returns.
        """
        # Non-blocking request: one attempt, EAGAIN propagates as-is.
        if flags & zmq.NOBLOCK:
            try:
                return super(_Socket, self).recv(flags, copy, track)
            finally:
                if not self.__in_recv_multipart:
                    self.__state_changed()

        # Make the underlying call non-blocking so we can defer to the
        # hub instead of blocking the whole thread.
        flags |= zmq.NOBLOCK
        while True:
            try:
                message = super(_Socket, self).recv(flags, copy, track)
            except zmq.ZMQError as exc:
                if exc.errno != zmq.EAGAIN:
                    if not self.__in_recv_multipart:
                        self.__state_changed()
                    raise
            else:
                if not self.__in_recv_multipart:
                    self.__state_changed()
                return message
            # Nothing available yet — wait for the FD to become readable.
            self._wait_read()

    def send_multipart(self, *args, **kwargs):
        """Send all message parts with state_changed deferred to the end,
        so partial sends do not trigger spurious notifications."""
        self.__in_send_multipart = True
        try:
            result = super(_Socket, self).send_multipart(*args, **kwargs)
        finally:
            # Always restore the flag and emit the single deferred update.
            self.__in_send_multipart = False
            self.__state_changed()
        return result

    def recv_multipart(self, *args, **kwargs):
        """Receive all message parts with state_changed deferred to the
        end, so partial receives do not trigger spurious notifications."""
        self.__in_recv_multipart = True
        try:
            result = super(_Socket, self).recv_multipart(*args, **kwargs)
        finally:
            # Always restore the flag and emit the single deferred update.
            self.__in_recv_multipart = False
            self.__state_changed()
        return result

    def get(self, opt):
        """getsockopt wrapper: warns on TIMEO options (which zmq.green
        ignores) and fires state_changed after EVENTS is read."""
        if opt in TIMEOS:
            warnings.warn(
                "TIMEO socket options have no effect in zmq.green",
                UserWarning,
            )
        value = super(_Socket, self).get(opt)
        if opt == zmq.EVENTS:
            # Reading EVENTS acknowledges the FD edge; refresh our state.
            self.__state_changed()
        return value

    def set(self, opt, val):
        """setsockopt wrapper: TIMEO options do nothing under zmq.green,
        so warn callers who try to use them."""
        if opt in TIMEOS:
            warnings.warn(
                "TIMEO socket options have no effect in zmq.green",
                UserWarning,
            )
        return super(_Socket, self).set(opt, val)