class Worker(object):
    '''
    Code run by a worker (child) process: it spawns a coroutine to communicate
    with the master process, receiving task assignments and the exit signal
    (a zero-byte packet) and reporting task progress back.
    The main coroutine then waits for the stop signal and terminates the
    process (stop_event is used to synchronize the coroutines).
    '''
    def __init__(self, address):
        self.address = address
        self.stop_event = Event()

        self.wp = open('fetched_%s.txt' % os.getpid(), 'w')
        self.uf_url_p = open('unfetched_%s.txt' % os.getpid(), 'w')

        gevent.spawn(self.communicate)
        self.stop_event.wait()
        print 'worker(%s):will stop' % os.getpid()

    def exec_task(self, url):
        try:
            article = g.extract(url=url)
            title = article.title
            content = article.cleaned_text

            if content is not None and len(content) > 0:
                self.wp.write( url )
                #self.wp.write( ' ' )
                #self.wp.write( title.replace(' ', '').replace('\n', '').encode('utf8') )
                self.wp.write( ' ' )
                self.wp.write( content.replace('\n', '').encode('utf8') )
                self.wp.write( '\n' )
            else:
                self.uf_url_p.write( url )
                self.uf_url_p.write( '\n' )
        except (TypeError, IncompleteRead, AttributeError), e:
            return
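The snippet above relies on a module-level extractor g and the IncompleteRead exception without showing where they come from; a minimal setup sketch, assuming python-goose is the extractor in use (the import paths below are assumptions):

# Setup sketch for the globals used above (assumption: python-goose as the extractor).
import os
import gevent
from gevent.event import Event
from httplib import IncompleteRead  # Python 2; use http.client.IncompleteRead on Python 3
from goose import Goose

g = Goose()  # g.extract(url=...) yields an article with .title and .cleaned_text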
Example #2
class ConditionPoller(Thread):
    """
    Generic polling mechanism: every interval seconds, check whether condition
    returns a true value; if so, pass that value to condition_callback.
    If condition or the callback raises an exception, polling stops.
    """
    def __init__(self, condition, condition_callback, exception_callback, interval):
        self.polling_interval = interval
        self._shutdown_now = Event()
        self._condition = condition
        self._callback = condition_callback
        self._on_exception = exception_callback
        self.is_shutting_down = False
        super(ConditionPoller, self).__init__()
    def shutdown(self):
        self.is_shutting_down = True
        self._shutdown_now.set()
    def run(self):
        try:
            while not self._shutdown_now.is_set():
                self._check_condition()
                self._shutdown_now.wait(self.polling_interval)
        except:
            log.error('thread failed', exc_info=True)
    def _check_condition(self):
        try:
            value = self._condition()
            if value:
                self._callback(value)
        except Exception as e:
            log.debug('stopping poller after exception', exc_info=True)
            self.shutdown()
            if self._on_exception:
                self._on_exception(e)
    def start(self):
        super(ConditionPoller,self).start()
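A minimal usage sketch for ConditionPoller as described in its docstring: poll a condition every few seconds and hand any truthy result to the callback (the flag file and callbacks below are illustrative):

# Usage sketch for ConditionPoller (flag file and callbacks are illustrative).
import os

def flag_present():
    return '/tmp/ready.flag' if os.path.exists('/tmp/ready.flag') else None

def on_ready(path):
    print 'condition met: %s' % path

def on_error(exc):
    print 'poller stopped after exception: %r' % exc

poller = ConditionPoller(flag_present, on_ready, on_error, interval=2)
poller.start()
# ... later, when polling is no longer needed:
poller.shutdown()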
Example #3
    def waitForPublishResult(self, eventName):
        try:
            event = self.eventPublishResult[eventName]
        except KeyError:
            event = Event()
            self.eventPublishResult[eventName] = event
        event.wait()
Example #4
def StartGRpcServer(sJsonRpcHostPort, sModule, cnMaster, cnWorker):
    from gevent.event import Event
    from grpc import RpcServer

    lsHP = sJsonRpcHostPort.split(':')
    if len(lsHP)<2:
        PrintTimeMsg("sJsonRpcHostPort=(%s) error!" % sJsonRpcHostPort)
        return

    endpoint = (lsHP[0], int(lsHP[1]))
    try:  # WeiYF.20150414 tested: importModuleClass is the approach that works here
        #classMaster = eval("CMaster") #eval(cnMaster)
        #classWorker = eval(cnWorker)
        # classMaster = importClassString("grcs.grsVtrbtctx.CMaster")
        # classWorker = importClassString("grcs.grsVtrbtctx.CWorker")
        classMaster = importModuleClass(sModule,cnMaster)
        classWorker = importModuleClass(sModule,cnWorker)
        master = classMaster(endpoint)
        svr = RpcServer()
        svr.bind(endpoint)
        t1 = classWorker()
        svr.register(t1)
        svr.register(master)
        t1.set_master(svr.get_export_proxy(master))
        svr.start()
        PrintTimeMsg('Starting GRPC server(%s,%s)...' % (sModule,sJsonRpcHostPort))
        wait = Event()
        wait.wait()
    #except KeyboardInterrupt:
    except Exception, e:
        import traceback
        traceback.print_exc()  # WeiYF.20151022 print the full exception stack; very useful for dynamic loading
        PrintTimeMsg('StartGRpcServer.Exception.e=(%s)' % (str(e)))
Example #5
class TestEvent(Actor):

    '''**Generates a test event at the chosen interval.**

    This module is intended for testing purposes only and has little use beyond that.

    Events have the following format:

        { "header":{}, "data":"test" }

    Parameters:

        - name (str):               The instance name when initiated.

        - interval (float):         The interval in seconds between each generated event.
                                    Should have a value > 0.
                                    default: 1

    Queues:

        - outbox:    Contains the generated events.
    '''

    def __init__(self, name, interval=1):
        Actor.__init__(self, name, setupbasic=False)
        self.createQueue("outbox")
        self.name = name
        self.interval=interval
        if interval == 0:
            self.sleep = self.doNoSleep
        else:
            self.sleep = self.doSleep

        self.throttle=Event()
        self.throttle.set()

    def preHook(self):
        spawn(self.go)

    def go(self):
        switcher = self.getContextSwitcher(100)
        while switcher():
            self.throttle.wait()
            try:
                self.queuepool.outbox.put({"header":{},"data":"test"})
            except (QueueFull, QueueLocked):
                self.queuepool.outbox.waitUntilPutAllowed()
            self.sleep(self.interval)

    def doSleep(self, interval):
        sleep(interval)

    def doNoSleep(self, interval):
        pass

    def enableThrottling(self):
        self.throttle.clear()

    def disableThrottling(self):
        self.throttle.set()
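The throttle Event above acts as a gate: wait() passes straight through while the event is set and blocks after clear(). A minimal, framework-independent sketch of the same pattern with plain gevent:

# Minimal sketch of the same Event-as-throttle pattern with plain gevent.
import gevent
from gevent.event import Event

throttle = Event()
throttle.set()                 # start unthrottled

def producer():
    for _ in range(10):
        throttle.wait()        # blocks only while throttling is enabled
        print 'tick'
        gevent.sleep(0.5)

worker = gevent.spawn(producer)
gevent.sleep(1.5)
throttle.clear()               # enable throttling: producer pauses at wait()
gevent.sleep(1.5)
throttle.set()                 # disable throttling: producer resumes
worker.join()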
Example #6
    def test_apply_async(self):
        done = Event()
        def some_work(x):
            done.set()
        pool = self.klass(2)
        pool.apply_async(some_work, ('x', ))
        done.wait()
Example #7
def run(ctx, dev):
    """Start the client"""
    # create app
    app = EthApp(ctx.obj['config'])

    if dev:
        gevent.get_hub().SYSTEM_ERROR = BaseException
        try:
            ctx.obj['config']['client_version'] += '/' + os.getlogin()
        except:
            log.warn("can't get and add login name to client_version")
            pass

    # register services
    for service in services:
        assert issubclass(service, BaseService)
        if service.name not in app.config['deactivated_services']:
            assert service.name not in app.services
            service.register_with_app(app)
            assert hasattr(app.services, service.name)

    # start app
    app.start()

    # wait for interrupt
    evt = Event()
    gevent.signal(signal.SIGQUIT, evt.set)
    gevent.signal(signal.SIGTERM, evt.set)
    gevent.signal(signal.SIGINT, evt.set)
    evt.wait()

    # finally stop
    app.stop()
Example #8
class ClusterCoordinator(Service):
    port = Setting('cluster_port', default=4440)

    def __init__(self, identity, leader=None, cluster=None):
        leader = leader or identity
        self.server = PeerServer(self, identity)
        self.client = PeerClient(self, leader, identity)
        self.set = cluster or ObservableSet()
        self.promoted = Event()

        self.add_service(self.server)
        if leader != identity:
            self.add_service(self.client)
            self.is_leader = False
        else:
            self.is_leader = True

    def wait_for_promotion(self):
        self.promoted.wait()

    @property
    def leader(self):
        return self.client.leader

    @property
    def identity(self):
        return self.client.identity
Example #9
class Worker(object):
    '''
    Code run by a worker (child) process: it spawns a coroutine to communicate
    with the master process, receiving task assignments and the exit signal
    (a zero-byte packet) and reporting task progress back.
    The main coroutine then waits for the stop signal and terminates the
    process (stop_event is used to synchronize the coroutines).
    '''
    def __init__(self, url):
        self.url = url
        self.stop_event = Event()
        gevent.spawn(self.communicate)
        self.stop_event.wait()
        print 'worker(%s):will stop' % os.getpid()
    def exec_task(self, task):
        print 'worker(%s):execute task:%s' % (os.getpid(), task.rstrip('\n'))
    def communicate(self):
        print 'worker(%s):started' % os.getpid()
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.connect(self.url)
        fp = client.makefile()
        while True:
            line = fp.readline()
            if not line:
                self.stop_event.set()
                break
            # Spawn a separate coroutine to run the task so the communication coroutine does not block.
            gevent.spawn(self.exec_task, line)
Example #10
def test_client_shutdown(copper_client):
    have_stream = Event()
    may_respond = Event()
    def handler(stream):
        have_stream.set()
        may_respond.wait()
        stream.write('Hello, world!')
    with copper_client.publish('test:helloworld', handler):
        with copper_client.subscribe('test:helloworld') as sub:
            with sub.open() as stream:
                have_stream.wait()
                with pytest.raises(Timeout):
                    with Timeout(0.005):
                        # This initiates the shutdown, but it should not
                        # complete on its own, because handler is still
                        # running. Being stopped with a timeout does not
                        # stop the shutdown procedure.
                        copper_client.shutdown(unpublish=False)
                # Verify our handler can still reply successfully
                may_respond.set()
                assert stream.read() == 'Hello, world!'
            with sub.open() as stream:
                # Verify any new streams fail with ECONNSHUTDOWN (since our
                # code didn't unpublish the service), and don't reach our
                # handler.
                with pytest.raises(ConnectionShutdownError):
                    stream.read()
            # Verify shutdown now finishes successfully.
            copper_client.shutdown()
Example #11
    def test_reentrant(self):
        pool = self.klass(1)
        result = pool.apply(pool.apply, (lambda a: a + 1, (5, )))
        self.assertEqual(result, 6)
        evt = Event()
        pool.apply_async(evt.set)
        evt.wait()
Example #12
def test_with_remoting(defer):
    random_data = str(random.randint(0, 10000000000))

    node1 = Node('localhost:20001', enable_remoting=True)
    defer(node1.stop)
    node2 = Node('localhost:20002', enable_remoting=True)
    defer(node2.stop)

    f_src = tempfile.NamedTemporaryFile()
    f_src.write(random_data)
    f_src.flush()

    class Sender(Actor):
        def run(self, receiver):
            ref = serve_file(f_src.name)
            receiver << ref

    class Receiver(Actor):
        def receive(self, fref):
            for _ in range(2):
                fetched_path = fref.fetch()
                with open(fetched_path) as f_dst:
                    eq_(random_data, f_dst.read())
            received.set()

    received = Event()
    node2.spawn(Receiver, name='receiver')
    receiver_ref = node1.lookup_str('localhost:20002/receiver')
    node1.spawn(Sender.using(receiver=receiver_ref))
    received.wait()
Example #13
class RecurringTask(object):

    def __init__(self, interval, fn):
        self.interval = interval
        self.fn = fn
        self._wakeup = Event()
        self._stopped = Event()
        self._gthread = None

    def touch(self):
        """Make sure the task is executed now."""
        self._wakeup.set()
    
    def start(self):
        self._gthread = gevent.spawn(self._run)

    def stop(self):
        self._stopped.set()
        self._wakeup.set()

    def _run(self):
        while not self._stopped.is_set():
            self.fn()
            self._wakeup.wait(timeout=self.interval)
            self._wakeup.clear()
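A short usage sketch for RecurringTask: run a function periodically, force an immediate run with touch(), then stop() to wake the loop and let it exit (the task function is illustrative):

# Usage sketch for RecurringTask (the task function is illustrative).
import gevent

def poll_backend():
    print 'polling backend...'

task = RecurringTask(interval=10, fn=poll_backend)
task.start()          # spawns _run, which calls poll_backend every 10 seconds
gevent.sleep(1)
task.touch()          # wakes the loop so poll_backend runs again immediately
gevent.sleep(1)
task.stop()           # sets _stopped and _wakeup so _run can exit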
Example #14
class Lock(object):

    def __init__(self, etcd, key, name, ttl=30):
        """."""
        self.etcd = etcd
        self.key = key
        self.name = name
        self._gthread = None
        self._ttl = ttl
        self._stopped = Event()

    def _heartbeat(self):
        while True:
            self._stopped.wait(self._ttl / 2)
            if self._stopped.is_set():
                break
            self.etcd.testandset(self.key, self.name, self.name,
                                 ttl=self._ttl)

    def lock(self):
        # This is to work around bugs in etcd.  Not very atomic
        # at all :(
        while True:
            try:
                e = self.etcd.get(self.key)
            except EtcdError, err:
                logging.error("lock: %s: error: %r" % (
                        self.key, err))
                self.etcd.set(self.key, self.name)
                self._gthread = gevent.spawn(self._heartbeat)
                break
            else:
                time.sleep(self._ttl / 2)
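A usage sketch for the etcd-backed Lock: lock() blocks until the key is free, then a background greenlet keeps refreshing the TTL; the etcd client, key and owner name below are illustrative, and since the class exposes no unlock method the sketch stops the heartbeat directly:

# Usage sketch for Lock (client, key and name are illustrative).
lock = Lock(etcd_client, key='/locks/scheduler', name='node-1', ttl=30)
lock.lock()               # returns once this node has written the key
try:
    do_exclusive_work()   # illustrative placeholder for the critical section
finally:
    lock._stopped.set()   # stop the heartbeat; the key then expires via its TTL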
Example #15
class _FormationCache(object):
    """A cache of instance data for a formation."""

    def __init__(self, client, form_name, factory, interval):
        self.client = client
        self.form_name = form_name
        self.factory = factory
        self.interval = interval
        self._gthread = None
        self._cache = {}
        self._stopped = Event()
        self._running = Event()

    def start(self):
        self._gthread = gevent.spawn(self._loop)
        self._running.wait(timeout=0.1)
        return self

    def stop(self, timeout=None):
        self._stopped.set()
        self._gthread.join(timeout)

    def _update(self):
        self._cache = self.client.query_formation(self.form_name,
                                                  self.factory)

    def _loop(self):
        while not self._stopped.isSet():
            self._update()
            self._running.set()
            self._stopped.wait(self.interval)

    def query(self):
        """Return all instances and their names."""
        return dict(self._cache)
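A usage sketch for _FormationCache: start() spawns the refresh loop and briefly waits for the first update, query() returns a copy of the cached data, and stop() shuts the loop down (the client and factory below are illustrative placeholders):

# Usage sketch for _FormationCache (client and factory are illustrative placeholders).
cache = _FormationCache(client, form_name='web', factory=dict, interval=5)
cache.start()              # spawns _loop and waits up to 0.1s for the first refresh
instances = cache.query()  # copy of the cached instance data
cache.stop(timeout=5)      # sets _stopped and joins the greenlet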
Example #16
class _Registration(object):
    """A service registration."""

    def __init__(self, client, form_name, instance_name, data,
                 interval=3):
        self.stopped = Event()
        self.client = client
        self.form_name = form_name
        self.instance_name = instance_name
        self.data = data
        self.interval = interval
        self.gthread = None

    def _loop(self):
        uri = '/%s/%s' % (self.form_name, self.instance_name)
        while not self.stopped.isSet():
            response = self.client._request('PUT', uri,
                                            data=json.dumps(self.data))
            self.stopped.wait(self.interval)

    def start(self):
        self.gthread = gevent.spawn(self._loop)
        return self

    def stop(self, timeout=None):
        self.stopped.set()
        self.gthread.join(timeout)
Example #17
    def test_semaphore(self):
        edge = APIEdge(MockApp(), self.get_settings())
        api = edge.app.api
        edge.max_concurrent_calls = 1

        in_first_method = Event()
        finish_first_method = Event()
        def first_method():
            in_first_method.set()
            finish_first_method.wait()
        api.first_method = first_method

        in_second_method = Event()
        def second_method():
            in_second_method.set()
        api.second_method = second_method

        gevent.spawn(edge.execute, Call("first_method"))
        in_first_method.wait()

        gevent.spawn(edge.execute, Call("second_method"))
        gevent.sleep(0)

        assert_logged("too many concurrent callers")
        assert not in_second_method.is_set()

        finish_first_method.set()
        in_second_method.wait()
        self.assert_edge_clean(edge)
Example #18
class Pinger(Greenlet):
    """ Very simple test 'app'
    """
    def __init__(self, id):
        super(Pinger,self).__init__()
        self.event = Event()
        self.conn = None
        self.id = id

    def _run(self):
        logger.debug("Pinger starting")
        self.conn = connection.AMQPConnection(self)
        self.conn.connect()
        #self.conn.connection.join()
        self.event.wait()
        logger.debug("Pinger exiting")
        self.amqp.close()
        self.conn.close()

    def on_connect(self, connection):
        self.amqp = PingerAMQPManager(connection, self, self.id)

    def handle_message(self, message):
        if message.routing_key.endswith('pinger.exit'):
            #self.conn.connection.close()
            self.event.set()
Example #19
class Console(BaseService):

    """A service starting an interactive ipython session when receiving the
    SIGTSTP signal (e.g. via the keyboard shortcut CTRL-Z).
    """

    name = 'console'

    def __init__(self, app):
        super(Console, self).__init__(app)
        self.interrupt = Event()
        gevent.signal(signal.SIGTSTP, self.interrupt.set)
        self.console_locals = []

    def start(self):
        super(Console, self).start()
        self.console_locals = {}
        self.console_locals.update(self.app.services)
        self.console_locals['app'] = self.app

    def _run(self):
        while True:
            self.interrupt.wait()
            IPython.start_ipython(argv=['--gui', 'gevent'], user_ns=self.console_locals)
            self.interrupt.clear()
Example #20
class Chat(object):
    def __init__(self):
        self.new_msg_event = Event()

    def write_message(self, request):
        if not request.user.is_authenticated() or request.method != 'POST':
            return HttpResponse(status=404)
        form = MessageForm(request.POST)
        output = dict(success=False)
        if form.is_valid():
            form.save(request.user)
            output['success'] = True
        else:
            output['errors'] = form.get_errors()
        self.new_msg_event.set()
        self.new_msg_event.clear()
        return HttpResponse(json.dumps(output))

    def get_messages(self, request):
        if not request.user.is_authenticated():
            return HttpResponse(status=404)
        pk = int(request.GET.get('pk', 1))
        messages = [{'created_at': DateFormat(el.created_at).format('H:i:s'),
                    'username': el.username, 'pk': el.pk,
                    'msg': el.msg} for el in Message.objects.filter(pk__gt=int(pk))
                                            .order_by('-created_at')[:100]]
        if not messages:
            self.new_msg_event.wait()
        return HttpResponse(json.dumps(messages[::-1]))
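write_message/get_messages above rely on the set-then-clear broadcast idiom: set() wakes every greenlet currently blocked in wait(), and the immediate clear() re-arms the event for the next message. A minimal sketch of that idiom in isolation:

# Minimal sketch of the set-then-clear broadcast idiom used above.
import gevent
from gevent.event import Event

new_msg_event = Event()

def long_poll(client_id):
    new_msg_event.wait()     # block until the next broadcast
    print 'client %s: new messages available' % client_id

waiters = [gevent.spawn(long_poll, i) for i in range(3)]
gevent.sleep(0)              # let all waiters block on the event

new_msg_event.set()          # wake every currently blocked waiter...
new_msg_event.clear()        # ...and immediately re-arm for the next round
gevent.joinall(waiters)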
Example #21
class Chat(object):
    def __init__(self):
        # at some point, may want to implement a buffer for messages
        # self.buffer = []
        self.msg_event = Event()

    def index(self, request):
        form = models.MsgForm()
        msg_list = models.Msg.objects.all()
        return render(request, 'index.html', {
            'form': form,
            'msg_list': msg_list,
        })

    def send(self, request):
        if request.method == 'POST':
            form = models.MsgForm(request.POST)
            if form.is_valid():
                form.save()
                # tell everyone who's waiting on msg_event that a msg was just
                # posted
                self.msg_event.set()
                self.msg_event.clear()
                return HttpResponse(json.dumps(True), mimetype='application/json')
        return HttpResponse(json.dumps(False), mimetype='application/json')

    def update(self, request):
        check_time = datetime.now()
        # wait for next msg post
        self.msg_event.wait()
        msg_list = models.Msg.objects.filter(time_stamp__gte=check_time)
        return HttpResponse(serializers.serialize('xml', msg_list), mimetype='text/xml')
Example #22
class StdioPipedWebSocketClient(WebSocketClient):

    def __init__(self, scheme, host, port, path, opts):
        url = "{0}://{1}:{2}{3}".format(scheme, host, port, path)
        WebSocketClient.__init__(self, url)

        self.path = path
        self.shutdown_cond = Event()
        self.opts = opts
        self.iohelper = common.StdioPipedWebSocketHelper(self.shutdown_cond, opts)

    def received_message(self, m):
        #TODO here we can retrieve the msg
        self.iohelper.received_message(self, m)

    def opened(self):
        if self.opts.verbosity >= 1:
            peername, peerport = self.sock.getpeername()
            print >> sys.stderr, "[%s] %d open for path '%s'" % (peername, peerport, self.path)
        self.iohelper.opened(self)

    def closed(self, code, reason):
        self.shutdown_cond.set()

    def connect_and_wait(self):
        self.connect()
        self.shutdown_cond.wait()
Example #23
class ServerRack(object):

    def __init__(self, servers):
        self.servers = servers
        self.ev = Event()

    def start(self):
        started = []
        try:
            for server in self.servers[:]:
                server.start()
                started.append(server)
                name = getattr(server, 'name', None) or server.__class__.__name__ or 'Server'
        except:
            self.stop(started)
            raise
        
    def serve_forever(self):
        self.start()
        self.ev.wait() 

    def stop(self, servers=None):
        if servers is None:
            servers = self.servers[:]
        for server in servers:
            try:
                server.stop()
            except:
                if hasattr(server, 'loop'): #gevent >= 1.0
                    server.loop.handle_error(server.stop, *sys.exc_info())
                else: # gevent <= 0.13
                    import traceback
                    traceback.print_exc()
        self.ev.set()
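A usage sketch for ServerRack: group several gevent servers, start them together, and block until stop() sets the internal event (the ports and handlers below are illustrative):

# Usage sketch for ServerRack (ports and handlers are illustrative).
from gevent.pywsgi import WSGIServer
from gevent.server import StreamServer

def wsgi_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['hello\n']

def echo(sock, addr):
    sock.sendall(sock.recv(1024))

rack = ServerRack([
    WSGIServer(('0.0.0.0', 8080), wsgi_app),
    StreamServer(('0.0.0.0', 9000), echo),
])
rack.serve_forever()   # starts both servers, then waits on rack.ev until stop() sets it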
Example #24
class GeventCursor(net.Cursor):
    def __init__(self, *args, **kwargs):
        super(GeventCursor, self).__init__(*args, **kwargs)
        self.new_response = Event()

    def __iter__(self):
        return self

    def __next__(self):
        return self._get_next(None)

    def _empty_error(self):
        return GeventCursorEmpty()

    def _extend(self, res):
        super(GeventCursor, self)._extend(res)
        self.new_response.set()
        self.new_response.clear()

    def _get_next(self, timeout):
        with gevent.Timeout(timeout, RqlTimeoutError()) as timeout:
            self._maybe_fetch_batch()
            while len(self.items) == 0:
                if self.error is not None:
                    raise self.error
                self.new_response.wait()
            return self.items.popleft()
Example #25
    def evt_user_input(cls, arg):
        trans, ilet = arg
        evt = Event()
        ilet.event = evt
        process_msg(('evt_user_input', arg))
        evt.wait()
        return ilet
Example #26
def run(ctx, dev, nodial, fake):
    """Start the client ( --dev to stop on error)"""
    config = ctx.obj['config']
    if nodial:
        # config['deactivated_services'].append(PeerManager.name)
        # config['deactivated_services'].append(NodeDiscovery.name)
        config['discovery']['bootstrap_nodes'] = []
        config['discovery']['listen_port'] = 29873
        config['p2p']['listen_port'] = 29873
        config['p2p']['min_peers'] = 0

    if fake:
        from ethereum import blocks
        blocks.GENESIS_DIFFICULTY = 1024
        blocks.BLOCK_DIFF_FACTOR = 16
        blocks.MIN_GAS_LIMIT = blocks.GENESIS_GAS_LIMIT / 2
        # workaround for genesis.json hack
        blocks.GENESIS_JSON["difficulty"] = blocks.int_to_hex(blocks.GENESIS_DIFFICULTY)

    # create app
    app = EthApp(config)

    # development mode
    if dev:
        gevent.get_hub().SYSTEM_ERROR = BaseException
        try:
            config['client_version'] += '/' + os.getlogin()
        except:
            log.warn("can't get and add login name to client_version")
            pass

    # dump config
    konfig.dump_config(config)

    # register services
    for service in services:
        assert issubclass(service, BaseService)
        if service.name not in app.config['deactivated_services']:
            assert service.name not in app.services
            service.register_with_app(app)
            assert hasattr(app.services, service.name)

    unlock_accounts(ctx.obj['unlock'], app.services.accounts, password=ctx.obj['password'])

    # start app
    log.info('starting')
    app.start()

    if config['post_app_start_callback'] is not None:
        config['post_app_start_callback'](app)

    # wait for interrupt
    evt = Event()
    gevent.signal(signal.SIGQUIT, evt.set)
    gevent.signal(signal.SIGTERM, evt.set)
    gevent.signal(signal.SIGINT, evt.set)
    evt.wait()

    # finally stop
    app.stop()
Example #27
class WSClientTransport(WebSocketClient):
    APP_FACTORY = None

    def __init__(self, url):
        self._close_event = Event()
        # patch socket.sendall to protect it with lock,
        # in order to prevent sending data from multiple greenlets concurrently
        WebSocketClient.__init__(self, url)
        self._app = None
        self._lock = RLock()
        _sendall = self.sock.sendall

        def sendall(data):
            self._lock.acquire()
            try:
                _sendall(data)
            except:
                raise
            finally:
                self._lock.release()
        self.sock.sendall = sendall

    def connect(self):
        super(WSClientTransport, self).connect()
        self._app = self.APP_FACTORY(self)
        log.info("Connected to websocket server {0}".format(self.url))

    def closed(self, code, reason=None):
        app, self._app = self._app, None
        if app:
            app.on_close()
        self._close_event.set()

    def ponged(self, pong):
        pass

    def received_message(self, message):
        log.debug("Received message {0}".format(message))
        if self._app:
            self._app.on_received_packet(STRING(message))
        else:
            log.warning('Websocket client app already closed')

    def send_packet(self, data):
        log.debug("Sending message {0}".format(data))
        self.send(data)

    def force_shutdown(self):
        # called by the upper layer, and no callback will be possible when closed
        self._app = None
        self.close()
        self._close_event.set()
        log.info('Websocket client closed')

    def wait_close(self):
        self._close_event.wait()

    def app(self):
        return self._app
Example #28
class Master():
    '''
    Code run by the master process. It starts a separate coroutine listening on
    a port so the worker processes can connect and communicate, launches one
    subprocess per CPU via subprocess.Popen, and registers a SIGTERM handler so
    that killing the master tells the workers to exit. The main coroutine then
    waits for the stop event and exits.
    '''
    def __init__(self, address):
        self.address = address
        self.workers = []
        self.stop_event = Event()

        gevent.spawn(self.communicate)
        gevent.sleep(0)  # give the communicate coroutine a chance to run, otherwise the subprocesses start first

        self.process = [subprocess.Popen(('python', sys.argv[0], 'worker')) for i in xrange(12)]  # start multiprocessing.cpu_count() - 1 worker subprocesses

        gevent.signal(signal.SIGTERM, self.stop)  # intercept the kill signal

        gevent.spawn(self.start)  # distribute tasks

        self.stop_event.wait() 

    def communicate(self):
        print 'master(%s):started' % os.getpid()
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind(self.address)
        server.listen(1024)
        while True:
            worker, addr = server.accept()
            print 'master(%s):new worker' % os.getpid()
            self.workers.append(worker)

    def stop(self):
        print 'master stop'
        for worker in self.workers:
            worker.close()
        for p in self.process:
            p.wait()
        self.stop_event.set()

    def start(self):
        while not self.workers:
            gevent.sleep(1)
            continue

        with open(url_file, 'rb') as fp:
            line_count = 0
            t = time.time()
            for line in fp:
                url = line.split()[0]
                worker = random.choice(self.workers)
                worker.send(url + '\n')

                line_count += 1
                if line_count % 1000 == 0:
                    print 'process %s line used %s seconds' % (line_count, time.time() - t)
                    gevent.sleep(10)
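Master launches its children with subprocess.Popen(('python', sys.argv[0], 'worker')), so the script is expected to dispatch on its own argv; a minimal sketch of that entry point (the address is illustrative):

# Entry-point sketch tying Master and Worker together (the address is illustrative).
if __name__ == '__main__':
    address = ('127.0.0.1', 8200)
    if len(sys.argv) > 1 and sys.argv[1] == 'worker':
        Worker(address)   # child process: connect back to the master and wait for tasks
    else:
        Master(address)   # parent process: listen, spawn workers, distribute URLs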
Example #29
class Session:
    def __init__(self,handler,tg,chatid,user,name):
        self.handler=handler
        self.tg = tg
        self.name = name
        self.user=user
        self.chatid=chatid
        self.event=None
        self.returnmsg=None

    def _activate(self):
        self.handler.activeSessions[self.user]=self

    def _processmarkup(self,markup):
        if markup is not None:
            markup2={}
            markup2["resize_keyboard"]=True
            markup2["one_time_keyboard"]=True 
            markup2["keyboard"]=markup
            return j.data.serializer.json.dumps(markup2)
        return markup

    def getArgument(self,key,descr="",global_=False,markup=None,presetvalue=None):
        if global_:
            hkey="config"
        else:
            hkey=self.name
        res=self.handler.redisconfig.hget(hkey,key)
        if res is None:
            # if descr=="":
            #     session.send_message("Cannot find global variable: '%s' please define by using '%s=yourinput'."%(key,key))
            # else:
            if presetvalue is not None:
                res=presetvalue
            elif descr=="":
                res=self.send_message("Cannot find variable: '%s', please specify"%key,True,markup=markup)
            else:
                # self.send_message("Cannot find global variable: '%s', please specify"%key)
                res=self.send_message(descr,True,markup=markup)
            self.handler.redisconfig.hset(hkey,key,res)
        return res

    def send_message(self,msg,feedback=False,markup=None):
        # print "spawn:%s"%msg
        self.tg.send_message(self.chatid,msg, reply_to_message_id="",reply_markup=self._processmarkup(markup))
        if feedback:
            self.start_communication()
            self.event=Event()   
            self.event.wait()     
            return self.returnmsg.text     

    def start_communication(self):
        self._activate()
        self.handler.activeCommunications[self.user]=self

    def stop_communication(self):
        if self.user in self.handler.activeCommunications:
            self.handler.activeCommunications.pop(self.user)
Example #30
class AvailManager(object):
    SIM_RUNNING = False
    sim_thread = None

    def __init__(self):
        self.updates = []
        self.event = Event()


    def run_sim(self, delay, size):
        roomids = Room.objects.filter(avail=True).values_list('id',flat=True)
        #print 'Room ids: %s ' % roomids
        ids = []
        for i in range(0, len(roomids)):
            ids.append(roomids[i])
            random.shuffle(ids)
        for i in range(0, len(ids)):
            roomg = Room.objects.filter(id=ids[i])
            updateavail(Room.objects.filter(id=ids[i]))
            room = roomg[0]
            self.updates.append(RoomUpdate(room))
            self.event.set()
            self.event.clear()
            if i % size == 0:
                sleep(delay)
        self.SIM_RUNNING = False

    def start_sim(self, delay, size=1):
        if self.SIM_RUNNING:
            kill(self.sim_thread)
        self.updates = []
        Room.objects.all().update(avail=True)
        self.sim_thread = spawn(self.run_sim, delay=delay, size=size)
        self.SIM_RUNNING = True
        return HttpResponse('Started Simulation with delay %d' % delay)

    def stop_sim(self):
        if not self.SIM_RUNNING:
            return HttpResponse('No current simulation')
        kill(self.sim_thread)
        self.SIM_RUNNING = False
        return HttpResponse('Stopped simulation')
    
    def check_avail(self, timestamp):
        if len(self.updates) == 0 or timestamp > self.updates[0].timestamp:
            self.event.wait()
        room_ids = []
        i = len(self.updates) - 1
        while i >= 0:
            i = i - 1
            update = self.updates[i]
            if timestamp <= update.timestamp:
                room_ids.append(update.room_id)
            else:
                break

        return {'timestamp':int(time.time()),
                'rooms':room_ids}
Example #31
class Console(BaseService):
    """ A service starting an interactive ipython session when receiving the
    SIGTSTP signal (e.g. via the keyboard shortcut CTRL-Z).
    """
    def __init__(self, app):
        super().__init__(app)
        self.interrupt = Event()
        self.console_locals = {}
        if app.start_console:
            self.start()
            self.interrupt.set()
        else:
            SigINTHandler(self.interrupt)

    def start(self):
        # start console service
        super().start()

        class Raiden:
            def __init__(self, app):
                self.app = app

        self.console_locals = dict(
            _raiden=Raiden(self.app),
            raiden=self.app.raiden,
            chain=self.app.raiden.chain,
            discovery=self.app.discovery,
            tools=ConsoleTools(
                self.app.raiden,
                self.app.discovery,
                self.app.config['settle_timeout'],
                self.app.config['reveal_timeout'],
            ),
            denoms=denoms,
            true=True,
            false=False,
            usage=print_usage,
        )

    def _run(self):  # pylint: disable=method-hidden
        self.interrupt.wait()
        print('\n' * 2)
        print('Entering Console' + OKGREEN)
        print('Tip:' + OKBLUE)
        print_usage()

        # Remove handlers that log to stderr
        root = getLogger()
        for handler in root.handlers[:]:
            if isinstance(handler,
                          StreamHandler) and handler.stream == sys.stderr:
                root.removeHandler(handler)

        stream = io.StringIO()
        handler = StreamHandler(stream=stream)
        handler.formatter = Formatter(u'%(levelname)s:%(name)s %(message)s')
        root.addHandler(handler)

        def lastlog(n=10, prefix=None, level=None):
            """ Print the last `n` log lines to stdout.
            Use `prefix='p2p'` to filter for a specific logger.
            Use `level=INFO` to filter for a specific level.
            Level- and prefix-filtering are applied before tailing the log.
            """
            lines = (stream.getvalue().strip().split('\n') or [])
            if prefix:
                lines = [
                    line for line in lines
                    if line.split(':')[1].startswith(prefix)
                ]
            if level:
                lines = [line for line in lines if line.split(':')[0] == level]
            for line in lines[-n:]:
                print(line)

        self.console_locals['lastlog'] = lastlog

        err = io.StringIO()
        sys.stderr = err

        def lasterr(n=1):
            """ Print the last `n` entries of stderr to stdout. """
            for line in (err.getvalue().strip().split('\n') or [])[-n:]:
                print(line)

        self.console_locals['lasterr'] = lasterr

        IPython.start_ipython(argv=['--gui', 'gevent'],
                              user_ns=self.console_locals)
        self.interrupt.clear()

        sys.exit(0)
Example #32
class MDSThrasher(Thrasher, Greenlet):
    """
    MDSThrasher::

    The MDSThrasher thrashes MDSs during execution of other tasks (workunits, etc).

    The config is optional.  Many of the config parameters are a maximum value
    to use when selecting a random value from a range.  To always use the maximum
    value, set no_random to true.  The config is a dict containing some or all of:

    max_thrash: [default: 1] the maximum number of active MDSs per FS that will be thrashed at
      any given time.

    max_thrash_delay: [default: 30] maximum number of seconds to delay before
      thrashing again.

    max_replay_thrash_delay: [default: 4] maximum number of seconds to delay while in
      the replay state before thrashing.

    max_revive_delay: [default: 10] maximum number of seconds to delay before
      bringing back a thrashed MDS.

    randomize: [default: true] enables randomization and uses the max/min values

    seed: [no default] seed the random number generator

    thrash_in_replay: [default: 0.0] likelihood that the MDS will be thrashed
      during replay.  Value should be between 0.0 and 1.0.

    thrash_max_mds: [default: 0.05] likelihood that the max_mds of the mds
      cluster will be modified to a value [1, current) or (current, starting
      max_mds]. Value should be between 0.0 and 1.0.

    thrash_while_stopping: [default: false] thrash an MDS while there
      are MDS in up:stopping (because max_mds was changed and some
      MDS were deactivated).

    thrash_weights: allows specific MDSs to be thrashed more/less frequently.
      This option overrides anything specified by max_thrash.  This option is a
      dict containing mds.x: weight pairs.  For example, [mds.a: 0.7, mds.b:
      0.3, mds.c: 0.0].  Each weight is a value from 0.0 to 1.0.  Any MDSs not
      specified will be automatically given a weight of 0.0 (not thrashed).
      For a given MDS, by default the thrasher delays for up to
      max_thrash_delay, thrashes, waits for the MDS to recover, and iterates.
      If a non-zero weight is specified for an MDS, for each iteration the
      thrasher chooses whether to thrash during that iteration based on a
      random value [0-1] not exceeding the weight of that MDS.

    Examples::


      The following example sets the likelihood that mds.a will be thrashed
      to 80%, mds.b to 20%, and other MDSs will not be thrashed.  It also sets the
      likelihood that an MDS will be thrashed in replay to 40%.
      Thrash weights do not have to sum to 1.

      tasks:
      - ceph:
      - mds_thrash:
          thrash_weights:
            - mds.a: 0.8
            - mds.b: 0.2
          thrash_in_replay: 0.4
      - ceph-fuse:
      - workunit:
          clients:
            all: [suites/fsx.sh]

      The following example disables randomization, and uses the max delay values:

      tasks:
      - ceph:
      - mds_thrash:
          max_thrash_delay: 10
          max_revive_delay: 1
          max_replay_thrash_delay: 4

    """

    def __init__(self, ctx, manager, config, fs, max_mds):
        super(MDSThrasher, self).__init__()

        self.config = config
        self.ctx = ctx
        self.logger = log.getChild('fs.[{f}]'.format(f = fs.name))
        self.fs = fs
        self.manager = manager
        self.max_mds = max_mds
        self.name = 'thrasher.fs.[{f}]'.format(f = fs.name)
        self.stopping = Event()

        self.randomize = bool(self.config.get('randomize', True))
        self.thrash_max_mds = float(self.config.get('thrash_max_mds', 0.05))
        self.max_thrash = int(self.config.get('max_thrash', 1))
        self.max_thrash_delay = float(self.config.get('thrash_delay', 120.0))
        self.thrash_in_replay = float(self.config.get('thrash_in_replay', False))
        assert self.thrash_in_replay >= 0.0 and self.thrash_in_replay <= 1.0, 'thrash_in_replay ({v}) must be between [0.0, 1.0]'.format(
            v=self.thrash_in_replay)
        self.max_replay_thrash_delay = float(self.config.get('max_replay_thrash_delay', 4.0))
        self.max_revive_delay = float(self.config.get('max_revive_delay', 10.0))

    def _run(self):
        try:
            self.do_thrash()
        except Exception as e:
            # Log exceptions here so we get the full backtrace (gevent loses them).
            # Also allow successful completion as gevent exception handling is a broken mess:
            #
            # 2017-02-03T14:34:01.259 CRITICAL:root:  File "gevent.libev.corecext.pyx", line 367, in gevent.libev.corecext.loop.handle_error (src/gevent/libev/gevent.corecext.c:5051)
            #   File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/gevent/hub.py", line 558, in handle_error
            #     self.print_exception(context, type, value, tb)
            #   File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/gevent/hub.py", line 605, in print_exception
            #     traceback.print_exception(type, value, tb, file=errstream)
            #   File "/usr/lib/python2.7/traceback.py", line 124, in print_exception
            #     _print(file, 'Traceback (most recent call last):')
            #   File "/usr/lib/python2.7/traceback.py", line 13, in _print
            #     file.write(str+terminator)
            # 2017-02-03T14:34:01.261 CRITICAL:root:IOError
            self.set_thrasher_exception(e)
            self.logger.exception("exception:")
            # allow successful completion so gevent doesn't see an exception...

    def log(self, x):
        """Write data to the logger assigned to MDSThrasher"""
        self.logger.info(x)

    def stop(self):
        self.stopping.set()

    def kill_mds(self, mds):
        if self.config.get('powercycle'):
            (remote,) = (self.ctx.cluster.only('mds.{m}'.format(m=mds)).
                         remotes.keys())
            self.log('kill_mds on mds.{m} doing powercycle of {s}'.
                     format(m=mds, s=remote.name))
            self._assert_ipmi(remote)
            remote.console.power_off()
        else:
            self.ctx.daemons.get_daemon('mds', mds).stop()

    @staticmethod
    def _assert_ipmi(remote):
        assert remote.console.has_ipmi_credentials, (
            "powercycling requested but RemoteConsole is not "
            "initialized.  Check ipmi config.")

    def revive_mds(self, mds):
        """
        Revive mds -- do an ipmpi powercycle (if indicated by the config)
        and then restart.
        """
        if self.config.get('powercycle'):
            (remote,) = (self.ctx.cluster.only('mds.{m}'.format(m=mds)).
                         remotes.keys())
            self.log('revive_mds on mds.{m} doing powercycle of {s}'.
                     format(m=mds, s=remote.name))
            self._assert_ipmi(remote)
            remote.console.power_on()
            self.manager.make_admin_daemon_dir(self.ctx, remote)
        args = []
        self.ctx.daemons.get_daemon('mds', mds).restart(*args)

    def wait_for_stable(self, rank = None, gid = None):
        self.log('waiting for mds cluster to stabilize...')
        for itercount in itertools.count():
            status = self.fs.status()
            max_mds = status.get_fsmap(self.fs.id)['mdsmap']['max_mds']
            ranks = list(status.get_ranks(self.fs.id))
            stopping = sum(1 for _ in ranks if "up:stopping" == _['state'])
            actives = sum(1 for _ in ranks
                          if "up:active" == _['state'] and "laggy_since" not in _)

            if not bool(self.config.get('thrash_while_stopping', False)) and stopping > 0:
                if itercount % 5 == 0:
                    self.log('cluster is considered unstable while MDS are in up:stopping (!thrash_while_stopping)')
            else:
                if rank is not None:
                    try:
                        info = status.get_rank(self.fs.id, rank)
                        if info['gid'] != gid and "up:active" == info['state']:
                            self.log('mds.{name} has gained rank={rank}, replacing gid={gid}'.format(name = info['name'], rank = rank, gid = gid))
                            return status
                    except:
                        pass # no rank present
                    if actives >= max_mds:
                        # no replacement can occur!
                        self.log("cluster has {actives} actives (max_mds is {max_mds}), no MDS can replace rank {rank}".format(
                            actives=actives, max_mds=max_mds, rank=rank))
                        return status
                else:
                    if actives == max_mds:
                        self.log('mds cluster has {count} alive and active, now stable!'.format(count = actives))
                        return status, None
            if itercount > 300/2: # 5 minutes
                raise RuntimeError('timeout waiting for cluster to stabilize')
            elif itercount % 5 == 0:
                self.log('mds map: {status}'.format(status=status))
            else:
                self.log('no change')
            sleep(2)

    def do_thrash(self):
        """
        Perform the random thrashing action
        """

        self.log('starting mds_do_thrash for fs {fs}'.format(fs = self.fs.name))
        stats = {
            "max_mds": 0,
            "deactivate": 0,
            "kill": 0,
        }

        while not self.stopping.is_set():
            delay = self.max_thrash_delay
            if self.randomize:
                delay = random.randrange(0.0, self.max_thrash_delay)

            if delay > 0.0:
                self.log('waiting for {delay} secs before thrashing'.format(delay=delay))
                self.stopping.wait(delay)
                if self.stopping.is_set():
                    continue

            status = self.fs.status()

            if random.random() <= self.thrash_max_mds:
                max_mds = status.get_fsmap(self.fs.id)['mdsmap']['max_mds']
                options = [i for i in range(1, self.max_mds + 1) if i != max_mds]
                if len(options) > 0:
                    new_max_mds = random.choice(options)
                    self.log('thrashing max_mds: %d -> %d' % (max_mds, new_max_mds))
                    self.fs.set_max_mds(new_max_mds)
                    stats['max_mds'] += 1
                    self.wait_for_stable()

            count = 0
            for info in status.get_ranks(self.fs.id):
                name = info['name']
                label = 'mds.' + name
                rank = info['rank']
                gid = info['gid']

                # if thrash_weights isn't specified and we've reached max_thrash,
                # we're done
                count = count + 1
                if 'thrash_weights' not in self.config and count > self.max_thrash:
                    break

                weight = 1.0
                if 'thrash_weights' in self.config:
                    weight = self.config['thrash_weights'].get(label, '0.0')
                skip = random.randrange(0.0, 1.0)
                if weight <= skip:
                    self.log('skipping thrash iteration with skip ({skip}) > weight ({weight})'.format(skip=skip, weight=weight))
                    continue

                self.log('kill {label} (rank={rank})'.format(label=label, rank=rank))
                self.kill_mds(name)
                stats['kill'] += 1

                # wait for mon to report killed mds as crashed
                last_laggy_since = None
                itercount = 0
                while True:
                    status = self.fs.status()
                    info = status.get_mds(name)
                    if not info:
                        break
                    if 'laggy_since' in info:
                        last_laggy_since = info['laggy_since']
                        break
                    if any([(f == name) for f in status.get_fsmap(self.fs.id)['mdsmap']['failed']]):
                        break
                    self.log(
                        'waiting till mds map indicates {label} is laggy/crashed, in failed state, or {label} is removed from mdsmap'.format(
                            label=label))
                    itercount = itercount + 1
                    if itercount > 10:
                        self.log('mds map: {status}'.format(status=status))
                    sleep(2)

                if last_laggy_since:
                    self.log(
                        '{label} reported laggy/crashed since: {since}'.format(label=label, since=last_laggy_since))
                else:
                    self.log('{label} down, removed from mdsmap'.format(label=label))

                # wait for a standby mds to takeover and become active
                status = self.wait_for_stable(rank, gid)

                # wait for a while before restarting old active to become new
                # standby
                delay = self.max_revive_delay
                if self.randomize:
                    delay = random.randrange(0.0, self.max_revive_delay)

                self.log('waiting for {delay} secs before reviving {label}'.format(
                    delay=delay, label=label))
                sleep(delay)

                self.log('reviving {label}'.format(label=label))
                self.revive_mds(name)

                for itercount in itertools.count():
                    if itercount > 300/2: # 5 minutes
                        raise RuntimeError('timeout waiting for MDS to revive')
                    status = self.fs.status()
                    info = status.get_mds(name)
                    if info and info['state'] in ('up:standby', 'up:standby-replay', 'up:active'):
                        self.log('{label} reported in {state} state'.format(label=label, state=info['state']))
                        break
                    self.log(
                        'waiting till mds map indicates {label} is in active, standby or standby-replay'.format(label=label))
                    sleep(2)

        for stat in stats:
            self.log("stat['{key}'] = {value}".format(key = stat, value = stats[stat]))
Example #33
class APNSService(BaseService):

    service_type = 'apns'

    def __init__(self, sandbox=True, **kwargs):
        self._send_queue = Queue()
        self._send_queue_cleared = Event()
        self._send_greenlet = None
        self.timeout = INITIAL_TIMEOUT
        self._feedback_queue = Queue()
        if "certfile" not in kwargs:
            raise ValueError(u"Must specify a PEM bundle.")
        if not os.path.exists(kwargs['certfile']):
            raise ValueError('PEM bundle file not found')
        self._sslargs = kwargs
        self._push_connection = None
        self._sandbox = sandbox
        self._error_queue = Queue()
        self._send_greenlet = None
        self._error_greenlet = None
        self._feedback_connection = None
        self._feedback_greenlet = None
        self.last_err = None

    def start(self):
        """Start the message sending loop."""
        if self._send_greenlet is None:
            self._send_greenlet = gevent.spawn(self.save_err, self._send_loop)

    def _send_loop(self):
        self._send_greenlet = gevent.getcurrent()
        try:
            logger.info("%s service started" % self.service_type)
            while True:
                message = self._send_queue.get()
                try:
                    self.send_notification(message)
                except Exception:
                    self.error_sending_notification(message)
                else:
                    self.timeout = INITIAL_TIMEOUT
                finally:
                    if self._send_queue.qsize() < 1 and \
                            not self._send_queue_cleared.is_set():
                        self._send_queue_cleared.set()
        except gevent.GreenletExit:
            pass
        finally:
            self._send_greenlet = None
        logger.info("%s service stopped" % self.service_type)

    def _check_send_connection(self):
        if self._push_connection is None:
            tcp_socket = socket.socket(
                socket.AF_INET, socket.SOCK_STREAM, 0)
            s = ssl.wrap_socket(tcp_socket, ssl_version=ssl.PROTOCOL_TLSv1,
                                **self._sslargs)
            addr = ["gateway.push.apple.com", 2195]
            if self._sandbox:
                addr[0] = "gateway.sandbox.push.apple.com"
            logger.debug('Connecting to %s' % addr[0])
            s.connect_ex(tuple(addr))
            self._push_connection = s
            self._error_greenlet = gevent.spawn(self.save_err,
                                                self._error_loop)

    def _check_feedback_connection(self):
        if self._feedback_connection is None:
            tcp_socket = socket.socket(
                socket.AF_INET, socket.SOCK_STREAM, 0)
            s = ssl.wrap_socket(tcp_socket, ssl_version=ssl.PROTOCOL_TLSv1,
                                **self._sslargs)
            addr = ["feedback.push.apple.com", 2196]
            if self._sandbox:
                addr[0] = "feedback.sandbox.push.apple.com"
            logger.debug('Connecting to %s' % addr[0])
            s.connect_ex(tuple(addr))

            self._feedback_connection = s

    def _error_loop(self):
        self._error_greenlet = gevent.getcurrent()
        try:
            while True:
                if self._push_connection is None:
                    break
                msg = self._push_connection.recv(1 + 1 + 4)
                if len(msg) < 6:
                    return
                data = struct.unpack("!bbI", msg)
                self._error_queue.put((data[1], data[2]))
        except gevent.GreenletExit:
            logger.exception('Error')
        finally:
            if self._push_connection is not None:
                self._push_connection.close()
                self._push_connection = None
            self._error_greenlet = None

    def _feedback_loop(self):
        self._feedback_greenlet = gevent.getcurrent()
        try:
            self._check_feedback_connection()
            while True:
                msg = self._feedback_connection.recv(4 + 2 + 32)
                if len(msg) < 38:
                    return
                data = struct.unpack("!IH32s", msg)
                token = binascii.b2a_hex(data[2]).decode('ascii')
                self._feedback_queue.put((data[0], token))
        except gevent.GreenletExit:
            logger.exception('Error')
        finally:
            if self._feedback_connection:
                self._feedback_connection.close()
                self._feedback_connection = None
            self._feedback_greenlet = None

    def queue_notification(self, obj):
        """Send a push notification"""
        if not isinstance(obj, APNSNotification):
            raise ValueError(u"You can only send APNSNotification objects.")
        self._send_queue.put(obj)

    def get_error(self, block=True, timeout=None):
        """
        Get the next error message.

        Each error message is a 2-tuple of (status, identifier)."""
        return self._error_queue.get(block=block, timeout=timeout)

    def get_feedback(self, block=True, timeout=None):
        """
        Get the next feedback message.

        Each feedback message is a 2-tuple of (timestamp, device_token)."""
        if self._feedback_greenlet is None:
            self._feedback_greenlet = gevent.spawn(self.save_err,
                                                   self._feedback_loop)
        return self._feedback_queue.get(
            block=block,
            timeout=timeout)

    def stop(self, timeout=10.0):
        """
        Send all pending messages, close connection.
        Returns True if no messages are left to send, False if dirty.

        - timeout: seconds to wait for sending remaining messages. disconnect
          immediately if None.
        """
        if (self._send_greenlet is not None) and \
                (self._send_queue.qsize() > 0):
            self.wait_send(timeout=timeout)

        if self._send_greenlet is not None:
            gevent.kill(self._send_greenlet)
            self._send_greenlet = None

        if self._error_greenlet is not None:
            gevent.kill(self._error_greenlet)
            self._error_greenlet = None
        if self._feedback_greenlet is not None:
            gevent.kill(self._feedback_greenlet)
            self._feedback_greenlet = None

        return self._send_queue.qsize() < 1

    def wait_send(self, timeout=None):
        self._send_queue_cleared.clear()
        return self._send_queue_cleared.wait(timeout=timeout)

    def error_sending_notification(self, notification):
        if self._push_connection is not None:
            self._push_connection.close()
            self._push_connection = None
        logger.exception("Error while pushing")
        self._send_queue.put(notification)
        gevent.sleep(self.timeout)
        # approaching Fibonacci series
        timeout = int(round(float(self.timeout) * 1.6))
        self.timeout = min(timeout, MAX_TIMEOUT)

    def send_notification(self, notification):
        self._check_send_connection()
        logger.debug('Sending APNS notification')
        self._push_connection.send(notification.pack())

    def save_err(self, func, *args, **kwargs):
        try:
            func(*args, **kwargs)
        except Exception as e:
            self.last_err = e
            raise

    def get_last_error(self):
        return self.last_err
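
# Illustrative sketch, not part of the example above: error_sending_notification()
# grows the retry delay by a factor of ~1.6 per failure (roughly the golden ratio,
# hence the "approaching Fibonacci series" comment) and caps it at MAX_TIMEOUT.
# The starting delay and the cap used below are assumed values, not taken from
# the example itself.
def _retry_delays(start=1, max_timeout=600, factor=1.6):
    """Yield the successive retry delays produced by the timeout update rule."""
    delay = start
    while True:
        yield delay
        delay = min(int(round(float(delay) * factor)), max_timeout)

# For start=1 the first delays are 1, 2, 3, 5, 8, 13, 21, 34, ... -- close to the
# Fibonacci sequence, which is what the comment in the example refers to.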
Beispiel #34
0
class Group(object):
    """Maintain a group of greenlets that are still running.

    Links to each item and removes it upon notification.
    """
    greenlet_class = Greenlet

    def __init__(self, *args):
        assert len(args) <= 1, args
        self.greenlets = set(*args)
        if args:
            for greenlet in args[0]:
                greenlet.rawlink(self._discard)
        # each item we kill we place in dying, to avoid killing the same greenlet twice
        self.dying = set()
        self._empty_event = Event()
        self._empty_event.set()

    def __repr__(self):
        return '<%s at 0x%x %s>' % (self.__class__.__name__, id(self),
                                    self.greenlets)

    def __len__(self):
        return len(self.greenlets)

    def __contains__(self, item):
        return item in self.greenlets

    def __iter__(self):
        return iter(self.greenlets)

    def add(self, greenlet):
        try:
            rawlink = greenlet.rawlink
        except AttributeError:
            pass  # non-Greenlet greenlet, like MAIN
        else:
            rawlink(self._discard)
        self.greenlets.add(greenlet)
        self._empty_event.clear()

    def _discard(self, greenlet):
        self.greenlets.discard(greenlet)
        self.dying.discard(greenlet)
        if not self.greenlets:
            self._empty_event.set()

    def discard(self, greenlet):
        self._discard(greenlet)
        try:
            unlink = greenlet.unlink
        except AttributeError:
            pass  # non-Greenlet greenlet, like MAIN
        else:
            unlink(self._discard)

    def start(self, greenlet):
        self.add(greenlet)
        greenlet.start()

    def spawn(self, *args, **kwargs):
        greenlet = self.greenlet_class(*args, **kwargs)
        self.start(greenlet)
        return greenlet


#     def close(self):
#         """Prevents any more tasks from being submitted to the pool"""
#         self.add = RaiseException("This %s has been closed" % self.__class__.__name__)

    def join(self, timeout=None, raise_error=False):
        if raise_error:
            greenlets = self.greenlets.copy()
            self._empty_event.wait(timeout=timeout)
            for greenlet in greenlets:
                if greenlet.exception is not None:
                    raise greenlet.exception
        else:
            self._empty_event.wait(timeout=timeout)

    def kill(self, exception=GreenletExit, block=True, timeout=None):
        timer = Timeout.start_new(timeout)
        try:
            try:
                while self.greenlets:
                    for greenlet in list(self.greenlets):
                        if greenlet not in self.dying:
                            try:
                                kill = greenlet.kill
                            except AttributeError:
                                _kill(greenlet, exception)
                            else:
                                kill(exception, block=False)
                            self.dying.add(greenlet)
                    if not block:
                        break
                    joinall(self.greenlets)
            except Timeout:
                ex = sys.exc_info()[1]
                if ex is not timer:
                    raise
        finally:
            timer.cancel()

    def killone(self,
                greenlet,
                exception=GreenletExit,
                block=True,
                timeout=None):
        if greenlet not in self.dying and greenlet in self.greenlets:
            greenlet.kill(exception, block=False)
            self.dying.add(greenlet)
            if block:
                greenlet.join(timeout)

    def apply(self, func, args=None, kwds=None):
        """Equivalent of the apply() builtin function. It blocks till the result is ready."""
        if args is None:
            args = ()
        if kwds is None:
            kwds = {}
        if getcurrent() in self:
            return func(*args, **kwds)
        else:
            return self.spawn(func, *args, **kwds).get()

    def apply_cb(self, func, args=None, kwds=None, callback=None):
        result = self.apply(func, args, kwds)
        if callback is not None:
            Greenlet.spawn(callback, result)
        return result

    def apply_async(self, func, args=None, kwds=None, callback=None):
        """A variant of the apply() method which returns a Greenlet object.

        If callback is specified then it should be a callable which accepts a single argument. When the result becomes ready,
        callback is applied to it (unless the call failed)."""
        if args is None:
            args = ()
        if kwds is None:
            kwds = {}
        if self.full():
            # cannot call spawn() directly because it will block
            return Greenlet.spawn(self.apply_cb, func, args, kwds, callback)
        else:
            greenlet = self.spawn(func, *args, **kwds)
            if callback is not None:
                greenlet.link(pass_value(callback))
            return greenlet

    def map(self, func, iterable):
        return list(self.imap(func, iterable))

    def map_cb(self, func, iterable, callback=None):
        result = self.map(func, iterable)
        if callback is not None:
            callback(result)
        return result

    def map_async(self, func, iterable, callback=None):
        """
        A variant of the map() method which returns a Greenlet object.

        If callback is specified then it should be a callable which accepts a
        single argument.
        """
        return Greenlet.spawn(self.map_cb, func, iterable, callback)

    def imap(self, func, iterable):
        """An equivalent of itertools.imap()"""
        return IMap.spawn(func, iterable, spawn=self.spawn)

    def imap_unordered(self, func, iterable):
        """The same as imap() except that the ordering of the results from the
        returned iterator should be considered in arbitrary order."""
        return IMapUnordered.spawn(func, iterable, spawn=self.spawn)

    def full(self):
        return False

    def wait_available(self):
        pass
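
# Illustrative sketch, not part of the example above: minimal usage of a Group
# like the one defined here. It assumes gevent is installed and uses
# gevent.pool.Group, which exposes the same spawn()/join() interface backed by
# the internal "empty" Event shown above.
import gevent
from gevent.pool import Group as GeventGroup

def _square(n):
    gevent.sleep(0.01)          # simulate a little I/O
    return n * n

_group = GeventGroup()
_jobs = [_group.spawn(_square, i) for i in range(5)]
_group.join()                   # blocks until every greenlet has finished
print([job.value for job in _jobs])   # -> [0, 1, 4, 9, 16]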
Beispiel #35
0
    def test_replay_with_parameters(self):
        #--------------------------------------------------------------------------------
        # Create the configurations and the dataset
        #--------------------------------------------------------------------------------
        # Get a precompiled parameter dictionary with basic ctd fields
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        context_ids = self.dataset_management.read_parameter_contexts(
            pdict_id, id_only=True)

        # Add a field that supports binary data input.
        bin_context = ParameterContext('binary', param_type=ArrayType())
        context_ids.append(
            self.dataset_management.create_parameter_context(
                'binary', bin_context.dump()))
        # Add another field that supports dictionary elements.
        rec_context = ParameterContext('records', param_type=RecordType())
        context_ids.append(
            self.dataset_management.create_parameter_context(
                'records', rec_context.dump()))

        pdict_id = self.dataset_management.create_parameter_dictionary(
            'replay_pdict',
            parameter_context_ids=context_ids,
            temporal_context='time')

        stream_def_id = self.pubsub_management.create_stream_definition(
            'replay_stream', parameter_dictionary_id=pdict_id)

        stream_id, route = self.pubsub_management.create_stream(
            'replay_with_params',
            exchange_point=self.exchange_point_name,
            stream_definition_id=stream_def_id)
        config_id = self.get_ingestion_config()
        dataset_id = self.create_dataset(pdict_id)
        self.ingestion_management.persist_data_stream(
            stream_id=stream_id,
            ingestion_configuration_id=config_id,
            dataset_id=dataset_id)

        dataset_modified = Event()

        def cb(*args, **kwargs):
            dataset_modified.set()

        es = EventSubscriber(event_type=OT.DatasetModified,
                             callback=cb,
                             origin=dataset_id)
        es.start()

        self.addCleanup(es.stop)

        self.publish_fake_data(stream_id, route)

        self.assertTrue(dataset_modified.wait(30))

        query = {
            'start_time': 0 - 2208988800,
            'end_time': 20 - 2208988800,
            'stride_time': 2,
            'parameters': ['time', 'temp']
        }
        retrieved_data = self.data_retriever.retrieve(dataset_id=dataset_id,
                                                      query=query)

        rdt = RecordDictionaryTool.load_from_granule(retrieved_data)
        comp = np.arange(0, 20, 2) == rdt['time']
        self.assertTrue(comp.all(), '%s' % rdt.pretty_print())
        self.assertEquals(set(rdt.iterkeys()), set(['time', 'temp']))

        extents = self.dataset_management.dataset_extents(
            dataset_id=dataset_id, parameters=['time', 'temp'])
        self.assertTrue(extents['time'] >= 20)
        self.assertTrue(extents['temp'] >= 20)

        self.streams.append(stream_id)
        self.stop_ingestion(stream_id)
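
# Illustrative sketch, not part of the test above: the synchronisation pattern it
# relies on is an Event that a subscriber callback sets and the test greenlet
# waits on with a timeout. Everything below uses generic stand-in names rather
# than the ION EventSubscriber machinery.
import gevent
from gevent.event import Event

_done = Event()

def _on_dataset_modified(*args, **kwargs):
    _done.set()                       # fired from the subscriber callback

gevent.spawn_later(0.1, _on_dataset_modified)
assert _done.wait(5)                  # True once set; falsy if the timeout expires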
Beispiel #36
0
class Queue(Greenlet):
    """Manages the queue of |Envelope| objects waiting for delivery. This is
    not a standard FIFO queue, a message's place in the queue depends entirely
    on the timestamp of its next delivery attempt.

    :param store: Object implementing :class:`QueueStorage`.
    :param relay: |Relay| object used to attempt message deliveries. If this
                  is not given, no deliveries will be attempted on received
                  messages.
    :param backoff: Function that, given an |Envelope| and number of delivery
                    attempts, will return the number of seconds before the next
                    attempt. If it returns ``None``, the message will be
                    permanently failed. The default backoff function simply
                    returns ``None`` and messages are never retried.
    :param bounce_factory: Function that produces a |Bounce| object given the
                           same parameters as the |Bounce| constructor. If the
                           function returns ``None``, no bounce is delivered.
                           By default, a new |Bounce| is created in every case.
    :param bounce_queue: |Queue| object that will be used for delivering bounce
                         messages. The default is ``self``.
    :param store_pool: Number of simultaneous operations performable against
                       the ``store`` object. Default is unlimited.
    :param relay_pool: Number of simultaneous operations performable against
                       the ``relay`` object. Default is unlimited.

    """
    def __init__(self,
                 store,
                 relay=None,
                 backoff=None,
                 bounce_factory=None,
                 bounce_queue=None,
                 store_pool=None,
                 relay_pool=None):
        super(Queue, self).__init__()
        self.store = store
        self.relay = relay
        self.backoff = backoff or self._default_backoff
        self.bounce_factory = bounce_factory or Bounce
        self.bounce_queue = bounce_queue or self
        self.wake = Event()
        self.queued = []
        self.active_ids = set()
        self.queued_ids = set()
        self.queued_lock = Semaphore(1)
        self.queue_policies = []
        self._use_pool('store_pool', store_pool)
        self._use_pool('relay_pool', relay_pool)

    def add_policy(self, policy):
        """Adds a |QueuePolicy| to be executed before messages are persisted
        to storage.

        :param policy: |QueuePolicy| object to execute.

        """
        if isinstance(policy, QueuePolicy):
            self.queue_policies.append(policy)
        else:
            raise TypeError('Argument not a QueuePolicy.')

    @staticmethod
    def _default_backoff(envelope, attempts):
        pass

    def _run_policies(self, envelope):
        results = [envelope]

        def recurse(current, i):
            try:
                policy = self.queue_policies[i]
            except IndexError:
                return
            ret = policy.apply(current)
            if ret:
                results.remove(current)
                results.extend(ret)
                for env in ret:
                    recurse(env, i + 1)
            else:
                recurse(current, i + 1)

        recurse(envelope, 0)
        return results

    def _use_pool(self, attr, pool):
        if pool is None:
            pass
        elif isinstance(pool, Pool):
            setattr(self, attr, pool)
        else:
            setattr(self, attr, Pool(pool))

    def _pool_run(self, which, func, *args, **kwargs):
        pool = getattr(self, which + '_pool', None)
        if pool:
            ret = pool.spawn(func, *args, **kwargs)
            return ret.get()
        else:
            return func(*args, **kwargs)

    def _pool_imap(self, which, func, *iterables):
        pool = getattr(self, which + '_pool', gevent)
        threads = imap(pool.spawn, repeat(func), *iterables)
        ret = []
        for thread in threads:
            thread.join()
            ret.append(thread.exception or thread.value)
        return ret

    def _pool_spawn(self, which, func, *args, **kwargs):
        pool = getattr(self, which + '_pool', gevent)
        return pool.spawn(func, *args, **kwargs)

    def _add_queued(self, entry):
        timestamp, id = entry
        if id not in self.queued_ids | self.active_ids:
            bisect.insort(self.queued, entry)
            self.queued_ids.add(id)
            self.wake.set()

    def enqueue(self, envelope):
        """Drops a new message in the queue for delivery. The first delivery
        attempt is made immediately (depending on relay pool availability).
        This method is not typically called directly, |Edge| objects use it
        when they receive new messages.

        :param envelope: |Envelope| object to enqueue.
        :returns: Zipped list of envelopes and their respective queue IDs (or
                  thrown :exc:`QueueError` objects).

        """
        now = time.time()
        envelopes = self._run_policies(envelope)
        ids = self._pool_imap('store', self.store.write, envelopes,
                              repeat(now))
        results = zip(envelopes, ids)
        for env, id in results:
            if not isinstance(id, BaseException):
                if self.relay:
                    self.active_ids.add(id)
                    self._pool_spawn('relay', self._attempt, id, env, 0)
            elif not isinstance(id, QueueError):
                raise id  # Re-raise exceptions that are not QueueError.
        return results

    def _load_all(self):
        for entry in self.store.load():
            self._add_queued(entry)

    def _remove(self, id):
        self._pool_spawn('store', self.store.remove, id)
        self.queued_ids.discard(id)
        self.active_ids.discard(id)

    def _bounce(self, envelope, reply):
        bounce = self.bounce_factory(envelope, reply)
        if bounce:
            return self.bounce_queue.enqueue(bounce)

    def _perm_fail(self, id, envelope, reply):
        if id is not None:
            self._remove(id)
        if envelope.sender:  # Can't bounce to null-sender.
            self._pool_spawn('bounce', self._bounce, envelope, reply)

    def _split_by_reply(self, envelope, replies):
        if isinstance(replies, Reply):
            return [(replies, envelope)]
        groups = []
        for i, rcpt in enumerate(envelope.recipients):
            for reply, group_env in groups:
                if replies[i] == reply:
                    group_env.recipients.append(rcpt)
                    break
            else:
                group_env = envelope.copy([rcpt])
                groups.append((replies[i], group_env))
        return groups

    def _retry_later(self, id, envelope, replies):
        attempts = self.store.increment_attempts(id)
        wait = self.backoff(envelope, attempts)
        if wait is None:
            for reply, group_env in self._split_by_reply(envelope, replies):
                reply.message += ' (Too many retries)'
                self._perm_fail(None, group_env, reply)
            self._remove(id)
            return False
        else:
            when = time.time() + wait
            self.store.set_timestamp(id, when)
            self.active_ids.discard(id)
            self._add_queued((when, id))
            return True

    def _attempt(self, id, envelope, attempts):
        try:
            results = self.relay._attempt(envelope, attempts)
        except TransientRelayError as e:
            self._pool_spawn('store', self._retry_later, id, envelope, e.reply)
        except PermanentRelayError as e:
            self._perm_fail(id, envelope, e.reply)
        except Exception as e:
            log_exception(__name__)
            reply = Reply('450', '4.0.0 Unhandled delivery error: ' + str(e))
            self._pool_spawn('store', self._retry_later, id, envelope, reply)
            raise
        else:
            if isinstance(results, collections.Sequence):
                self._handle_partial_relay(id, envelope, attempts, results)
            else:
                self._remove(id)

    def _handle_partial_relay(self, id, envelope, attempts, results):
        delivered = []
        tempfails = []
        permfails = []
        for i, rcpt in enumerate(envelope.recipients):
            rcpt_res = results[i]
            if not rcpt_res:
                delivered.append(i)
            elif isinstance(rcpt_res, PermanentRelayError):
                delivered.append(i)
                permfails.append((rcpt, rcpt_res.reply))
            elif isinstance(rcpt_res, TransientRelayError):
                tempfails.append((rcpt, rcpt_res.reply))
        if permfails:
            rcpts, replies = zip(*permfails)
            fail_env = envelope.copy(rcpts)
            for reply, group_env in self._split_by_reply(fail_env, replies):
                self._perm_fail(None, group_env, reply)
        if tempfails:
            rcpts, replies = zip(*tempfails)
            fail_env = envelope.copy(rcpts)
            if not self._retry_later(id, fail_env, replies):
                return
        else:
            self.store.remove(id)
            return
        self.store.set_recipients_delivered(id, delivered)

    def _dequeue(self, id):
        try:
            envelope, attempts = self.store.get(id)
        except KeyError:
            return
        self.active_ids.add(id)
        self._pool_spawn('relay', self._attempt, id, envelope, attempts)

    def _check_ready(self, now):
        last_i = 0
        for i, entry in enumerate(self.queued):
            timestamp, id = entry
            if now >= timestamp:
                self._pool_spawn('store', self._dequeue, id)
                last_i = i + 1
            else:
                break
        if last_i > 0:
            self.queued = self.queued[last_i:]
            self.queued_ids = set([id for timestamp, id in self.queued])

    def _wait_store(self):
        while True:
            try:
                for entry in self.store.wait():
                    self._add_queued(entry)
            except NotImplementedError:
                return

    def _wait_ready(self, now):
        try:
            first = self.queued[0]
        except IndexError:
            self.wake.wait()
            self.wake.clear()
            return
        first_timestamp = first[0]
        if first_timestamp > now:
            self.wake.wait(first_timestamp - now)
            self.wake.clear()

    def flush(self):
        """Attempts to immediately flush all messages waiting in the queue,
        regardless of their retry timers.

        .. warning::

           This can be a very expensive operation, use with care.

        """
        self.wake.set()
        self.wake.clear()
        self.queued_lock.acquire()
        try:
            for entry in self.queued:
                self._pool_spawn('store', self._dequeue, entry[1])
            self.queued = []
        finally:
            self.queued_lock.release()

    def kill(self):
        """This method is used by |Queue| and |Queue|-like objects to properly
        end any associated services (such as running :class:`~gevent.Greenlet`
        threads) and close resources.

        """
        super(Queue, self).kill()

    def _run(self):
        if not self.relay:
            return
        self._pool_spawn('store', self._load_all)
        self._pool_spawn('store', self._wait_store)
        while True:
            self.queued_lock.acquire()
            try:
                now = time.time()
                self._check_ready(now)
                self._wait_ready(now)
            finally:
                self.queued_lock.release()
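
# Illustrative sketch, not part of the example above: a possible `backoff`
# callable for this Queue, following the contract described in the class
# docstring -- given the envelope and the number of delivery attempts so far,
# return the number of seconds to wait before the next attempt, or None to
# permanently fail the message. The retry limit and delays are assumptions.
def exponential_backoff(envelope, attempts):
    if attempts > 5:
        return None                   # give up; the message will be bounced
    return 60 * (2 ** attempts)       # delay roughly doubles with each attempt

# queue = Queue(store, relay, backoff=exponential_backoff)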
Beispiel #37
0
class Session:
    def __init__(self, handler, tg, chatid, user, name):
        self.handler = handler
        self.tg = tg
        self.name = name
        self.user = user
        self.chatid = chatid
        self.event = None
        self.returnmsg = None

    def _activate(self):
        self.handler.activeSessions[self.user] = self

    def _processmarkup(self, markup):
        if markup != None:
            markup2 = {}
            markup2["resize_keyboard"] = True
            markup2["one_time_keyboard"] = True
            markup2["keyboard"] = markup
            return json.dumps(markup2)
        return markup

    def getArgument(self,
                    key,
                    descr="",
                    global_=False,
                    markup=None,
                    presetvalue=None):
        if global_:
            hkey = "config"
        else:
            hkey = self.name
        res = self.handler.redisconfig.hget(hkey, key)
        if res == None:
            # if descr=="":
            #     session.send_message("Cannot find global variable: '%s' please define by using '%s=yourinput'."%(key,key))
            # else:
            if presetvalue != None:
                res = presetvalue
            elif descr == "":
                res = self.send_message(
                    "Cannot find variable: '%s', please specify" % key,
                    True,
                    markup=markup)
            else:
                # self.send_message("Cannot find global variable: '%s', please specify"%key)
                res = self.send_message(descr, True, markup=markup)
            self.handler.redisconfig.hset(hkey, key, res)
        return res

    def send_message(self, msg, feedback=False, markup=None):
        # print "spawn:%s"%msg
        self.tg.send_message(self.chatid,
                             msg,
                             reply_to_message_id="",
                             reply_markup=self._processmarkup(markup))
        if feedback:
            self.start_communication()
            self.event = Event()
            self.event.wait()
            return self.returnmsg.text

    def start_communication(self):
        self._activate()
        self.handler.activeCommunications[self.user] = self

    def stop_communication(self):
        if self.handler.activeCommunications.has_key(self.user):
            self.handler.activeCommunications.pop(self.user)
Beispiel #38
0
class InboundESL(object):
    def __init__(self, host, port, password):
        self.host = host
        self.port = port
        self.password = password
        self.timeout = 5
        self._run = True
        self._EOL = '\n'
        self._commands_sent = []
        self._auth_request_event = Event()
        self._receive_events_greenlet = None
        self._process_events_greenlet = None
        self.event_handlers = {}
        self.connected = False

        self._esl_event_queue = Queue()
        self._process_esl_event_queue = True

    def connect(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(self.timeout)
        self.sock.connect((self.host, self.port))
        self.connected = True
        self.sock.settimeout(None)
        self.sock_file = self.sock.makefile()
        self._receive_events_greenlet = gevent.spawn(self.receive_events)
        self._process_events_greenlet = gevent.spawn(self.process_events)
        self._auth_request_event.wait()
        self.authenticate()

    def receive_events(self):
        buf = ''
        while self._run:
            try:
                data = self.sock_file.readline()
            except Exception:
                self._run = False
                self.connected = False
                self.sock.close()
                # logging.exception("Error reading from socket.")
                break
            if not data:
                if self.connected:
                    logging.error(
                        "Error receiving data, is FreeSWITCH running?")
                    self.connected = False
                break
            # Empty line
            if data == self._EOL:
                event = ESLEvent(buf)
                buf = ''
                self.handle_event(event)
                continue
            buf += data

    @staticmethod
    def _read_socket(sock, length):
        """Receive data from socket until the length is reached."""
        data = sock.read(length)
        data_length = len(data)
        while data_length < length:
            logging.warn(
                'Socket should read %s bytes, but actually read %s bytes. '
                'Consider increasing "net.core.rmem_default".' %
                (length, data_length))
            # FIXME(italo): if not data raise error
            data += sock.read(length - data_length)
            data_length = len(data)
        return data

    def handle_event(self, event):
        if event.headers['Content-Type'] == 'auth/request':
            self._auth_request_event.set()
        elif event.headers['Content-Type'] == 'command/reply':
            async_response = self._commands_sent.pop(0)
            event.data = event.headers['Reply-Text']
            async_response.set(event)
        elif event.headers['Content-Type'] == 'api/response':
            length = int(event.headers['Content-Length'])
            data = self._read_socket(self.sock_file, length)
            event.data = data
            async_response = self._commands_sent.pop(0)
            async_response.set(event)
        elif event.headers['Content-Type'] == 'text/disconnect-notice':
            self.connected = False
        else:
            length = int(event.headers['Content-Length'])
            data = self._read_socket(self.sock_file, length)
            if event.headers.get('Content-Type') == 'log/data':
                event.data = data
            else:
                event.parse_data(data)
            self._esl_event_queue.put(event)

    def _safe_exec_handler(self, handler, event):
        try:
            handler(event)
        except:
            logging.exception('ESL %s raised exception.' % handler.__name__)
            logging.error(pprint.pformat(event.headers))

    def process_events(self):
        logging.debug('Event Processor Running')
        while self._run:
            if not self._process_esl_event_queue:
                gevent.sleep(1)
                continue

            try:
                event = self._esl_event_queue.get(timeout=1)
            except gevent.queue.Empty:
                continue

            if event.headers.get('Event-Name') == 'CUSTOM':
                handlers = self.event_handlers.get(
                    event.headers.get('Event-Subclass'))
            else:
                handlers = self.event_handlers.get(
                    event.headers.get('Event-Name'))

            if not handlers and event.headers.get(
                    'Content-Type') == 'log/data':
                handlers = self.event_handlers.get('log')

            if not handlers:
                continue

            if hasattr(self, 'before_handle'):
                self._safe_exec_handler(self.before_handle, event)

            for handle in handlers:
                self._safe_exec_handler(handle, event)

            if hasattr(self, 'after_handle'):
                self._safe_exec_handler(self.after_handle, event)

    def send(self, data):
        if not self.connected:
            raise NotConnectedError()
        async_response = gevent.event.AsyncResult()
        self._commands_sent.append(async_response)
        raw_msg = (data + self._EOL * 2).encode('utf-8')
        self.sock.send(raw_msg)
        response = async_response.get()
        return response

    def authenticate(self):
        response = self.send('auth %s' % self.password)
        if response.headers['Reply-Text'] != '+OK accepted':
            raise ValueError('Invalid password.')

    def register_handle(self, name, handler):
        if name not in self.event_handlers:
            self.event_handlers[name] = []
        if handler in self.event_handlers[name]:
            return
        self.event_handlers[name].append(handler)

    def unregister_handle(self, name, handler):
        if name not in self.event_handlers:
            raise ValueError('No handlers found for event: %s' % name)
        self.event_handlers[name].remove(handler)
        if not self.event_handlers[name]:
            del self.event_handlers[name]

    def stop(self):
        if self.connected:
            self.send('exit')
        self._run = False
        logging.info("Waiting for receive greenlet exit")
        self._receive_events_greenlet.join()
        logging.info("Waiting for event processing greenlet exit")
        self._process_events_greenlet.join()
        if self.connected:
            self.sock.close()
            self.sock_file.close()
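
# Illustrative sketch, not part of the example above: send() pairs each command
# with a gevent AsyncResult, and handle_event() pops the oldest pending one and
# delivers the reply to it. The stripped-down stand-in below shows just that
# request/response correlation, without any sockets.
import gevent
from gevent.event import AsyncResult

_pending = []

def send_command(name):
    waiter = AsyncResult()
    _pending.append(waiter)           # remember who is waiting, FIFO order
    # ... the real code writes `name` to the ESL socket here ...
    return waiter.get()               # block until handle_reply() answers

def handle_reply(text):
    _pending.pop(0).set(text)         # wake up the oldest waiting caller

gevent.spawn_later(0.1, handle_reply, '+OK accepted')
print(send_command('auth ClueCon'))   # -> '+OK accepted'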
Beispiel #39
0
class RotkehlchenServer():
    def __init__(self):
        arg_parser = app_args(
            prog='rotkehlchen',
            description='Rotkehlchen Crypto Portfolio Management',
        )
        self.args = arg_parser.parse_args()
        self.rotkehlchen = Rotkehlchen(self.args)
        self.stop_event = Event()
        mainloop_greenlet = self.rotkehlchen.start()
        mainloop_greenlet.link_exception(self.handle_killed_greenlets)
        # Greenlets that will be waited for when we shutdown
        self.waited_greenlets = [mainloop_greenlet]
        # Greenlets that can be killed instead of waited for when we shutdown
        self.killable_greenlets = []
        self.task_lock = Semaphore()
        self.task_id = 0
        self.task_results = {}

    def new_task_id(self):
        with self.task_lock:
            task_id = self.task_id
            self.task_id += 1
        return task_id

    def write_task_result(self, task_id, result):
        with self.task_lock:
            self.task_results[task_id] = result

    def get_task_result(self, task_id):
        with self.task_lock:
            return self.task_results[task_id]

    def port(self):
        return self.args.zerorpc_port

    def shutdown(self):
        log.debug('Shutdown initiated')
        self.zerorpc.stop()
        self.rotkehlchen.shutdown()
        log.debug('Waiting for greenlets')
        gevent.wait(self.waited_greenlets)
        log.debug('Waited for greenlets. Killing all other greenlets')
        gevent.killall(self.killable_greenlets)
        log.debug('Greenlets killed. Killing zerorpc greenlet')
        self.zerorpc_greenlet.kill()
        log.debug('Killed zerorpc greenlet')
        log.debug('Shutdown completed')
        logging.shutdown()
        self.stop_event.set()

    def logout(self):
        # Kill all queries apart from the main loop -- perhaps a bit heavy handed
        # but the other options would be:
        # 1. to wait for all of them. That could take a lot of time, for no reason.
        #    All results would be discarded anyway since we are logging out.
        # 2. Have an intricate stop() notification system for each greenlet, but
        #   that is going to get complicated fast.
        gevent.killall(self.killable_greenlets)
        with self.task_lock:
            self.task_results = {}
        self.rotkehlchen.logout()

    def set_main_currency(self, currency_text):
        self.rotkehlchen.set_main_currency(currency_text)

    def set_settings(self, settings):
        result, message = self.rotkehlchen.set_settings(settings)
        return {'result': result, 'message': message}

    def handle_killed_greenlets(self, greenlet):
        if not greenlet.exception:
            log.warning('handle_killed_greenlets without an exception')
            return

        log.error(
            'Greenlet for task {} dies with exception: {}.\n'
            'Exception Name: {}\nException Info: {}\nTraceback:\n {}'.format(
                greenlet.task_id,
                greenlet.exception,
                greenlet.exc_info[0],
                greenlet.exc_info[1],
                ''.join(traceback.format_tb(greenlet.exc_info[2])),
            ))
        # also write an error for the task result
        result = {
            'error': str(greenlet.exception),
        }
        self.write_task_result(greenlet.task_id, result)

    def _query_async(self, command, task_id, **kwargs):
        result = getattr(self, command)(**kwargs)
        self.write_task_result(task_id, result)

    def query_async(self, command, **kwargs):
        task_id = self.new_task_id()
        log.debug("NEW TASK {} (kwargs:{}) with ID: {}".format(
            command, kwargs, task_id))
        greenlet = gevent.spawn(
            self._query_async,
            command,
            task_id,
            **kwargs,
        )
        greenlet.task_id = task_id
        greenlet.link_exception(self.handle_killed_greenlets)
        self.killable_greenlets.append(greenlet)
        return task_id

    def query_task_result(self, task_id):
        with self.task_lock:
            len1 = len(self.task_results)
            ret = self.task_results.pop(int(task_id), None)
            if not ret and len1 != len(self.task_results):
                log.error("Popped None from results task but lost an entry")
            if ret:
                log.debug("Found response for task {}".format(task_id))
        return ret

    @staticmethod
    def get_fiat_exchange_rates(currencies):
        rates = Inquirer().get_fiat_usd_exchange_rates(currencies)
        res = {'exchange_rates': rates}
        return process_result(res)

    def get_settings(self):
        return process_result(self.rotkehlchen.data.db.get_settings())

    def remove_exchange(self, name):
        result, message = self.rotkehlchen.remove_exchange(name)
        return {'result': result, 'message': message}

    def setup_exchange(self, name, api_key, api_secret):
        result, message = self.rotkehlchen.setup_exchange(
            name, api_key, api_secret)
        return {'result': result, 'message': message}

    def query_otctrades(self):
        trades = self.rotkehlchen.data.get_external_trades()
        result = {'result': trades, 'message': ''}
        return process_result(result)

    def add_otctrade(self, data):
        result, message = self.rotkehlchen.data.add_external_trade(data)
        return {'result': result, 'message': message}

    def edit_otctrade(self, data):
        result, message = self.rotkehlchen.data.edit_external_trade(data)
        return {'result': result, 'message': message}

    def delete_otctrade(self, trade_id):
        result, message = self.rotkehlchen.data.delete_external_trade(trade_id)
        return {'result': result, 'message': message}

    def set_premium_credentials(self, api_key, api_secret):
        msg = ''
        result = False
        try:
            self.rotkehlchen.set_premium_credentials(api_key, api_secret)
            result = True
        except (AuthenticationError, IncorrectApiKeyFormat) as e:
            msg = str(e)
        return {'result': result, 'message': msg}

    def set_premium_option_sync(self, should_sync):
        self.rotkehlchen.data.db.update_premium_sync(should_sync)
        return True

    def query_exchange_balances(self, name):
        res = {'name': name}
        balances, msg = getattr(self.rotkehlchen, name).query_balances()
        if balances is None:
            res['error'] = msg
        else:
            res['balances'] = balances

        return process_result(res)

    def query_exchange_balances_async(self, name):
        res = self.query_async('query_exchange_balances', name=name)
        return {'task_id': res}

    def query_blockchain_balances(self):
        result, empty_or_error = self.rotkehlchen.blockchain.query_balances()
        return process_result({'result': result, 'message': empty_or_error})

    def query_blockchain_balances_async(self):
        res = self.query_async('query_blockchain_balances')
        return {'task_id': res}

    def query_fiat_balances(self):
        res = self.rotkehlchen.query_fiat_balances()
        return process_result(res)

    def query_netvalue_data(self):
        res = self.rotkehlchen.data.db.get_netvalue_data()
        result = {'times': res[0], 'data': res[1]}
        return process_result(result)

    def query_timed_balances_data(self, asset: str, start_ts: int,
                                  end_ts: int):
        start_ts = Timestamp(start_ts)
        end_ts = Timestamp(end_ts)
        res = self.rotkehlchen.data.db.query_timed_balances(
            from_ts=start_ts,
            to_ts=end_ts,
            asset=asset,
        )
        result = {'result': res, 'message': ''}
        return process_result(result)

    def query_owned_assets(self):
        res = self.rotkehlchen.data.db.query_owned_assets()
        result = {'result': res, 'message': ''}
        return process_result(result)

    def query_latest_location_value_distribution(self):
        res = self.rotkehlchen.data.db.get_latest_location_value_distribution()
        result = {'result': res, 'message': ''}
        return process_result(result)

    def query_latest_asset_value_distribution(self):
        res = self.rotkehlchen.data.db.get_latest_asset_value_distribution()
        result = {'result': res, 'message': ''}
        return process_result(result)

    def consume_messages(self):
        """Consumes all errors and warnings from the messages aggregator"""
        warnings = self.rotkehlchen.msg_aggregator.consume_warnings()
        errors = self.rotkehlchen.msg_aggregator.consume_errors()
        result = {
            'result': {
                'warnings': warnings,
                'errors': errors
            },
            'message': ''
        }
        return process_result(result)

    def query_statistics_renderer(self):
        result_dict = {'result': '', 'message': 'user does not have premium'}
        if not self.rotkehlchen.premium:
            return process_result(result_dict)

        active = self.rotkehlchen.premium.is_active()
        if not active:
            return process_result(result_dict)

        try:
            result = self.rotkehlchen.premium.query_statistics_renderer()
            result_dict['result'] = result
            result_dict['message'] = ''
        except RemoteError as e:
            result_dict['message'] = str(e)

        return process_result(result_dict)

    def set_fiat_balance(self, currency: str, balance: str):
        result, message = self.rotkehlchen.data.set_fiat_balance(
            currency, balance)
        return {'result': result, 'message': message}

    def query_trade_history(self, location: str, start_ts: int, end_ts: int):
        start_ts = Timestamp(start_ts)
        end_ts = Timestamp(end_ts)
        if location == 'all':
            return self.rotkehlchen.trades_historian.get_history(
                start_ts, end_ts)

        try:
            exchange = getattr(self.rotkehlchen, location)
        except AttributeError:
            raise ValueError(f'Unknown location {location} given')

        return process_result_list(
            exchange.query_trade_history(start_ts, end_ts, end_ts))

    def process_trade_history(self, start_ts, end_ts):
        start_ts = int(start_ts)
        end_ts = int(end_ts)
        result, error_or_empty = self.rotkehlchen.process_history(
            start_ts, end_ts)
        response = {'result': result, 'message': error_or_empty}
        return process_result(response)

    def process_trade_history_async(self, start_ts, end_ts):
        res = self.query_async('process_trade_history',
                               start_ts=start_ts,
                               end_ts=end_ts)
        return {'task_id': res}

    def export_processed_history_csv(self, dirpath):
        result, message = self.rotkehlchen.accountant.csvexporter.create_files(
            dirpath)
        return {'result': result, 'message': message}

    def query_balances(self, save_data=False):
        if isinstance(save_data, str) and (save_data == 'save'
                                           or save_data == 'True'):
            save_data = True

        result = self.rotkehlchen.query_balances(save_data)
        print(pretty_json_dumps(result))
        return process_result(result)

    def query_balances_async(self, save_data=False):
        res = self.query_async('query_balances', save_data=save_data)
        return {'task_id': res}

    def query_periodic_data(self):
        """Will query for some client data that can change frequently"""
        result = self.rotkehlchen.query_periodic_data()
        return process_result(result)

    def get_eth_tokens(self):
        result = {
            'all_eth_tokens': self.rotkehlchen.data.eth_tokens,
            'owned_eth_tokens': self.rotkehlchen.blockchain.eth_tokens,
        }
        return process_result(result)

    def add_owned_eth_tokens(self, tokens):
        return self.rotkehlchen.add_owned_eth_tokens(tokens)

    def remove_owned_eth_tokens(self, tokens):
        return self.rotkehlchen.remove_owned_eth_tokens(tokens)

    def add_blockchain_account(self, given_blockchain: str,
                               given_account: str):
        try:
            blockchain = SupportedBlockchain(given_blockchain)
        except ValueError:
            msg = f'Tried to add blockchain account for unsupported blockchain {given_blockchain}'
            return simple_result(False, msg)
        return self.rotkehlchen.add_blockchain_account(blockchain,
                                                       given_account)

    def remove_blockchain_account(self, given_blockchain: str,
                                  given_account: str):
        try:
            blockchain = SupportedBlockchain(given_blockchain)
        except ValueError:
            msg = (
                f'Tried to remove blockchain account for unsupported blockchain {given_blockchain}'
            )
            return simple_result(False, msg)
        return self.rotkehlchen.remove_blockchain_account(
            blockchain, given_account)

    def get_ignored_assets(self):
        result = {
            'ignored_assets': [
                identifier for identifier in
                self.rotkehlchen.data.db.get_ignored_assets()
            ],
        }
        return result

    def add_ignored_asset(self, asset: str):
        result, message = self.rotkehlchen.data.add_ignored_asset(asset)
        return {'result': result, 'message': message}

    def remove_ignored_asset(self, asset: str):
        result, message = self.rotkehlchen.data.remove_ignored_asset(asset)
        return {'result': result, 'message': message}

    def unlock_user(self, user, password, create_new, sync_approval, api_key,
                    api_secret):
        """Either unlock an existing user or create a new one"""
        res = {'result': True, 'message': ''}

        assert isinstance(sync_approval,
                          str), "sync_approval should be a string"
        assert isinstance(api_key, str), "api_key should be a string"
        assert isinstance(api_secret, str), "api_secret should be a string"

        if not isinstance(create_new, bool):
            if not isinstance(create_new, str):
                raise ValueError('create_new can only be boolean or str')

            if create_new in ('False', 'false', 'FALSE'):
                create_new = False
            elif create_new in ('True', 'true', 'TRUE'):
                create_new = True
            else:
                raise ValueError(
                    f'Invalid string value for create_new {create_new}')

        valid_actions = ['unknown', 'yes', 'no']
        valid_approve = isinstance(sync_approval,
                                   str) and sync_approval in valid_actions
        if not valid_approve:
            raise ValueError('Provided invalid value for sync_approval')

        if api_key != '' and create_new is False:
            raise ValueError(
                'Should not ever have api_key provided during a normal login')

        if api_key != '' and api_secret == '' or api_secret != '' and api_key == '':
            raise ValueError('Must provide both or neither of api key/secret')

        try:
            self.rotkehlchen.unlock_user(
                user,
                password,
                create_new,
                sync_approval,
                api_key,
                api_secret,
            )
            res['exchanges'] = self.rotkehlchen.connected_exchanges
            res['premium'] = self.rotkehlchen.premium is not None
            res['settings'] = self.rotkehlchen.data.db.get_settings()
        except AuthenticationError as e:
            res['result'] = False
            res['message'] = str(e)
        except RotkehlchenPermissionError as e:
            res['result'] = False
            res['permission_needed'] = True
            res['message'] = str(e)

        return res

    def main(self):
        if os.name != 'nt':
            gevent.hub.signal(signal.SIGQUIT, self.shutdown)
        gevent.hub.signal(signal.SIGINT, self.shutdown)
        gevent.hub.signal(signal.SIGTERM, self.shutdown)
        # self.zerorpc = zerorpc.Server(self, heartbeat=15)
        self.zerorpc = zerorpc.Server(self)
        addr = 'tcp://127.0.0.1:' + str(self.port())
        self.zerorpc.bind(addr)
        print('start running on {}'.format(addr))
        self.zerorpc_greenlet = gevent.spawn(self.zerorpc.run)
        self.stop_event.wait()
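
# Illustrative sketch, not part of the example above: the async query pattern
# used here -- spawn a greenlet per task, stash its result under a task id, and
# let the caller poll for it later. The stand-in below keeps only that shape;
# names and the example task are assumptions.
import gevent
from gevent.lock import Semaphore

_task_lock = Semaphore()
_task_results = {}

def _run_task(task_id, func, *args, **kwargs):
    result = func(*args, **kwargs)
    with _task_lock:
        _task_results[task_id] = result

def query_async(task_id, func, *args, **kwargs):
    gevent.spawn(_run_task, task_id, func, *args, **kwargs)
    return task_id

def poll_result(task_id):
    with _task_lock:
        return _task_results.pop(task_id, None)   # None while still running

query_async(1, lambda: 2 + 2)
gevent.sleep(0.1)
print(poll_result(1))                              # -> 4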
Beispiel #40
0
class InstanceRunner(object):
    """Maintainer for a single instance."""
    def __init__(self,
                 app,
                 log_queue,
                 name=None,
                 ppid=0,
                 timeout=30,
                 uid=None,
                 gid=None):

        self.app = app
        self.log_queue = log_queue
        self.name = name or self.__class__.__name__
        self.ppid = ppid
        self.uid = uid or os.geteuid()
        self.gid = gid or os.getegid()
        self.timeout = timeout
        self.booted = False
        self.tmp = NotifyFile(uid, gid)
        self.shutdown = Event()
        self.log = logging.getLogger('child')

    def run(self):
        """Main-loop of the instance runner."""
        self.init_process()
        self.log.info("Booting child with pid: %d", os.getpid())

        self.app.start()
        self.booted = True

        while not self.shutdown.is_set():
            self.update_proc_title()
            if os.getppid() != self.ppid:
                # Parent changed - lets drop out
                break
            self.tmp.notify()
            self.shutdown.wait(1)

        self.app.stop()

    def update_proc_title(self):
        util._setproctitle('worker %s: %s' % (
            self.name,
            str(self.app),
        ))

    def capture_stdout(self):
        """Setup so that stdout and stderr are sent to a logger."""
        sys.stdout = StreamToLogger(logging.getLogger('sys.stdout'),
                                    logging.INFO)
        sys.stderr = StreamToLogger(logging.getLogger('sys.stderr'),
                                    logging.ERROR)

    def init_process(self):
        """Initialize process."""
        random.seed()  # FIXME: seed with pid?
        # Initialize logging.
        logger = logging.getLogger()
        logger.addHandler(ChildLogHandler(self.log_queue))
        for handler in logger.handlers:
            if not isinstance(handler, ChildLogHandler):
                logger.removeHandler(handler)
        self.capture_stdout()
        # Initialize the rest.
        util.close_on_exec(self.tmp.fileno())
        self.init_signals()

    def init_signals(self):
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        gevent.signal(signal.SIGQUIT, self.handle_quit)
        gevent.signal(signal.SIGTERM, self.handle_exit)
        gevent.signal(signal.SIGWINCH, self.handle_winch)

    def handle_quit(self):
        self.log.info('Received quit')
        self.shutdown.set()

    def handle_exit(self):
        self.log.info('Received exit')
        sys.exit(0)

    def handle_winch(self):
        # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.
        return
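
# Illustrative sketch, not part of the example above: run() uses
# shutdown.wait(1) as an interruptible one-second sleep -- the worker heartbeats
# once per second and exits promptly as soon as handle_quit() sets the Event.
# The heartbeat below just prints; the real worker notifies its temp file.
import gevent
from gevent.event import Event

_shutdown = Event()

def _worker_loop():
    while not _shutdown.is_set():
        print('heartbeat')            # stand-in for self.tmp.notify()
        _shutdown.wait(1)             # sleeps 1s, or returns early once set

gevent.spawn(_worker_loop)
gevent.sleep(2.5)
_shutdown.set()                       # equivalent of receiving a quit signal
gevent.sleep(0)                       # let the worker observe the event and exit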
Beispiel #41
0
def healthcheck(
        transport: UDPTransport,
        recipient: typing.Address,
        event_stop: Event,
        event_healthy: Event,
        event_unhealthy: Event,
        nat_keepalive_retries: int,
        nat_keepalive_timeout: int,
        nat_invitation_timeout: int,
        ping_nonce: int,
):
    """ Sends a periodical Ping to `recipient` to check its health. """
    # pylint: disable=too-many-branches

    log.debug(
        'starting healthcheck for',
        node=pex(transport.raiden.address),
        to=pex(recipient),
    )

    # The state of the node is unknown, the events are set to allow the tasks
    # to do work.
    last_state = NODE_NETWORK_UNKNOWN
    transport.set_node_network_state(
        recipient,
        last_state,
    )

    # Always call `clear` before `set`: since only `set` does context switches,
    # it's easier to reason about tasks that are waiting on both events.

    # Wait for the end-point registration or for the node to quit
    try:
        transport.get_host_port(recipient)
    except UnknownAddress:
        log.debug(
            'waiting for endpoint registration',
            node=pex(transport.raiden.address),
            to=pex(recipient),
        )

        event_healthy.clear()
        event_unhealthy.set()

        backoff = udp_utils.timeout_exponential_backoff(
            nat_keepalive_retries,
            nat_keepalive_timeout,
            nat_invitation_timeout,
        )
        sleep = next(backoff)

        while not event_stop.wait(sleep):
            try:
                transport.get_host_port(recipient)
            except UnknownAddress:
                sleep = next(backoff)
            else:
                break

    # Don't wait to send the first Ping and to start sending messages if the
    # endpoint is known
    sleep = 0
    event_unhealthy.clear()
    event_healthy.set()

    while not event_stop.wait(sleep):
        sleep = nat_keepalive_timeout

        ping_nonce['nonce'] += 1
        messagedata = transport.get_ping(ping_nonce['nonce'])
        message_id = ('ping', ping_nonce['nonce'], recipient)

        # Send Ping a few times before setting the node as unreachable
        try:
            acknowledged = udp_utils.retry(
                transport,
                messagedata,
                message_id,
                recipient,
                event_stop,
                [nat_keepalive_timeout] * nat_keepalive_retries,
            )
        except RaidenShuttingDown:  # For a clean shutdown process
            return

        if event_stop.is_set():
            return

        if not acknowledged:
            log.debug(
                'node is unresponsive',
                node=pex(transport.raiden.address),
                to=pex(recipient),
                current_state=last_state,
                new_state=NODE_NETWORK_UNREACHABLE,
                retries=nat_keepalive_retries,
                timeout=nat_keepalive_timeout,
            )

            # The node is not healthy, clear the event to stop all queue
            # tasks
            last_state = NODE_NETWORK_UNREACHABLE
            transport.set_node_network_state(
                recipient,
                last_state,
            )
            event_healthy.clear()
            event_unhealthy.set()

            # Retry until recovery, used for:
            # - Checking node status.
            # - Nat punching.
            try:
                acknowledged = udp_utils.retry(
                    transport,
                    messagedata,
                    message_id,
                    recipient,
                    event_stop,
                    repeat(nat_invitation_timeout),
                )
            except RaidenShuttingDown:  # For a clean shutdown process
                return

        if acknowledged:
            current_state = views.get_node_network_status(
                views.state_from_raiden(transport.raiden),
                recipient,
            )
            log.debug(
                'node answered',
                node=pex(transport.raiden.address),
                to=pex(recipient),
                current_state=current_state,
                new_state=NODE_NETWORK_REACHABLE,
            )

            if last_state != NODE_NETWORK_REACHABLE:
                last_state = NODE_NETWORK_REACHABLE
                transport.set_node_network_state(
                    recipient,
                    last_state,
                )
                event_unhealthy.clear()
                event_healthy.set()
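
# Illustrative sketch, not part of the example above: healthcheck() uses
# event_stop.wait(sleep) as an interruptible sleep between pings, and flips a
# pair of Events (always clearing the old state before setting the new one) so
# that tasks waiting on either event see a consistent transition. The names and
# the single-iteration demo below are generic stand-ins.
from gevent.event import Event

event_stop = Event()
event_healthy = Event()
event_unhealthy = Event()

def mark_reachable():
    event_unhealthy.clear()           # clear the opposite state first
    event_healthy.set()

def mark_unreachable():
    event_healthy.clear()
    event_unhealthy.set()

mark_unreachable()
delay = 0
while not event_stop.wait(delay):     # returns True (and exits) once stopped
    delay = 30                        # stand-in for nat_keepalive_timeout
    # ... send a Ping here, then mark_reachable()/mark_unreachable() ...
    mark_reachable()
    event_stop.set()                  # stop after one iteration for the demo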
Beispiel #42
0
class JoinableQueue(Queue):
    """
    A subclass of :class:`Queue` that additionally has
    :meth:`task_done` and :meth:`join` methods.
    """
    def __init__(self, maxsize=None, items=None, unfinished_tasks=None):
        """

        .. versionchanged:: 1.1a1
           If *unfinished_tasks* is not given, then all the given *items*
           (if any) will be considered unfinished.

        """
        from gevent.event import Event
        Queue.__init__(self, maxsize, items)
        self._cond = Event()
        self._cond.set()

        if unfinished_tasks:
            self.unfinished_tasks = unfinished_tasks
        elif items:
            self.unfinished_tasks = len(items)
        else:
            self.unfinished_tasks = 0

        if self.unfinished_tasks:
            self._cond.clear()

    def copy(self):
        return type(self)(self.maxsize, self.queue, self.unfinished_tasks)

    def _format(self):
        result = Queue._format(self)
        if self.unfinished_tasks:
            result += ' tasks=%s _cond=%s' % (self.unfinished_tasks,
                                              self._cond)
        return result

    def _put(self, item):
        Queue._put(self, item)
        self.unfinished_tasks += 1
        self._cond.clear()

    def task_done(self):
        '''Indicate that a formerly enqueued task is complete. Used by queue consumer threads.
        For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to :meth:`task_done` tells the queue
        that the processing on the task is complete.

        If a :meth:`join` is currently blocking, it will resume when all items have been processed
        (meaning that a :meth:`task_done` call was received for every item that had been
        :meth:`put <Queue.put>` into the queue).

        Raises a :exc:`ValueError` if called more times than there were items placed in the queue.
        '''
        if self.unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self.unfinished_tasks -= 1
        if self.unfinished_tasks == 0:
            self._cond.set()

    def join(self, timeout=None):
        '''
        Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the queue.
        The count goes down whenever a consumer thread calls :meth:`task_done` to indicate
        that the item was retrieved and all work on it is complete. When the count of
        unfinished tasks drops to zero, :meth:`join` unblocks.

        :param float timeout: If not ``None``, then wait no more than this time in seconds
            for all tasks to finish.
        :return: ``True`` if all tasks have finished; if ``timeout`` was given and expired before
            all tasks finished, ``False``.

        .. versionchanged:: 1.1a1
           Add the *timeout* parameter.
        '''
        return self._cond.wait(timeout=timeout)
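
# Usage sketch for JoinableQueue (importing gevent's own class, which is the
# one shown above); the worker function and item values are made up for
# illustration.
import gevent
from gevent.queue import JoinableQueue

def worker(queue):
    while True:
        item = queue.get()
        try:
            print('processing', item)
        finally:
            queue.task_done()  # exactly one task_done() per get()

if __name__ == '__main__':
    queue = JoinableQueue()
    gevent.spawn(worker, queue)
    for i in range(5):
        queue.put(i)
    print(queue.join(timeout=10))  # -> True once every item was task_done()'d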
Example #43
class TimerQueue(object):
    """A timer that provides efficient scheduling of large numbers of events in
  the near future."""
    def __init__(self, time_source=time.time, resolution=0.01):
        """
    Args:
      time_source - A callable to get the current time (in seconds).
      resolution - The minimum resolution of the timer.  If set, all events are
                   quantized to this resolution.
    """
        self._queue = []
        self._event = Event()
        self._seq = 0
        self._resolution = resolution
        self._time_source = time_source
        self._worker = gevent.spawn(self._TimerWorker)

    def __del__(self):
        self._worker.kill(block=False)
        self._worker = None

    def _TimerWorker(self):
        while True:
            # If the queue is empty, wait for an item to be added.
            if not self._queue:
                self._event.wait()

            if self._event.is_set():
                self._event.clear()
                # A sleep here is needed to work around a bug with gevent.
                # If the event is cleared and then immediately waited on, the
                # wait completes instantly and reports that it timed out.
                gevent.sleep(0)

            # Peek the head of the queue
            at, peeked_seq, cancelled = self._PeekNext()
            if cancelled:
                # The item has already been canceled, remove it and continue.
                heapq.heappop(self._queue)
                continue

            # Wait for a new item to be added to the queue,
            # or for the timeout to expire.
            to_wait = at - self._time_source()
            if to_wait > 0:
                # There's some time to wait; wait for the deadline, or for an
                # item with an earlier deadline to be added to the head of the queue.
                wait_timed_out = not self._event.wait(to_wait)
            else:
                # It should already be run, do it right now.
                wait_timed_out = True

            if wait_timed_out:
                # Nothing newer came in before it timed out.
                at, seq, cancelled, action = heapq.heappop(self._queue)
                # This should never happen; if it does, we somehow ran
                # events out of order.
                if seq != peeked_seq:
                    LOG.critical("seq != peeked_seq [%d, %d]" %
                                 (seq, peeked_seq))
                if not cancelled:
                    # Run it
                    gevent.spawn(action)
                    # Clear the reference out in this loop
                    del action
            else:
                # A newer item came in, nothing to do here, re-loop
                pass

    def _PeekNext(self):
        return self._queue[0][:3]

    def Schedule(self, deadline, action):
        """Schedule an operation

    Args:
      deadline - The absolute time this event should occur on.
      action - The action to run.
    Returns:
      A callable that can be invoked to cancel the scheduled operation.
    """
        if action is None:
            raise Exception("action must be non-null")

        if self._resolution:
            deadline = int(math.ceil(
                float(deadline) / self._resolution)) * self._resolution

        self._seq += 1
        timeout_args = [deadline, self._seq, False, action]

        def cancel():
            timeout_args[2] = True
            # Null out to avoid holding onto references.
            timeout_args[3] = None

        heapq.heappush(self._queue, timeout_args)
        # Wake up the worker greenlet if the new item is now at the head of the queue
        if self._queue[0][0] == deadline:
            self._event.set()
        return cancel
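
# Usage sketch (not from the original module) for the TimerQueue class above:
# schedule two callbacks with the same deadline and cancel one before it
# fires. The delays are arbitrary, and the class's own imports (heapq, math,
# gevent, Event) are assumed to be present.
import time
import gevent

def demo_timer_queue():
    timers = TimerQueue()
    cancel_a = timers.Schedule(time.time() + 0.05, lambda: print('a fired'))
    cancel_b = timers.Schedule(time.time() + 0.05, lambda: print('b fired'))
    cancel_b()             # 'b fired' will never be printed
    gevent.sleep(0.2)      # give the worker greenlet time to run the action

if __name__ == '__main__':
    demo_timer_queue()     # expected output: "a fired"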
Example #44
class UDPTransport(Runnable):
    UDP_MAX_MESSAGE_SIZE = 1200

    def __init__(self, discovery, udpsocket, throttle_policy, config):
        super().__init__()
        # these values are initialized by the start method
        self.queueids_to_queues: typing.Dict
        self.raiden: RaidenService

        self.discovery = discovery
        self.config = config

        self.retry_interval = config['retry_interval']
        self.retries_before_backoff = config['retries_before_backoff']
        self.nat_keepalive_retries = config['nat_keepalive_retries']
        self.nat_keepalive_timeout = config['nat_keepalive_timeout']
        self.nat_invitation_timeout = config['nat_invitation_timeout']

        self.event_stop = Event()
        self.event_stop.set()

        self.greenlets = list()
        self.addresses_events = dict()

        self.messageids_to_asyncresults = dict()

        # Maps the addresses to a dict with the latest nonce (using a dict
        # because python integers are immutable)
        self.nodeaddresses_to_nonces = dict()

        cache = cachetools.TTLCache(
            maxsize=50,
            ttl=CACHE_TTL,
        )
        cache_wrapper = cachetools.cached(cache=cache)
        self.get_host_port = cache_wrapper(discovery.get)

        self.throttle_policy = throttle_policy
        self.server = DatagramServer(udpsocket, handle=self.receive)

    def start(
            self,
            raiden: RaidenService,
    ):
        if not self.event_stop.ready():
            raise RuntimeError('UDPTransport started while running')

        self.event_stop.clear()
        self.raiden = raiden
        self.queueids_to_queues = dict()

        # server.stop() clears the handle. Since this may be a restart the
        # handle must always be set
        self.server.set_handle(self.receive)

        self.server.start()
        super().start()

    def _run(self):
        """ Runnable main method, perform wait on long-running subtasks """
        try:
            self.event_stop.wait()
        except gevent.GreenletExit:  # killed without exception
            self.event_stop.set()
            gevent.killall(self.greenlets)  # kill children
            raise  # re-raise to keep killed status
        except Exception:
            self.stop()  # ensure cleanup and wait on subtasks
            raise

    def stop(self):
        if self.event_stop.ready():
            return  # double call, happens on normal stop, ignore

        self.event_stop.set()

        # Stop handling incoming packets, but don't close the socket. The
        # socket can only be safely closed after all outgoing tasks are stopped
        self.server.stop_accepting()

        # Stop processing the outgoing queues
        gevent.wait(self.greenlets)

        # All outgoing tasks are stopped. Now it's safe to close the socket. At
        # this point there might be some incoming message being processed,
        # keeping the socket open is not useful for these.
        self.server.stop()

        # Calling `.close()` on a gevent socket doesn't actually close the underlying os socket
        # so we do that ourselves here.
        # See: https://github.com/gevent/gevent/blob/master/src/gevent/_socket2.py#L208
        # and: https://groups.google.com/forum/#!msg/gevent/Ro8lRra3nH0/ZENgEXrr6M0J
        try:
            self.server._socket.close()  # pylint: disable=protected-access
        except socket.error:
            pass

        # Set all the pending results to False
        for async_result in self.messageids_to_asyncresults.values():
            async_result.set(False)

    def get_health_events(self, recipient):
        """ Starts a healthcheck task for `recipient` and returns a
        HealthEvents with locks to react on its current state.
        """
        if recipient not in self.addresses_events:
            self.start_health_check(recipient)

        return self.addresses_events[recipient]

    def start_health_check(self, recipient):
        """ Starts a task for healthchecking `recipient` if there is not
        one yet.
        """
        if recipient not in self.addresses_events:
            ping_nonce = self.nodeaddresses_to_nonces.setdefault(
                recipient,
                {'nonce': 0},  # HACK: Allows the task to mutate the object
            )

            events = healthcheck.HealthEvents(
                event_healthy=Event(),
                event_unhealthy=Event(),
            )

            self.addresses_events[recipient] = events

            greenlet_healthcheck = gevent.spawn(
                healthcheck.healthcheck,
                self,
                recipient,
                self.event_stop,
                events.event_healthy,
                events.event_unhealthy,
                self.nat_keepalive_retries,
                self.nat_keepalive_timeout,
                self.nat_invitation_timeout,
                ping_nonce,
            )
            greenlet_healthcheck.name = f'Healthcheck for {pex(recipient)}'
            greenlet_healthcheck.link_exception(self.on_error)
            self.greenlets.append(greenlet_healthcheck)

    def init_queue_for(
            self,
            queue_identifier: QueueIdentifier,
            items: typing.List[QueueItem_T],
    ) -> Queue_T:
        """ Create the queue identified by the queue_identifier
        and initialize it with `items`.
        """
        recipient = queue_identifier.recipient
        queue = self.queueids_to_queues.get(queue_identifier)
        assert queue is None

        queue = NotifyingQueue(items=items)
        self.queueids_to_queues[queue_identifier] = queue

        events = self.get_health_events(recipient)

        greenlet_queue = gevent.spawn(
            single_queue_send,
            self,
            recipient,
            queue,
            queue_identifier,
            self.event_stop,
            events.event_healthy,
            events.event_unhealthy,
            self.retries_before_backoff,
            self.retry_interval,
            self.retry_interval * 10,
        )

        if queue_identifier.channel_identifier == CHANNEL_IDENTIFIER_GLOBAL_QUEUE:
            greenlet_queue.name = f'Queue for {pex(recipient)} - global'
        else:
            greenlet_queue.name = (
                f'Queue for {pex(recipient)} - {queue_identifier.channel_identifier}'
            )

        greenlet_queue.link_exception(self.on_error)
        self.greenlets.append(greenlet_queue)

        log.debug(
            'new queue created for',
            node=pex(self.raiden.address),
            queue_identifier=queue_identifier,
            items_qty=len(items),
        )

        return queue

    def get_queue_for(
            self,
            queue_identifier: QueueIdentifier,
    ) -> Queue_T:
        """ Return the queue identified by the given queue identifier.

        If the queue doesn't exist it will be instantiated.
        """
        queue = self.queueids_to_queues.get(queue_identifier)

        if queue is None:
            items = ()
            queue = self.init_queue_for(queue_identifier, items)

        return queue

    def send_async(
            self,
            queue_identifier: QueueIdentifier,
            message: 'Message',
    ):
        """ Send a new ordered message to recipient.

        Messages that use the same `queue_identifier` are ordered.
        """
        recipient = queue_identifier.recipient
        if not is_binary_address(recipient):
            raise ValueError('Invalid address {}'.format(pex(recipient)))

        # These are not protocol messages, but transport specific messages
        if isinstance(message, (Delivered, Ping, Pong)):
            raise ValueError('Do not use send for {} messages'.format(message.__class__.__name__))

        messagedata = message.encode()
        if len(messagedata) > self.UDP_MAX_MESSAGE_SIZE:
            raise ValueError(
                'message size exceeds the maximum {}'.format(self.UDP_MAX_MESSAGE_SIZE),
            )

        # message identifiers must be unique
        message_id = message.message_identifier

        # ignore duplicates
        if message_id not in self.messageids_to_asyncresults:
            self.messageids_to_asyncresults[message_id] = AsyncResult()

            queue = self.get_queue_for(queue_identifier)
            queue.put((messagedata, message_id))
            assert queue.is_set()

            log.debug(
                'MESSAGE QUEUED',
                node=pex(self.raiden.address),
                queue_identifier=queue_identifier,
                queue_size=len(queue),
                message=message,
            )

    def maybe_send(self, recipient: typing.Address, message: Message):
        """ Send message to recipient if the transport is running. """

        if not is_binary_address(recipient):
            raise InvalidAddress('Invalid address {}'.format(pex(recipient)))

        messagedata = message.encode()
        host_port = self.get_host_port(recipient)

        self.maybe_sendraw(host_port, messagedata)

    def maybe_sendraw_with_result(
            self,
            recipient: typing.Address,
            messagedata: bytes,
            message_id: typing.MessageID,
    ) -> AsyncResult:
        """ Send message to recipient if the transport is running.

        Returns:
            An AsyncResult that will be set once the message is delivered. As
            long as the message has not been acknowledged with a Delivered
            message the function will return the same AsyncResult.
        """
        async_result = self.messageids_to_asyncresults.get(message_id)
        if async_result is None:
            async_result = AsyncResult()
            self.messageids_to_asyncresults[message_id] = async_result

        host_port = self.get_host_port(recipient)
        self.maybe_sendraw(host_port, messagedata)

        return async_result

    def maybe_sendraw(self, host_port: typing.Tuple[int, int], messagedata: bytes):
        """ Send message to recipient if the transport is running. """

        # Don't sleep if the timeout is zero, otherwise a context-switch is done
        # and the message is delayed, increasing its latency
        sleep_timeout = self.throttle_policy.consume(1)
        if sleep_timeout:
            gevent.sleep(sleep_timeout)

        # Check the udp socket is still available before trying to send the
        # message. There must be *no context-switches after this test*.
        if hasattr(self.server, 'socket'):
            self.server.sendto(
                messagedata,
                host_port,
            )

    def receive(
            self,
            messagedata: bytes,
            host_port: typing.Tuple[str, int],  # pylint: disable=unused-argument
    ) -> bool:
        """ Handle an UDP packet. """
        # pylint: disable=unidiomatic-typecheck

        if len(messagedata) > self.UDP_MAX_MESSAGE_SIZE:
            log.warning(
                'INVALID MESSAGE: Packet larger than maximum size',
                node=pex(self.raiden.address),
                message=hexlify(messagedata),
                length=len(messagedata),
            )
            return False

        try:
            message = decode(messagedata)
        except InvalidProtocolMessage as e:
            log.warning(
                'INVALID PROTOCOL MESSAGE',
                error=str(e),
                node=pex(self.raiden.address),
                message=hexlify(messagedata),
            )
            return False

        if type(message) == Pong:
            self.receive_pong(message)
        elif type(message) == Ping:
            self.receive_ping(message)
        elif type(message) == Delivered:
            self.receive_delivered(message)
        elif message is not None:
            self.receive_message(message)
        else:
            log.warning(
                'INVALID MESSAGE: Unknown cmdid',
                node=pex(self.raiden.address),
                message=hexlify(messagedata),
            )
            return False

        return True

    def receive_message(self, message: Message):
        """ Handle a Raiden protocol message.

        The protocol requires durability of the messages. The UDP transport
        relies on the node's WAL for durability. The message will be converted
        to a state change, saved to the WAL, and *processed* before the
        durability is confirmed, which is a stronger property than what is
        required of any transport.
        """
        # pylint: disable=unidiomatic-typecheck

        if on_message(self.raiden, message):

            # Sending Delivered after the message is decoded and *processed*
            # gives a stronger guarantee than what is required from a
            # transport.
            #
            # Alternatives are, from weakest to strongest options:
            # - Just save it on disk and asynchronously process the messages
            # - Decode it, save to the WAL, and asynchronously process the
            #   state change
            # - Decode it, save to the WAL, and process it (the current
            #   implementation)
            delivered_message = Delivered(message.message_identifier)
            self.raiden.sign(delivered_message)

            self.maybe_send(
                message.sender,
                delivered_message,
            )

    def receive_delivered(self, delivered: Delivered):
        """ Handle a Delivered message.

        The Delivered message is how the UDP transport guarantees persistence
        by the partner node. The message itself is not part of the raiden
        protocol, but it's required by this transport to provide the required
        properties.
        """
        processed = ReceiveDelivered(delivered.delivered_message_identifier)
        self.raiden.handle_state_change(processed)

        message_id = delivered.delivered_message_identifier
        async_result = self.raiden.transport.messageids_to_asyncresults.get(message_id)

        # clear the async result, otherwise we have a memory leak
        if async_result is not None:
            del self.messageids_to_asyncresults[message_id]
            async_result.set()
        else:
            log.warn(
                'UNKNOWN DELIVERED MESSAGE RECEIVED',
                message_id=message_id,
            )

    # Pings and Pongs are used to check the health status of another node. They
    # are /not/ part of the raiden protocol, only part of the UDP transport,
    # therefore these messages are not forwarded to the message handler.
    def receive_ping(self, ping: Ping):
        """ Handle a Ping message by answering with a Pong. """

        log_healthcheck.debug(
            'PING RECEIVED',
            node=pex(self.raiden.address),
            message_id=ping.nonce,
            message=ping,
            sender=pex(ping.sender),
        )

        pong = Pong(ping.nonce)
        self.raiden.sign(pong)

        try:
            self.maybe_send(ping.sender, pong)
        except (InvalidAddress, UnknownAddress) as e:
            log.debug("Couldn't send the `Delivered` message", e=e)

    def receive_pong(self, pong: Pong):
        """ Handles a Pong message. """

        message_id = ('ping', pong.nonce, pong.sender)
        async_result = self.messageids_to_asyncresults.get(message_id)

        if async_result is not None:
            log_healthcheck.debug(
                'PONG RECEIVED',
                node=pex(self.raiden.address),
                sender=pex(pong.sender),
                message_id=pong.nonce,
            )

            async_result.set(True)

        else:
            log_healthcheck.warn(
                'UNKNOWN PONG RECEIVED',
                message_id=message_id,
            )

    def get_ping(self, nonce: int) -> bytes:
        """ Returns the encoded data of a signed Ping message.

        Note: Ping messages don't have an enforced ordering, so a Ping message
        with a higher nonce may be acknowledged first.
        """
        message = Ping(nonce)
        self.raiden.sign(message)
        message_data = message.encode()

        return message_data

    def set_node_network_state(self, node_address: typing.Address, node_state):
        state_change = ActionChangeNodeNetworkState(node_address, node_state)
        self.raiden.handle_state_change(state_change)
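
# A simplified, standalone sketch (not Raiden's actual API) of the
# acknowledgment pattern used by send_async/receive_delivered above: each
# outgoing message id maps to an AsyncResult, which is set to True when a
# matching Delivered message arrives. The AckTracker class is hypothetical.
import gevent
from gevent.event import AsyncResult

class AckTracker(object):
    def __init__(self):
        self.pending = {}

    def register(self, message_id):
        # Reuse the same AsyncResult for duplicate sends of one message id.
        return self.pending.setdefault(message_id, AsyncResult())

    def acknowledge(self, message_id):
        result = self.pending.pop(message_id, None)
        if result is not None:
            result.set(True)

if __name__ == '__main__':
    tracker = AckTracker()
    ack = tracker.register(message_id=42)
    gevent.spawn_later(0.1, tracker.acknowledge, 42)
    print(ack.wait(timeout=1))  # -> True once the "Delivered" arrives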
Example #45
class SteamUser(object):
    """
    A data model for a Steam user. Holds user persona state, and related actions

    .. note::
        This is an internal object that can be obtained by :meth:`SteamClient.get_user`
    """
    _pstate = None
    _pstate_requested = False
    steam_id = SteamID()  #: steam id
    relationship = EFriendRelationship.NONE   #: friendship status

    def __init__(self, steam_id, steam):
        self._pstate_ready = Event()
        self._steam = steam
        self.steam_id = SteamID(steam_id)

    def __repr__(self):
        return "<%s(%s, %s, %s)>" % (
            self.__class__.__name__,
            str(self.steam_id),
            self.relationship,
            self.state,
            )

    def refresh(self, wait=True):
        if self._pstate_requested and self._pstate_ready.is_set():
            self._pstate_requested = False

        if not self._pstate_requested:
            self._steam.request_persona_state([self.steam_id])
            self._pstate_ready.clear()
            self._pstate_requested = True

        if wait:
            self._pstate_ready.wait(timeout=5)
            self._pstate_requested = False

    def get_ps(self, field_name, wait_pstate=True):
        """Get property from PersonaState

        `See the full list of available field names <https://github.com/ValvePython/steam/blob/fa8a5127e9bb23185483930da0b6ae85e93055a7/protobufs/steammessages_clientserver_friends.proto#L125-L153>`_
        """
        if not self._pstate_ready.is_set() and wait_pstate:
            self.refresh()

        if self._pstate is not None:
            return getattr(self._pstate, field_name)
        else:
            return None

    @property
    def last_logon(self):
        """:rtype: :class:`datetime`, :class:`None`"""
        ts = self.get_ps('last_logon')
        return datetime.utcfromtimestamp(ts) if ts else None

    @property
    def last_logoff(self):
        """:rtype: :class:`datetime`, :class:`None`"""
        ts = self.get_ps('last_logoff')
        return datetime.utcfromtimestamp(ts) if ts else None

    @property
    def name(self):
        """Name of the steam user, or ``None`` if it's not available

        :rtype: :class:`str`, :class:`None`
        """
        return self.get_ps('player_name')

    @property
    def state(self):
        """Personsa state (e.g. Online, Offline, Away, Busy, etc)

        :rtype: :class:`.EPersonaState`
        """
        state = self.get_ps('persona_state', False)
        return EPersonaState(state) if state else EPersonaState.Offline

    @property
    def rich_presence(self):
        """Contains Rich Presence key-values

        :rtype: dict
        """
        kvs = self.get_ps('rich_presence')
        data = {}

        if kvs:
            for kv in kvs:
                data[kv.key] = kv.value

        return data

    def get_avatar_url(self, size=2):
        """Get URL to avatar picture

        :param size: possible values are ``0``, ``1``, or ``2`` corresponding to small, medium, large
        :type size: :class:`int`
        :return: url to avatar
        :rtype: :class:`str`
        """
        hashbytes = self.get_ps('avatar_hash')

        if hashbytes != "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000":
            ahash = hexlify(hashbytes).decode('ascii')
        else:
            ahash = 'fef49e7fa7e1997310d705b2a6158ff8dc1cdfeb'

        sizes = {
            0: '',
            1: '_medium',
            2: '_full',
        }
        url = "http://cdn.akamai.steamstatic.com/steamcommunity/public/images/avatars/%s/%s%s.jpg"

        return url % (ahash[:2], ahash, sizes[size])

    def send_message(self, message):
        """Send chat message to this steam user

        :param message: message to send
        :type message: str
        """
        # new chat
        if self._steam.chat_mode == 2:
            self._steam.send_um("FriendMessages.SendMessage#1", {
                'steamid': self.steam_id,
                'message': message,
                'chat_entry_type': EChatEntryType.ChatMsg,
                })
        # old chat
        else:
            self._steam.send(MsgProto(EMsg.ClientFriendMsg), {
                'steamid': self.steam_id,
                'chat_entry_type': EChatEntryType.ChatMsg,
                'message': message.encode('utf8'),
                })

    def block(self):
        """Block user"""
        self._steam.friends.block(self)

    def unblock(self):
        """Unblock user"""
        self._steam.friends.unblock(self)
Example #46
def main():
    stop_event = Event()

    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--mock-networking',
        action='store_true',
        help='In-Process Trader, MessageBroker and CommitmentService')
    parser.add_argument(
        '--mock',
        action='store_true',
        help='Spawns mock offers to simulate trading activity')
    parser.add_argument('--seed',
                        type=str,
                        default='raidex-node',
                        help='Use the keccak privkey from seed')
    parser.add_argument('--keyfile',
                        type=argparse.FileType('r'),
                        help='path to keyfile')
    parser.add_argument('--pwfile',
                        type=argparse.FileType('r'),
                        help='path to pw')
    parser.add_argument("--api", action='store_true', help='Run the REST-API')
    parser.add_argument("--api-port",
                        type=int,
                        help='Specify the port for the api, default is 50001',
                        default=50001)
    parser.add_argument("--offer-lifetime",
                        type=int,
                        help='Lifetime of offers spawned by LimitOrders',
                        default=30)
    parser.add_argument(
        "--broker-host",
        type=str,
        help='Specify the host for the message broker, default is localhost',
        default='localhost')
    parser.add_argument(
        "--broker-port",
        type=int,
        help='Specify the port for the message broker, default is 5000',
        default=5000)
    parser.add_argument(
        "--trader-host",
        type=str,
        help='Specify the host for the trader mock, default is localhost',
        default='localhost')
    parser.add_argument(
        "--trader-port",
        type=int,
        help='Specify the port for the trader mock, default is 5001',
        default=5001)
    parser.add_argument('--bots',
                        nargs='+',
                        help='Start a set (or subset) of trading bots. '
                             'Options: "liquidity", "random", "manipulator"')
    parser.add_argument(
        '--token-address',
        type=str,
        help='Token address of token to trade against WETH on kovan',
        default='0x92276aD441CA1F3d8942d614a6c3c87592dd30bb')

    args = parser.parse_args()

    if args.mock_networking is True:
        message_broker = MessageBroker()
        commitment_service = CommitmentService.build_service(message_broker,
                                                             fee_rate=1)
        raidex_app = App.build_from_mocks(message_broker,
                                          commitment_service.address,
                                          base_token_addr=args.token_address,
                                          quote_token_addr=KOVAN_WETH_ADDRESS,
                                          keyfile=args.keyfile,
                                          pw_file=args.pwfile,
                                          offer_lifetime=args.offer_lifetime)
        commitment_service.start()
    else:
        raidex_app = App.build_default_from_config(
            keyfile=args.keyfile,
            pw_file=args.pwfile,
            cs_address=CS_ADDRESS,
            base_token_addr=args.token_address,
            quote_token_addr=KOVAN_WETH_ADDRESS,
            message_broker_host=args.broker_host,
            message_broker_port=args.broker_port,
            trader_host=args.trader_host,
            trader_port=args.trader_port,
            mock_trading_activity=args.mock,
            offer_lifetime=args.offer_lifetime)

    raidex_app.start()

    if args.api is True:
        api = APIServer('', args.api_port, raidex_app.raidex_node)
        api.start()

    bots = args.bots
    if bots:
        initial_price = 100.

        if 'liquidity' in bots:
            liquidity_provider = LiquidityProvider(raidex_app, initial_price)
            liquidity_provider.start()
        if 'random' in bots:
            gevent.sleep(5)  # give liquidity provider head start
            random_walker = RandomWalker(raidex_app, initial_price)
            random_walker.start()
        if 'manipulator' in bots:
            if 'random' not in bots:
                gevent.sleep(5)  # give liquidity provider head start
            manipulator = Manipulator(raidex_app, initial_price)
            manipulator.start()

    stop_event.wait()  # runs forever
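
# Hypothetical addition (not in the script above): wire SIGINT/SIGTERM to the
# stop_event so that `stop_event.wait()` returns and the process can exit
# cleanly. Assumes gevent >= 1.5 for gevent.signal_handler.
import signal
import gevent
from gevent.event import Event

def install_shutdown_handlers(stop_event):
    for signum in (signal.SIGINT, signal.SIGTERM):
        gevent.signal_handler(signum, stop_event.set)

if __name__ == '__main__':
    stop_event = Event()
    install_shutdown_handlers(stop_event)
    stop_event.wait()  # returns once a signal handler calls stop_event.set()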
Example #47
class ReplayProcess(BaseReplayProcess):

    '''
    ReplayProcess - A process spawned for the purpose of replaying data
    --------------------------------------------------------------------------------
    Configurations
    ==============
    process:
      dataset_id:      ""     # Dataset to be replayed
      delivery_format: {}     # Delivery format to be replayed back (unused for now)
      query:
        start_time: 0         # Start time (index value) to be replayed
        end_time:   0         # End time (index value) to be replayed
        parameters: []        # List of parameters to include in the granule

    '''
    process_type  = 'standalone'
    publish_limit = 10
    dataset_id    = None
    delivery_format = {}
    start_time      = None
    end_time        = None
    stride_time     = None
    parameters      = None
    stream_id       = ''
    stream_def_id   = ''


    def __init__(self, *args, **kwargs):
        super(ReplayProcess,self).__init__(*args,**kwargs)
        self.deserializer = IonObjectDeserializer(obj_registry=get_obj_registry())
        self.publishing   = Event()
        self.play         = Event()
        self.end          = Event()

    def on_start(self):
        '''
        Starts the process
        '''
        log.info('Replay Process Started')
        super(ReplayProcess,self).on_start()
        dsm_cli = DatasetManagementServiceProcessClient(process=self)
        pubsub  = PubsubManagementServiceProcessClient(process=self)

        self.dataset_id      = self.CFG.get_safe('process.dataset_id', None)
        self.delivery_format = self.CFG.get_safe('process.delivery_format',{})
        self.start_time      = self.CFG.get_safe('process.query.start_time', None)
        self.end_time        = self.CFG.get_safe('process.query.end_time', None)
        self.stride_time     = self.CFG.get_safe('process.query.stride_time', None)
        self.parameters      = self.CFG.get_safe('process.query.parameters',None)
        self.publish_limit   = self.CFG.get_safe('process.query.publish_limit', 10)
        self.tdoa            = self.CFG.get_safe('process.query.tdoa',None)
        self.stream_id       = self.CFG.get_safe('process.publish_streams.output', '')
        self.stream_def      = pubsub.read_stream_definition(stream_id=self.stream_id)
        self.stream_def_id   = self.stream_def._id
        self.replay_thread   = None

        self.publishing.clear()
        self.play.set()
        self.end.clear()

        if self.dataset_id is None:
            raise BadRequest('dataset_id not specified')

        self.dataset = dsm_cli.read_dataset(self.dataset_id)
        self.pubsub = PubsubManagementServiceProcessClient(process=self)


    @classmethod
    def get_time_idx(cls, coverage, timeval):
        corrected_time = cls.convert_time(coverage, timeval)

        idx = TimeUtils.get_relative_time(coverage, corrected_time)
        return idx

    @classmethod
    def convert_time(cls, coverage, timeval):
        tname = coverage.temporal_parameter_name
        uom = coverage.get_parameter_context(tname).uom

        corrected_time = TimeUtils.ts_to_units(uom, timeval)
        return corrected_time

    @classmethod
    def _data_dict_to_rdt(cls, data_dict, stream_def_id, coverage):
        if stream_def_id:
            rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
        else:
            rdt = RecordDictionaryTool(param_dictionary=coverage.parameter_dictionary)
        if not data_dict:
            log.warning('Retrieve returning empty set')
            return rdt

        if 'time' in data_dict and data_dict['time'].shape[0] == 0:
            log.warning('Retrieve returning empty set')
            return rdt


        rdt[coverage.temporal_parameter_name] = data_dict[coverage.temporal_parameter_name]
        for field in rdt.fields:
            if field == coverage.temporal_parameter_name:
                continue
            # The values have already been inside a coverage so we know they're safe and they exist, so they can be inserted directly.
            if field in data_dict:
                rdt._rd[field] = data_dict[field]
            #rdt[k] = v

        return rdt

    @classmethod
    def _cov2granule(cls, coverage, start_time=None, end_time=None, stride_time=None, stream_def_id=None, parameters=None, tdoa=None, sort_parameter=None):
        # Convert Unix timestamps to the NTP epoch (offset of 2208988800 seconds)
        if start_time:
            start_time += 2208988800
        if end_time:
            end_time += 2208988800

        if tdoa is None:
            if start_time is None and end_time is None:
                data_dict = coverage.get_parameter_values(param_names=parameters, stride_length=stride_time, fill_empty_params=True, sort_parameter=sort_parameter).get_data()
            else:
                data_dict = coverage.get_parameter_values(param_names=parameters, time_segment=(start_time, end_time), stride_length=stride_time, fill_empty_params=True, sort_parameter=sort_parameter).get_data()
        elif isinstance(tdoa, slice):
            log.warning("Using tdoa argument on large datasets can consume too much memory")
            data_dict = coverage.get_parameter_values(param_names=parameters, fill_empty_params=True).get_data()
            data_dict = { k : v[tdoa] for k,v in data_dict.iteritems() }
        else:
            raise TypeError("tdoa is incorrect type: %s" % type(tdoa))

        return cls._data_dict_to_rdt(data_dict, stream_def_id, coverage)
       


    def execute_retrieve(self):
        '''
        execute_retrieve Executes a retrieval and returns the result 
        as a value in lieu of publishing it on a stream
        '''
        coverage = None
        try:
            coverage = DatasetManagementService._get_coverage(self.dataset_id, mode='r')
            if coverage.is_empty():
                log.info('Reading from an empty coverage')
                rdt = RecordDictionaryTool(param_dictionary=coverage.parameter_dictionary)
            else: 
                rdt = ReplayProcess._cov2granule(coverage=coverage, 
                        start_time=self.start_time, 
                        end_time=self.end_time,
                        stride_time=self.stride_time, 
                        parameters=self.parameters, 
                        stream_def_id=self.delivery_format, 
                        tdoa=self.tdoa)
        except:
            log.exception('Problems reading from the coverage')
            raise BadRequest('Problems reading from the coverage')
        finally:
            if coverage is not None:
                coverage.close(timeout=5)
        return rdt.to_granule()

    @classmethod
    def get_last_values(cls, dataset_id, number_of_points=100, delivery_format=''):
        stream_def_id = delivery_format
        cov = None
        try:
            cov = DatasetManagementService._get_coverage(dataset_id, mode='r')
            if cov.is_empty():
                rdt = RecordDictionaryTool(param_dictionary=cov.parameter_dictionary)
            else:
                time_array = cov.get_parameter_values([cov.temporal_parameter_name], sort_parameter=cov.temporal_parameter_name).get_data()
                time_array = time_array[cov.temporal_parameter_name][-number_of_points:]

                t0 = np.asscalar(time_array[0])
                t1 = np.asscalar(time_array[-1])

                data_dict = cov.get_parameter_values(time_segment=(t0, t1), fill_empty_params=True).get_data()
                rdt = cls._data_dict_to_rdt(data_dict, stream_def_id, cov)
        except:
            log.exception('Problems reading from the coverage')
            raise BadRequest('Problems reading from the coverage')
        finally:
            if cov is not None:
                cov.close(timeout=5)
        return rdt


    def execute_replay(self):
        '''
        execute_replay Performs a replay and publishes the results on a stream. 
        '''
        if self.publishing.is_set():
            return False
        self.replay_thread = self._process.thread_manager.spawn(self.replay)
        return True

    def replay(self):
        self.publishing.set() # Minimal state, supposed to prevent two instances of the same process from replaying on the same stream
        for rdt in self._replay():
            if self.end.is_set():
                return
            self.play.wait()
            self.output.publish(rdt.to_granule())

        self.publishing.clear()
        return 

    def pause(self):
        self.play.clear()

    def resume(self):
        self.play.set()

    def stop(self):
        self.end.set()

    def _replay(self):
        coverage = DatasetManagementService._get_coverage(self.dataset_id,mode='r')
        rdt = self._cov2granule(coverage=coverage, start_time=self.start_time, end_time=self.end_time, stride_time=self.stride_time, parameters=self.parameters, stream_def_id=self.stream_def_id)
        elements = len(rdt)
        
        for i in xrange(elements / self.publish_limit):
            outgoing = RecordDictionaryTool(stream_definition_id=self.stream_def_id)
            fields = self.parameters or outgoing.fields
            for field in fields:
                v = rdt[field]
                if v is not None:
                    outgoing[field] = v[(i*self.publish_limit) : ((i+1)*self.publish_limit)]
            yield outgoing
        coverage.close(timeout=5)
        return 
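
# Simplified, standalone sketch of the pause/resume/stop gating used by
# ReplayProcess above. `chunks` stands in for the granules and `publish` for
# the output stream; nothing here touches the Ion container.
import gevent
from gevent.event import Event

class ReplayControl(object):
    def __init__(self):
        self.play = Event()
        self.play.set()       # start unpaused
        self.end = Event()

    def replay(self, chunks, publish):
        for chunk in chunks:
            if self.end.is_set():
                return
            self.play.wait()  # blocks while paused
            publish(chunk)

    def pause(self):
        self.play.clear()

    def resume(self):
        self.play.set()

    def stop(self):
        self.end.set()

if __name__ == '__main__':
    control = ReplayControl()
    worker = gevent.spawn(control.replay, range(3), print)
    worker.join()             # prints 0, 1, 2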
Example #48
class Group(GroupMappingMixin):
    """
    Maintain a group of greenlets that are still running, without
    limiting their number.

    Links to each item and removes it upon notification.

    Groups can be iterated to discover what greenlets they are tracking,
    they can be tested to see if they contain a greenlet, and they know the
    number (len) of greenlets they are tracking. If they are not tracking any
    greenlets, they are False in a boolean context.

    .. attribute:: greenlet_class

        Either :class:`gevent.Greenlet` (the default) or a subclass.
        These are the type of
        object we will :meth:`spawn`. This can be
        changed on an instance or in a subclass.
    """

    greenlet_class = Greenlet

    def __init__(self, *args):
        assert len(args) <= 1, args
        self.greenlets = set(*args)
        if args:
            for greenlet in args[0]:
                greenlet.rawlink(self._discard)
        # each item we kill we place in dying, to avoid killing the same greenlet twice
        self.dying = set()
        self._empty_event = Event()
        self._empty_event.set()

    def __repr__(self):
        return '<%s at 0x%x %s>' % (self.__class__.__name__, id(self), self.greenlets)

    def __len__(self):
        """
        Answer how many greenlets we are tracking. Note that if we are empty,
        we are False in a boolean context.
        """
        return len(self.greenlets)

    def __contains__(self, item):
        """
        Answer if we are tracking the given greenlet.
        """
        return item in self.greenlets

    def __iter__(self):
        """
        Iterate across all the greenlets we are tracking, in no particular order.
        """
        return iter(self.greenlets)

    def add(self, greenlet):
        """
        Begin tracking the *greenlet*.

        If this group is :meth:`full`, then this method may block
        until it is possible to track the greenlet.

        Typically the *greenlet* should **not** be started when
        it is added because if this object blocks in this method,
        then the *greenlet* may run to completion before it is tracked.
        """
        try:
            rawlink = greenlet.rawlink
        except AttributeError:
            pass  # non-Greenlet greenlet, like MAIN
        else:
            rawlink(self._discard)
        self.greenlets.add(greenlet)
        self._empty_event.clear()

    def _discard(self, greenlet):
        self.greenlets.discard(greenlet)
        self.dying.discard(greenlet)
        if not self.greenlets:
            self._empty_event.set()

    def discard(self, greenlet):
        """
        Stop tracking the greenlet.
        """
        self._discard(greenlet)
        try:
            unlink = greenlet.unlink
        except AttributeError:
            pass  # non-Greenlet greenlet, like MAIN
        else:
            unlink(self._discard)

    def start(self, greenlet):
        """
        Add the **unstarted** *greenlet* to the collection of greenlets
        this group is monitoring, and then start it.
        """
        self.add(greenlet)
        greenlet.start()

    def spawn(self, *args, **kwargs): # pylint:disable=arguments-differ
        """
        Begin a new greenlet with the given arguments (which are passed
        to the greenlet constructor) and add it to the collection of greenlets
        this group is monitoring.

        :return: The newly started greenlet.
        """
        greenlet = self.greenlet_class(*args, **kwargs)
        self.start(greenlet)
        return greenlet

#     def close(self):
#         """Prevents any more tasks from being submitted to the pool"""
#         self.add = RaiseException("This %s has been closed" % self.__class__.__name__)

    def join(self, timeout=None, raise_error=False):
        """
        Wait for this group to become empty *at least once*.

        If there are no greenlets in the group, returns immediately.

        .. note:: By the time the waiting code (the caller of this
           method) regains control, a greenlet may have been added to
           this group, and so this object may no longer be empty. (That
           is, ``group.join(); assert len(group) == 0`` is not
           guaranteed to hold.) This method only guarantees that the group
           reached a ``len`` of 0 at some point.

        :keyword bool raise_error: If True (*not* the default), if any
            greenlet that finished while the join was in progress raised
            an exception, that exception will be raised to the caller of
            this method. If multiple greenlets raised exceptions, which
            one gets re-raised is not determined. Only greenlets currently
            in the group when this method is called are guaranteed to
            be checked for exceptions.

        :return bool: A value indicating whether this group became empty.
           If the timeout is specified and the group did not become empty
           during that timeout, then this will be a false value. Otherwise
           it will be a true value.

        .. versionchanged:: 1.2a1
           Add the return value.
        """
        greenlets = list(self.greenlets) if raise_error else ()
        result = self._empty_event.wait(timeout=timeout)

        for greenlet in greenlets:
            if greenlet.exception is not None:
                if hasattr(greenlet, '_raise_exception'):
                    greenlet._raise_exception()
                raise greenlet.exception

        return result

    def kill(self, exception=GreenletExit, block=True, timeout=None):
        """
        Kill all greenlets being tracked by this group.
        """
        timer = Timeout._start_new_or_dummy(timeout)
        try:
            while self.greenlets:
                for greenlet in list(self.greenlets):
                    if greenlet in self.dying:
                        continue
                    try:
                        kill = greenlet.kill
                    except AttributeError:
                        _kill(greenlet, exception)
                    else:
                        kill(exception, block=False)
                    self.dying.add(greenlet)
                if not block:
                    break
                joinall(self.greenlets)
        except Timeout as ex:
            if ex is not timer:
                raise
        finally:
            timer.cancel()

    def killone(self, greenlet, exception=GreenletExit, block=True, timeout=None):
        """
        If the given *greenlet* is running and being tracked by this group,
        kill it.
        """
        if greenlet not in self.dying and greenlet in self.greenlets:
            greenlet.kill(exception, block=False)
            self.dying.add(greenlet)
            if block:
                greenlet.join(timeout)

    def full(self):
        """
        Return a value indicating whether this group can track more greenlets.

        In this implementation, because there are no limits on the number of
        tracked greenlets, this will always return a ``False`` value.
        """
        return False

    def wait_available(self, timeout=None):
        """
        Block until it is possible to :meth:`spawn` a new greenlet.

        In this implementation, because there are no limits on the number
        of tracked greenlets, this will always return immediately.
        """

    # MappingMixin methods

    def _apply_immediately(self):
        # If apply() is called from one of our own
        # worker greenlets, don't spawn a new one---if we're full, that
        # could deadlock.
        return getcurrent() in self

    def _apply_async_cb_spawn(self, callback, result):
        Greenlet.spawn(callback, result)

    def _apply_async_use_greenlet(self):
        # cannot call self.spawn() because it will block, so
        # use a fresh, untracked greenlet that when run will
        # (indirectly) call self.spawn() for us.
        return self.full()
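
# Usage sketch for Group (importing gevent.pool.Group, the class shown above):
# spawn a few greenlets, wait for all of them, then demonstrate kill() on a
# long-running one. The work function and delays are illustrative.
import gevent
from gevent.pool import Group

def work(n):
    gevent.sleep(0.01 * n)
    return n * n

if __name__ == '__main__':
    group = Group()
    greenlets = [group.spawn(work, n) for n in range(5)]
    group.join(raise_error=True)         # group became empty at least once
    print([g.value for g in greenlets])  # -> [0, 1, 4, 9, 16]

    sleeper = group.spawn(gevent.sleep, 60)
    group.kill(timeout=1)                # kills the tracked sleeper
    print(sleeper.ready())               # -> True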
Example #49
class TestDMEnd2End(IonIntegrationTestCase):
    def setUp(self):  # Love the non pep-8 convention
        self._start_container()

        self.container.start_rel_from_url('res/deploy/r2deploy.yml')

        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.pubsub_management = PubsubManagementServiceClient()
        self.resource_registry = ResourceRegistryServiceClient()
        self.dataset_management = DatasetManagementServiceClient()
        self.ingestion_management = IngestionManagementServiceClient()
        self.data_retriever = DataRetrieverServiceClient()
        self.pids = []
        self.event = Event()
        self.exchange_space_name = 'test_granules'
        self.exchange_point_name = 'science_data'
        self.i = 0

        self.purge_queues()
        self.queue_buffer = []
        self.streams = []
        self.addCleanup(self.stop_all_ingestion)

    def purge_queues(self):
        xn = self.container.ex_manager.create_xn_queue(
            'science_granule_ingestion')
        xn.purge()

    def tearDown(self):
        self.purge_queues()
        for pid in self.pids:
            self.container.proc_manager.terminate_process(pid)
        IngestionManagementIntTest.clean_subscriptions()
        for queue in self.queue_buffer:
            if isinstance(queue, ExchangeNameQueue):
                queue.delete()
            elif isinstance(queue, str):
                xn = self.container.ex_manager.create_xn_queue(queue)
                xn.delete()

    #--------------------------------------------------------------------------------
    # Helper/Utility methods
    #--------------------------------------------------------------------------------

    def create_dataset(self, parameter_dict_id=''):
        '''
        Creates a time-series dataset
        '''
        tdom, sdom = time_series_domain()
        sdom = sdom.dump()
        tdom = tdom.dump()
        if not parameter_dict_id:
            parameter_dict_id = self.dataset_management.read_parameter_dictionary_by_name(
                'ctd_parsed_param_dict', id_only=True)

        dataset_id = self.dataset_management.create_dataset(
            'test_dataset_%i' % self.i,
            parameter_dictionary_id=parameter_dict_id,
            spatial_domain=sdom,
            temporal_domain=tdom)
        return dataset_id

    def get_datastore(self, dataset_id):
        '''
        Gets an instance of the datastore
            This method is primarily used to defeat a bug where integration tests in multiple containers may sometimes 
            delete a CouchDB datastore and the other containers are unaware of the new state of the datastore.
        '''
        dataset = self.dataset_management.read_dataset(dataset_id)
        datastore_name = dataset.datastore_name
        datastore = self.container.datastore_manager.get_datastore(
            datastore_name, DataStore.DS_PROFILE.SCIDATA)
        return datastore

    def get_ingestion_config(self):
        '''
        Grab the ingestion configuration from the resource registry
        '''
        # The ingestion configuration should have been created by the bootstrap service
        # which is configured through r2deploy.yml

        ingest_configs, _ = self.resource_registry.find_resources(
            restype=RT.IngestionConfiguration, id_only=True)
        return ingest_configs[0]

    def launch_producer(self, stream_id=''):
        '''
        Launch the producer
        '''

        pid = self.container.spawn_process(
            'better_data_producer', 'ion.processes.data.example_data_producer',
            'BetterDataProducer', {'process': {
                'stream_id': stream_id
            }})

        self.pids.append(pid)

    def make_simple_dataset(self):
        '''
        Makes a stream, a stream definition and a dataset, the essentials for most of these tests
        '''
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        stream_def_id = self.pubsub_management.create_stream_definition(
            'ctd data', parameter_dictionary_id=pdict_id)
        stream_id, route = self.pubsub_management.create_stream(
            'ctd stream %i' % self.i,
            'xp1',
            stream_definition_id=stream_def_id)

        dataset_id = self.create_dataset(pdict_id)

        self.get_datastore(dataset_id)
        self.i += 1
        return stream_id, route, stream_def_id, dataset_id

    def publish_hifi(self, stream_id, stream_route, offset=0):
        '''
        Publish deterministic data
        '''

        pub = StandaloneStreamPublisher(stream_id, stream_route)

        stream_def = self.pubsub_management.read_stream_definition(
            stream_id=stream_id)
        stream_def_id = stream_def._id
        rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
        rdt['time'] = np.arange(10) + (offset * 10)
        rdt['temp'] = np.arange(10) + (offset * 10)
        pub.publish(rdt.to_granule())

    def publish_fake_data(self, stream_id, route):
        '''
        Make four granules
        '''
        for i in xrange(4):
            self.publish_hifi(stream_id, route, i)

    def start_ingestion(self, stream_id, dataset_id):
        '''
        Starts ingestion/persistence for a given dataset
        '''
        ingest_config_id = self.get_ingestion_config()
        self.ingestion_management.persist_data_stream(
            stream_id=stream_id,
            ingestion_configuration_id=ingest_config_id,
            dataset_id=dataset_id)

    def stop_ingestion(self, stream_id):
        ingest_config_id = self.get_ingestion_config()
        self.ingestion_management.unpersist_data_stream(
            stream_id=stream_id, ingestion_configuration_id=ingest_config_id)

    def stop_all_ingestion(self):
        try:
            [self.stop_ingestion(sid) for sid in self.streams]
        except:
            pass

    def validate_granule_subscription(self, msg, route, stream_id):
        '''
        Validation for granule format
        '''
        if msg == {}:
            return
        rdt = RecordDictionaryTool.load_from_granule(msg)
        log.info('%s', rdt.pretty_print())
        self.assertIsInstance(
            msg, Granule, 'Message is improperly formatted. (%s)' % type(msg))
        self.event.set()

    def wait_until_we_have_enough_granules(self, dataset_id='', data_size=40):
        '''
        Loops until there is a sufficient amount of data in the dataset
        '''
        done = False
        with gevent.Timeout(40):
            while not done:
                extents = self.dataset_management.dataset_extents(
                    dataset_id, 'time')[0]
                granule = self.data_retriever.retrieve_last_data_points(
                    dataset_id, 1)
                rdt = RecordDictionaryTool.load_from_granule(granule)
                if rdt['time'] and rdt['time'][0] != rdt._pdict.get_context(
                        'time').fill_value and extents >= data_size:
                    done = True
                else:
                    gevent.sleep(0.2)

    #--------------------------------------------------------------------------------
    # Test Methods
    #--------------------------------------------------------------------------------

    @attr('SMOKE')
    def test_dm_end_2_end(self):
        #--------------------------------------------------------------------------------
        # Set up a stream and have a mock instrument (producer) send data
        #--------------------------------------------------------------------------------
        self.event.clear()

        # Get a precompiled parameter dictionary with basic ctd fields
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        context_ids = self.dataset_management.read_parameter_contexts(
            pdict_id, id_only=True)

        # Add a field that supports binary data input.
        bin_context = ParameterContext('binary', param_type=ArrayType())
        context_ids.append(
            self.dataset_management.create_parameter_context(
                'binary', bin_context.dump()))
        # Add another field that supports dictionary elements.
        rec_context = ParameterContext('records', param_type=RecordType())
        context_ids.append(
            self.dataset_management.create_parameter_context(
                'records', rec_context.dump()))

        pdict_id = self.dataset_management.create_parameter_dictionary(
            'replay_pdict',
            parameter_context_ids=context_ids,
            temporal_context='time')

        stream_definition = self.pubsub_management.create_stream_definition(
            'ctd data', parameter_dictionary_id=pdict_id)

        stream_id, route = self.pubsub_management.create_stream(
            'producer',
            exchange_point=self.exchange_point_name,
            stream_definition_id=stream_definition)

        #--------------------------------------------------------------------------------
        # Start persisting the data on the stream
        # - Get the ingestion configuration from the resource registry
        # - Create the dataset
        # - call persist_data_stream to setup the subscription for the ingestion workers
        #   on the stream that you specify which causes the data to be persisted
        #--------------------------------------------------------------------------------

        ingest_config_id = self.get_ingestion_config()
        dataset_id = self.create_dataset(pdict_id)
        self.ingestion_management.persist_data_stream(
            stream_id=stream_id,
            ingestion_configuration_id=ingest_config_id,
            dataset_id=dataset_id)

        #--------------------------------------------------------------------------------
        # Now the granules are ingesting and persisted
        #--------------------------------------------------------------------------------

        self.launch_producer(stream_id)
        self.wait_until_we_have_enough_granules(dataset_id, 40)

        #--------------------------------------------------------------------------------
        # Now get the data in one chunk using an RPC call to retrieve
        #--------------------------------------------------------------------------------

        replay_data = self.data_retriever.retrieve(dataset_id)
        self.assertIsInstance(replay_data, Granule)
        rdt = RecordDictionaryTool.load_from_granule(replay_data)
        self.assertTrue((rdt['time'][:10] == np.arange(10)).all(),
                        '%s' % rdt['time'][:])
        self.assertTrue((rdt['binary'][:10] == np.array(['hi'] * 10,
                                                        dtype='object')).all())

        #--------------------------------------------------------------------------------
        # Now to try the streamed approach
        #--------------------------------------------------------------------------------
        replay_stream_id, replay_route = self.pubsub_management.create_stream(
            'replay_out',
            exchange_point=self.exchange_point_name,
            stream_definition_id=stream_definition)
        self.replay_id, process_id = self.data_retriever.define_replay(
            dataset_id=dataset_id, stream_id=replay_stream_id)
        log.info('Process ID: %s', process_id)

        replay_client = ReplayClient(process_id)

        #--------------------------------------------------------------------------------
        # Create the listening endpoint for the retriever to talk to
        #--------------------------------------------------------------------------------
        xp = self.container.ex_manager.create_xp(self.exchange_point_name)
        subscriber = StandaloneStreamSubscriber(
            self.exchange_space_name, self.validate_granule_subscription)
        self.queue_buffer.append(self.exchange_space_name)
        subscriber.start()
        subscriber.xn.bind(replay_route.routing_key, xp)

        self.data_retriever.start_replay_agent(self.replay_id)

        self.assertTrue(replay_client.await_agent_ready(5),
                        'The process never launched')
        replay_client.start_replay()

        self.assertTrue(self.event.wait(10))
        subscriber.stop()

        self.data_retriever.cancel_replay_agent(self.replay_id)

        #--------------------------------------------------------------------------------
        # Test the slicing capabilities
        #--------------------------------------------------------------------------------

        granule = self.data_retriever.retrieve(dataset_id=dataset_id,
                                               query={'tdoa': slice(0, 5)})
        rdt = RecordDictionaryTool.load_from_granule(granule)
        b = rdt['time'] == np.arange(5)
        self.assertTrue(b.all() if not isinstance(b, bool) else b)
        self.streams.append(stream_id)
        self.stop_ingestion(stream_id)

    @unittest.skip("Doesn't work")
    @attr('LOCOINT')
    @unittest.skipIf(os.getenv('CEI_LAUNCH_TEST', False),
                     'Skip test while in CEI LAUNCH mode')
    def test_replay_pause(self):
        # Get a precompiled parameter dictionary with basic ctd fields
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        context_ids = self.dataset_management.read_parameter_contexts(
            pdict_id, id_only=True)

        # Add a field that supports binary data input.
        bin_context = ParameterContext('binary', param_type=ArrayType())
        context_ids.append(
            self.dataset_management.create_parameter_context(
                'binary', bin_context.dump()))
        # Add another field that supports dictionary elements.
        rec_context = ParameterContext('records', param_type=RecordType())
        context_ids.append(
            self.dataset_management.create_parameter_context(
                'records', rec_context.dump()))

        pdict_id = self.dataset_management.create_parameter_dictionary(
            'replay_pdict',
            parameter_context_ids=context_ids,
            temporal_context='time')

        stream_def_id = self.pubsub_management.create_stream_definition(
            'replay_stream', parameter_dictionary_id=pdict_id)
        replay_stream, replay_route = self.pubsub_management.create_stream(
            'replay', 'xp1', stream_definition_id=stream_def_id)
        dataset_id = self.create_dataset(pdict_id)
        scov = DatasetManagementService._get_coverage(dataset_id)

        bb = CoverageCraft(scov)
        bb.rdt['time'] = np.arange(100)
        bb.rdt['temp'] = np.random.random(100) + 30
        bb.sync_with_granule()

        DatasetManagementService._persist_coverage(
            dataset_id,
            bb.coverage)  # This invalidates it for multi-host configurations
        # Set up the subscriber to verify the data
        subscriber = StandaloneStreamSubscriber(
            self.exchange_space_name, self.validate_granule_subscription)
        xp = self.container.ex_manager.create_xp('xp1')
        self.queue_buffer.append(self.exchange_space_name)
        subscriber.start()
        subscriber.xn.bind(replay_route.routing_key, xp)

        # Set up the replay agent and the client wrapper

        # 1) Define the Replay (dataset and stream to publish on)
        self.replay_id, process_id = self.data_retriever.define_replay(
            dataset_id=dataset_id, stream_id=replay_stream)
        # 2) Make a client to interact with the process (optionally provide it a process to bind with)
        replay_client = ReplayClient(process_id)
        # 3) Start the agent (launch the process)
        self.data_retriever.start_replay_agent(self.replay_id)
        # 4) Start replaying...
        replay_client.start_replay()

        # Wait till we get some granules
        self.assertTrue(self.event.wait(5))

        # We got granules, pause the replay, clear the queue and allow the process to finish consuming
        replay_client.pause_replay()
        gevent.sleep(1)
        subscriber.xn.purge()
        self.event.clear()

        # Make sure there's no remaining messages being consumed
        self.assertFalse(self.event.wait(1))

        # Resume the replay and wait until we start getting granules again
        replay_client.resume_replay()
        self.assertTrue(self.event.wait(5))

        # Stop the replay, clear the queues
        replay_client.stop_replay()
        gevent.sleep(1)
        subscriber.xn.purge()
        self.event.clear()

        # Make sure that it did indeed stop
        self.assertFalse(self.event.wait(1))

        subscriber.stop()

    def test_retrieve_and_transform(self):
        # Make a simple dataset and start ingestion, pretty standard stuff.
        ctd_stream_id, route, stream_def_id, dataset_id = self.make_simple_dataset(
        )
        self.start_ingestion(ctd_stream_id, dataset_id)

        # Stream definition for the salinity data
        salinity_pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        sal_stream_def_id = self.pubsub_management.create_stream_definition(
            'sal data', parameter_dictionary_id=salinity_pdict_id)

        rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)
        rdt['time'] = np.arange(10)
        rdt['temp'] = np.random.randn(10) * 10 + 30
        rdt['conductivity'] = np.random.randn(10) * 2 + 10
        rdt['pressure'] = np.random.randn(10) * 1 + 12

        publisher = StandaloneStreamPublisher(ctd_stream_id, route)
        publisher.publish(rdt.to_granule())

        rdt['time'] = np.arange(10, 20)

        publisher.publish(rdt.to_granule())

        self.wait_until_we_have_enough_granules(dataset_id, 20)

        granule = self.data_retriever.retrieve(
            dataset_id,
            None,
            None,
            'ion.processes.data.transforms.ctd.ctd_L2_salinity',
            'CTDL2SalinityTransformAlgorithm',
            kwargs=dict(params=sal_stream_def_id))
        rdt = RecordDictionaryTool.load_from_granule(granule)
        for i in rdt['salinity']:
            self.assertNotEquals(i, 0)
        self.streams.append(ctd_stream_id)
        self.stop_ingestion(ctd_stream_id)

    def test_last_granule(self):
        stream_id, route, stream_def_id, dataset_id = self.make_simple_dataset(
        )
        self.start_ingestion(stream_id, dataset_id)

        self.publish_hifi(stream_id, route, 0)
        self.publish_hifi(stream_id, route, 1)

        self.wait_until_we_have_enough_granules(dataset_id,
                                                20)  # I just need two

        success = False

        def verifier():
            replay_granule = self.data_retriever.retrieve_last_data_points(
                dataset_id, 10)

            rdt = RecordDictionaryTool.load_from_granule(replay_granule)

            comp = rdt['time'] == np.arange(10) + 10
            if not isinstance(comp, bool):
                return comp.all()
            return False

        success = poll(verifier)

        self.assertTrue(success)

        success = False

        def verify_points():
            replay_granule = self.data_retriever.retrieve_last_data_points(
                dataset_id, 5)

            rdt = RecordDictionaryTool.load_from_granule(replay_granule)

            comp = rdt['time'] == np.arange(15, 20)
            if not isinstance(comp, bool):
                return comp.all()
            return False

        success = poll(verify_points)

        self.assertTrue(success)
        self.streams.append(stream_id)
        self.stop_ingestion(stream_id)

    def test_replay_with_parameters(self):
        #--------------------------------------------------------------------------------
        # Create the configurations and the dataset
        #--------------------------------------------------------------------------------
        # Get a precompiled parameter dictionary with basic ctd fields
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name(
            'ctd_parsed_param_dict', id_only=True)
        context_ids = self.dataset_management.read_parameter_contexts(
            pdict_id, id_only=True)

        # Add a field that supports binary data input.
        bin_context = ParameterContext('binary', param_type=ArrayType())
        context_ids.append(
            self.dataset_management.create_parameter_context(
                'binary', bin_context.dump()))
        # Add another field that supports dictionary elements.
        rec_context = ParameterContext('records', param_type=RecordType())
        context_ids.append(
            self.dataset_management.create_parameter_context(
                'records', rec_context.dump()))

        pdict_id = self.dataset_management.create_parameter_dictionary(
            'replay_pdict',
            parameter_context_ids=context_ids,
            temporal_context='time')

        stream_def_id = self.pubsub_management.create_stream_definition(
            'replay_stream', parameter_dictionary_id=pdict_id)

        stream_id, route = self.pubsub_management.create_stream(
            'replay_with_params',
            exchange_point=self.exchange_point_name,
            stream_definition_id=stream_def_id)
        config_id = self.get_ingestion_config()
        dataset_id = self.create_dataset(pdict_id)
        self.ingestion_management.persist_data_stream(
            stream_id=stream_id,
            ingestion_configuration_id=config_id,
            dataset_id=dataset_id)

        dataset_modified = Event()

        def cb(*args, **kwargs):
            dataset_modified.set()

        es = EventSubscriber(event_type=OT.DatasetModified,
                             callback=cb,
                             origin=dataset_id)
        es.start()

        self.addCleanup(es.stop)

        self.publish_fake_data(stream_id, route)

        self.assertTrue(dataset_modified.wait(30))

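        # The query below retrieves a strided, parameter-filtered subset:
        # the start/end values appear to be Unix seconds (NTP seconds minus
        # the 2208988800 epoch offset noted in test_correct_time below),
        # stride_time=2 keeps every second sample, and 'parameters' restricts
        # the fields that come back.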
        query = {
            'start_time': 0 - 2208988800,
            'end_time': 20 - 2208988800,
            'stride_time': 2,
            'parameters': ['time', 'temp']
        }
        retrieved_data = self.data_retriever.retrieve(dataset_id=dataset_id,
                                                      query=query)

        rdt = RecordDictionaryTool.load_from_granule(retrieved_data)
        comp = np.arange(0, 20, 2) == rdt['time']
        self.assertTrue(comp.all(), '%s' % rdt.pretty_print())
        self.assertEquals(set(rdt.iterkeys()), set(['time', 'temp']))

        extents = self.dataset_management.dataset_extents(
            dataset_id=dataset_id, parameters=['time', 'temp'])
        self.assertTrue(extents['time'] >= 20)
        self.assertTrue(extents['temp'] >= 20)

        self.streams.append(stream_id)
        self.stop_ingestion(stream_id)

    def test_repersist_data(self):
        stream_id, route, stream_def_id, dataset_id = self.make_simple_dataset(
        )
        self.start_ingestion(stream_id, dataset_id)
        self.publish_hifi(stream_id, route, 0)
        self.publish_hifi(stream_id, route, 1)
        self.wait_until_we_have_enough_granules(dataset_id, 20)
        config_id = self.get_ingestion_config()
        self.ingestion_management.unpersist_data_stream(
            stream_id=stream_id, ingestion_configuration_id=config_id)
        self.ingestion_management.persist_data_stream(
            stream_id=stream_id,
            ingestion_configuration_id=config_id,
            dataset_id=dataset_id)
        self.publish_hifi(stream_id, route, 2)
        self.publish_hifi(stream_id, route, 3)
        self.wait_until_we_have_enough_granules(dataset_id, 40)
        success = False
        with gevent.timeout.Timeout(5):
            while not success:

                replay_granule = self.data_retriever.retrieve(dataset_id)

                rdt = RecordDictionaryTool.load_from_granule(replay_granule)

                comp = rdt['time'] == np.arange(0, 40)
                if not isinstance(comp, bool):
                    success = comp.all()
                gevent.sleep(1)

        self.assertTrue(success)
        self.streams.append(stream_id)
        self.stop_ingestion(stream_id)

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv(
        'CEI_LAUNCH_TEST', False
    ), 'Host requires file-system access to coverage files, which CEI mode does not support.'
                     )
    def test_correct_time(self):

        # There are 2208988800 seconds between Jan 1 1900 and Jan 1 1970, i.e.
        #  the conversion factor between unix and NTP time
        unix_now = np.floor(time.time())
        ntp_now = unix_now + 2208988800

        unix_ago = unix_now - 20
        ntp_ago = unix_ago + 2208988800
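        # For example, Unix time 0 (1970-01-01T00:00:00Z) corresponds to NTP
        # time 2208988800; converting back simply subtracts the same constant.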

        stream_id, route, stream_def_id, dataset_id = self.make_simple_dataset(
        )
        coverage = DatasetManagementService._get_coverage(dataset_id)
        coverage.insert_timesteps(20)
        coverage.set_parameter_values('time', np.arange(ntp_ago, ntp_now))

        temporal_bounds = self.dataset_management.dataset_temporal_bounds(
            dataset_id)

        self.assertTrue(np.abs(temporal_bounds[0] - unix_ago) < 2)
        self.assertTrue(np.abs(temporal_bounds[1] - unix_now) < 2)

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv(
        'CEI_LAUNCH_TEST', False
    ), 'Host requires file-system access to coverage files, which CEI mode does not support.'
                     )
    def test_empty_coverage_time(self):

        stream_id, route, stream_def_id, dataset_id = self.make_simple_dataset(
        )
        coverage = DatasetManagementService._get_coverage(dataset_id)
        temporal_bounds = self.dataset_management.dataset_temporal_bounds(
            dataset_id)
        self.assertEquals([coverage.get_parameter_context('time').fill_value] *
                          2, temporal_bounds)

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv(
        'CEI_LAUNCH_TEST', False
    ), 'Host requires file-system access to coverage files, which CEI mode does not support.'
                     )
    def test_out_of_band_retrieve(self):
        # Set up the environment
        stream_id, route, stream_def_id, dataset_id = self.make_simple_dataset(
        )
        self.start_ingestion(stream_id, dataset_id)

        # Fill the dataset
        self.publish_fake_data(stream_id, route)
        self.wait_until_we_have_enough_granules(dataset_id, 40)

        # Retrieve the data
        granule = DataRetrieverService.retrieve_oob(dataset_id)
        rdt = RecordDictionaryTool.load_from_granule(granule)
        self.assertTrue((rdt['time'] == np.arange(40)).all())

    @attr('LOCOINT')
    @unittest.skipIf(os.getenv(
        'CEI_LAUNCH_TEST', False
    ), 'Host requires file-system access to coverage files, which CEI mode does not support.'
                     )
    def test_retrieve_cache(self):
        DataRetrieverService._refresh_interval = 1
        datasets = [self.make_simple_dataset() for i in xrange(10)]
        for stream_id, route, stream_def_id, dataset_id in datasets:
            coverage = DatasetManagementService._get_coverage(dataset_id)
            coverage.insert_timesteps(10)
            coverage.set_parameter_values('time', np.arange(10))
            coverage.set_parameter_values('temp', np.arange(10))

        # Verify cache hit and refresh
        dataset_ids = [i[3] for i in datasets]
        self.assertTrue(
            dataset_ids[0] not in DataRetrieverService._retrieve_cache)
        DataRetrieverService._get_coverage(dataset_ids[0])  # Hit the cache
        cov, age = DataRetrieverService._retrieve_cache[dataset_ids[0]]
        # Verify that it was hit and it's now in there
        self.assertTrue(dataset_ids[0] in DataRetrieverService._retrieve_cache)

        gevent.sleep(DataRetrieverService._refresh_interval + 0.2)

        DataRetrieverService._get_coverage(dataset_ids[0])  # Hit the cache
        cov, age2 = DataRetrieverService._retrieve_cache[dataset_ids[0]]
        self.assertTrue(age2 != age)

        for dataset_id in dataset_ids:
            DataRetrieverService._get_coverage(dataset_id)

        self.assertTrue(
            dataset_ids[0] not in DataRetrieverService._retrieve_cache)

        stream_id, route, stream_def, dataset_id = datasets[0]
        self.start_ingestion(stream_id, dataset_id)
        DataRetrieverService._get_coverage(dataset_id)

        self.assertTrue(dataset_id in DataRetrieverService._retrieve_cache)

        DataRetrieverService._refresh_interval = 100
        self.publish_hifi(stream_id, route, 1)
        self.wait_until_we_have_enough_granules(dataset_id, data_size=20)

        event = gevent.event.Event()
        with gevent.Timeout(20):
            while not event.wait(0.1):
                if dataset_id not in DataRetrieverService._retrieve_cache:
                    event.set()

        self.assertTrue(event.is_set())

    @unittest.skip('Outdated due to ingestion retry')
    @attr('LOCOINT')
    @unittest.skipIf(os.getenv(
        'CEI_LAUNCH_TEST', False
    ), 'Host requires file-system access to coverage files, which CEI mode does not support.'
                     )
    def test_ingestion_failover(self):
        stream_id, route, stream_def_id, dataset_id = self.make_simple_dataset(
        )
        self.start_ingestion(stream_id, dataset_id)

        event = Event()

        def cb(*args, **kwargs):
            event.set()

        sub = EventSubscriber(event_type="ExceptionEvent",
                              callback=cb,
                              origin="stream_exception")
        sub.start()

        self.publish_fake_data(stream_id, route)
        self.wait_until_we_have_enough_granules(dataset_id, 40)

        file_path = DatasetManagementService._get_coverage_path(dataset_id)
        master_file = os.path.join(file_path, '%s_master.hdf5' % dataset_id)

        with open(master_file, 'w') as f:
            f.write('this will crash HDF')

        self.publish_hifi(stream_id, route, 5)

        self.assertTrue(event.wait(10))

        sub.stop()
Beispiel #50
0
class SubProcess(Greenlet):
    """
    Threaded execution of a command being called.
    """

    def __init__(self, command, timeout=None):
        """
        Initialize the function

        """
        Greenlet.__init__(self, run=None)

        # we abort if this is set
        self._abort = Event()

        # this is set when an command has completed execution
        self._done = Event()

        # Tracks the PID of the item being executed
        self._pid = None

        # The return code is set after the program's execution
        self._returncode = ReturnCode.Unknown

        # The command itself should be a list() identifying the executable as
        # the first entry, followed by all of the arguments you wish to pass
        # into it.
        self._cmd = command

        # Since we need to poll until the execution of the process is
        # complete, we need to set a poll time.
        self._throttle = 0.5

        # Track when the execution started
        self._execution_begin = None

        # Track when the execution completed
        self._execution_finish = None

        # The maximum number of seconds we will allow the process to run
        # before we force it to abort its operation.

        # Setting this to zero disables this timeout restriction
        self._timeout = 0.0

        if timeout:
            self._timeout = timeout

        # These are populated with the output of the stdout and
        # stderr stream.
        self._stdout = StringIO()
        self._stderr = StringIO()

    def elapsed(self):
        """
        Returns the elapsed time of the threaded execution as a float, in
        seconds (including fractional microseconds).

        """
        if self._execution_begin is None:
            # No elapsed time has taken place yet
            return 0.0

        if self._execution_finish is not None:
            # Execution has completed, we only want to calculate
            # the execution time.
            elapsed_time = self._execution_finish - self._execution_begin

        else:
            # Calculate Elapsed Time
            elapsed_time = datetime.utcnow() - self._execution_begin

        elapsed_time = (elapsed_time.days * 86400) \
                         + elapsed_time.seconds \
                         + (elapsed_time.microseconds/1e6)

        return elapsed_time

    def _run(self):
        """
        Execute the configured command in a subprocess and poll until it
        completes, times out, or is aborted.

        """

        # Make sure our done flag is not set
        self._done.clear()

        # Execute our Process
        p1 = subprocess.Popen(
            self._cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

        # Calculate Current Time
        self._execution_begin = datetime.utcnow()

        # Store some information
        self._pid = p1.pid

        # Calculate Wait Time
        max_wait_time = self._execution_begin + \
                        timedelta(seconds=self._timeout)

        while p1.poll() is None and not self._abort.is_set():
            # Head of Poll Loop

            if self._timeout and \
               datetime.utcnow() >= max_wait_time:
                # Process aborted (took too long)
                try:
                    kill(self._pid, signal.SIGKILL)
                except:
                    pass

                # Force bad return code
                self._returncode = ReturnCode.Timeout

                # Set our elapsed time to now
                self._execution_finish = datetime.utcnow()

                # Retrieve stdout/stderr
                self._stdout = StringIO(p1.stdout.read())
                self._stderr = StringIO(p1.stderr.read())

                # Make sure no one uses the PID anymore
                self._pid = None

                # Set our done flag
                self._done.set()
                return

            # CPU Throttle
            self._abort.wait(self._throttle)

        if p1.poll() is None or self._abort.is_set():
            # Safety
            try:
                kill(self._pid, signal.SIGKILL)
            except:
                pass

            # Force bad return code
            self._returncode = ReturnCode.Aborted

        else:
            # Store return code
            self._returncode = p1.returncode

        # Execution Completion Time
        self._execution_finish = datetime.utcnow()

        # Retrieve stdout/stderr
        self._stdout = StringIO(p1.stdout.read())
        self._stderr = StringIO(p1.stderr.read())

        # Make sure no one uses the PID anymore
        self._pid = None

        # Set our done flag
        self._done.set()

        # We're done!
        return

    def is_complete(self, timeout=None):
        """
        Returns True if the process has completed its execution.

        If timeout is set, the function blocks until that period of time
        elapses or the execution completes.

        Times should be specified as float values (in seconds).

        """
        if timeout is not None:
            self._done.wait(timeout)

        return self._done.is_set()

    def response_code(self):
        """
        Returns the return code of the executed command.

        """
        return self._returncode

    def successful(self):
        """
        Returns True if the calling action was successful.  This call can be
        subjective because it bases its response simply on whether or not a
        zero (0) was returned by the program called. Usually a non-zero value
        means there was a failure.

        """
        return self._returncode == 0

    def stdout(self, as_list=True):
        """
        if as_list is set to True, then the stdout results are split on new
        lines into a list object
        """
        # Ensure we're at the head of our buffer
        self._stdout.seek(0L, SEEK_SET)

        if as_list:
            return NEW_LINE_RE.split(self._stdout.read())
        return self._stdout.read()

    def stderr(self, as_list=True):
        """
        if as_list is set to True, then the stderr results are split on new
        lines into a list object

        """
        # Ensure we're at the head of our buffer
        self._stderr.seek(0L, SEEK_SET)

        if as_list:
            return NEW_LINE_RE.split(self._stderr.read())
        return self._stderr.read()

    def pid(self):
        """
        Returns the PID of the running process, or None if the process is no
        longer running.
        """
        return self._pid

    def abort(self):
        """
        Abort the executing command

        """
        self._abort.set()
        try:
            kill(self._pid, signal.SIGKILL)
        except:
            pass

        if self._pid:
            self.join(timeout=10.0)

    def __str__(self):
        """
        returns the command being executed

        """
        return ' '.join(self._cmd)

    def __repr__(self):
        """
        Return a printable representation of the command being executed.

        """
        return '<SubProcess cmd=%s execution_time=%ds return_code=%d />' % (
            self._cmd[0],
            self.elapsed(),
            self._returncode,
        )
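
# Usage sketch (added for illustration; not part of the original example).
# It assumes the SubProcess class above plus its gevent runtime and the
# ReturnCode/NEW_LINE_RE helpers are importable from the surrounding project;
# the command and timeout shown are hypothetical.
def _subprocess_usage_sketch():
    proc = SubProcess(['ls', '-l', '/tmp'], timeout=30.0)
    proc.start()                        # Greenlet.start() schedules _run()

    # Block in one-second slices until the command finishes, times out,
    # or is aborted.
    while not proc.is_complete(timeout=1.0):
        pass

    if proc.successful():
        for line in proc.stdout():      # stdout() splits the output on newlines
            print line
    else:
        print 'exit code %s after %.2fs' % (proc.response_code(),
                                            proc.elapsed())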
Beispiel #51
0
class Channel(object):
    __slots__ = ('_recvq', '_closed', '_mon')

    def __init__(self):
        self._mon = Publisher()
        self._recvq = Queue()
        self._closed = Event()

    def __iter__(self):
        while not self.closed:
            msg = self.recv()
            if msg is StopIteration:
                break
            yield msg

    def __len__(self):
        """
        Number of messages in buffer which haven't been delivered to watchers
        """
        return self._recvq.qsize()

    def __del__(self):
        self.close()

    def send(self, msg):
        self._recvq.put_nowait(msg)
        if len(self._mon):
            self.recv()

    def watch(self):
        was_first = len(self._mon) == 0
        subscriber = self._mon.subscribe()
        if was_first:
            self._recvall()
        return subscriber

    def _recvall(self):
        while self._recvq.qsize():
            self.recv()

    def write(self, data):
        self.send(dict(data=data))

    def recv(self):
        if self.closed:
            # XXX: raise better exception
            raise RuntimeError("Closed")
        msg = self._recvq.get()
        self._mon.send(msg)
        if msg is StopIteration:
            self._closed.set()
        return msg

    def wait(self):
        return self._closed.wait()

    @property
    def closed(self):
        return self._closed.ready()

    def close(self):
        if not self.closed:
            self.send(StopIteration)

    def datastream(self):
        return DataStream(self)
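
# Usage sketch (added for illustration; not part of the original example).
# It assumes gevent plus the Publisher/Queue dependencies used by Channel are
# importable; the payloads are hypothetical.
def _channel_usage_sketch():
    import gevent

    chan = Channel()

    def consumer():
        # Iterating a Channel keeps calling recv() until close() enqueues
        # StopIteration, which also marks the channel as closed.
        for msg in chan:
            print 'received: %r' % (msg,)

    worker = gevent.spawn(consumer)
    chan.write('hello')            # write() wraps the payload as dict(data=...)
    chan.send({'data': 'world'})   # send() enqueues an arbitrary message
    chan.close()                   # enqueues StopIteration
    worker.join()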
Beispiel #52
0
class BaseServer(object):
    """
    An abstract base class that implements some common functionality for the servers in gevent.

    :param listener: Either an address that the server should bind
        on or a :class:`gevent.socket.socket` instance that is already
        bound (and put into listening mode in the case of a TCP socket).

    :keyword handle: If given, the request handler. The request
        handler can be defined in a few ways. Most commonly,
        subclasses will implement a ``handle`` method as an
        instance method. Alternatively, a function can be passed
        as the ``handle`` argument to the constructor. In either
        case, the handler can later be changed by calling
        :meth:`set_handle`.

        When the request handler returns, the socket used for the
        request will be closed.

    :keyword spawn: If provided, is called to create a new
        greenlet to run the handler. By default,
        :func:`gevent.spawn` is used (meaning there is no
        artificial limit on the number of concurrent requests). Possible values for *spawn*:

        - a :class:`gevent.pool.Pool` instance -- ``handle`` will be executed
          using :meth:`gevent.pool.Pool.spawn` only if the pool is not full.
          While it is full, no new connections are accepted;
        - :func:`gevent.spawn_raw` -- ``handle`` will be executed in a raw
          greenlet which has a little less overhead than :class:`gevent.Greenlet` instances spawned by default;
        - ``None`` -- ``handle`` will be executed right away, in the :class:`Hub` greenlet.
          ``handle`` cannot use any blocking functions as it would mean switching to the :class:`Hub`.
        - an integer -- a shortcut for ``gevent.pool.Pool(integer)``

    .. versionchanged:: 1.1a1
       When the *handle* function returns from processing a connection,
       the client socket will be closed. This resolves the non-deterministic
       closing of the socket, fixing ResourceWarnings under Python 3 and PyPy.

    """
    # pylint: disable=too-many-instance-attributes,bare-except,broad-except

    #: the number of seconds to sleep in case there was an error in accept() call
    #: for consecutive errors the delay will double until it reaches max_delay
    #: when accept() finally succeeds the delay will be reset to min_delay again
    min_delay = 0.01
    max_delay = 1

    #: Sets the maximum number of consecutive accepts that a process may perform on
    #: a single wake up. High values give higher priority to high connection rates,
    #: while lower values give higher priority to already established connections.
    #: Default is 100. Note, that in case of multiple working processes on the same
    #: listening socket, it should be set to a lower value. (pywsgi.WSGIServer sets it
    #: to 1 when environ["wsgi.multiprocess"] is true)
    max_accept = 100

    _spawn = Greenlet.spawn

    #: the default timeout that we wait for the client connections to close in stop()
    stop_timeout = 1

    fatal_errors = (errno.EBADF, errno.EINVAL, errno.ENOTSOCK)

    def __init__(self, listener, handle=None, spawn='default'):
        self._stop_event = Event()
        self._stop_event.set()
        self._watcher = None
        self._timer = None
        self._handle = None
        # XXX: FIXME: Subclasses rely on the presence or absence of the
        # `socket` attribute to determine whether we are open/should be opened.
        # Instead, have it be None.
        self.pool = None
        try:
            self.set_listener(listener)
            self.set_spawn(spawn)
            self.set_handle(handle)
            self.delay = self.min_delay
            self.loop = get_hub().loop
            if self.max_accept < 1:
                raise ValueError('max_accept must be positive int: %r' % (self.max_accept, ))
        except:
            self.close()
            raise

    def set_listener(self, listener):
        if hasattr(listener, 'accept'):
            if hasattr(listener, 'do_handshake'):
                raise TypeError('Expected a regular socket, not SSLSocket: %r' % (listener, ))
            self.family = listener.family
            self.address = listener.getsockname()
            self.socket = listener
        else:
            self.family, self.address = parse_address(listener)

    def set_spawn(self, spawn):
        if spawn == 'default':
            self.pool = None
            self._spawn = self._spawn
        elif hasattr(spawn, 'spawn'):
            self.pool = spawn
            self._spawn = spawn.spawn
        elif isinstance(spawn, integer_types):
            from gevent.pool import Pool
            self.pool = Pool(spawn)
            self._spawn = self.pool.spawn
        else:
            self.pool = None
            self._spawn = spawn
        if hasattr(self.pool, 'full'):
            self.full = self.pool.full
        if self.pool is not None:
            self.pool._semaphore.rawlink(self._start_accepting_if_started)

    def set_handle(self, handle):
        if handle is not None:
            self.handle = handle
        if hasattr(self, 'handle'):
            self._handle = self.handle
        else:
            raise TypeError("'handle' must be provided")

    def _start_accepting_if_started(self, _event=None):
        if self.started:
            self.start_accepting()

    def start_accepting(self):
        if self._watcher is None:
            # just stop watcher without creating a new one?
            self._watcher = self.loop.io(self.socket.fileno(), 1)
            self._watcher.start(self._do_read)

    def stop_accepting(self):
        if self._watcher is not None:
            self._watcher.stop()
            self._watcher = None
        if self._timer is not None:
            self._timer.stop()
            self._timer = None

    def do_handle(self, *args):
        spawn = self._spawn
        handle = self._handle
        close = self.do_close

        try:
            if spawn is None:
                _handle_and_close_when_done(handle, close, args)
            else:
                spawn(_handle_and_close_when_done, handle, close, args)
        except:
            close(*args)
            raise

    def do_close(self, *args):
        pass

    def _do_read(self):
        for _ in xrange(self.max_accept):
            if self.full():
                self.stop_accepting()
                return
            try:
                args = self.do_read()
                self.delay = self.min_delay
                if not args:
                    return
            except:
                self.loop.handle_error(self, *sys.exc_info())
                ex = sys.exc_info()[1]
                if self.is_fatal_error(ex):
                    self.close()
                    sys.stderr.write('ERROR: %s failed with %s\n' % (self, str(ex) or repr(ex)))
                    return
                if self.delay >= 0:
                    self.stop_accepting()
                    self._timer = self.loop.timer(self.delay)
                    self._timer.start(self._start_accepting_if_started)
                    self.delay = min(self.max_delay, self.delay * 2)
                break
            else:
                try:
                    self.do_handle(*args)
                except:
                    self.loop.handle_error((args[1:], self), *sys.exc_info())
                    if self.delay >= 0:
                        self.stop_accepting()
                        self._timer = self.loop.timer(self.delay)
                        self._timer.start(self._start_accepting_if_started)
                        self.delay = min(self.max_delay, self.delay * 2)
                    break

    def full(self):
        # copied from self.pool
        # pylint: disable=method-hidden
        return False

    def __repr__(self):
        return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._formatinfo())

    def __str__(self):
        return '<%s %s>' % (type(self).__name__, self._formatinfo())

    def _formatinfo(self):
        if hasattr(self, 'socket'):
            try:
                fileno = self.socket.fileno()
            except Exception as ex:
                fileno = str(ex)
            result = 'fileno=%s ' % fileno
        else:
            result = ''
        try:
            if isinstance(self.address, tuple) and len(self.address) == 2:
                result += 'address=%s:%s' % self.address
            else:
                result += 'address=%s' % (self.address, )
        except Exception as ex:
            result += str(ex) or '<error>'

        handle = self.__dict__.get('handle')
        if handle is not None:
            fself = getattr(handle, '__self__', None)
            try:
                if fself is self:
                    # Checks the __self__ of the handle in case it is a bound
                    # method of self to prevent recursively defined reprs.
                    handle_repr = '<bound method %s.%s of self>' % (
                        self.__class__.__name__,
                        handle.__name__,
                    )
                else:
                    handle_repr = repr(handle)

                result += ' handle=' + handle_repr
            except Exception as ex:
                result += str(ex) or '<error>'

        return result

    @property
    def server_host(self):
        """IP address that the server is bound to (string)."""
        if isinstance(self.address, tuple):
            return self.address[0]

    @property
    def server_port(self):
        """Port that the server is bound to (an integer)."""
        if isinstance(self.address, tuple):
            return self.address[1]

    def init_socket(self):
        """If the user initialized the server with an address rather than socket,
        then this function will create a socket, bind it and put it into listening mode.

        It is not supposed to be called by the user, it is called by :meth:`start` before starting
        the accept loop."""
        pass

    @property
    def started(self):
        return not self._stop_event.is_set()

    def start(self):
        """Start accepting the connections.

        If an address was provided in the constructor, then also create a socket,
        bind it and put it into the listening mode.
        """
        self.init_socket()
        self._stop_event.clear()
        try:
            self.start_accepting()
        except:
            self.close()
            raise

    def close(self):
        """Close the listener socket and stop accepting."""
        self._stop_event.set()
        try:
            self.stop_accepting()
        finally:
            try:
                self.socket.close()
            except Exception:
                pass
            finally:
                self.__dict__.pop('socket', None)
                self.__dict__.pop('handle', None)
                self.__dict__.pop('_handle', None)
                self.__dict__.pop('_spawn', None)
                self.__dict__.pop('full', None)
                if self.pool is not None:
                    self.pool._semaphore.unlink(self._start_accepting_if_started)

    @property
    def closed(self):
        return not hasattr(self, 'socket')

    def stop(self, timeout=None):
        """
        Stop accepting the connections and close the listening socket.

        If the server uses a pool to spawn the requests, then
        :meth:`stop` also waits for all the handlers to exit. If there
        are still handlers executing after *timeout* has expired
        (default 1 second, :attr:`stop_timeout`), then the currently
        running handlers in the pool are killed.

        If the server does not use a pool, then this merely stops accepting connections;
        any spawned greenlets that are handling requests continue running until
        they naturally complete.
        """
        self.close()
        if timeout is None:
            timeout = self.stop_timeout
        if self.pool:
            self.pool.join(timeout=timeout)
            self.pool.kill(block=True, timeout=1)

    def serve_forever(self, stop_timeout=None):
        """Start the server if it hasn't been already started and wait until it's stopped."""
        # add a test that serve_forever exits on stop()
        if not self.started:
            self.start()
        try:
            self._stop_event.wait()
        finally:
            Greenlet.spawn(self.stop, timeout=stop_timeout).join()

    def is_fatal_error(self, ex):
        return isinstance(ex, _socket.error) and ex.args[0] in self.fatal_errors
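
# Usage sketch (added for illustration; not part of the original source).
# BaseServer is abstract; in practice one of its concrete subclasses is used.
# The snippet assumes gevent.server.StreamServer and demonstrates the `handle`
# and `spawn` keywords described in the docstring above; the host/port are
# hypothetical.
def _echo_server_sketch():
    from gevent.server import StreamServer

    def handle(sock, address):
        # The socket is closed automatically when this handler returns.
        data = sock.recv(1024)
        if data:
            sock.sendall(data)

    # spawn=100 is shorthand for gevent.pool.Pool(100): at most 100 connections
    # are handled concurrently; further accepts pause while the pool is full.
    server = StreamServer(('127.0.0.1', 6000), handle, spawn=100)
    server.serve_forever()
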
class GeventSelector(_BaseSelectorImpl):
    """
    A selector implementation using gevent primitives.

    This is a type of :class:`selectors.BaseSelector`, so the documentation
    for that class applies here.

    .. caution::
       As the base class indicates, it is critically important to
       unregister file objects before closing them. (Or close the selector
       they are registered with before closing them.) Failure to do so
       may crash the process or have other unintended results.
    """

    # Notes on the approach:
    #
    # It's easy to wrap a selector implementation around
    # ``gevent.select.poll``; in fact that's what happens by default
    # when monkey-patching in Python 3. But the problem with that is
    # each call to ``selector.select()`` will result in creating and
    # then destroying new kernel-level polling resources, as nothing
    # in ``gevent.select`` can keep watchers around (because the underlying
    # file could be closed at any time). This ends up producing a large
    # number of syscalls that are unnecessary.
    #
    # So here, we take advantage of the fact that it is documented and
    # required that files not be closed while they are registered.
    # This lets us persist watchers. Indeed, it lets us continually
    # accrue events in the background before a call to ``select()`` is even
    # made. We can take advantage of this to return results immediately, without
    # a syscall, if we have them.
    #
    # We create watchers in ``register()`` and destroy them in
    # ``unregister()``. They do not get started until the first call
    # to ``select()``, though. Once they are started, they don't get
    # stopped until they deliver an event.
    # Lifecycle:
    # register() -> inactive_watchers
    # select() -> inactive_watchers -> active_watchers;
    #             active_watchers   -> inactive_watchers

    def __init__(self, hub=None):
        if hub is not None:
            self.hub = hub
        # {fd: watcher}
        self._active_watchers = {}
        self._inactive_watchers = {}
        # {fd: EVENT_READ|EVENT_WRITE}
        self._accumulated_events = defaultdict(int)
        self._ready = Event()
        super(GeventSelector, self).__init__()

    def __callback(self, events, fd):
        if events > 0:
            cur_event_for_fd = self._accumulated_events[fd]
            if events & _EV_READ:
                cur_event_for_fd |= EVENT_READ
            if events & _EV_WRITE:
                cur_event_for_fd |= EVENT_WRITE
            self._accumulated_events[fd] = cur_event_for_fd

        self._ready.set()

    @Lazy
    def hub(self):  # pylint:disable=method-hidden
        return get_hub()

    def register(self, fileobj, events, data=None):
        key = _BaseSelectorImpl.register(self, fileobj, events, data)

        if events == _ALL_EVENTS:
            flags = _POLL_ALL
        elif events == EVENT_READ:
            flags = _EV_READ
        else:
            flags = _EV_WRITE

        loop = self.hub.loop
        io = loop.io
        MAXPRI = loop.MAXPRI

        self._inactive_watchers[key.fd] = watcher = io(key.fd, flags)
        watcher.priority = MAXPRI
        return key

    def unregister(self, fileobj):
        key = _BaseSelectorImpl.unregister(self, fileobj)
        if key.fd in self._active_watchers:
            watcher = self._active_watchers.pop(key.fd)
        else:
            watcher = self._inactive_watchers.pop(key.fd)
        watcher.stop()
        watcher.close()
        self._accumulated_events.pop(key.fd, None)
        return key

    # XXX: Can we implement ``modify`` more efficiently than
    # ``unregister()``+``register()``? We could detect the no-change
    # case and do nothing; recent versions of the standard library
    # do that.

    def select(self, timeout=None):
        """
        Poll for I/O.

        Note that, like the built-in selectors, this will block
        indefinitely if no timeout is given and no files have been
        registered.
        """
        # timeout > 0 : block seconds
        # timeout <= 0 : No blocking.
        # timeout = None: Block forever

        # Event.wait doesn't deal with negative values
        if timeout is not None and timeout < 0:
            timeout = 0

        # Start any watchers that need started. Note that they may
        # not actually get a chance to do anything yet if we already had
        # events set.
        for fd, watcher in iteritems(self._inactive_watchers):
            watcher.start(self.__callback, fd, pass_events=True)
        self._active_watchers.update(self._inactive_watchers)
        self._inactive_watchers.clear()

        # The _ready event is either already set (in which case
        # there are some results waiting in _accumulated_events) or
        # not set, in which case we have to block. But to make the two cases
        # behave the same, we will always yield to the event loop.
        if self._ready.is_set():
            sleep()
        self._ready.wait(timeout)
        self._ready.clear()
        # TODO: If we have nothing ready, but they ask us not to block,
        # should we make an effort to actually spin the event loop and let
        # it check for events?

        result = []
        for fd, event in iteritems(self._accumulated_events):
            key = self._key_from_fd(fd)
            watcher = self._active_watchers.pop(fd)

            ## The below is taken without comment from
            ## https://github.com/gevent/gevent/pull/1523/files and
            ## hasn't been checked:
            #
            # Since we are emulating an epoll object within another epoll object,
            # once a watcher has fired, we must deactivate it until poll is called
            # next. If we did not, someone else could call, e.g., gevent.time.sleep
            # and any unconsumed bytes on our watched fd would prevent the process
            # from sleeping correctly.
            watcher.stop()
            if key:
                result.append((key, event & key.events))
                self._inactive_watchers[fd] = watcher
            else:  # pragma: no cover
                # If the key was gone, then somehow we've been unregistered.
                # Don't put it back in inactive, close it.
                watcher.close()

        self._accumulated_events.clear()
        return result

    def close(self):
        for d in self._active_watchers, self._inactive_watchers:
            if d is None:
                continue  # already closed
            for watcher in itervalues(d):
                watcher.stop()
                watcher.close()
        self._active_watchers = self._inactive_watchers = None
        self._accumulated_events = None
        self.hub = None
        _BaseSelectorImpl.close(self)
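
# Usage sketch (added for illustration; not part of the original source).
# It assumes the EVENT_READ constant imported by this module and a Unix
# platform with socket.socketpair(); the payload is hypothetical.
def _selector_usage_sketch():
    import socket

    r, w = socket.socketpair()
    sel = GeventSelector()
    try:
        sel.register(r, EVENT_READ, data='reader')
        w.sendall('ping')
        for key, events in sel.select(timeout=1):
            # key.data is whatever was passed to register(); key.fileobj is r.
            print key.data, key.fileobj.recv(4)
    finally:
        # Unregister before closing the sockets, as the class docstring warns.
        sel.unregister(r)
        sel.close()
        r.close()
        w.close()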
Beispiel #54
0
class TestTransforms(IonIntegrationTestCase):
    def setUp(self):
        self._start_container()
        self.queue_cleanup = []
        self.exchange_cleanup = []

    def tearDown(self):
        for queue in self.queue_cleanup:
            xn = self.container.ex_manager.create_xn_queue(queue)
            xn.delete()
        for exchange in self.exchange_cleanup:
            xp = self.container.ex_manager.create_xp(exchange)
            xp.delete()

    def test_stats(self):
        self.container.spawn_process('test', 'ion.core.process.transform',
                                     'TransformBase', {}, 'test_transform')
        test_transform = self.container.proc_manager.procs['test_transform']
        test_transform._stats['hits'] = 100

        retval = TransformBase.stats('test_transform')
        self.assertEquals(retval, {'hits': 100})

    def test_stream_transforms(self):

        self.verified = Event()
        input_route = StreamRoute('test_exchange', 'input')
        output_route = StreamRoute('test_exchange', 'output')

        def verify(m, route, stream_id):
            self.assertEquals(route, output_route)
            self.assertEquals(m, 'test')
            self.verified.set()

        #                       Create I/O Processes
        #--------------------------------------------------------------------------------

        pub_proc = TransformBase()
        pub_proc.container = self.container
        publisher = StreamPublisher(process=pub_proc, stream_route=input_route)

        transform = self.container.spawn_process(
            'transform', 'ion.core.process.test.test_transform',
            'EmptyDataProcess', {
                'process': {
                    'queue_name': 'transform_input',
                    'exchange_point': output_route.exchange_point,
                    'routing_key': output_route.routing_key
                }
            }, 'transformpid')
        transform = self.container.proc_manager.procs[transform]

        sub_proc = TransformBase()
        sub_proc.container = self.container
        subscriber = StreamSubscriber(process=sub_proc,
                                      exchange_name='subscriber',
                                      callback=verify)

        #                       Bind the transports
        #--------------------------------------------------------------------------------

        transform.subscriber.xn.bind(input_route.routing_key, publisher.xp)
        subscriber.xn.bind(output_route.routing_key, transform.publisher.xp)
        subscriber.start()
        self.addCleanup(subscriber.stop)

        publisher.publish('test')

        self.assertTrue(self.verified.wait(4))
Beispiel #55
0
def main(default_config_file, is_gateway):
    global logger

    import futile.logging
    logger = futile.logging.get_logger(__name__)

    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

    config_locations = (".", "/etc/openmtc/gevent", "/etc/openmtc")

    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("-f",
                        "--configfile",
                        default=None,
                        help="Location of the configuration file. If "
                        "unspecified the system will look for a file called %s"
                        " in these locations: %s" %
                        (default_config_file, ', '.join(config_locations)))
    parser.add_argument("-v",
                        "--verbose",
                        action="count",
                        default=None,
                        help="Increase verbosity in output. This option can be"
                        " specified multiple times.")
    parser.add_argument("--profiler",
                        action="store_true",
                        help="Use GreenletProfiler")
    args = parser.parse_args()

    configfile = args.configfile
    futile.logging.set_default_level(futile.logging.DEBUG)

    try:
        if not configfile:
            import os.path
            for d in config_locations:
                configfile = os.path.join(os.path.abspath(d),
                                          default_config_file)
                logger.debug("Trying config file location: %s", configfile)
                if os.path.isfile(configfile):
                    break
            else:
                raise ConfigurationError(
                    "Configuration file %s not found in "
                    "any of these locations: %s" %
                    (default_config_file, config_locations))

        config = load_config(configfile)
    except ConfigurationError as e:
        sys.stderr.write(str(e) + "\n")
        sys.exit(2)

    import openmtc_cse.api
    openmtc_cse.api.config = config
    import openmtc_server.api
    openmtc_server.api.config = config

    # TODO: kca:
    # Also: set global (non-futile) level?
    if "logging" in config:  # TODO init logging
        # FIXME: This won't work, needs translation to log levels
        log_conf = config["logging"]
        if args.verbose is None:
            futile.logging.set_default_level(
                log_conf.get("level") or futile.logging.WARNING)
        elif args.verbose >= 2:
            futile.logging.set_default_level(futile.logging.DEBUG)
        else:
            futile.logging.set_default_level(futile.logging.INFO)
        logfile = log_conf.get("file")
        if logfile:
            futile.logging.add_log_file(logfile)
    else:
        futile.logging.set_default_level(futile.logging.DEBUG)

    # make iso8601 logging shut up
    logger = futile.logging.get_logger(__name__)
    futile.logging.get_logger("iso8601").setLevel(futile.logging.ERROR)
    logger.debug("Running OpenMTC")

    from itertools import starmap

    import signal

    from gevent import spawn_later
    from gevent.event import Event as GEventEvent

    from openmtc_gevent.TaskRunner import GEventTaskRunner

    from openmtc_cse.methoddomain import OneM2MMethodDomain

    from openmtc_cse.transport import OneM2MTransportDomain

    from openmtc_server.platform.default.Event import (ResourceFinishEvent,
                                                       NetworkEvent)

    from GEventNetworkManager import GEventNetworkManager

    from openmtc_server.util.db import load_db_module

    omd = OneM2MMethodDomain(config=config)

    otd = OneM2MTransportDomain(config=config)

    nm = GEventNetworkManager(config=config.get("network_manager", {}))

    task_runner = GEventTaskRunner()
    _components.append(task_runner)

    _timers = set()

    db = load_db_module(config)

    class Api(object):
        PLATFORM = "gevent"

        class events(object):
            resource_created = ResourceFinishEvent(task_runner.run_task)
            resource_deleted = ResourceFinishEvent(task_runner.run_task)
            resource_updated = ResourceFinishEvent(task_runner.run_task)
            resource_announced = ResourceFinishEvent(task_runner.run_task)

            # fired when a network interface appeared
            # called with <interface>
            interface_created = NetworkEvent(task_runner.run_task)
            # fired when a network interface disappeared
            # called with <interface>
            interface_removed = NetworkEvent(task_runner.run_task)
            # fired when an address appeared on an existing interface
            # called with <interface>, <address>
            address_created = NetworkEvent(task_runner.run_task)
            # fired when an address disappeared on an existing interface
            # called with <interface>, <address>
            address_removed = NetworkEvent(task_runner.run_task)

        start_onem2m_session = db.start_onem2m_session
        get_shelve = db.get_shelve

        # handle request
        handle_onem2m_request = omd.handle_onem2m_request

        # send request
        send_onem2m_request = otd.send_onem2m_request
        send_notify = otd.send_notify

        register_point_of_access = otd.register_point_of_access

        # connectors and endpoints
        register_onem2m_client = otd.register_client
        get_onem2m_endpoints = otd.get_endpoints
        add_poa_list = otd.add_poa_list

        network_manager = nm

        run_task = task_runner.run_task

        @staticmethod
        def set_timer(t, f, *args, **kw):
            timer = None

            def wrapper():
                _timers.discard(timer)
                f(*args, **kw)

            timer = spawn_later(t, wrapper)
            _timers.add(timer)
            return timer

        @staticmethod
        def cancel_timer(timer):
            _timers.discard(timer)
            timer.kill()

        map = map

        @staticmethod
        def starmap(c, l):
            return tuple(starmap(c, l))

    Api.db = db

    openmtc_cse.api.api = Api
    openmtc_cse.api.events = Api.events
    openmtc_server.api.api = Api
    openmtc_server.api.events = Api.events

    shutdown_event = GEventEvent()
    gevent.signal(signal.SIGTERM, shutdown_event.set)
    gevent.signal(signal.SIGINT, shutdown_event.set)

    try:
        init_component(otd, Api)
        init_component(omd, Api)
        init_component(nm, Api)

        force = config["database"].get("dropDB")
        if force or not db.is_initialized():
            db.initialize(force)
            omd.init_cse_base()

        omd.start()

        load_plugins(Api, config.get("plugins", ()), config["global"],
                     config["onem2m"], is_gateway)
        init_plugins()
        start_plugins()

        logger.info("OpenMTC is running")
    except:
        logger.exception("Error during startup")
    else:
        if args.profiler:
            import GreenletProfiler
            GreenletProfiler.set_clock_type("cpu")
            GreenletProfiler.start()

        # wait for shutdown event
        shutdown_event.wait()

        if args.profiler:
            GreenletProfiler.stop()
            stats = GreenletProfiler.get_func_stats()
            stats.print_all()
            stats.save('profile.callgrind', type='callgrind')

    stop_plugins()
    stop_components()

    for timer in _timers:
        try:
            timer.kill()
        except:
            logger.exception("Failed to kill timer %s", timer)
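The set_timer/cancel_timer helpers above reduce to a small gevent pattern: spawn_later schedules the callback, and the resulting greenlet is tracked in a set so that shutdown can kill anything that has not fired yet. A minimal standalone sketch of that pattern (plain gevent only; the helper names here are illustrative, not part of OpenMTC):

# Standalone sketch of the spawn_later timer bookkeeping used by Api.set_timer above.
import gevent
from gevent import spawn_later

_timers = set()

def set_timer(delay, func, *args, **kw):
    timer = None

    def wrapper():
        # By the time the timer fires, `timer` has been rebound to the greenlet.
        _timers.discard(timer)
        func(*args, **kw)

    timer = spawn_later(delay, wrapper)
    _timers.add(timer)
    return timer

def cancel_timer(timer):
    _timers.discard(timer)
    timer.kill()

def shutdown_timers():
    # Kill anything still pending, e.g. on application shutdown.
    for timer in list(_timers):
        timer.kill()
    _timers.clear()

if __name__ == "__main__":
    fired = []
    set_timer(0.1, fired.append, "first")
    never = set_timer(10, fired.append, "never")
    cancel_timer(never)
    gevent.sleep(0.2)
    assert fired == ["first"]
    shutdown_timers()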
Beispiel #56
0
class Auction(DBServiceMixin, RequestIDServiceMixin, AuditServiceMixin,
              BiddersServiceMixin, DateTimeServiceMixin, StagesServiceMixin,
              PostAuctionServiceMixin):
    """Auction Worker Class"""
    def __init__(self,
                 tender_id,
                 worker_defaults,
                 auction_data=None,
                 lot_id=None):
        super(Auction, self).__init__()
        self.generate_request_id()
        self.tender_id = tender_id
        self.lot_id = lot_id
        if lot_id:
            self.auction_doc_id = tender_id + "_" + lot_id
        else:
            self.auction_doc_id = tender_id
        self.tender_url = urljoin(
            worker_defaults["resource_api_server"],
            '/api/{0}/{1}/{2}'.format(worker_defaults["resource_api_version"],
                                      worker_defaults["resource_name"],
                                      tender_id))
        if auction_data:
            self.debug = True
            LOGGER.setLevel(logging.DEBUG)
            self._auction_data = auction_data
        else:
            self.debug = False
        self._end_auction_event = Event()
        self.bids_actions = BoundedSemaphore()
        self.session = RequestsSession()
        self.worker_defaults = worker_defaults
        if self.worker_defaults.get('with_document_service', False):
            self.session_ds = RequestsSession()
        self._bids_data = {}
        self.db = Database(str(self.worker_defaults["COUCH_DATABASE"]),
                           session=Session(retry_delays=range(10)))
        self.audit = {}
        self.retries = 10
        self.bidders_count = 0
        self.bidders_data = []
        self.bidders_features = {}
        self.bidders_coeficient = {}
        self.features = None
        self.mapping = {}
        self.rounds_stages = []

    def schedule_auction(self):
        self.generate_request_id()
        self.get_auction_document()
        if self.debug:
            LOGGER.info("Get _auction_data from auction_document")
            self._auction_data = self.auction_document.get(
                'test_auction_data', {})
        self.get_auction_info()
        self.prepare_audit()
        self.prepare_auction_stages()
        self.save_auction_document()
        round_number = 0
        SCHEDULER.add_job(self.start_auction,
                          'date',
                          kwargs={"switch_to_round": round_number},
                          run_date=self.convert_datetime(
                              self.auction_document['stages'][0]['start']),
                          name="Start of Auction",
                          id="Start of Auction")
        round_number += 1

        SCHEDULER.add_job(self.end_first_pause,
                          'date',
                          kwargs={"switch_to_round": round_number},
                          run_date=self.convert_datetime(
                              self.auction_document['stages'][1]['start']),
                          name="End of Pause Stage: [0 -> 1]",
                          id="End of Pause Stage: [0 -> 1]")
        round_number += 1
        for index in xrange(2, len(self.auction_document['stages'])):
            if self.auction_document['stages'][index - 1]['type'] == 'bids':
                SCHEDULER.add_job(
                    self.end_bids_stage,
                    'date',
                    kwargs={"switch_to_round": round_number},
                    run_date=self.convert_datetime(
                        self.auction_document['stages'][index]['start']),
                    name="End of Bids Stage: [{} -> {}]".format(
                        index - 1, index),
                    id="End of Bids Stage: [{} -> {}]".format(
                        index - 1, index))
            elif self.auction_document['stages'][index - 1]['type'] == 'pause':
                SCHEDULER.add_job(
                    self.next_stage,
                    'date',
                    kwargs={"switch_to_round": round_number},
                    run_date=self.convert_datetime(
                        self.auction_document['stages'][index]['start']),
                    name="End of Pause Stage: [{} -> {}]".format(
                        index - 1, index),
                    id="End of Pause Stage: [{} -> {}]".format(
                        index - 1, index))
            round_number += 1
        LOGGER.info("Prepare server ...",
                    extra={
                        "JOURNAL_REQUEST_ID": self.request_id,
                        "MESSAGE_ID": AUCTION_WORKER_SERVICE_PREPARE_SERVER
                    })
        self.server = run_server(
            self,
            self.convert_datetime(
                self.auction_document['stages'][-2]['start']), LOGGER)

    def wait_to_end(self):
        self._end_auction_event.wait()
        LOGGER.info("Stop auction worker",
                    extra={
                        "JOURNAL_REQUEST_ID": self.request_id,
                        "MESSAGE_ID":
                        AUCTION_WORKER_SERVICE_STOP_AUCTION_WORKER
                    })

    def start_auction(self, switch_to_round=None):
        self.generate_request_id()
        self.audit['timeline']['auction_start']['time'] = datetime.now(
            tzlocal()).isoformat()
        LOGGER.info('---------------- Start auction ----------------',
                    extra={
                        "JOURNAL_REQUEST_ID": self.request_id,
                        "MESSAGE_ID": AUCTION_WORKER_SERVICE_START_AUCTION
                    })
        self.get_auction_info()
        self.get_auction_document()
        # Initial bids
        bids = deepcopy(self.bidders_data)
        self.auction_document["initial_bids"] = []
        bids_info = sorting_start_bids_by_amount(bids, features=self.features)
        for index, bid in enumerate(bids_info):
            amount = bid["value"]["amount"]
            audit_info = {
                "bidder": bid["id"],
                "date": bid["date"],
                "amount": amount
            }
            if self.features:
                amount_features = cooking(amount, self.features,
                                          self.bidders_features[bid["id"]])
                coeficient = self.bidders_coeficient[bid["id"]]
                audit_info["amount_features"] = str(amount_features)
                audit_info["coeficient"] = str(coeficient)
            else:
                coeficient = None
                amount_features = None

            self.audit['timeline']['auction_start']['initial_bids'].append(
                audit_info)
            self.auction_document["initial_bids"].append(
                prepare_initial_bid_stage(
                    time=bid["date"] if "date" in bid else self.startDate,
                    bidder_id=bid["id"],
                    bidder_name=self.mapping[bid["id"]],
                    amount=amount,
                    coeficient=coeficient,
                    amount_features=amount_features))
        if isinstance(switch_to_round, int):
            self.auction_document["current_stage"] = switch_to_round
        else:
            self.auction_document["current_stage"] = 0

        all_bids = deepcopy(self.auction_document["initial_bids"])
        minimal_bids = []
        for bid_info in self.bidders_data:
            minimal_bids.append(
                get_latest_bid_for_bidder(all_bids, str(bid_info['id'])))

        minimal_bids = self.filter_bids_keys(sorting_by_amount(minimal_bids))
        self.update_future_bidding_orders(minimal_bids)
        self.save_auction_document()

    def end_first_pause(self, switch_to_round=None):
        self.generate_request_id()
        LOGGER.info('---------------- End First Pause ----------------',
                    extra={
                        "JOURNAL_REQUEST_ID": self.request_id,
                        "MESSAGE_ID": AUCTION_WORKER_SERVICE_END_FIRST_PAUSE
                    })
        self.bids_actions.acquire()
        self.get_auction_document()

        if isinstance(switch_to_round, int):
            self.auction_document["current_stage"] = switch_to_round
        else:
            self.auction_document["current_stage"] += 1

        self.save_auction_document()
        self.bids_actions.release()

    def end_auction(self):
        LOGGER.info('---------------- End auction ----------------',
                    extra={
                        "JOURNAL_REQUEST_ID": self.request_id,
                        "MESSAGE_ID": AUCTION_WORKER_SERVICE_END_AUCTION
                    })
        LOGGER.debug("Stop server",
                     extra={"JOURNAL_REQUEST_ID": self.request_id})
        if self.server:
            self.server.stop()
        LOGGER.debug("Clear mapping",
                     extra={"JOURNAL_REQUEST_ID": self.request_id})
        delete_mapping(self.worker_defaults, self.auction_doc_id)

        start_stage, end_stage = self.get_round_stages(ROUNDS)
        minimal_bids = deepcopy(
            self.auction_document["stages"][start_stage:end_stage])
        minimal_bids = self.filter_bids_keys(sorting_by_amount(minimal_bids))
        self.auction_document["results"] = []
        for item in minimal_bids:
            self.auction_document["results"].append(
                prepare_results_stage(**item))
        self.auction_document["current_stage"] = (
            len(self.auction_document["stages"]) - 1)
        LOGGER.debug(' '.join(('Document in end_stage: \n',
                               yaml_dump(dict(self.auction_document)))),
                     extra={"JOURNAL_REQUEST_ID": self.request_id})
        self.approve_audit_info_on_announcement()
        LOGGER.info('Audit data: \n {}'.format(yaml_dump(self.audit)),
                    extra={"JOURNAL_REQUEST_ID": self.request_id})
        if self.debug:
            LOGGER.debug('Debug: put_auction_data disabled !!!',
                         extra={"JOURNAL_REQUEST_ID": self.request_id})
            sleep(10)
            self.save_auction_document()
        else:
            if self.put_auction_data():
                self.save_auction_document()
        LOGGER.debug("Fire 'stop auction worker' event",
                     extra={"JOURNAL_REQUEST_ID": self.request_id})

    def cancel_auction(self):
        self.generate_request_id()
        if self.get_auction_document():
            LOGGER.info(
                "Auction {} canceled".format(self.auction_doc_id),
                extra={'MESSAGE_ID': AUCTION_WORKER_SERVICE_AUCTION_CANCELED})
            self.auction_document["current_stage"] = -100
            self.auction_document["endDate"] = datetime.now(
                tzlocal()).isoformat()
            LOGGER.info("Change auction {} status to 'canceled'".format(
                self.auction_doc_id),
                        extra={
                            'MESSAGE_ID':
                            AUCTION_WORKER_SERVICE_AUCTION_STATUS_CANCELED
                        })
            self.save_auction_document()
        else:
            LOGGER.info(
                "Auction {} not found".format(self.auction_doc_id),
                extra={'MESSAGE_ID': AUCTION_WORKER_SERVICE_AUCTION_NOT_FOUND})

    def reschedule_auction(self):
        self.generate_request_id()
        if self.get_auction_document():
            LOGGER.info(
                "Auction {} has not started and will be rescheduled".format(
                    self.auction_doc_id),
                extra={
                    'MESSAGE_ID': AUCTION_WORKER_SERVICE_AUCTION_RESCHEDULE
                })
            self.auction_document["current_stage"] = -101
            self.save_auction_document()
        else:
            LOGGER.info(
                "Auction {} not found".format(self.auction_doc_id),
                extra={'MESSAGE_ID': AUCTION_WORKER_SERVICE_AUCTION_NOT_FOUND})
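schedule_auction above registers one date-triggered job per stage boundary. Assuming SCHEDULER is an APScheduler scheduler (as the add_job signature suggests), a minimal standalone sketch of that date-trigger pattern could look like this; the stage list and callback are made up for illustration:

# Sketch of the 'date' trigger scheduling pattern used in schedule_auction above.
# Assumption: SCHEDULER in the example is an APScheduler scheduler; this sketch builds its own.
from datetime import datetime, timedelta

from apscheduler.schedulers.gevent import GeventScheduler

scheduler = GeventScheduler()

def switch_stage(switch_to_round=None):
    print("switching to round %s" % switch_to_round)

def schedule_stages(stage_starts):
    # One 'date' job per stage boundary, mirroring the loop in schedule_auction.
    for round_number, start in enumerate(stage_starts):
        scheduler.add_job(switch_stage,
                          'date',
                          kwargs={"switch_to_round": round_number},
                          run_date=start,
                          name="Stage switch [%d]" % round_number,
                          id="Stage switch [%d]" % round_number)

if __name__ == "__main__":
    import gevent
    now = datetime.now()
    schedule_stages([now + timedelta(seconds=i) for i in (1, 2, 3)])
    scheduler.start()
    gevent.sleep(4)
    scheduler.shutdown()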
Beispiel #57
0
class RBDMirrorThrasher(Greenlet, Thrasher):
    """
    RBDMirrorThrasher::

    The RBDMirrorThrasher thrashes rbd-mirror daemons during execution of other
    tasks (workunits, etc).

    The config is optional.  Many of the config parameters are a maximum value
    to use when selecting a random value from a range.  The config is a dict
    containing some or all of:

    cluster: [default: ceph] cluster to thrash

    max_thrash: [default: 1] the maximum number of active rbd-mirror daemons per
      cluster that will be thrashed at any given time.

    min_thrash_delay: [default: 60] minimum number of seconds to delay before
      thrashing again.

    max_thrash_delay: [default: 120] maximum number of seconds to delay before
      thrashing again.

    max_revive_delay: [default: 10] maximum number of seconds to delay before
      bringing back a thrashed rbd-mirror daemon.

    randomize: [default: true] enables randomization and uses the max/min values

    seed: [no default] seed the random number generator

    Examples::

      The following example disables randomization, and uses the max delay
      values:

      tasks:
      - ceph:
      - rbd_mirror_thrash:
          randomize: False
          max_thrash_delay: 10
    """
    def __init__(self, ctx, config, cluster, daemons):
        Greenlet.__init__(self)
        Thrasher.__init__(self, "RBDMirrorThrasher")

        self.ctx = ctx
        self.config = config
        self.cluster = cluster
        self.daemons = daemons

        self.logger = log
        self.name = 'thrasher.rbd_mirror.[{cluster}]'.format(cluster=cluster)
        self.stopping = Event()

        self.randomize = bool(self.config.get('randomize', True))
        self.max_thrash = int(self.config.get('max_thrash', 1))
        self.min_thrash_delay = float(self.config.get('min_thrash_delay',
                                                      60.0))
        self.max_thrash_delay = float(
            self.config.get('max_thrash_delay', 120.0))
        self.max_revive_delay = float(self.config.get('max_revive_delay',
                                                      10.0))

    def _run(self):
        try:
            self.do_thrash()
        except Exception as e:
            # See _run exception comment for MDSThrasher
            self.exception = e
            self.logger.exception("exception:")
            # Allow successful completion so gevent doesn't see an exception.
            # The DaemonWatchdog will observe the error and tear down the test.

    def log(self, x):
        """Write data to logger assigned to this RBDMirrorThrasher"""
        self.logger.info(x)

    def stop(self):
        self.stopping.set()

    def do_thrash(self):
        """
        Perform the random thrashing action
        """

        self.log('starting thrash for cluster {cluster}'.format(
            cluster=self.cluster))
        stats = {
            "kill": 0,
        }

        while not self.stopping.is_set():
            delay = self.max_thrash_delay
            if self.randomize:
                delay = random.randrange(self.min_thrash_delay,
                                         self.max_thrash_delay)

            if delay > 0.0:
                self.log('waiting for {delay} secs before thrashing'.format(
                    delay=delay))
                self.stopping.wait(delay)
                if self.stopping.is_set():
                    continue

            killed_daemons = []

            weight = 1.0 / len(self.daemons)
            count = 0
            for daemon in self.daemons:
                skip = random.uniform(0.0, 1.0)
                if weight <= skip:
                    self.log(
                        'skipping daemon {label} with skip ({skip}) >= weight ({weight})'
                        .format(label=daemon.id_, skip=skip, weight=weight))
                    continue

                self.log('kill {label}'.format(label=daemon.id_))
                try:
                    daemon.signal(signal.SIGTERM)
                except socket.error:
                    pass
                killed_daemons.append(daemon)
                stats['kill'] += 1

                # if we've reached max_thrash, we're done
                count += 1
                if count >= self.max_thrash:
                    break

            if killed_daemons:
                # wait for a while before restarting
                delay = self.max_revive_delay
                if self.randomize:
                    delay = random.randrange(0.0, self.max_revive_delay)

                self.log(
                    'waiting for {delay} secs before reviving daemons'.format(
                        delay=delay))
                sleep(delay)

                for daemon in killed_daemons:
                    self.log('waiting for {label}'.format(label=daemon.id_))
                    try:
                        run.wait([daemon.proc], timeout=600)
                    except CommandFailedError:
                        pass
                    except:
                        self.log(
                            'Failed to stop {label}'.format(label=daemon.id_))

                        try:
                            # try to capture a core dump
                            daemon.signal(signal.SIGABRT)
                        except socket.error:
                            pass
                        raise
                    finally:
                        daemon.reset()

                for daemon in killed_daemons:
                    self.log('reviving {label}'.format(label=daemon.id_))
                    daemon.start()

        for stat in stats:
            self.log("stat['{key}'] = {value}".format(key=stat,
                                                      value=stats[stat]))
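The heart of do_thrash is a stoppable delay loop: stopping.wait(delay) doubles as an interruptible sleep, so stop() takes effect immediately rather than after the full delay. A stripped-down sketch of just that loop, with the Ceph daemon handling replaced by a placeholder:

# Stripped-down sketch of the stoppable delay loop used by do_thrash above
# (plain gevent, no Ceph/teuthology dependencies; the thrash action is faked).
import random

import gevent
from gevent.event import Event

class StoppableThrasher(gevent.Greenlet):
    def __init__(self, min_delay=1.0, max_delay=3.0, randomize=True):
        gevent.Greenlet.__init__(self)
        self.stopping = Event()
        self.min_delay = min_delay
        self.max_delay = max_delay
        self.randomize = randomize

    def stop(self):
        self.stopping.set()

    def _run(self):
        while not self.stopping.is_set():
            delay = self.max_delay
            if self.randomize:
                delay = random.uniform(self.min_delay, self.max_delay)
            # Event.wait() doubles as an interruptible sleep: stop() wakes it early.
            self.stopping.wait(delay)
            if self.stopping.is_set():
                break
            print("thrash action would run here")

if __name__ == "__main__":
    thrasher = StoppableThrasher()
    thrasher.start()
    gevent.sleep(5)
    thrasher.stop()
    thrasher.join()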
Beispiel #58
0
class TestPreloadThenLoadDataset(IonIntegrationTestCase):
    """ Uses the preload system to define the ExternalDataset and related resources,
        then invokes services to perform the load
    """
    def setUp(self):
        # Start container
        self._start_container()
        self.container.start_rel_from_url('res/deploy/r2deploy.yml')
        config = dict(op="load",
                      scenario="NOSE",
                      attachments="res/preload/r2_ioc/attachments")
        self.container.spawn_process("Loader",
                                     "ion.processes.bootstrap.ion_loader",
                                     "IONLoader",
                                     config=config)
        self.pubsub = PubsubManagementServiceClient()
        self.dams = DataAcquisitionManagementServiceClient()

    def test_use_case(self):
        # setUp() has already started the container and performed the preload
        #        self.assert_dataset_loaded('Test External CTD Dataset') # make sure we have the ExternalDataset resources
        self.assert_dataset_loaded(
            'Unit Test SMB37')  # association changed -- now use device name
        self.do_listen_for_incoming(
        )  # listen for any data being received from the dataset
        self.do_read_dataset()  # call services to load dataset
        self.assert_data_received()  # check that data was received as expected
        self.do_shutdown()

    def assert_dataset_loaded(self, name):
        rr = self.container.resource_registry
        #        self.external_dataset = self.find_object_by_name(name, RT.ExternalDataset)
        devs, _ = rr.find_resources(RT.InstrumentDevice,
                                    name=name,
                                    id_only=False)
        self.assertEquals(len(devs), 1)
        self.device = devs[0]
        obj, _ = rr.find_objects(subject=self.device._id,
                                 predicate=PRED.hasAgentInstance,
                                 object_type=RT.ExternalDatasetAgentInstance)
        self.agent_instance = obj[0]
        obj, _ = rr.find_objects(object_type=RT.ExternalDatasetAgent,
                                 predicate=PRED.hasAgentDefinition,
                                 subject=self.agent_instance._id)
        self.agent = obj[0]

        driver_cfg = self.agent_instance.driver_config
        #stream_definition_id = driver_cfg['dh_cfg']['stream_def'] if 'dh_cfg' in driver_cfg else driver_cfg['stream_def']
        #self.stream_definition = rr.read(stream_definition_id)

        self.data_product = rr.read_object(subject=self.device._id,
                                           predicate=PRED.hasOutputProduct,
                                           object_type=RT.DataProduct)

        self.dataset_id = rr.read_object(subject=self.data_product._id,
                                         predicate=PRED.hasDataset,
                                         object_type=RT.Dataset,
                                         id_only=True)

        ids, _ = rr.find_objects(subject=self.data_product._id,
                                 predicate=PRED.hasStream,
                                 object_type=RT.Stream,
                                 id_only=True)
        self.stream_id = ids[0]
        self.route = self.pubsub.read_stream_route(self.stream_id)

    def do_listen_for_incoming(self):
        subscription_id = self.pubsub.create_subscription(
            'validator', data_product_ids=[self.data_product._id])
        self.addCleanup(self.pubsub.delete_subscription, subscription_id)

        self.granule_capture = []
        self.granule_count = 0

        def on_granule(msg, route, stream_id):
            self.granule_count += 1
            if self.granule_count < 5:
                self.granule_capture.append(msg)

        validator = StandaloneStreamSubscriber('validator',
                                               callback=on_granule)
        validator.start()
        self.addCleanup(validator.stop)

        self.pubsub.activate_subscription(subscription_id)
        self.addCleanup(self.pubsub.deactivate_subscription, subscription_id)

        self.dataset_modified = Event()

        def cb2(*args, **kwargs):
            self.dataset_modified.set()
            # TODO: event isn't using the ExternalDataset, but a different ID for a Dataset

        es = EventSubscriber(event_type=OT.DatasetModified,
                             callback=cb2,
                             origin=self.dataset_id)
        es.start()
        self.addCleanup(es.stop)

    def do_read_dataset(self):
        self.dams.start_external_dataset_agent_instance(
            self.agent_instance._id)
        # Should we wait for the agent process (above) to start
        # before launching the client (below)?
        self.client = None
        end = time.time() + MAX_AGENT_START_TIME
        while not self.client and time.time() < end:
            try:
                self.client = ResourceAgentClient(self.device._id,
                                                  process=FakeProcess())
            except NotFound:
                time.sleep(2)
        if not self.client:
            self.fail(
                msg='external dataset agent process did not start in %d seconds'
                % MAX_AGENT_START_TIME)
        self.client.execute_agent(
            AgentCommand(command=ResourceAgentEvent.INITIALIZE))
        self.client.execute_agent(
            AgentCommand(command=ResourceAgentEvent.GO_ACTIVE))
        self.client.execute_agent(AgentCommand(command=ResourceAgentEvent.RUN))
        self.client.execute_resource(command=AgentCommand(
            command=DriverEvent.START_AUTOSAMPLE))

    def assert_data_received(self):

        # wait up to 30 seconds for the dataset-modified event before checking results
        if not self.dataset_modified.is_set():
            self.dataset_modified.wait(30)
        self.assertTrue(self.granule_count > 2,
                        msg='granule count = %d' % self.granule_count)

        rdt = RecordDictionaryTool.load_from_granule(self.granule_capture[0])
        self.assertAlmostEqual(0, rdt['oxygen'][0], delta=0.01)
        self.assertAlmostEqual(309.77, rdt['pressure'][0], delta=0.01)
        self.assertAlmostEqual(37.9848, rdt['conductivity'][0], delta=0.01)
        self.assertAlmostEqual(9.5163, rdt['temp'][0], delta=0.01)
        self.assertAlmostEqual(3527207897.0, rdt['time'][0], delta=1)

    def do_shutdown(self):
        self.dams.stop_external_dataset_agent_instance(self.agent_instance._id)
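do_listen_for_incoming and assert_data_received rely on a callback setting a gevent Event that the test later waits on with a timeout. A self-contained sketch of that pattern without the pyon services; FakeSubscriber is a stand-in invented for this example:

# Generic sketch of the "callback sets an Event, test waits on it" pattern used above.
import gevent
from gevent.event import Event

class FakeSubscriber(object):
    """Stand-in for StandaloneStreamSubscriber/EventSubscriber (invented for this sketch)."""
    def __init__(self, callback):
        self.callback = callback
        self._greenlet = None

    def start(self):
        # Simulate data arriving asynchronously a little later.
        self._greenlet = gevent.spawn_later(0.5, self.callback, {"temp": [9.5163]})

    def stop(self):
        if self._greenlet is not None:
            self._greenlet.kill()

def run_check(timeout=5):
    received = []
    data_arrived = Event()

    def on_granule(msg):
        received.append(msg)
        data_arrived.set()

    sub = FakeSubscriber(on_granule)
    sub.start()
    try:
        # Same shape as assert_data_received: wait with a timeout, then assert.
        data_arrived.wait(timeout)
        assert data_arrived.is_set(), "no data received within %s seconds" % timeout
        assert len(received) > 0
    finally:
        sub.stop()

if __name__ == "__main__":
    run_check()
    print("received data as expected")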
Beispiel #59
0
class _FelixEtcdWatcher(gevent.Greenlet):
    """
    Greenlet that communicates with the etcd driver over a socket.

    * Does the initial handshake with the driver, sending it the init
      message.
    * Receives the pre-loaded config from the driver and uses that
      to do Felix's one-off configuration.
    * Sends the relevant config back to the driver.
    * Processes the event stream from the driver, sending it on to
      the splitter.

    This class is similar to the EtcdWatcher class in that it uses
    a PathDispatcher to fan out updates but it doesn't own an etcd
    connection of its own.
    """
    def __init__(self, config, etcd_api, status_reporter, hosts_ipset):
        super(_FelixEtcdWatcher, self).__init__()
        self._config = config
        self._etcd_api = etcd_api
        self._status_reporter = status_reporter
        self.hosts_ipset = hosts_ipset
        # Whether we've been in sync with etcd at some point.
        self._been_in_sync = False
        # Keep track of the config loaded from etcd so we can spot if it
        # changes.
        self.last_global_config = None
        self.last_host_config = None
        self.my_config_dir = dir_for_per_host_config(self._config.HOSTNAME)
        # Events triggered by the EtcdAPI Actor to tell us to load the config
        # and start polling.  These are one-way flags.
        self.load_config = Event()
        self.begin_polling = Event()
        # Event that we trigger once the config is loaded.
        self.configured = Event()
        # Polling state initialized at poll start time.
        self.splitter = None
        # Next-hop IP addresses of our hosts, if populated in etcd.
        self.ipv4_by_hostname = {}
        # Forces a resync after the current poll if set.  Safe to set from
        # another thread.  Automatically reset to False after the resync is
        # triggered.
        self.resync_requested = False
        self.dispatcher = PathDispatcher()
        # The Popen object for the driver.
        self._driver_process = None
        # Stats.
        self.read_count = 0
        self.msgs_processed = 0
        self.last_rate_log_time = monotonic_time()
        # Register for events when values change.
        self._register_paths()
        self._usage_report_greenlet = gevent.Greenlet(
            self._periodically_usage_report)

    def estimated_host_count(self):
        """
        Thread safe for the caller because calling len() on a dict is atomic in Python.

        :return: An approximation of our Felix cluster (host) count
        """
        return len(self.ipv4_by_hostname)

    @logging_exceptions
    def _periodically_usage_report(self):
        """
        Greenlet: periodically report the cluster existence to
        projectcalico.org.  Period is about once per day.

        :return: Does not return, unless USAGE_REPORT disabled.
        """

        interval = 86400  # Report roughly once per day (jitter is applied below)
        jitter = random.random() * 0.01 * interval
        try:
            calico_version = str(pkg_resources.require("calico")[0].version)
        except ResolutionError:
            calico_version = "NA"

        _log.info(
            "Started usage report thread.  Usage report interval: %s, pre-jitter: %s",
            interval, jitter)

        # Pre-jitter the reporting thread start by 1% of the interval (about 14 minutes).
        # Jitter prevents a thundering herd for large clusters when the cluster first starts.
        # Only pre-jitter clusters of 25 hosts or more.
        felix_count = self.estimated_host_count()
        if felix_count >= 25:
            gevent.sleep(jitter)

        while True:
            config = self._config
            felix_count = self.estimated_host_count()
            cluster_type = "NA"

            if self._config.USAGE_REPORT:
                _log.info("usage report is enabled")
                report_usage_and_get_warnings(calico_version, config.HOSTNAME,
                                              config.CLUSTER_GUID, felix_count,
                                              cluster_type)

            # Jitter by 10% of the interval (about 2.4 hours)
            jitter = random.random() * 0.1 * interval
            sleep_time = interval - jitter
            _log.info("Usage report interval: %s, sleep-time: %s", interval,
                      sleep_time)
            gevent.sleep(sleep_time)

    def _register_paths(self):
        """
        Program the dispatcher with the paths we care about.
        """
        reg = self.dispatcher.register
        # Profiles and their contents.
        reg(TAGS_KEY, on_set=self.on_tags_set, on_del=self.on_tags_delete)
        reg(RULES_KEY, on_set=self.on_rules_set, on_del=self.on_rules_delete)
        reg(PROFILE_LABELS_KEY,
            on_set=self.on_prof_labels_set,
            on_del=self.on_prof_labels_delete)
        # Tiered policy
        reg(TIER_DATA,
            on_set=self.on_tier_data_set,
            on_del=self.on_tier_data_delete)
        reg(TIERED_PROFILE,
            on_set=self.on_tiered_policy_set,
            on_del=self.on_tiered_policy_delete)
        # Hosts and endpoints.
        reg(HOST_IP_KEY,
            on_set=self.on_host_ip_set,
            on_del=self.on_host_ip_delete)
        reg(PER_ENDPOINT_KEY,
            on_set=self.on_endpoint_set,
            on_del=self.on_endpoint_delete)
        reg(HOST_IFACE_KEY,
            on_set=self.on_host_ep_set,
            on_del=self.on_host_ep_delete)
        reg(CIDR_V4_KEY,
            on_set=self.on_ipam_v4_pool_set,
            on_del=self.on_ipam_v4_pool_delete)
        # Configuration keys.  If any of these is changed or created, we'll
        # restart to pick up the change.
        reg(CONFIG_PARAM_KEY,
            on_set=self._on_config_updated,
            on_del=self._on_config_updated)
        reg(PER_HOST_CONFIG_PARAM_KEY,
            on_set=self._on_host_config_updated,
            on_del=self._on_host_config_updated)

    @logging_exceptions
    def _run(self):
        # Don't do anything until we're told to load the config.
        _log.info("Waiting for load_config event...")
        self.load_config.wait()
        _log.info("...load_config set.  Starting driver read %s loop", self)
        # Start the driver process and wait for it to connect back to our
        # socket.
        self._msg_reader, self._msg_writer = self._start_driver()
        # Loop reading from the socket and processing messages.
        self._loop_reading_from_driver()

    def _loop_reading_from_driver(self):
        while True:
            try:
                # Note: self._msg_reader.new_messages() returns an iterator, so
                # the whole for loop must be inside the try.
                for msg_type, msg in self._msg_reader.new_messages(timeout=1):
                    self._dispatch_msg_from_driver(msg_type, msg)
            except SocketClosed:
                _log.critical("The driver process closed its socket, Felix "
                              "must exit.")
                die_and_restart()
            if self.resync_requested:
                _log.info("Resync requested, sending resync request to driver")
                self.resync_requested = False
                self._msg_writer.send_message(MSG_TYPE_RESYNC)
            # Check that the driver hasn't died.  The recv() call should
            # raise an exception when the buffer runs dry but this usually
            # gets hit first.
            driver_rc = self._driver_process.poll()
            if driver_rc is not None:
                _log.critical(
                    "Driver process died with RC = %s.  Felix must "
                    "exit.", driver_rc)
                die_and_restart()

    def _dispatch_msg_from_driver(self, msg_type, msg):
        # Optimization: put update first in the "switch" block because
        # it's on the critical path.
        if msg_type == MSG_TYPE_UPDATE:
            _stats.increment("Update messages from driver")
            self._on_update_from_driver(msg)
        elif msg_type == MSG_TYPE_CONFIG_LOADED:
            _stats.increment("Config loaded messages from driver")
            self._on_config_loaded_from_driver(msg)
        elif msg_type == MSG_TYPE_STATUS:
            _stats.increment("Status messages from driver")
            self._on_status_from_driver(msg)
        else:
            raise RuntimeError("Unexpected message %s" % msg)
        self.msgs_processed += 1
        if self.msgs_processed % MAX_EVENTS_BEFORE_YIELD == 0:
            # Yield to ensure that other actors make progress.  (gevent only
            # yields for us if the socket would block.)  The sleep must be
            # non-zero to work around gevent issue where we could be
            # immediately rescheduled.
            gevent.sleep(0.000001)

    def _on_update_from_driver(self, msg):
        """
        Called when the driver sends us a key/value pair update.

        After the initial handshake, the stream of events consists
        entirely of updates unless something happens to change the
        state of the driver.

        :param dict msg: The message received from the driver.
        """
        assert self.configured.is_set(), "Received update before config"
        # The driver starts polling immediately, make sure we block until
        # everyone else is ready to receive updates.
        self.begin_polling.wait()
        # Unpack the message.
        key = msg[MSG_KEY_KEY]
        value = msg[MSG_KEY_VALUE]
        _log.debug("Update from driver: %s -> %s", key, value)
        # Output some very coarse stats.
        self.read_count += 1
        if self.read_count % 1000 == 0:
            now = monotonic_time()
            delta = now - self.last_rate_log_time
            _log.info("Processed %s updates from driver "
                      "%.1f/s", self.read_count, 1000.0 / delta)
            self.last_rate_log_time = now
        # Wrap the update in an EtcdEvent object so we can dispatch it via the
        # PathDispatcher.
        n = EtcdEvent("set" if value is not None else "delete", key, value)
        self.dispatcher.handle_event(n)

    def _on_config_loaded_from_driver(self, msg):
        """
        Called when we receive a config loaded message from the driver.

        This message is expected once per resync, when the config is
        pre-loaded by the driver.

        On the first call, responds to the driver synchronously with a
        config response.

        If the config has changed since a previous call, triggers Felix
        to die.
        """
        global_config = msg[MSG_KEY_GLOBAL_CONFIG]
        host_config = msg[MSG_KEY_HOST_CONFIG]
        _log.info("Config loaded by driver:\n"
                  "Global: %s\nPer-host: %s", global_config, host_config)
        if self.configured.is_set():
            # We've already been configured.  We don't yet support
            # dynamic config update so instead we check if the config
            # has changed and die if it has.
            _log.info("Checking configuration for changes...")
            if (host_config != self.last_host_config
                    or global_config != self.last_global_config):
                _log.warning("Felix configuration has changed, "
                             "felix must restart.")
                _log.info("Old host config: %s", self.last_host_config)
                _log.info("New host config: %s", host_config)
                _log.info("Old global config: %s", self.last_global_config)
                _log.info("New global config: %s", global_config)
                die_and_restart()
        else:
            # First time loading the config.  Report it to the config
            # object.  Take copies because report_etcd_config is
            # destructive.
            self.last_host_config = host_config.copy()
            self.last_global_config = global_config.copy()
            self._config.report_etcd_config(host_config, global_config)
            # Config now fully resolved, inform the driver.
            driver_log_file = self._config.DRIVERLOGFILE
            self._msg_writer.send_message(
                MSG_TYPE_CONFIG, {
                    MSG_KEY_LOG_FILE:
                    driver_log_file,
                    MSG_KEY_SEV_FILE:
                    self._config.LOGLEVFILE,
                    MSG_KEY_SEV_SCREEN:
                    self._config.LOGLEVSCR,
                    MSG_KEY_SEV_SYSLOG:
                    self._config.LOGLEVSYS,
                    MSG_KEY_PROM_PORT:
                    self._config.PROM_METRICS_DRIVER_PORT
                    if self._config.PROM_METRICS_ENABLED else None
                })
            self.configured.set()

    def _on_status_from_driver(self, msg):
        """
        Called when we receive a status update from the driver.

        The driver sends us status messages whenever its status changes.
        It moves through these states:

        (1) wait-for-ready (waiting for the global ready flag to become set)
        (2) resync (resyncing with etcd, processing a snapshot and any
            concurrent events)
        (3) in-sync (snapshot processing complete, now processing only events
            from etcd)

        If the driver falls out of sync with etcd then it will start again
        from (1).

        If the status is in-sync, triggers the relevant processing.
        """
        status = msg[MSG_KEY_STATUS]
        _log.info("etcd driver status changed to %s", status)
        if status == STATUS_IN_SYNC and not self._been_in_sync:
            # We're now in sync, tell the Actors that need to do start-of-day
            # cleanup.
            self.begin_polling.wait()  # Make sure splitter is set.
            self._been_in_sync = True
            self.splitter.on_datamodel_in_sync()
            if self._config.REPORT_ENDPOINT_STATUS:
                self._status_reporter.clean_up_endpoint_statuses(async=True)
            self._update_hosts_ipset()
            if self._config.USAGE_REPORT:
                self._usage_report_greenlet.start()

    def _start_driver(self):
        """
        Starts the driver subprocess, connects to it over the socket
        and sends it the init message.

        Stores the Popen object in self._driver_process for future
        access.

        :return: the connected socket to the driver.
        """
        _log.info("Creating server socket.")
        if os.path.exists("/run"):
            # Linux FHS version 3.0+ location for runtime sockets etc.
            sck_filename = "/run/felix-driver.sck"
        else:
            # Older Linux versions use /var/run.
            sck_filename = "/var/run/felix-driver.sck"
        try:
            os.unlink(sck_filename)
        except OSError:
            _log.debug("Failed to delete driver socket, assuming it "
                       "didn't exist.")
        update_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        update_socket.bind(sck_filename)
        update_socket.listen(1)
        if getattr(sys, "frozen", False):
            # We're running under pyinstaller, where we share our executable
            # with the etcd driver.  Re-run this executable with the "driver"
            # argument to invoke the etcd driver.
            cmd = [sys.argv[0], "driver"]
        else:
            # Not running under pyinstaller, execute the etcd driver directly.
            cmd = [sys.executable, "-m", "calico.etcddriver"]
        # etcd driver takes the felix socket name as argument.
        cmd += [sck_filename]
        _log.info("etcd-driver command line: %s", cmd)
        self._driver_process = subprocess.Popen(cmd)
        _log.info("Started etcd driver with PID %s", self._driver_process.pid)
        with gevent.Timeout(10):
            update_conn, _ = update_socket.accept()
        _log.info("Accepted connection on socket")
        # No longer need the server socket, remove it.
        try:
            os.unlink(sck_filename)
        except OSError:
            # Unexpected but carry on...
            _log.exception("Failed to unlink socket")
        else:
            _log.info("Unlinked server socket")

        # Wrap the socket in reader/writer objects that simplify using the
        # protocol.
        reader = MessageReader(update_conn)
        writer = MessageWriter(update_conn)
        # Give the driver its config.
        writer.send_message(
            MSG_TYPE_INIT, {
                MSG_KEY_ETCD_URLS: [
                    self._config.ETCD_SCHEME + "://" + addr
                    for addr in self._config.ETCD_ADDRS
                ],
                MSG_KEY_HOSTNAME:
                self._config.HOSTNAME,
                MSG_KEY_KEY_FILE:
                self._config.ETCD_KEY_FILE,
                MSG_KEY_CERT_FILE:
                self._config.ETCD_CERT_FILE,
                MSG_KEY_CA_FILE:
                self._config.ETCD_CA_FILE,
            })
        return reader, writer

    def on_endpoint_set(self, response, hostname, orchestrator, workload_id,
                        endpoint_id):
        """Handler for endpoint updates, passes the update to the splitter."""
        combined_id = WloadEndpointId(hostname, orchestrator, workload_id,
                                      endpoint_id)
        _log.debug("Endpoint %s updated", combined_id)
        _stats.increment("Endpoint created/updated")
        endpoint = parse_endpoint(self._config, combined_id, response.value)
        self.splitter.on_endpoint_update(combined_id, endpoint)

    def on_endpoint_delete(self, response, hostname, orchestrator, workload_id,
                           endpoint_id):
        """Handler for endpoint deleted, passes the update to the splitter."""
        combined_id = WloadEndpointId(hostname, orchestrator, workload_id,
                                      endpoint_id)
        _log.debug("Endpoint %s deleted", combined_id)
        _stats.increment("Endpoint deleted")
        self.splitter.on_endpoint_update(combined_id, None)

    def on_host_ep_set(self, response, hostname, endpoint_id):
        """Handler for create/update of host endpoint."""
        combined_id = HostEndpointId(hostname, endpoint_id)
        _log.debug("Host iface %s updated", combined_id)
        _stats.increment("Host iface created/updated")
        iface_data = parse_host_ep(self._config, combined_id, response.value)
        self.splitter.on_host_ep_update(combined_id, iface_data)

    def on_host_ep_delete(self, response, hostname, endpoint_id):
        """Handler for delete of host endpoint."""
        combined_id = HostEndpointId(hostname, endpoint_id)
        _log.debug("Host iface %s deleted", combined_id)
        _stats.increment("Host iface deleted")
        self.splitter.on_host_ep_update(combined_id, None)

    def on_rules_set(self, response, profile_id):
        """Handler for rules updates, passes the update to the splitter."""
        _log.debug("Rules for %s set", profile_id)
        _stats.increment("Rules created/updated")
        rules = parse_profile(profile_id, response.value)
        profile_id = intern(profile_id.encode("utf8"))
        self.splitter.on_rules_update(profile_id, rules)

    def on_rules_delete(self, response, profile_id):
        """Handler for rules deletes, passes the update to the splitter."""
        _log.debug("Rules for %s deleted", profile_id)
        _stats.increment("Rules deleted")
        self.splitter.on_rules_update(profile_id, None)

    def on_tags_set(self, response, profile_id):
        """Handler for tags updates, passes the update to the splitter."""
        _log.debug("Tags for %s set", profile_id)
        _stats.increment("Tags created/updated")
        rules = parse_tags(profile_id, response.value)
        profile_id = intern(profile_id.encode("utf8"))
        self.splitter.on_tags_update(profile_id, rules)

    def on_tags_delete(self, response, profile_id):
        """Handler for tags deletes, passes the update to the splitter."""
        _log.debug("Tags for %s deleted", profile_id)
        _stats.increment("Tags deleted")
        self.splitter.on_tags_update(profile_id, None)

    def on_prof_labels_set(self, response, profile_id):
        """Handler for profile labels, passes update to the splitter."""
        _log.debug("Labels for profile %s created/updated", profile_id)
        labels = parse_labels(profile_id, response.value)
        profile_id = intern(profile_id.encode("utf8"))
        self.splitter.on_prof_labels_set(profile_id, labels)

    def on_prof_labels_delete(self, response, profile_id):
        """Handler for profile label deletion

        passed update to the splitter."""
        _log.debug("Labels for profile %s deleted", profile_id)
        profile_id = intern(profile_id.encode("utf8"))
        self.splitter.on_prof_labels_set(profile_id, None)

    def on_tier_data_set(self, response, tier):
        _log.debug("Tier data set for tier %s", tier)
        _stats.increment("Tier data created/updated")
        data = parse_tier_data(tier, response.value)
        self.splitter.on_tier_data_update(tier, data)

    def on_tier_data_delete(self, response, tier):
        _log.debug("Tier data deleted for tier %s", tier)
        _stats.increment("Tier data deleted")
        self.splitter.on_tier_data_update(tier, None)

    def on_tiered_policy_set(self, response, tier, policy_id):
        _log.debug("Rules for %s/%s set", tier, policy_id)
        _stats.increment("Tiered rules created/updated")
        policy_id = TieredPolicyId(tier, policy_id)
        rules = parse_policy(policy_id, response.value)
        if rules is not None:
            selector = rules.pop("selector")
            order = rules.pop("order")
            self.splitter.on_rules_update(policy_id, rules)
            self.splitter.on_policy_selector_update(policy_id, selector, order)
        else:
            self.splitter.on_rules_update(policy_id, None)
            self.splitter.on_policy_selector_update(policy_id, None, None)

    def on_tiered_policy_delete(self, response, tier, policy_id):
        """Handler for tiered rules deletes, passes update to the splitter."""
        _log.debug("Rules for %s/%s deleted", tier, policy_id)
        _stats.increment("tiered rules deleted")
        policy_id = TieredPolicyId(tier, policy_id)
        self.splitter.on_rules_update(policy_id, None)
        self.splitter.on_policy_selector_update(policy_id, None, None)

    def on_host_ip_set(self, response, hostname):
        _stats.increment("Host IP created/updated")
        ip = parse_host_ip(hostname, response.value)
        if ip:
            self.ipv4_by_hostname[hostname] = ip
        else:
            _log.warning(
                "Invalid IP for hostname %s: %s, treating as "
                "deletion", hostname, response.value)
            self.ipv4_by_hostname.pop(hostname, None)
        self._update_hosts_ipset()

    def on_host_ip_delete(self, response, hostname):
        _stats.increment("Host IP deleted")
        if self.ipv4_by_hostname.pop(hostname, None):
            self._update_hosts_ipset()

    def _update_hosts_ipset(self):
        if not self._config.IP_IN_IP_ENABLED:
            _log.debug(
                "Ignoring update to hosts ipset because IP-in-IP is disabled")
            return
        if not self._been_in_sync:
            _log.debug("Deferring update to hosts ipset until we're in-sync")
            return
        self.hosts_ipset.replace_members(frozenset(
            self.ipv4_by_hostname.values()),
                                         async=True)

    def _on_config_updated(self, response, config_param):
        new_value = response.value
        if self.last_global_config.get(config_param) != new_value:
            _log.critical(
                "Global config value %s updated.  Felix must be "
                "restarted.", config_param)
            die_and_restart()
        _stats.increment("Global config (non) updates")

    def _on_host_config_updated(self, response, hostname, config_param):
        if hostname != self._config.HOSTNAME:
            _log.debug("Ignoring config update for host %s", hostname)
            return
        _stats.increment("Per-host config created/updated")
        new_value = response.value
        if self.last_host_config.get(config_param) != new_value:
            _log.critical(
                "Per-host config value %s updated.  Felix must be "
                "restarted.", config_param)
            die_and_restart()

    def on_ipam_v4_pool_set(self, response, pool_id):
        _stats.increment("IPAM pool created/updated")
        pool = parse_ipam_pool(pool_id, response.value)
        self.splitter.on_ipam_pool_updated(pool_id, pool)

    def on_ipam_v4_pool_delete(self, response, pool_id):
        _stats.increment("IPAM pool deleted")
        self.splitter.on_ipam_pool_updated(pool_id, None)
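_start_driver above follows a common parent/driver handshake: bind a unix socket, launch the child with the socket path on its command line, accept the connection back, and exchange messages. The sketch below shows only that shape, using plain blocking sockets and newline-delimited JSON as an assumed framing; it is not Calico's actual MessageReader/MessageWriter protocol:

# Illustrative sketch of the parent/driver socket handshake described in _start_driver above.
import json
import os
import socket
import subprocess
import sys

SOCKET_PATH = "/tmp/example-driver.sck"

# The "driver" side: connect back to the socket named on the command line,
# read one init message, answer with one status message.
CHILD_SOURCE = r"""
import json, socket, sys
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(sys.argv[1])
rf = s.makefile("r")
wf = s.makefile("w")
init = json.loads(rf.readline())
wf.write(json.dumps({"type": "status", "seen": init["type"]}) + "\n")
wf.flush()
"""

def main():
    try:
        os.unlink(SOCKET_PATH)
    except OSError:
        pass
    server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    server.bind(SOCKET_PATH)
    server.listen(1)

    # Launch the child process, passing the socket path as its only argument.
    child = subprocess.Popen([sys.executable, "-c", CHILD_SOURCE, SOCKET_PATH])

    conn, _ = server.accept()
    rf = conn.makefile("r")
    wf = conn.makefile("w")
    wf.write(json.dumps({"type": "init", "etcd_urls": ["http://127.0.0.1:2379"]}) + "\n")
    wf.flush()
    reply = json.loads(rf.readline())
    print("reply from driver: %r" % (reply,))

    conn.close()
    server.close()
    child.wait()
    os.unlink(SOCKET_PATH)

if __name__ == "__main__":
    main()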
Beispiel #60
0
class AceClient(object):

    def __init__(self, host, port, connect_timeout=5, result_timeout=10):
        # Receive buffer
        self._recvbuffer = None
        # Stream URL
        self._url = None
        # Ace stream socket
        self._socket = None
        # Result timeout
        self._resulttimeout = result_timeout
        # Shutting down flag
        self._shuttingDown = Event()
        # Product key
        self._product_key = None
        # Current STATUS
        self._status = None
        # Current STATE
        self._state = None
        # Current AUTH
        self._auth = None
        self._gender = None
        self._age = None
        # Result (Created with AsyncResult() on call)
        self._result = AsyncResult()
        self._authevent = Event()
        # Result for getURL()
        self._urlresult = AsyncResult()
        # Event for resuming from PAUSE
        self._resumeevent = Event()

        # Logger
        logger = logging.getLogger('AceClient_init')

        try:
            self._socket = telnetlib.Telnet(host, port, connect_timeout)
            logger.info("Successfully connected with Ace!")
        except Exception as e:
            raise AceException(
                "Socket creation error! Ace is not running? " + repr(e))

        # Spawning recvData greenlet
        gevent.spawn(self._recvData)
        gevent.sleep()

    def __del__(self):
        # Destructor just calls destroy() method
        self.destroy()

    def destroy(self):
        '''
        AceClient Destructor
        '''
        if self._shuttingDown.isSet():
            # Already in the middle of destroying
            return

        # Logger
        logger = logging.getLogger("AceClient_destroy")
        # We should resume video to prevent read greenlet deadlock
        self._resumeevent.set()
        # And to prevent getUrl deadlock
        self._urlresult.set()

        # Trying to disconnect
        try:
            logger.debug("Destroying client...")
            self._shuttingDown.set()
            self._write(AceMessage.request.SHUTDOWN)
        except:
            # Ignore exceptions on destroy
            pass
        finally:
            self._shuttingDown.set()

    def _write(self, message):
        try:
            self._socket.write(message + "\r\n")
        except EOFError as e:
            raise AceException("Write error! " + repr(e))

    def aceInit(self, gender=AceConst.SEX_MALE, age=AceConst.AGE_18_24, product_key=None, pause_delay=0):
        self._product_key = product_key
        self._gender = gender
        self._age = age
        # PAUSE/RESUME delay
        self._pausedelay = pause_delay

        # Logger
        logger = logging.getLogger("AceClient_aceInit")

        # Sending HELLO
        self._write(AceMessage.request.HELLO)
        if not self._authevent.wait(self._resulttimeout):
            errmsg = "Authentication timeout. Wrong key?"
            logger.error(errmsg)
            raise AceException(errmsg)

        if not self._auth:
            errmsg = "Authentication error. Wrong key?"
            logger.error(errmsg)
            raise AceException(errmsg)

        logger.debug("aceInit ended")

    def _getResult(self):
        # Logger
        logger = logging.getLogger("AceClient_START")

        try:
            result = self._result.get(timeout=self._resulttimeout)
            if not result:
                errmsg = "START error!"
                logger.error(errmsg)
                raise AceException(errmsg)
        except gevent.Timeout:
            errmsg = "START timeout!"
            logger.error(errmsg)
            raise AceException(errmsg)

        return result

    def START(self, datatype, value):
        '''
        Start video method
        '''
        self._result = AsyncResult()
        self._urlresult = AsyncResult()

        self._write(AceMessage.request.LOADASYNC(datatype.upper(), 0, value))
        contentinfo = self._getResult()

        self._write(AceMessage.request.START(datatype.upper(), value))
        self._getResult()

        return contentinfo

    def getUrl(self, timeout=40):
        # Logger
        logger = logging.getLogger("AceClient_getURL")

        try:
            res = self._urlresult.get(timeout=timeout)
            return res
        except gevent.Timeout:
            errmsg = "getURL timeout!"
            logger.error(errmsg)
            raise AceException(errmsg)

    def getPlayEvent(self, timeout=None):
        '''
        Blocking while in PAUSE, non-blocking while in RESUME
        '''
        self._resumeevent.wait(timeout=timeout)
        return

    def _recvData(self):
        '''
        Data receiver method for greenlet
        '''
        logger = logging.getLogger('AceClient_recvdata')

        while True:
            gevent.sleep()
            try:
                self._recvbuffer = self._socket.read_until("\r\n")
                self._recvbuffer = self._recvbuffer.strip()
            except:
                # If something happened during read, abandon reader.
                if not self._shuttingDown.isSet():
                    logger.error("Exception at socket read")
                    self._shuttingDown.set()
                return

            if self._recvbuffer:
                # Parsing everything only if the string is not empty
                if self._recvbuffer.startswith(AceMessage.response.HELLO):
                    # Parse HELLO
                    if 'key=' in self._recvbuffer:
                        self._request_key = self._recvbuffer.split()[2].split('=')[1]
                        try:
                            self._write(AceMessage.request.READY_key(
                                self._request_key, self._product_key,
                                self._resulttimeout))
                        except urllib2.URLError as e:
                            logger.error("Can't connect to keygen server! " + \
                                repr(e))
                            self._auth = False
                            self._authevent.set()
                        self._request_key = None
                    else:
                        self._write(AceMessage.request.READY_nokey)

                elif self._recvbuffer.startswith(AceMessage.response.NOTREADY):
                    # NOTREADY
                    logger.error("Ace is not ready. Wrong auth?")
                    self._auth = False
                    self._authevent.set()

                elif self._recvbuffer.startswith(AceMessage.response.LOADRESP):
                    # LOADRESP
                    _contentinfo_raw = self._recvbuffer.split()[2:]
                    _contentinfo_raw = ' '.join(_contentinfo_raw)
                    _contentinfo = json.loads(_contentinfo_raw)
                    if _contentinfo.get('status') == 100:
                        logger.error("LOADASYNC returned error with message: %s"
                            % _contentinfo.get('message'))
                        self._result.set(False)
                    else:
                        logger.debug("Content info: %s", _contentinfo)
                        _filename = urllib2.unquote(_contentinfo.get('files')[0][0])
                        self._result.set(_filename)

                elif self._recvbuffer.startswith(AceMessage.response.START):
                    # START
                    try:
                        self._url = self._recvbuffer.split()[1]
                        self._urlresult.set(self._url)
                        self._resumeevent.set()
                    except IndexError:
                        self._url = None

                elif self._recvbuffer.startswith(AceMessage.response.STOP):
                    pass

                elif self._recvbuffer.startswith(AceMessage.response.SHUTDOWN):
                    logger.debug("Got SHUTDOWN from engine")
                    self._socket.close()
                    return

                elif self._recvbuffer.startswith(AceMessage.response.AUTH):
                    try:
                        self._auth = self._recvbuffer.split()[1]
                        # Send USERDATA here
                        self._write(
                            AceMessage.request.USERDATA(self._gender, self._age))
                    except:
                        pass
                    self._authevent.set()

                elif self._recvbuffer.startswith(AceMessage.response.GETUSERDATA):
                    raise AceException("You should init me first!")

                elif self._recvbuffer.startswith(AceMessage.response.STATE):
                    self._state = self._recvbuffer.split()[1]

                elif self._recvbuffer.startswith(AceMessage.response.STATUS):
                    self._tempstatus = self._recvbuffer.split()[1].split(';')[0]
                    if self._tempstatus != self._status:
                        self._status = self._tempstatus
                        logger.debug("STATUS changed to " + self._status)

                    if self._status == 'main:err':
                        errmsg = self._status + ' with message ' + self._recvbuffer.split(';')[2]
                        logger.error(errmsg)
                        self._result.set_exception(AceException(errmsg))
                        self._urlresult.set_exception(AceException(errmsg))
                    elif self._status == 'main:starting':
                        self._result.set(True)

                elif self._recvbuffer.startswith(AceMessage.response.PAUSE):
                    logger.debug("PAUSE event")
                    self._resumeevent.clear()

                elif self._recvbuffer.startswith(AceMessage.response.RESUME):
                    logger.debug("RESUME event")
                    gevent.sleep(self._pausedelay)
                    self._resumeevent.set()