Example #1
 def stop(self):
     '''
     Stop running tasks (coroutines); this means :py:meth:`main` will return (if running) on the next gevent switch.
     '''
     if self.tasks:
         gevent.killall(self.tasks)
         del self.tasks[:]
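The docstring notes that killing the tasks makes main() return on the next switch; a minimal standalone sketch of that behaviour (the Runner class below is hypothetical and assumes main() blocks in a joinall() on the same tasks; it is not taken from the project above):

import gevent

class Runner:
    def __init__(self):
        self.tasks = []

    def main(self):
        # Spawn some long-running tasks and wait for them all.
        self.tasks = [gevent.spawn(gevent.sleep, 60) for _ in range(3)]
        gevent.joinall(self.tasks)  # returns once every task is dead

    def stop(self):
        # Same pattern as the example above: kill the tasks, then clear the list.
        if self.tasks:
            gevent.killall(self.tasks)
            del self.tasks[:]

runner = Runner()
main_greenlet = gevent.spawn(runner.main)
gevent.sleep(0.1)     # let main() start and block in joinall()
runner.stop()
main_greenlet.join()  # main() returns on the next switch after the kill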
Example #2
    def run(self):
        logger.debug("Starting agent...")
        global agent_running, agent_stopped
        self._start_engines()

        self.running = True
        agent_running.set()
        logger.debug("Agent started.")

        while not self.interrupted:
            try:
                gevent.joinall(self._greenlets, timeout=0.1)
            except KeyboardInterrupt:
                logger.debug("Interrupted.")
                break

            try:
                req = self._inbox.get_nowait()
                if req:
                    self._process_request(req)
            except Empty:
                pass

        # stop engines in reverse order.
        self._stop_engines()

        gevent.killall(self._greenlets, timeout=1)

        self.running = False
        agent_stopped.set()
        logger.debug("Agent stopped.")
Example #3
    def on_stop_event(self, event):
        jobs = self.activated_events.get(event)
        for job in jobs:
            if job in self.jobs:
                self.jobs.remove(job)

        killall(jobs)
Example #4
    def run(self, in_addr="0.0.0.0:30002", out_addr="tcp://*:5000"):
        self.is_running = True
        signal.signal(signal.SIGTERM, self.stop)
        ip, port = in_addr.split(':')

        self.child = Decoder(os.getpid(), out_addr)
        self.child.start()
        time.sleep(.5)

        logger.info("Listen on %s:%s, output goes to: %s" % (ip, port, out_addr))
        context = zmq.Context()
        self.push = context.socket(zmq.PUSH)
        self.push.connect("ipc:///tmp/pinba2zmq.sock")

        pool = Pool(5000)
        self.server = DgramServer(ip, int(port), self.recv, spawn=pool)
        logger.info("Ready!")
        try:
            gevent.spawn(self.watcher)
            self.workers = [gevent.spawn_later(1, self.interval)]
            self.server.serve_forever()
        except KeyboardInterrupt:
            pass
        except Exception:
            logger.error(traceback.format_exc())

        logger.info("Daemon shutting down")
        self.is_running = False
        gevent.killall(self.workers)
        self.push.close()
        self.child.terminate()
        logger.info("Daemon stops")
Example #5
 def exit(self):
     print(self.name + ": exiting")
     if hasattr(self, "challenger"):
         self.challenger.signal_victory()
     gevent.killall([self._receiver_greenlet])
     self.ws.close()
     raise gevent.GreenletExit
Example #6
 def shutdown(self):
     if self._spawn_greenlets:
         try:
             gevent.killall(list(self._spawn_greenlets), block=True)
         except Exception:
             log.warn("Ignoring error while killing spawn greenlets", exc_info=True)
         self._spawn_greenlets.clear()
Example #7
 def stop(self):
     """Close all AMQP connections and channels, stop greenlets"""
     if self.stopped:
         log.warning("%s: Already stopped, can't stop again.", self.lbl)
         return
     log.debug("%s: Closing all AMQP channels.", self.lbl)
     for gid in self.chans.keys():
         try:
             self.chans.pop(gid).close()
         except Exception as exc:
             log.warning("%s: Closing AMQP channel exception %r.",
                         self.lbl, exc)
     log.debug("%s: Closing all AMQP connections.", self.lbl)
     for gid in self.conns.keys():
         try:
             self.conns.pop(gid).close()
         except Exception as exc:
             log.warning("%s: Closing AMQP connection exception %r.",
                         self.lbl, exc)
     if self.greenlets:
         log.debug("%s: Stopping all greenlets %s.",
                   self.lbl, tuple(self.greenlets.keys()))
         gevent.killall(self.greenlets.values())
         gevent.joinall(self.greenlets.values())
         self.greenlets.clear()
     self.stopped = True
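The kill / join / clear sequence at the end of stop() is a common idiom for a registry of greenlets kept in a dict; a minimal standalone sketch (the 'tick' and 'beat' names are illustrative only, not from the project above):

import gevent

greenlets = {
    'tick': gevent.spawn(gevent.sleep, 60),
    'beat': gevent.spawn(gevent.sleep, 60),
}
gevent.killall(list(greenlets.values()))  # ask every greenlet to exit
gevent.joinall(list(greenlets.values()))  # wait until they are actually dead
greenlets.clear()                         # drop the references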
Example #8
def cleanup_tasks():
    tasks = [
        running_task
        for running_task in gc.get_objects()
        if isinstance(running_task, gevent.Greenlet)
    ]
    gevent.killall(tasks)
Example #9
def sync_everything():
    try:
        redis_handler.close_extra_connections()
        thread_list = []
        if not config.NO_ROUTE53:
            route53_handler = dikhao.aws.route53.Route53Handler(
                apikey=config.AWS_ACCESS_KEY_ID,
                apisecret=config.AWS_SECRET_ACCESS_KEY)
            new_threads = dikhao.sync.sync_route53(route53_handler, redis_handler,
                config.HOSTED_ZONES, expire=config.EXPIRE_DURATION, ttl=config.TTL)
            thread_list.extend(new_threads)
        if not config.NO_EC2:
            new_threads = dikhao.sync.sync_ec2(redis_handler, apikey=config.AWS_ACCESS_KEY_ID,
                apisecret=config.AWS_SECRET_ACCESS_KEY, regions=config.REGIONS,
                expire=config.EXPIRE_DURATION)
            thread_list.extend(new_threads)
        print 'Sync Started... . . .  .  .   .     .     .'
        gevent.joinall(thread_list, timeout=config.SYNC_TIMEOUT)
        gevent.killall(thread_list)
        print 'Cleanup stale records initiated...'
        dikhao.sync.clean_stale_entries(redis_handler,
                                 clean_route53=not config.NO_ROUTE53,
                                 clean_ec2=not config.NO_EC2)
        print 'Details saved. Indexing records!'
        dikhao.sync.index_records(redis_handler, expire=config.EXPIRE_DURATION)
        redis_handler.delete_lock(timeout=config.MIN_SYNC_GAP)
    except redis.ResponseError:
        print 'Redis ResponseError happened. Closing all active connections'
        redis_handler.close_extra_connections(max_connections=0)
    print 'Complete'
Example #10
    def _exit(self, *args):
        for j in self.jobs:
            j.unlink(self._exit)

        gevent.killall(self.jobs)
        ClientManager.remove(self)
        self.on_exit()
Example #11
 def stop(self):
     """
     Stop scan
     :return:
     """
     # TODO: stop correctly
     gevent.killall(self.workers)
Example #12
    def test_register_agent_to_auth_protected_host(self):
        r = PsDashRunner({
            'PSDASH_AUTH_USERNAME': '******',
            'PSDASH_AUTH_PASSWORD': '******'
        })
        agent = PsDashRunner({
            'PSDASH_AGENT': True,
            'PSDASH_PORT': 5001,
            'PSDASH_REGISTER_TO': 'http://localhost:5000',
            'PSDASH_AUTH_USERNAME': '******',
            'PSDASH_AUTH_PASSWORD': '******'
        })
        jobs = []
        jobs.append(gevent.spawn(r.run))
        gevent.sleep(0.3)
        jobs.append(gevent.spawn(agent.run))
        gevent.sleep(0.3)

        self.assertIn('127.0.0.1:5001', r.get_nodes())
        self.assertEquals(r.get_node('127.0.0.1:5001').name, socket.gethostname())
        self.assertEquals(r.get_node('127.0.0.1:5001').port, 5001)

        r.server.close()
        agent.server.close()
        gevent.killall(jobs)
Example #13
    def run(self):
        '''Entry point for running agent.

        Subclasses should not override this method. Instead, the setup
        and finish methods should be overridden to customize behavior.
        '''
        def _trigger_event(event):
            for callback, args, kwargs in self._event_callbacks.get(event, ()):
                callback(*args, **kwargs)
        self.vip_socket = vip.Socket(self.context)   # pylint: disable=attribute-defined-outside-init
        if self.vip_identity:
            self.vip_socket.identity = self.vip_identity
        _trigger_event('setup')
        self.vip_socket.connect(self.vip_address)
        _trigger_event('connect')
        # Start periodic callbacks
        for periodic in self._periodics:
            periodic.start()
        _trigger_event('start')
        try:
            self._vip_loop()
        finally:
            _trigger_event('stop')
            gevent.killall(self._periodics)
            _trigger_event('disconnect')
            try:
                self.vip_socket.disconnect(self.vip_address)
            except ZMQError:
                pass
            _trigger_event('finish')
Example #14
    def kill(self, detach=False):
        """This function must/will be called when a socket is to be completely
        shut down, closed by connection timeout, connection error or explicit
        disconnection from the client.

        It will call all of the Namespace's
        :meth:`~socketio.namespace.BaseNamespace.disconnect` methods
        so that you can shut-down things properly.

        """
        # Clear out the callbacks
        self.ack_callbacks = {}
        log.debug('Killing %s' % self)
        self.server_queue.put_nowait(None)
        self.client_queue.put_nowait(None)

        if self.connected:
            self.state = self.STATE_DISCONNECTING
            if len(self.active_ns) > 0:
                log.debug("Calling disconnect() on %s" % self)
                self.disconnect()
        else:
            log.error('Socket kill()ed before being connected')

        if detach:
            self.detach()

        gevent.killall(self.jobs)
        log.debug('Killed %s' % self)
Example #15
 def connect(self):
     self._socket.connect((self.host, self.port))
     try:
         jobs = [gevent.spawn(self._recv_loop), gevent.spawn(self._send_loop)]
         gevent.joinall(jobs)
     finally:
         gevent.killall(jobs)
Example #16
 def cleanup_greenlets(self, timeout=None):
     """Allow the greenlets stored in this list timeout seconds to finish.
     After the timeout, kill the remaining greenlets."""
     LOG.info("Cleaning up greenlets")
     if timeout:
         gevent.joinall(self, timeout=timeout)
     gevent.killall(self, exception=gevent.Timeout)
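The two-phase shutdown in cleanup_greenlets (a grace period via joinall, then a forced kill with gevent.Timeout) can be shown standalone; a minimal sketch using plain greenlets rather than the list subclass the method appears to belong to:

import gevent

greenlets = [gevent.spawn(gevent.sleep, n) for n in (0.1, 10)]
gevent.joinall(greenlets, timeout=1)                 # grace period: the fast one finishes
gevent.killall(greenlets, exception=gevent.Timeout)  # the straggler is killed with Timeout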
Example #17
    def kill(self):
        """This function must/will be called when a socket is to be completely
        shut down, closed by connection timeout, connection error or explicit
        disconnection from the client.

        It will call all of the Namespace's
        :meth:`~socketio.namespace.BaseNamespace.disconnect` methods
        so that you can shut-down things properly.

        """
        # Clear out the callbacks
        self.ack_callbacks = {}
        if self.connected:
            self.state = self.STATE_DISCONNECTING
            self.server_queue.put_nowait(None)
            self.client_queue.put_nowait(None)
            if len(self.active_ns) > 0:
                self.disconnect()

            if self.sessid in self.server.sockets:
                self.server.sockets.pop(self.sessid)

            gevent.killall(self.jobs)
        else:
            raise Exception('Not connected')
Example #18
        def toBeScheduled():
            for i in iterList:
                bc = bcList[i]  # makeBroadcast(i)
                sd = sdList[i]
                recv = servers[0].get
                th = Greenlet(honestParty, i, N, t, controlChannels[i], bc, recv, sd, options.B)
                th.parent_args = (N, t)
                th.name = 'client_test_freenet.honestParty(%d)' % i
                controlChannels[i].put(('IncludeTransaction',
                    transactionSet))
                th.start()
                mylog('Summoned party %i at time %f' % (i, time.time()), verboseLevel=-1)
                ts.append(th)

            try:
                gevent.joinall(ts)
            except ACSException:
                gevent.killall(ts)
            except finishTransactionLeap:  ### Manually jump to this level
                print 'msgCounter', msgCounter
                print 'msgTypeCounter', msgTypeCounter
                # message id 0 (duplicated) for signatureCost
                logChannel.put(StopIteration)
                mylog("=====", verboseLevel=-1)
                for item in logChannel:
                    mylog(item, verboseLevel=-1)
                mylog("=====", verboseLevel=-1)
            except gevent.hub.LoopExit:  # Manual fix for early stop
                while True:
                    gevent.sleep(1)
                checkExceptionPerGreenlet()
            finally:
                print "Consensus Finished"
Example #19
 def cleanup():
     site_temp.storage.deleteFiles()
     site_temp.content_manager.contents.db.deleteSite(site_temp)
     site_temp.content_manager.contents.db.close()
     db_path = "%s-temp/content.db" % config.data_dir
     os.unlink(db_path)
     del ContentDb.content_dbs[db_path]
     gevent.killall([obj for obj in gc.get_objects() if isinstance(obj, gevent.Greenlet) and obj not in threads_before])
Example #20
 def check_queue(input_queue):
     while True:
         try:
             eq_(MSG, input_queue.get(block=False))
             gevent.killall(greenlets)
         except Empty:
             pass
         gevent.sleep(0)
Example #21
 def closeEvent(self, event):
     global app_running
     app_running = False
     try:
         gevent.killall(timeout=2)
     except:
         pass
     event.accept()
Example #22
    def kill_local_jobs(self):
        """Kills all the jobs spawned with BaseNamespace.spawn() on a namespace
        object.

        This will be called automatically if the ``watcher`` process detects
        that the Socket was closed.
        """
        gevent.killall(self.jobs)
        self.jobs = []
Example #23
def main(*args):
	g_app = gevent.spawn(app.run)
	g_irc = gevent.spawn(irc.run)
	try:
		ready = gevent.wait([g_app, g_irc], count=1)
		for g in ready:
			g.get() # let it raise
	finally:
		gevent.killall([g_app, g_irc], block=True)
Example #24
 def connect(self):
     print 'Connecting to ' + self.host + ":" + str(self.port)
     self._socket.connect((self.host, self.port))
     print 'Connected.'
     try:
         jobs = [gevent.spawn(self._recvLoop), gevent.spawn(self._sendLoop)]
         gevent.joinall(jobs)
     finally:
         gevent.killall(jobs)
Example #25
 def __call__(self):
     """Start the chat session I/O loops and wait for them to exit."""
     logging.info("%r: Starting session.", self.__addr)
     procs = [gevent.spawn(self._send_proc),
              gevent.spawn(self._recv_proc)]
     gevent.wait(procs, count=1)            
     gevent.killall(procs)
     self.__serv.clear_name(self)
     logging.info("%r: Session halted.", self.__addr)
Example #26
 def cleanup():
     site.storage.deleteFiles()
     site.content_manager.contents.db.deleteSite(site)
     del SiteManager.site_manager.sites["1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"]
     site.content_manager.contents.db.close()
     db_path = "%s/content.db" % config.data_dir
     os.unlink(db_path)
     del ContentDb.content_dbs[db_path]
     gevent.killall([obj for obj in gc.get_objects() if isinstance(obj, gevent.Greenlet) and obj not in threads_before])
Example #27
def update_workers():
    global WORKERS
    if WORKERS:
        gevent.killall(WORKERS)
    del WORKERS[:]
    nodes = list(db.query(Resource).filter(Resource.removed==False).all())
    logging.error(nodes)
    for job in [gevent.spawn(worker, node.id, node.ip_addr) for node in nodes]:
        WORKERS.append(job)
    gevent.joinall(WORKERS)
Example #28
 def test_killall_iterable_argument_non_block(self):
     p1 = GreenletSubclass.spawn(lambda: gevent.sleep(0.5))
     p2 = GreenletSubclass.spawn(lambda: gevent.sleep(0.5))
     s = set()
     s.add(p1)
     s.add(p2)
     gevent.killall(s, block=False)
     gevent.sleep(0.5)
     for g in s:
         assert g.dead
Example #29
def main():
    global VERBOSE
    global DRY_RUN

    from docopt import docopt

    arguments = docopt(__doc__, version='CopperEgg Agent')

    api_key = arguments.get('--key', None)
    if not api_key:
        print('Invalid API key')
        return 1

    host = arguments.get('--host', None)
    if not host:
        print('Invalid hostname')
        return 1

    prefix = arguments.get('--prefix', None)
    if not prefix:
        print('Invalid group prefix')
        return 1

    if arguments.get('--dry'):
        print('DRY mode, no changes made.')
        DRY_RUN = True

    if arguments.get('--verbose'):
        print('VERBOSE mode.')
        VERBOSE = True
        logging.basicConfig(level=logging.DEBUG,
                            format=u'[%(levelname)s] %(message)s',)

    jobs = []

    queue = {}
    queue['GET'] = Queue(maxsize=10000)
    queue['POST'] = Queue(maxsize=10000)
    queue['HEAD'] = Queue()
    queue['PUT'] = Queue()

    metrics_group = "{}_{}_backend".format(prefix, host)
    jobs.append(gevent.spawn(get_metrics_nginx_backend, queue))

    for verb in HTTP_VERBS:
        jobs.append(gevent.spawn(post_metrics,
                                 queue[verb],
                                 5,  # report every 5 seconds
                                 api_key,
                                 POST_URL.format(metrics_group)))

    try:
        gevent.joinall(jobs)
    except KeyboardInterrupt:
        gevent.killall(jobs)
Example #30
    def ws_serve(self, ws):
        """Handle a new websocket connection."""
        logging.debug('ws_serve()')

        def emit(signal, message):
            try:
                ws.send(json.dumps({'signal': signal, 'load': message}))
            except geventwebsocket.WebSocketError:
                logging.info('websocket closed. could not send: '+signal +
                             ' -- '+str(message))

        analysis_instance = self.instantiate_analysis_class()
        logging.debug("analysis instantiated")
        analysis_instance.set_emit_fn(emit)
        greenlets = []
        greenlets.append(gevent.Greenlet.spawn(
            analysis_instance.on_connect
        ))

        def process_message(message):
            if message is None:
                logging.debug('empty message received.')
                return

            message_data = json.loads(message)
            analysis_instance.onall(message_data)
            if 'signal' not in message_data or 'load' not in message_data:
                logging.info('message not processed: '+message)
                return

            fn_name = 'on_'+message_data['signal']
            if not hasattr(self.analysis_class, fn_name):
                logging.warning('frontend wants to call '+fn_name +
                                ' which is not in the Analysis class.')
                return

            logging.debug('calling '+fn_name)
            # every 'on_' is processed in a separate greenlet
            greenlets.append(gevent.Greenlet.spawn(
                Meta.run_action, analysis_instance,
                fn_name, message_data['load']
            ))

        while True:
            try:
                message = ws.receive()
                logging.debug('received message: '+str(message))
                process_message(message)
            except geventwebsocket.WebSocketError:
                break

        # disconnected
        logging.debug("disconnecting analysis instance")
        gevent.killall(greenlets)
        analysis_instance.on_disconnect()
Example #31
def main():
    greenlets = [
        gevent.spawn(consumer),
        gevent.spawn(producer),
    ]

    #gevent.signal(signal.SIGQUIT, gevent.kill)

    try:
        gevent.joinall(greenlets)
    except KeyboardInterrupt:
        print("Exiting...")
        gevent.killall(greenlets)
Example #32
    def _run(self):
        self.services_not_available.wait()
        logger.info('Start \'{}\' worker'.format(type(self).__name__),
                    extra=journal_context({'MESSAGE_ID': BRIDGE_START}, {}))
        self.immortal_jobs = self._start_jobs()

        try:
            while not self.exit:
                gevent.sleep(self.delay)
                self.check_and_revive_jobs()
        except Exception as e:
            logger.error(e)
            gevent.killall(self.immortal_jobs.values(), timeout=5)
Example #33
 def shutdown(self):
     if not self.has_shutdown:
         # shutdown all greenlets and clean up
         gevent.killall(self.greenlets)
         self.qh.shutdown()
         try:
             self.gm.shutdown()
         except KeyboardInterrupt:
             pass
         self.dm.shutdown()
         gevent.shutdown()
         self.has_shutdown = True
         logger.info('Server exit nicely.')
Example #34
 def run(self):
     try:
         self.Logger.info('Start working at {}'.format(datetime.now()))
         jobs = [
             self.client.get_tenders_forward(self.to_get_queue),
             self.client.get_tenders_backward(self.to_get_queue),
             self.get_tender(),
             self.save_docs()
         ]
         gevent.joinall(jobs)
         self.Logger.info('Finish work at {}'.format(datetime.now()))
     except KeyboardInterrupt:
         gevent.killall(jobs)
Example #35
 def watcher(request):
     """Watch if any of the greenlets for a request have died. If so, kill the
     request and the socket.
     """
     # TODO: add that if any of the request.jobs die, kill them all and exit
     io = request.environ['socketio']
     gevent.sleep(5.0)
     while True:
         gevent.sleep(1.0)
         if not io.connected():
             # TODO: Warning, what about the on_disconnect callbacks ?
             gevent.killall(request.jobs)
             return
Example #36
    def release_for_job(self, job_id):
        """
        Release resources associated with the specified job.

        :param job_id: the job's ID.
        """

        _logger.debug("Releasing tasks for job: '%s'", job_id)
        s = self._tasks_per_job.get(job_id)
        if s is None:
            return

        gevent.killall(list(s))
Example #37
    def proxy_checker(self):
        ''' Concurrency stuff here '''
        jobs = [gevent.spawn(self.proxy_checker_resp, proxy) for proxy in self.proxy_list]
        try:
            while 1:
                gevent.sleep(1)
                if len(self.final_proxies) >= self.show_num:
                    gevent.killall(jobs)
                    break
        except KeyboardInterrupt:
            sys.exit('[-] Ctrl-C caught, exiting')

        return self.final_proxies[:self.show_num]
Example #38
 def cleanup():
     site.storage.deleteFiles()
     site.content_manager.contents.db.deleteSite(site)
     del SiteManager.site_manager.sites["1TeSTvb4w2PWE81S2rEELgmX2GCCExQGT"]
     site.content_manager.contents.db.close()
     SiteManager.site_manager.sites.clear()
     db_path = "%s/content.db" % config.data_dir
     os.unlink(db_path)
     del ContentDb.content_dbs[db_path]
     gevent.killall([
         obj for obj in gc.get_objects()
         if isinstance(obj, gevent.Greenlet) and obj not in threads_before
     ])
Example #39
def wait_for_vm_pool(gl_list, timeout=240):
    print('Waiting for all ({}) greenlets to complete (timeout: {} secs)'.format(len(gl_list), timeout))
    t1 = dt.datetime.now()
    while (dt.datetime.now()-t1).total_seconds() < timeout:
        alive_glts = [x for x in gl_list if not x.ready()]
        if alive_glts:
            gevent.sleep(0.5)
            continue
        break
    else:
        print('Killing all greenlets as waiting timeout ({}) expired'.format(timeout))
        gevent.killall(gl_list)
        print('Killed them all')
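For comparison, gevent.joinall() already takes a timeout and returns the greenlets that finished, so the polling loop above could plausibly be collapsed into a single call; a sketch assumed to be equivalent in behaviour, not verified against the original project:

import gevent

def wait_for_vm_pool(gl_list, timeout=240):
    done = gevent.joinall(gl_list, timeout=timeout)  # greenlets that finished in time
    if len(done) < len(gl_list):
        print('Killing all greenlets as waiting timeout ({}) expired'.format(timeout))
        gevent.killall(gl_list)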
Example #40
    def _watcher(self):
        """Watch out if we've been disconnected, in that case, kill
        all the jobs.

        """
        while True:
            gevent.sleep(1.0)
            if not self.connected:
                for ns_name, ns in list(self.active_ns.items()):
                    ns.recv_disconnect()
                # Killing Socket-level jobs
                gevent.killall(self.jobs)
                break
Example #41
 def stop(self):
     """Close all AMQP connections and channels, stop greenlets"""
     if self.stopped:
         log.warning("%s: Already stopped, can't stop again.", self.lbl)
         return
     self.consumer.should_stop = True
     gevent.sleep(1)
     if self.greenlets:
         log.debug("%s: Stopping all greenlets %s.",
                   self.lbl, tuple(self.greenlets.keys()))
         gevent.killall(list(self.greenlets.values()))
         gevent.joinall(list(self.greenlets.values()))
         self.greenlets.clear()
     self.stopped = True
Example #42
def test_fast_wait():
    """Annoy someone who causes fast-sleep test patching to regress.

    Someone could break the test-only monkey-patching of gevent.sleep
    without noticing and costing quite a bit of aggravation aggregated
    over time waiting in tests, added bit by bit.

    To avoid that, add this incredibly huge/annoying delay that can
    only be avoided by monkey-patch to catch the regression.
    """
    gevent.sleep(300)
    g = gevent.spawn(nonterm_greenlet)
    gevent.joinall([g], timeout=300)
    gevent.killall([g], timeout=300)
Example #43
 def run(self):
     # Fire off some greenlets to handle reading and writing
     try:
         print("Starting Read/Write Loops")
         tasks = [
             gevent.spawn(raise_exceptions(self._read)),
             gevent.spawn(raise_exceptions(self._write))
         ]
         #Wait for a socket exception and raise the flag
         select.select([], [], [self._socket])  # Yield
         raise self.SocketError('Socket Exception')
     finally:  # Make sure we kill the tasks
         print("Killing read and write loops")
         gevent.killall(tasks)
Example #44
	def stop(self, graceful=True):
		if self.has_stopped:
			return
			# raise AlreadyStopError()

		self.is_running = False
		# Wake up all waiting workers
		self.pending_tasks_event.set()
		# TODO: make sure everything is shut down

		if not graceful:
			gevent.killall(self.__executors)

		self.has_stopped = True
Example #45
def key_ctrl_c(sig, frame):
    print('Please wait for the last case to finish ... (press Ctrl-C three times to break)')
    global key_times, ctrl_c
    ctrl_c = True
    key_times += 1
    if key_times == 3:
        try:
            gevent.killall(threads)
        except:
            pass
        finally:
            print('[**] Break by user. Please clear resource manually.')
            ResourceDB.dump()
            ResourceDB.flush()
Example #46
def sort_servers_closest(
    servers: Sequence[str],
    max_timeout: float = 3.0,
    samples_per_server: int = 3,
    sample_delay: float = 0.125,
) -> Dict[str, float]:
    """Sorts a list of servers by http round-trip time

    Params:
        servers: sequence of http server urls
    Returns:
        sequence of pairs of url,rtt in seconds, sorted by rtt, excluding failed and excessively
        slow servers (possibly empty)

    The default timeout was chosen after measuring the long tail of the development matrix servers.
    Under no stress, servers will have a very long tail of up to 2.5 seconds (measured 15/01/2020),
    which can lead to failure during startup if the timeout is too low.
    This increases the timeout so that the network hiccups won't cause Raiden startup failures.
    """
    if not {urlparse(url).scheme
            for url in servers}.issubset({"http", "https"}):
        raise TransportError("Invalid server urls")

    rtt_greenlets = set(
        spawn_named(
            "get_average_http_response_time",
            get_average_http_response_time,
            url=server_url,
            samples=samples_per_server,
            sample_delay=sample_delay,
        ) for server_url in servers)

    total_timeout = samples_per_server * (max_timeout + sample_delay)

    results = []
    for greenlet in gevent.iwait(rtt_greenlets, timeout=total_timeout):
        result = greenlet.get()
        if result is not None:
            results.append(result)

    gevent.killall(rtt_greenlets)

    if not results:
        raise TransportError(
            f"No Matrix server available with good latency, requests takes more "
            f"than {max_timeout} seconds.")

    server_url_to_rtt = dict(sorted(results, key=itemgetter(1)))
    log.debug("Available Matrix homeservers", servers=server_url_to_rtt)
    return server_url_to_rtt
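The iwait(..., timeout=...) loop followed by killall() above collects whatever finishes within the time budget and discards the rest; a standalone sketch of just that mechanism (the worker function is a stand-in, not Raiden code):

import gevent

def worker(delay):
    gevent.sleep(delay)
    return delay

greenlets = {gevent.spawn(worker, d) for d in (0.1, 0.2, 5.0)}
results = []
for greenlet in gevent.iwait(greenlets, timeout=1.0):  # stops yielding once the timeout elapses
    result = greenlet.get()
    if result is not None:
        results.append(result)
gevent.killall(greenlets)  # the 5.0s worker never finished; kill it
# results now holds [0.1, 0.2], in order of completion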
Example #47
def test_get_paths(
    api_sut: ServiceApi,
    api_url: str,
    addresses: List[Address],
    token_network_model: TokenNetwork,
):
    url = api_url + f'/{token_network_model.address}/paths'

    data = {
        'from': addresses[0],
        'to': addresses[2],
        'value': 10,
        'max_paths': DEFAULT_MAX_PATHS,
    }
    response = requests.post(url, json=data)
    assert response.status_code == 200
    paths = response.json()['result']
    assert len(paths) == 1
    assert paths == [
        {
            'path': [addresses[0], addresses[1], addresses[2]],
            'estimated_fee': 0,
        },
    ]

    # check default value for num_path
    data = {
        'from': addresses[0],
        'to': addresses[2],
        'value': 10,
    }
    default_response = requests.post(url, json=data)
    assert default_response.json()['result'] == response.json()['result']

    # there is no connection between 0 and 5, this should return an error
    data = {
        'from': addresses[0],
        'to': addresses[5],
        'value': 10,
        'max_paths': 3,
    }
    response = requests.post(url, json=data)
    assert response.status_code == 400
    assert response.json()['errors'].startswith(
        'No suitable path found for transfer from')

    # kill all greenlets
    gevent.killall(
        [obj
         for obj in gc.get_objects() if isinstance(obj, gevent.Greenlet)], )
Example #48
 def shutdown(self):
     log.debug('Shutdown initiated')
     self.zerorpc.stop()
     self.rotkehlchen.shutdown()
     log.debug('Waiting for greenlets')
     gevent.wait(self.waited_greenlets)
     log.debug('Waited for greenlets. Killing all other greenlets')
     gevent.killall(self.killable_greenlets)
     log.debug('Greenlets killed. Killing zerorpc greenlet')
     self.zerorpc_greenlet.kill()
     log.debug('Killed zerorpc greenlet')
     log.debug('Shutdown completed')
     logging.shutdown()
     self.stop_event.set()
Example #49
    def test_killall_iterable_argument_timeout_not_started(self):
        def f():
            try:
                gevent.sleep(1.5)
            except: # pylint:disable=bare-except
                gevent.sleep(1)
        p1 = GreenletSubclass.spawn(f)
        p2 = GreenletSubclass.spawn(f)
        s = set()
        s.add(p1)
        s.add(p2)
        gevent.killall(s, timeout=0.5)

        for g in s:
            self.assertTrue(g.dead, g)
Example #50
 def call_disconnect_event(self):
     disconnect_jobs = []
     for event, vals in self.events['disconnect'].items():
         for function, adapterClass in vals:
             name = function.__module__.split('.')[2]
             session = OpenERPSession(self, adapterClass=adapterClass)
             module = session.model('ir.module.module')
             if module.search([('state', '=', 'installed'),
                               ('name', '=', name)]):
                 disconnect_jobs.append(self.spawn(function, session))
                 logger.info("Disconnected events %r: %r", event,
                             function.__name__)
     joinall(disconnect_jobs)
     super(LongPollingNameSpace, self).recv_disconnect()
     killall(self.jobs)
Example #51
    def join(self):
        """Wait for transfer to exit, raising errors as necessary."""
        self.closed = True

        while self.expect > 0:
            val = self.wait_change.get()
            self.expect -= 1

            if val is not None:
                # Wait a while for all running greenlets to exit, and
                # then attempt to force them to exit so join()
                # terminates in a reasonable amount of time.
                gevent.joinall(list(self.greenlets), timeout=30)
                gevent.killall(list(self.greenlets), block=True, timeout=30)
                raise val
Example #52
    def _watcher(self):
        """Watch if any of the greenlets for a request have died. If so, kill
        the request and the socket.
        """
        # TODO: add that if any of the request.jobs die, kill them all and exit
        gevent.sleep(5.0)

        while True:
            gevent.sleep(1.0)

            if not self.connected:
                # Killing Socket-level jobs
                gevent.killall(self.jobs)
                for ns_name, ns in list(self.active_ns.iteritems()):
                    ns.disconnect(silent=True)
Example #53
    def _receive(self, event):
        if event.op_code == 10:
            self.print('hello received')
            self.heartbeat_interval = event.payload['heartbeat_interval'] / 1000
            if self._ping_greenlet: gevent.killall([self._ping_greenlet])
            self._ping_greenlet = gevent.spawn(self.ping)
            self.print(f'ping began with {self.heartbeat_interval} interval')

        elif event.op_code == 1:
            self.print('ping required')
            self.ws.send(11, None)

        elif event.op_code == 11:
            self.print('ping ack received')
            gevent.kill(self._ack_timeout)
Example #54
    def test_killall_iterable_argument_timeout(self):
        def f():
            try:
                gevent.sleep(1.5)
            except: # pylint:disable=bare-except
                gevent.sleep(1)
        p1 = GreenletSubclass.spawn(f)
        p2 = GreenletSubclass.spawn(f)
        s = set()
        s.add(p1)
        s.add(p2)
        with self.assertRaises(Timeout):
            gevent.killall(s, timeout=0.5)

        for g in s:
            self.assertFalse(g.dead, g)
Example #55
    def _unload(self):
        for callback in self._unload_callbacks:
            self._call_function(callback)

        self._regex_listeners[:] = []
        self._loaded_callbacks[:] = []
        self._message_listeners[:] = []
        self._commands.clear()
        self._save_data(close=True)
        self._clean_g()
        self._app = None
        self.client = None
        self.opts = None

        gevent.killall(list(self._running_greenlets), block=False)
        self._running_greenlets.clear()
Example #56
def test_reply_reconcile(db, config, message, sync_client):
    from inbox.server.models.tables.base import Message, SpoolMessage
    from inbox.server.models.tables.imap import ImapAccount
    from inbox.server.sendmail.base import reply, recipients

    to, subject, body = message
    attachment = None
    cc = '*****@*****.**'
    bcc = None

    account = db.session.query(ImapAccount).get(ACCOUNT_ID)

    # Create email message, store a local copy + send it:
    reply(NAMESPACE_ID, account, THREAD_ID, recipients(to, cc, bcc), subject,
          body, attachment)

    # Sync to verify reconciliation:
    synclet = Greenlet(sync_client.start_sync, ACCOUNT_ID)
    synclet.start()

    print '\nSyncing...'
    Greenlet.join(synclet, timeout=60)

    sync_client.stop_sync(ACCOUNT_ID)

    spool_messages = db.session.query(SpoolMessage).\
        filter_by(subject=THREAD_TOPIC).all()
    assert len(spool_messages) == 1, 'spool message missing'

    resolved_message_id = spool_messages[0].resolved_message_id
    assert resolved_message_id, 'spool message not reconciled'

    inbox_uid = spool_messages[0].inbox_uid
    thread_id = spool_messages[0].thread_id
    g_thrid = spool_messages[0].g_thrid

    killall(synclet)

    reconciled_message = db.session.query(Message).get(resolved_message_id)
    assert reconciled_message.inbox_uid == inbox_uid,\
        'spool message, reconciled message have different inbox_uids'

    assert reconciled_message.thread_id == thread_id,\
        'spool message, reconciled message have different thread_ids'

    assert reconciled_message.g_thrid == g_thrid,\
        'spool message, reconciled message have different g_thrids'
Example #57
    def tearDownClass(cls):
        test_common.kill_svc_monitor(cls._svc_mon_greenlet)
        test_common.kill_schema_transformer(cls._st_greenlet)
        cls.kill_kube_manager()
        super(KMTestCase, cls).tearDownClass()

        # Kill all remaining greenlets except for the main one
        gevent.killall(
            x for x in gc.get_objects()
            if isinstance(x, gevent.Greenlet) and x != greenlet.getcurrent())

        exceptions = test_common.ErrorInterceptingLogger.get_exceptions()
        if exceptions:
            raise AssertionError(
                "Tracebacks found in logs (count={}):\n\n{}".format(
                    len(exceptions),
                    "\n\n".join(msg for msg, _, __ in exceptions)))
Example #58
 def handle(self):
     """Actual FTP service to which the user has connected."""
     while not self.disconnect_client:
         try:
             # These greenlets run for the duration of the connection.
             # The first two handle the duplex command channel; the final one stores files on the file system.
             self.ftp_greenlets = [
                 gevent.spawn(self.handle_cmd_channel),
                 gevent.spawn(self.process_ftp_command),
                 gevent.spawn(self.handle_data_channel),
             ]
             gevent.joinall(self.ftp_greenlets)
              # Block till all jobs are finished
         except KeyboardInterrupt:
             logger.info("Shutting FTP server.")
         finally:
             gevent.killall(self.ftp_greenlets)
Example #59
 def sync(self):
     sync_lock = self.redis_handler.get_sync_lock()
     if sync_lock:
         return
     self.redis_handler.set_sync_lock(timeout=self.sync_timeout)
     self.index_keys = []
     thread_list = self.sync_ec2()
     print 'Sync Started... . . .  .  .   .     .     .'
     gevent.joinall(thread_list, timeout=self.sync_timeout)
     gevent.killall(thread_list)
     print 'Details saved. Indexing records!'
     self.index_records()
     self.redis_handler.set_last_sync_time()
     print 'Starting cleanup of stale records...'
     self.redis_handler.cleanup_keys(self.index_keys)
     self.redis_handler.set_sync_lock(timeout=0)
     print 'Complete'
Example #60
def main():
    global log
    StderrHandler().push_application()
    log = Logger("xbbs.coordinator")

    XBBS_CFG_DIR = os.getenv("XBBS_CFG_DIR", "/etc/xbbs")
    with open(path.join(XBBS_CFG_DIR, "coordinator.toml"), "r") as fcfg:
        cfg = CONFIG_VALIDATOR.validate(toml.load(fcfg))

    inst = Xbbs.create(cfg)

    for name, elem in cfg["projects"].items():
        project = Project(name,
                          **elem,
                          base=path.join(inst.project_base, name))
        inst.projects[name] = project
        os.makedirs(project.base, exist_ok=True)
        log.debug("got project {}", inst.projects[name])

    with inst.zmq.socket(zmq.REP) as sock_cmd, \
         inst.zmq.socket(zmq.PULL) as inst.intake, \
         inst.zmq.socket(zmq.ROUTER) as inst.worker_endpoint:
        # XXX: potentially make perms overridable? is that useful in any
        #      capacity?
        inst.intake.bind(cfg["intake"]["bind"])
        _ipc_chmod(cfg["intake"]["bind"], 0o664)

        inst.worker_endpoint.bind(cfg["worker_endpoint"])
        inst.worker_endpoint.set(zmq.ROUTER_MANDATORY, 1)
        _ipc_chmod(cfg["worker_endpoint"], 0o664)

        sock_cmd.bind(cfg["command_endpoint"]["bind"])
        _ipc_chmod(cfg["command_endpoint"]["bind"], 0o664)

        dumper = gevent.signal_handler(signal.SIGUSR1, dump_projects, inst)
        log.info("startup")
        intake = gevent.spawn(intake_loop, inst)
        job_pull = gevent.spawn(job_pull_loop, inst)
        try:
            command_loop(inst, sock_cmd)
        finally:
            # XXX: This may not be the greatest way to handle this
            gevent.killall(inst.project_greenlets[:])
            gevent.kill(intake)
            gevent.kill(job_pull)
            dumper.cancel()