Example #1
 def stop(self):
     log.info("Stopping nodes")
     stop_group = Group()
     for runner in self._node_runners:
         stop_group.spawn(runner.stop)
     stop_group.join(raise_error=True)
     log.info("Nodes stopped")
Example #2
class WSGIServer(_WSGIServer):
    handler_class = WebSocketWSGIHandler

    def __init__(self, *args, **kwargs):
        """
        WSGI server that simply tracks websockets
        and sends them a proper closing handshake
        when the server terminates.

        Other than that, the server is the same
        as its :class:`gevent.pywsgi.WSGIServer`
        base.
        """
        _WSGIServer.__init__(self, *args, **kwargs)
        self._websockets = Group()

    def link_websocket_to_server(self, websocket):
        logger.info("Managing websocket %s" % format_addresses(websocket))
        self._websockets.spawn(websocket.run)

    def stop(self, *args, **kwargs):
        logger.info("Terminating server and all connected websockets")
        for greenlet in self._websockets:
            try:
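                # greenlet._run is the bound run method spawned above; im_self
                # (Python 2; __self__ in Python 3) recovers the websocket
                # instance that method is bound to.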
                websocket = greenlet._run.im_self
                if websocket:
                    websocket.close(1001, 'Server is shutting down')
            except:
                pass
        _WSGIServer.stop(self, *args, **kwargs)
Example #3
 def run(self):
     import gevent.queue
     from gevent.pool import Group
     gq = gevent.queue.Queue()
     group = Group()
     group.spawn(transfer_queue, self.q, gq)
     run_from_queue(group, gq)
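transfer_queue and run_from_queue are not shown in this snippet. A plausible transfer_queue, assuming self.q is a thread-safe queue.Queue fed from outside the gevent loop, drains it into the gevent queue without blocking the hub:

import queue
import gevent

def transfer_queue(src, dst):
    # Hypothetical sketch: a blocking src.get() would stall the event loop,
    # so poll the thread-safe queue and yield between attempts.
    while True:
        try:
            dst.put(src.get_nowait())
        except queue.Empty:
            gevent.sleep(0.01)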
Example #4
class SlaveRunner(Runner):
    def __init__(self, host, port):
        super().__init__(host, port)
        self.slave_id = socket.gethostname()
        self.client = rpc.Client(self.host, self.port)
        self.greenlet = Group()
        self.greenlet.spawn(self.work)
        self.client.send(rpc.Message('slave_ready', None, self.slave_id))

    def stats_reporter(self):
        while True:
            logger.info("starting....." + str(time.time()))
            # work = Worker(self.client,self.greenlet)
            time.sleep(SLAVE_REPORT_INTERVAL)
            self.client.send(
                Message('slave_complete', None, socket.gethostname()))

    def work(self):
        while True:
            print(">>>>>>>>>><<<<<<<<")
            msg = self.client.recv()
            logger.info(msg.type)
            if msg.type == 'slave_start':
                pass
            elif msg.type == 'slave_stop':
                # self.stop()
                self.client.send(Message("slave_stop", None, self.slave_id))
                self.client.send(Message("slave_ready", None, self.slave_id))
            elif msg.type == 'slave_quit':
                logger.info("Got quit message from master, shutting down...")
                self.stop()
                self.greenlet.kill()
Example #5
def pipeline(stages, initial_data):
    monitors = Group()
    # Make sure initial_data is iterable.
    if not isinstance(initial_data, types.GeneratorType):
        try:
            iter(initial_data)
        except:
            raise TypeError('initial_data must be iterable')
    # The StopIteration will bubble through the queues as it is reached.
    #   Once a stage monitor sees it, it indicates that the stage will read
    #   no more data and the monitor can wait for the current work to complete
    #   and clean up.
    if hasattr(initial_data, 'append'):
        initial_data.append(StopIteration)
    if not stages:
        return PipelineResult(monitors, [])
    # chain stage queue io
    #  Each stage shares an output queue with the next stage's input.
    qs = [initial_data] + [Queue() for _ in range(len(stages))]
    for stage, in_q, out_q in zip(stages, qs[:-1], qs[1:]):
        stage.in_q = in_q
        stage.out_q = out_q
        monitors.spawn(stage_monitor, stage)
    gevent.sleep(0)
    return PipelineResult(monitors, stages[-1].out_q)
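stage_monitor is not shown here. Given the sentinel protocol described in the comments, a plausible minimal monitor (ignoring that the first stage's in_q is a plain iterable rather than a Queue, and assuming a hypothetical stage.process method) consumes its input queue until it sees StopIteration, then forwards the sentinel downstream:

def stage_monitor(stage):
    # Sketch only: drain the input queue until the StopIteration sentinel
    # arrives, then pass the sentinel on so the next monitor shuts down too.
    while True:
        item = stage.in_q.get()
        if item is StopIteration:
            stage.out_q.put(StopIteration)
            return
        stage.out_q.put(stage.process(item))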
Example #6
def test_quiet_group():
    from gevent.pool import Group
    group = Group()
    group.spawn(divide_by_zero)
    group.spawn(divide_by_zero)
    with raises_from(ZeroDivisionError, 'divide_by_zero'):
        group.join(raise_error=True)
Example #8
class MbtClient(aWebSocketClient):
    def __init__(self, *args):
        super().__init__(*args)
        self.callback_dict = {}
        self.greenlets = Group()
        self.greenlets.spawn(self.onMessage)
    
    def onMessage(self):
        while True:
            try:
                # the received data is a ws4py.messaging.TextMessage
                data = self.receive()
                # data.data is bytes; decode it to str before parsing
                parsed_data = json.loads(data.data.decode())
                cmd = parsed_data["head"]["cmd"]
                cmd_name = CommandMapping(cmd)
                body_dict = json.loads(parsed_data["body"])
                parsed_data["body"] = body_dict
                self.callback_dict[cmd_name] = DotDict(parsed_data)
                logger.info("Receive data from {}:{}".format(cmd_name, parsed_data))
            except ValueError:
                logger.warning("Received unknown command ID: {}".format(cmd))
            except TypeError:
                logger.error("Received invalid data that is not JSON serializable: {}".format(data.data))
    
    def closed(self, code, reason):
        self.greenlets.kill()
        logger.debug("Socket was closed:code-{};reason-{}".format(code, reason))
Example #9
def test_parallel_folder_syncs(db, folder_name_mapping, default_account,
                               monkeypatch):
    # test that when we run save_folder_names in parallel, we only create one
    # tag for that folder. this happens when the CondstoreFolderSyncEngine
    # checks for UID changes.

    # patching the heartbeat clear means that we force the first greenlet to
    # wait around (there is a deleted folder in folder_name_mapping), thereby
    # assuring that the second greenlet will overtake it and force any
    # potential race condition around tag creation.
    def clear_heartbeat_patch(w, x, y, z):
        gevent.sleep(1)

    monkeypatch.setattr('inbox.heartbeat.store.HeartbeatStore.remove_folders',
                        clear_heartbeat_patch)

    log = get_logger()
    group = Group()
    with mailsync_session_scope() as db_session:
        group.spawn(save_folder_names, log, default_account.id,
                    folder_name_mapping, db_session)
    with mailsync_session_scope() as db_session:
        group.spawn(save_folder_names, log, default_account.id,
                    folder_name_mapping, db_session)
    group.join()

    with mailsync_session_scope() as db_session:
        account = db_session.query(Account).get(default_account.id)
        random_tags = db_session.query(Tag).filter_by(
            namespace_id=account.namespace.id, name='random')
        assert random_tags.count() == 1
Example #10
def test_parallel_folder_syncs(db, folder_name_mapping, monkeypatch):
    # test that when we run save_folder_names in parallel, we only create one
    # tag for that folder. this happens when the CondstoreFolderSyncEngine
    # checks for UID changes.

    # patching the heartbeat clear means that we force the first greenlet to
    # wait around (there is a deleted folder in folder_name_mapping), thereby
    # assuring that the second greenlet will overtake it and force any
    # potential race condition around tag creation.
    def clear_heartbeat_patch(w, x, y, z):
        gevent.sleep(1)

    monkeypatch.setattr('inbox.heartbeat.store.HeartbeatStore.remove_folders',
                        clear_heartbeat_patch)

    log = get_logger()
    group = Group()
    with mailsync_session_scope() as db_session:
        group.spawn(save_folder_names, log, ACCOUNT_ID,
                    folder_name_mapping, db_session)
    with mailsync_session_scope() as db_session:
        group.spawn(save_folder_names, log, ACCOUNT_ID,
                    folder_name_mapping, db_session)
    group.join()

    with mailsync_session_scope() as db_session:
        account = db_session.query(Account).get(ACCOUNT_ID)
        random_tags = db_session.query(Tag).filter_by(
            namespace_id=account.namespace.id,
            name='random')
        assert random_tags.count() == 1
Example #11
class PbClient:
    def __init__(self, ip, port):
        self.addr = (ip, int(port)) 
        self.callback_dict = {}
        self.greenlets = Group()
        self.soc = socket()
        self.interval = 1/200
        self.received_data = b''

    def connect(self):
        self.soc.connect(self.addr)
        self.greenlets.spawn(self.on_message)
    
    def send(self, data):
        self.soc.send(data)

    def close(self):
        try:
            self.greenlets.kill()
            self.soc.close()
        except:
            pass
        finally:
            self.soc = None

    def on_message(self):
        try:
            while self.soc:
                temp = self.soc.recv(2048)
                # gevent.sleep(self.interval)
                if temp:
                    self.received_data += temp
                    self.handle_message()
        except EnvironmentError:
            pass
        except:
            raise

    def handle_message(self):
        try:
            if len(self.received_data) < HEAD_SIZE:
                return
            data = self.get_pack_stream()
            data_dict = DotDict(DataStruct.parse(data))
            cmd_name = CommandMapping(data_dict['CommandID'])
            self.callback_dict[cmd_name] = data_dict
            logger.info("Receive data from {}:{}".format(cmd_name, data_dict))
            self.handle_message()
        except ValueError:
            logger.warning("Received unknown command ID: {}".format(cmd_name))
        except TypeError:
            logger.error("Received invalid data:", exc_info=True)

    def get_pack_stream(self):
        '''Return the byte stream of a single complete packet.'''
        size_stream = self.received_data[1:4]
        pack_size = pack_size_struct.parse(size_stream)
        pack_stream = self.received_data[:pack_size]
        self.received_data = self.received_data[pack_size:]
        return pack_stream
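get_pack_stream implies a length-prefixed wire format: byte 0 is a leading marker, bytes 1-3 carry the total frame size (decoded by pack_size_struct), and the size counts the whole frame. A self-contained equivalent using struct, assuming for illustration that the 3-byte size is little-endian:

import struct

def split_frames(buf):
    # Hypothetical sketch: return (complete_frames, remainder). The real
    # size layout is defined by pack_size_struct; little-endian is assumed.
    frames = []
    while len(buf) >= 4:
        size = struct.unpack("<I", buf[1:4] + b"\x00")[0]
        if size < 4 or len(buf) < size:
            break
        frames.append(buf[:size])
        buf = buf[size:]
    return frames, buf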
Example #12
def _search(responder, id, search, max_results):
    todo = dict()
    group = Group()
    for ctx in search.contexts:
        if len(ctx.results) > max_results/len(search.contexts):
            continue
        todo[ctx.name] = 0
        group.spawn(_run, responder, search, ctx, todo)
    group.join()

    for ctx in search.contexts[:]:
        if not ctx.results:
            search.remove_ctx(ctx)

    results = list()
    while search.contexts and (max_results is None or len(results) < max_results):
        ctx = search.next_result_ctx()
        if not ctx.results:
            break
        results.append(ctx.results.pop(0))

    display = search.display

    for ctx in search.contexts[:]:
        if ctx.next is None:
            search.remove_ctx(ctx)

    if search.more:
        cache[search.id] = search
    elif search.id in cache:
        del cache[search.id]

    return dict(id=id, search_id=search.id, display=display, more=search.more, results=results)
Example #13
def patch_all(timeout=180, external_loaded=True, source_complete_callback=None):
    with patch_all_lock:
        # check config urls
        log.debug('checking config urls')
        todo = list()
        for source in sources.values():
            config_url = source.get_config_url()
            if config_url is not None and config_url not in todo:
                todo.append(config_url)

        group = Group()
        for config_url in todo:
            g = group.spawn(config_url.update)
            patch_group.add(g)
        group.join()

        log.debug('updating repos')
        # check for updates
        patches = list()
        for source in sources.values():
            if source.enabled:
                def _patch(patches, source, timeout):
                    try:
                        patch_one(patches, source, timeout)
                    finally:
                        if source_complete_callback is not None:
                            source_complete_callback(source)
                g = group.spawn(_patch, patches, source, timeout)
                patch_group.add(g)
        group.join()
        finalize_patches(patches, external_loaded=external_loaded)
Example #14
class MasterRunner(Runner):
    def __init__(self, host, port):
        super().__init__(host, port)
        self.server = rpc.Server(self.host, self.port)
        self.slave = {}
        self.greenlet = Group()
        self.greenlet.spawn(self.slave_listener)
        # self.slave_listener()

    def slave_listener(self):
        while True:
            msg = self.server.recv()
            if msg.type == 'slave_ready':
                id = msg.node_id
                self.slave[id] = id
                logger.info(
                    "Client %r reported as ready. Currently %i slaves ready to emit."
                    % (id, len(self.slave)))
                self.server.send(Message('slave_stop', None, None))
            elif msg.type == 'slave_stats':
                logger.info(msg.data)
            elif msg.type == 'slave_complete':
                logger.info("=====================================")
                self.server.send(Message('slave_stop', None, None))
            elif msg.type == 'slave_start':
                pass
            elif msg.type == 'slave_stop':
                del self.slave[msg.node_id]
                logger.info("Removing %s from running slaves" % (msg.node_id))
                if len(self.slave) == 0:
                    self.state = 'stopped'
Example #15
class SlaveLocustRunner(DistributedLocustRunner):
    def __init__(self, *args, **kwargs):
        super(SlaveLocustRunner, self).__init__(*args, **kwargs)
        self.client_id = socket.gethostname() + "_" + md5(str(time() + random.randint(0,10000))).hexdigest()
        
        self.client = rpc.Client(self.master_host)
        self.greenlet = Group()
        self.greenlet.spawn(self.worker).link_exception()
        self.client.send(Message("client_ready", None, self.client_id))
        self.greenlet.spawn(self.stats_reporter).link_exception()
        
        # register listener for when all locust users have hatched, and report it to the master node
        def on_hatch_complete(count):
            self.client.send(Message("hatch_complete", {"count":count}, self.client_id))
        events.hatch_complete += on_hatch_complete
        
        # register listener that adds the current number of spawned locusts to the report that is sent to the master node 
        def on_report_to_master(client_id, data):
            data["user_count"] = self.user_count
        events.report_to_master += on_report_to_master
        
        # register listener that sends quit message to master
        def on_quitting():
            self.client.send(Message("quit", None, self.client_id))
        events.quitting += on_quitting

        # register listener thats sends locust exceptions to master
        def on_locust_error(locust, e, tb):
            formatted_tb = "".join(traceback.format_tb(tb))
            self.client.send(Message("exception", {"msg" : str(e), "traceback" : formatted_tb}, self.client_id))
        events.locust_error += on_locust_error

    def worker(self):
        while True:
            msg = self.client.recv()
            if msg.type == "hatch":
                self.client.send(Message("hatching", None, self.client_id))
                job = msg.data
                self.hatch_rate = job["hatch_rate"]
                #self.num_clients = job["num_clients"]
                self.num_requests = job["num_requests"]
                self.host = job["host"]
                self.hatching_greenlet = gevent.spawn(lambda: self.start_hatching(locust_count=job["num_clients"], hatch_rate=job["hatch_rate"]))
            elif msg.type == "stop":
                self.stop()
                self.client.send(Message("client_stopped", None, self.client_id))
                self.client.send(Message("client_ready", None, self.client_id))

    def stats_reporter(self):
        while True:
            data = {}
            events.report_to_master.fire(self.client_id, data)
            try:
                self.client.send(Message("stats", data, self.client_id))
            except:
                logger.error("Connection lost to master server. Aborting...")
                break
            
            gevent.sleep(SLAVE_REPORT_INTERVAL)
Example #16
class SlaveLocustRunner(DistributedLocustRunner):
    def __init__(self, *args, **kwargs):
        super(SlaveLocustRunner, self).__init__(*args, **kwargs)
        self.client_id = socket.gethostname() + "_" + md5(str(time() + random.randint(0,10000))).hexdigest()
        
        self.client = zmqrpc.Client(self.master_host)
        self.greenlet = Group()
        self.greenlet.spawn(self.worker).link_exception()
        self.client.send({"type":"client_ready", "data":self.client_id})
        self.greenlet.spawn(self.stats_reporter).link_exception()
        
        # register listener for when all locust users have hatched, and report it to the master node
        def on_hatch_complete(count):
            self.client.send({"type":"hatch_complete", "data":{"client_id":self.client_id, "count":count}})
        events.hatch_complete += on_hatch_complete
        
        # register listener that adds the current number of spawned locusts to the report that is sent to the master node 
        def on_report_to_master(client_id, data):
            data["user_count"] = self.user_count
        events.report_to_master += on_report_to_master
        
        # register listener that sends quit message to master
        def on_quitting():
            self.client.send({"type":"quit", "data":self.client_id})
        events.quitting += on_quitting
    
    def worker(self):
        while True:
            msg = self.client.recv()
            if msg["type"] == "hatch":
                self.client.send({"type":"hatching", "data":self.client_id})
                job = msg["data"]
                self.hatch_rate = job["hatch_rate"]
                #self.num_clients = job["num_clients"]
                self.num_requests = job["num_requests"]
                self.host = job["host"]
                self.hatching_greenlet = gevent.spawn(lambda: self.start_hatching(locust_count=job["num_clients"], hatch_rate=job["hatch_rate"]))
            elif msg["type"] == "stop":
                self.stop()
                self.client.send({"type":"client_stopped", "data":self.client_id})
                self.client.send({"type":"client_ready", "data":self.client_id})


    def stats_reporter(self):
        while True:
            data = {}
            events.report_to_master.fire(self.client_id, data)
            report = {
                "client_id": self.client_id,
                "data": data,
            }
            try:
                self.client.send({"type":"stats", "data":report})
            except:
                logger.error("Connection lost to master server. Aborting...")
                break
            
            gevent.sleep(SLAVE_REPORT_INTERVAL)
Example #17
def spawn_greenlets(conf):
    """Some sugar to wrap up all of your greenlets."""
    group = Group()
    for args in conf:
        group.spawn(*args)
    try:
        while True:
            gevent.sleep(1)
    except KeyboardInterrupt:
        pass
Example #18
def execute(l):
    from gevent.pool import Group
    letters = string.ascii_lowercase
    key = random.randrange(0, 1000)
    second_key = random.randrange(0, 1000)
    value = ''.join(random.choice(letters) for i in range(10))
    group = Group()
    group.spawn(lambda: l.client.get(f"/set/{key}/{value}"))
    group.spawn(lambda: l.client.get(f"/get/{second_key}"))
    group.join()
Example #19
def terminate():
    g = Group()
    for torrent in torrents.values():
        g.spawn(torrent.save_resume_data)
    try:
        g.join(timeout=5)
    except:
        pass
    for torrent in torrents.values():
        torrent.remove()
Example #21
        def loadConfigSchema(self):
            def config():
                self.client.get(f"{self.dataset}{API}/config", stream=True).close()

            def schema():
                self.client.get(f"{self.dataset}{API}/schema", stream=True).close()

            group = Group()
            group.spawn(config)
            group.spawn(schema)
            group.join()
Example #22
        def loadObsAnnotations(self):
            def obs_annotation(name):
                self.client.get(
                    f"{self.dataset}{API}/annotations/obs?annotation-name={name}",
                    headers={"Accept": "application/octet-stream"},
                    stream=True,
                ).close()

            obs_names = self.parent.obs_annotation_names()
            group = Group()
            for name in obs_names:
                group.spawn(obs_annotation, name)
            group.join()
Example #23
def test_kill_processlet_group(proc):
    group = Group()
    group.greenlet_class = lets.Processlet
    group.spawn(raise_when_killed)
    group.spawn(raise_when_killed)
    group.spawn(raise_when_killed)
    group.join(0)
    assert len(proc.children()) == 3
    group.kill()
    assert len(proc.children()) == 0
    for job in group:
        with pytest.raises(Killed):
            job.get()
        assert job.exit_code == 1
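The interesting detail here is group.greenlet_class: Group.spawn instantiates whatever class that attribute names, so assigning lets.Processlet makes every job in the group a subprocess-backed task. The same hook works with any Greenlet subclass; a small illustration:

import gevent
from gevent.pool import Group

class LoggingGreenlet(gevent.Greenlet):
    # Group.spawn builds instances of group.greenlet_class, so this
    # subclass sees every job the group starts.
    def __init__(self, run=None, *args, **kwargs):
        print("spawning %r" % run)
        super(LoggingGreenlet, self).__init__(run, *args, **kwargs)

group = Group()
group.greenlet_class = LoggingGreenlet
group.spawn(gevent.sleep, 0.1)
group.join()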
Example #24
def _main():
    user_crawler_group = Group()

    for _ in xrange(GREENLET_COUNT):
        user_crawler_group.spawn(analyze)

    with open('ids.txt') as FILE:
        for line in FILE:
            id = line.strip()
            cursor.execute('SELECT COUNT(1) as total_count FROM tb_xweibo_user_info WHERE uid = %s' % id)
            result = cursor.fetchone()
            if not result['total_count']:
                users_fetch_queue.put(id)
    user_crawler_group.join()
Example #25
def get_multihoster_account(task, multi_match, file):
    if not account.config.use_useraccounts:
        print "multihoster off, use_useraccounts false"
        return

    group = Group()
    for pool in account.manager.values():
        for acc in pool:
            if acc.multi_account:
                from . import manager
                acc.hoster = manager.find_by_name(acc.name)
                group.spawn(acc.boot)
    group.join()

    accounts = []
    best_weight = 0
    hostname = file.split_url.host
    for pool in account.manager.values():
        for acc in pool:
            if acc._private_account:
                continue
            if not acc.multi_account:
                continue
            if hasattr(acc, 'premium') and not acc.premium:
                continue
            print acc.premium
            if not multi_match(acc, hostname):
                continue
            try:
                weight = acc.weight
            except gevent.GreenletExit:
                print "greenlet exit"
                continue
            if weight > best_weight:
                accounts = []
                best_weight = weight
            bisect.insort(accounts, (acc.get_task_pool(task).full() and 1
                                     or 0, len(accounts), acc))
    if accounts:
        return accounts[0][2]
        """try:
            file.log.info('trying multihoster {}'.format(acc.name))
            acc.hoster.get_download_context(file)
        except gevent.GreenletExit:
            raise
        except BaseException as e:
            log.exception(e)"""
    else:
        print "multi: no accounts found"
Example #26
class Manager(object):
    def __init__(self, config_addr, keyfile,
            certfile, cacerts, backlog=10):
        if isinstance(config_addr, basestring):
            ip, port = config_addr.split(':')
            config_addr = (ip, int(port))

        self.keyfile = keyfile
        self.certfile = certfile
        self.cacerts = cacerts
        self.config_addr = config_addr
        self.backlog = backlog
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.end_evt = Event()
        self.clients = Group()
        self.redirectors = {}
        self.msg_processors = {
            'redirect': self.m_redirect,
            'list_redirect': self.m_list_redirect,
            'drop_redirect': self.m_drop_redirect,
            'shutdown': self.m_shutdown,
        }

        logging.info('manager initialized')

    def run(self):
        logging.info('manager start to run')
        self.sock.bind(self.config_addr)
        logging.info('manager bind to: %s:%d' % self.config_addr)
        self.sock.listen(self.backlog)
        accept_let = gevent.spawn(self.accept_let)

        self.end_evt.wait()
        logging.info('shutdown evt recved')
        accept_let.kill()
        self.clients.kill()

    def accept_let(self):
        while True:
            sock, addr = self.sock.accept()
            try:
                sock = ssl.wrap_socket(sock, keyfile=self.keyfile,
                    certfile=self.certfile, server_side=True,
                    cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.cacerts)
            except ssl.SSLError, e:
                print e
                continue
            self.clients.spawn(self.client_let, sock, addr)
Example #27
def test_gevent(num, depth, period, settle):
    global SWITCH_COUNT

    import gevent    
    from gevent.pool import Group
    from gevent.hub import sleep
    horde = Group()
    for x in xrange(num):
        horde.spawn(stack_filler, depth, sleep) 
    gevent.sleep(settle)
    print("settle period over, {:.2f} sw/sec, testing".format(SWITCH_COUNT/(1.0*settle)))
    SWITCH_COUNT=0
    gevent.sleep(period)
    print("testing period over, {:.2f} sw/sec".format(SWITCH_COUNT/(1.0*period)))
    horde.kill()
Example #29
def _enqueue_children(resources, seed, parent):
    threads = Group()
    for resource in [r for r in resources if r.parent == parent.name]:
        thread = threads.spawn(_process_resource, resources, seed, resource)
        thread.link_exception(_create_error_handler(resource.collection))

    threads.join()
Example #30
    def _push_to_target(self, targets):
        """Get a batch of elements from the queue, and push it to the targets.

        This function returns True if it proceeded all the elements in
        the queue, and there isn't anything more to read.
        """
        if self.queue.empty():
            return 0    # nothing

        batch = []
        pushed = 0

        # collecting a batch
        while len(batch) < self.batch_size:
            item = self.queue.get()
            if item == 'END':
                pushed += 1  # the 'END' item
                break
            batch.append(item)

        if len(batch) != 0:
            greenlets = Group()
            for plugin in targets:
                green = greenlets.spawn(self._put_data, plugin, batch)
                green.link_exception(partial(self._error,
                                             exception.InjectError, plugin))
            greenlets.join()
            pushed += len(batch)

        return pushed
Example #31
    def handle_tcp(self, sock, remote):
        fdset = [sock, remote]
        tasks = Group()
        prev = [0, 0]
        sock_switch = remote_switch = 0
        sock_counter = remote_counter = 0
        sock_count = remote_count = 0
        sock_size = array.array('i', [0])
        remote_size = array.array('i', [0])
        while True:
            r, w, e = select.select(fdset, [], [])
            # The problem is knowing beforehand when a socket is about to switch,
            # so that the remaining tasks can be joined first. FIONREAD reports
            # how many bytes are already available on the socket, which catches
            # the last send()/recv() of a burst.
            if sock in r:
                if sock_switch == 0:
                    fcntl.ioctl(sock, termios.FIONREAD, sock_size, True)
                    sock_count = ceil(sock_size[0] / float(BUF_SIZE))
                    print("sock", sock_size[0], sock_count)
                    sock_switch = 1
                delay = self.calculate_delay(prev)
                sock_buf = sock.recv(BUF_SIZE)
                #print(sock_buf)
                if sock_buf is None or sock_buf == "": break
                tasks.spawn(self.delay_message, sock_buf, delay, remote)
                sock_counter += 1
            if remote in r:
                if remote_switch == 0:
                    fcntl.ioctl(remote, termios.FIONREAD, remote_size, True)
                    remote_count = ceil(remote_size[0] / float(BUF_SIZE))
                    print("remote", remote_size[0], remote_count)
                    remote_switch = 1
                delay = self.calculate_delay(prev)
                remote_buf = remote.recv(BUF_SIZE)
                #print(remote_buf)
                if remote_buf is None or remote_buf == "": break
                tasks.spawn(self.delay_message, remote_buf, delay, sock)
                remote_counter += 1

            # Wait for last task before switching socket
            if sock_count == sock_counter and sock_switch == 1:
                print("joiningsocks")
                tasks.join()
                sock_counter = sock_switch = 0
            if remote_count == remote_counter and remote_switch == 1:
                print("joiningremote")
                tasks.join()
                remote_counter = remote_switch = 0
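In isolation, the FIONREAD ioctl used above looks like this; it asks the kernel how many bytes are already buffered on the socket, which is what lets the proxy detect the last recv() of a burst:

import array
import fcntl
import termios

def readable_bytes(sock):
    # Ask the kernel how many bytes can be read from sock without blocking;
    # the final True tells ioctl to mutate the array in place.
    size = array.array('i', [0])
    fcntl.ioctl(sock, termios.FIONREAD, size, True)
    return size[0]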
Example #32
        def loadBootstrapData(self):
            def layout():
                self.client.get(
                    f"{self.dataset}{API}/layout/obs", headers={"Accept": "application/octet-stream"}, stream=True
                ).close()

            def varAnnotationIndex():
                self.client.get(
                    f"{self.dataset}{API}/annotations/var?annotation-name={self.parent.var_index_name()}",
                    headers={"Accept": "application/octet-stream"},
                    stream=True,
                ).close()

            group = Group()
            group.spawn(layout)
            group.spawn(varAnnotationIndex)
            group.join()
Example #33
class GeventExecutor(wrapper.ExecutorBase):
    def __init__(self):
        super().__init__()
        self.group = Group()
        self.channel: ThreadsafeChannel[Callable[[],
                                                 None]] = ThreadsafeChannel()
        self.spawn_work_greenlet = gevent.spawn(self._spawn_work)

    def __del__(self):
        super().__del__()
        self.spawn_work_greenlet.kill()

    def execute(self, func: Callable[[], None]):
        self.channel.put(func)

    def _spawn_work(self):
        while True:
            self.group.spawn(self.channel.get())
Example #34
class TCPClient():
    def __init__(self):
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._group = Group()
        self._send_buffer = Queue()
        self._recv_buffer = Queue()

    def connect(self, address):
        self._socket.connect(address)
        self._group.spawn(self._recv_loop)
        self._group.spawn(self._send_loop)

    def disconnect(self):
        self._group.kill()
        self._socket.close()
        self._group.join()

    def _recv_loop(self):
        buf = ""
        u4 = lambda x: unpack("<I", x)[0]

        while True:
            data = self._socket.recv(8192)
            buf += data

            while len(buf) > 0:
                length = u4(buf[:4])
                if len(buf) - 4 < length:
                    break

                self._recv_buffer.put(buf[4:4 + length])
                buf = buf[4 + length:]

    def _send_loop(self):
        while True:
            data = self._send_buffer.get()
            self._socket.sendall(data)

    def write(self, data):
        self._send_buffer.put(data)

    def get_packet(self):
        return self._recv_buffer.get()
Example #35
def execute(resources, seed=None):
    threads = Group()
    for resource in [r for r in resources if not r.parent]:
        thread = threads.spawn(_process_resource, resources, seed, resource)
        thread.link_exception(_create_error_handler(resource.collection))

    threads.join()

    if len(_errors):
        sys.exit(1)
Example #36
class Dispatcher(gevent.Greenlet):
    def __init__(self, collector, publisher, quiet=False):
        super(Dispatcher, self).__init__()
        self.collector = collector
        self.publisher = publisher
        self.quiet = quiet
        self.greenlets = Group()
        self.channel = gevent.queue.Queue(0)
        self._keep_going = True

    def _run(self):
        self.greenlets.spawn(self.__collect)
        self.greenlets.spawn(self.__publish)
        self.greenlets.join()

    def kill(self, exception=gevent.GreenletExit, **kwargs):
        self._keep_going = False
        self.greenlets.kill()
        super(Dispatcher, self).kill(exception=exception, **kwargs)

    def __collect(self):
        while self._keep_going:
            message = self.collector.recv_multipart()
            self.channel.put(message)
            gevent.sleep()

    def __publish(self):
        while self._keep_going:
            message = self.channel.get()
            if not self.quiet:
                # message is assumed to be a tuple of: (topic, record_json)
                topic,record_json = message
                topic = topic.decode()
                name_and_level = topic[len(zerolog.stream_prefix):]
                logger_name,level_name = name_and_level.split(':')
                logger = zerolog.getLocalLogger(logger_name)
                if logger.isEnabledFor(logging.getLevelName(level_name)):
                    # inject log record into local logger
                    record_dict = json.loads(record_json.decode())
                    record = logging.makeLogRecord(record_dict)
                    logger.handle(record)
            self.publisher.send_multipart(message)
            gevent.sleep()
Example #37
def _search(responder, id, search, max_results):
    responder._sent = False
    todo = dict()
    group = Group()
    groups[id] = [group]
    for ctx in search.contexts:
        groups[id].append(ctx.thumb_pool)
        if len(ctx.results) > max_results/len(search.contexts):
            continue
        todo[ctx.name] = 0
        group.spawn(_run, responder, search, ctx, todo)
    group.join()

    for ctx in search.contexts[:]:
        if not ctx.results:
            search.remove_ctx(ctx)

    results = list()
    while search.contexts and (max_results is None or len(results) < max_results):
        ctx = search.next_result_ctx()
        if not ctx.results:
            break
        results.append(ctx.results.pop(0))

    display = search.display

    for ctx in search.contexts[:]:
        if ctx.next is None:
            search.remove_ctx(ctx)

    if search.more:
        cache[search.id] = search
    elif search.id in cache:
        del cache[search.id]

    payload = dict(id=id, search_id=search.id, display=display, more=search.more, results=results)
    responder.send(payload=payload)
    responder._sent = True
    try:
        del groups[id]
    except KeyError:
        pass
Example #38
    def _update(self):
        found_sources = list()
        resp = requests.get(self.url, stream=True)
        try:
            resp.raise_for_status()
            data = yaml.load(resp.raw)
        finally:
            resp.close()
        assert len(data.keys()) > 0
        group = Group()

        def _add_source(url):
            try:
                source = add_source(url, self.url)
            except:
                self.log.warning('error adding new repo {}'.format(url))
            else:
                found_sources.append(source)
        for name, url in data.iteritems():
            try:
                Url(url)
            except:
                self.log.warning('invalid patch source entry: {}'.format(url))
            try:
                source = sources[name]
            except KeyError:
                self.log.info('adding new repo {}'.format(url))
                group.spawn(_add_source, url)
            else:
                found_sources.append(source)
                if source.url != url:
                    source.log.info('changing url to {}'.format(url))
                    with transaction:
                        source.url = url
                    source.unlink()
        group.join()

        for source in sources.values():
            if source.config_url == self.url and source not in found_sources:
                source.log.info('erasing repo')
                source.delete(True)
Example #39
def run(fqn, concurrency=1, numruns=1):
    """ Runs a test.

    * fnq: fully qualified name
    * concurrency: number of concurrent runs
    * numruns: number of run per concurrent
    """
    set_global_stream('stdout', total=concurrency * numruns)
    test = resolve_name(fqn)
    klass = test.im_class
    ob = klass(test.__name__)
    test_result = unittest.TestResult()

    group = Group()

    for i in range(concurrency):
        group.spawn(_run, i, ob, test_result, numruns)

    group.join()

    return test_result
Example #40
 def test_ungraceful_close_in_greenlet(self):
     group = Group()
     UnaryCallWithSleep = self._channel.unary_unary(
         _UNARY_CALL_METHOD_WITH_SLEEP,
         request_serializer=messages_pb2.SimpleRequest.SerializeToString,
         response_deserializer=messages_pb2.SimpleResponse.FromString,
     )
     greenlet = group.spawn(self._run_client, UnaryCallWithSleep)
     # release loop so that greenlet can take control
     gevent.sleep()
     group.killone(greenlet)
     self.assertFalse(self._unhandled_exception, "Unhandled GreenletExit")
Example #41
    def get_best(self, task, file):
        with self.lock:
            if config.use_useraccounts:
                all_accounts = self
            else:
                all_accounts = [a for a in self if a._private_account]

            group = Group()
            for account in all_accounts:
                group.spawn(account.boot)
            group.join()

            all_accounts = [a for a in all_accounts if a._private_account or (a.enabled and a.last_error is None)]
            
            accounts = []
            best_weight = 0
            for account in all_accounts:
                if file is not None and not account.match(file):
                    continue
                try:
                    weight = account.weight
                except gevent.GreenletExit:
                    continue
                if weight is None or weight < best_weight:
                    continue
                if weight > best_weight:
                    accounts = []
                    best_weight = weight
                bisect.insort(accounts, (account.get_task_pool(task).full() and 1 or 0, len(accounts), account))

            if accounts:
                return accounts[0][2]
            if len(all_accounts) > 0:
                #self.log.warning('found no account. returning first one...')
                return all_accounts[0]
            else:
                self.log.info('found no account. creating a "free" account')
                account = self.add(_private_account=True)
                account.boot()
                return account
Example #42
 def test_kill_greenlet_with_generic_exception(self):
     group = Group()
     UnaryCallWithSleep = self._channel.unary_unary(
         _UNARY_CALL_METHOD_WITH_SLEEP,
         request_serializer=messages_pb2.SimpleRequest.SerializeToString,
         response_deserializer=messages_pb2.SimpleResponse.FromString,
     )
     greenlet = group.spawn(self._run_client, UnaryCallWithSleep)
     # release loop so that greenlet can take control
     gevent.sleep()
     group.killone(greenlet, exception=Exception)
     self.assertFalse(self._unhandled_exception, "Unhandled exception")
     self.assertRaises(Exception, greenlet.get)
Example #43
 def test_graceful_close_in_greenlet(self):
     group = Group()
     stub = test_pb2_grpc.TestServiceStub(self._channel)
     greenlet = group.spawn(self._run_client, stub.UnaryCall)
     # release loop so that greenlet can take control
     gevent.sleep()
     self._channel.close()
     group.killone(greenlet)
     self.assertFalse(self._unhandled_exception, "Unhandled GreenletExit")
     try:
         greenlet.get()
     except Exception as e:  # pylint: disable=broad-except
         self.fail(f"Unexpected exception in greenlet: {e}")
Example #44
class GeventExecutor(Executor):
    def __init__(self):
        self.group = Group()
        self.channel: GeventChannel[
            Tuple[Callable, List[Any], Dict[str, Any]]
        ] = GeventChannel()
        self.spawn_work_greenlet = gevent.spawn(self._spawn_work)

    def __del__(self):
        self.spawn_work_greenlet.kill()

    def wrap(self, fn: Func) -> Func:
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            self.channel.put((fn, args, kwargs))

        return cast(Func, wrapper)

    def _spawn_work(self):
        while True:
            (fn, args, kwargs) = self.channel.get()
            self.group.spawn(fn, *args, **kwargs)
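Usage is then plain decoration: the wrapped call returns immediately, and _spawn_work later spawns it onto the group. A short sketch, assuming the GeventChannel dependency above is importable:

executor = GeventExecutor()

@executor.wrap
def handle(event):
    print("handling", event)

handle("ping")  # returns at once; the call is queued on the channel and
                # executed on executor.group by the _spawn_work greenlet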
Example #45
class SyncStack:
    class SyncLayer:
        def __init__(self, path):
            self.path = path
            self.delta = None

    def __init__(self):
        self.layers = []
        self.latest_tag = 0
        self.sync_group = Group()

    def append(self, path):
        self.layers.append(self.SyncLayer(path))

    def has_unknown_delta(self):
        return self.latest_tag < len(self.layers)

    def need_sync(self, worker):
        return worker.sync_tag < self.latest_tag

    def update_delta(self, worker):
        assert self.has_unknown_delta()
        assert worker.sync_tag == self.latest_tag
        layer = self.layers[self.latest_tag]
        layer.delta = worker.calc_dir_delta(layer.path)
        self.latest_tag += 1

    def start_sync(self, worker):
        def _sync():
            while worker.sync_tag < self.latest_tag:
                layer = self.layers[worker.sync_tag]
                worker.sync_with_delta(layer.delta, layer.path)
            worker.set_syncing(False)

        worker.set_syncing(True)
        self.sync_group.spawn(_sync)

    def stop(self):
        self.sync_group.kill()
Example #46
 def api_calls(self):
     # Run all the API calls in parallel
     group = Group()
     if self.isNotFerry:
         group.spawn(self.realtime_polling)
         group.spawn(self.map_api)
     group.spawn(self.line_api)
     group.join()
Example #48
def run_troller():
    youtube_interest_q = Queue()
    detector_input_q = Queue()
    bad_video_q = Queue()
    tumblr_q = Queue()
    reddit_q = Queue()
    debug_q = Queue()
    conf = [
        (scrape_reddit, youtube_interest_q),
        (download_youtube_thumbs, youtube_interest_q, detector_input_q),
        (detect_bad_videos, detector_input_q, bad_video_q),
        (debug_results, debug_q),
        (post_to_tumblr, tumblr_q),
        (post_reddit_response, reddit_q),
        (bad_video_fanout, bad_video_q, (tumblr_q, reddit_q, debug_q)),
    ]
    group = Group()
    for args in conf:
        group.spawn(*args)
    try:
        while True:
            gevent.sleep(1)
    except KeyboardInterrupt:
        pass
Example #49
def ready(role):
    queue = Group()
    while True:
        info = ScaleInfo(role)
        _scale_ctx_stack.push(info)
        if config.check_func is not None:
            queue.spawn(action, role, config.check_func())
        else:
            queue.spawn(action, role, check_cpu_utilization(role))
        queue.spawn(gevent.sleep, role.cooltime*60)
        queue.join()
        _scale_ctx_stack.pop()
Example #50
class Processor(object):
    def __init__(self):
        self.group = Group()
        self.greenlets = []

    def spawn(self, func, *args, **kwargs):
        g = self.group.spawn(func, *args, **kwargs)
        self.greenlets.append(g)

    def join(self):
        self.group.join()

    def values(self):
        gs, self.greenlets = self.greenlets, []
        return [g.value for g in gs]

    def empty(self):
        return not bool(self.greenlets)
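A short usage sketch of the Processor above (the square worker is made up for illustration):

def square(x):
    return x * x

p = Processor()
for i in range(4):
    p.spawn(square, i)
p.join()
print(p.values())  # [0, 1, 4, 9]; empty() is True again afterwards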
Example #51
    def _run_phase(self, phase, start_date, end_date):
        phase, sources, targets = phase
        logger.info('Running phase %r' % phase)
        self._reset_counters()

        self._start_transactions(targets)
        self.database.start_transaction()
        try:
            greenlets = Group()
            # each callable will push its result in the queue
            for source in sources:
                exists = self.database.exists(source, start_date, end_date)
                if exists and not self.force:
                    logger.info('Already done: %s, %s to %s' % (
                        source.get_id(), start_date, end_date))
                    continue

                green = greenlets.spawn(self._get_data, source,
                                        start_date, end_date)
                green.link_value(partial(self._log_transaction, source,
                                         start_date, end_date))
                green.link_exception(partial(self._error,
                                             exception.ExtractError, source))

            # looking at the queue
            pushed = 0

            while len(greenlets) > 0 or self.queue.qsize() > 0:
                gevent.sleep(0)
                pushed += self._push_to_target(targets)
                # let's see if we have some errors
                if len(self.errors) > 0:
                    # yeah! we need to rollback
                    # XXX later we'll do a source-by-source rollback
                    raise exception.RunError(self.errors)

        except Exception:
            self._rollback_transactions(targets)
            self.database.rollback_transaction()
            raise
        else:
            self._commit_transactions(targets)
            self.database.commit_transaction()
Example #52
class LocustRunner(object):
    def __init__(self, locust_classes, hatch_rate, num_clients, num_requests=None, host=None):
        self.locust_classes = locust_classes
        self.hatch_rate = hatch_rate
        self.num_clients = num_clients
        self.num_requests = num_requests
        self.host = host
        self.locusts = Group()
        self.state = STATE_INIT
        self.hatching_greenlet = None
        self.exceptions = {}
        self.stats = global_stats
        
        # register listener that resets stats when hatching is complete
        def on_hatch_complete(count):
            self.state = STATE_RUNNING
            logger.info("Resetting stats\n")
            self.stats.reset_all()
        events.hatch_complete += on_hatch_complete

    @property
    def request_stats(self):
        return self.stats.entries
    
    @property
    def errors(self):
        return self.stats.errors
    
    @property
    def user_count(self):
        return len(self.locusts)

    def weight_locusts(self, amount, stop_timeout = None):
        """
        Distributes the amount of locusts for each WebLocust class according to its weight
        and returns a list "bucket" with the weighted locusts
        """
        bucket = []
        weight_sum = sum((locust.weight for locust in self.locust_classes if locust.task_set))
        for locust in self.locust_classes:
            if not locust.task_set:
                warnings.warn("Notice: Found Locust class (%s) got no task_set. Skipping..." % locust.__name__)
                continue

            if self.host is not None:
                locust.host = self.host
            if stop_timeout is not None:
                locust.stop_timeout = stop_timeout

            # create locusts depending on weight
            percent = locust.weight / float(weight_sum)
            num_locusts = int(round(amount * percent))
            bucket.extend([locust for x in xrange(0, num_locusts)])
        return bucket

    def spawn_locusts(self, spawn_count=None, stop_timeout=None, wait=False):
        if spawn_count is None:
            spawn_count = self.num_clients

        if self.num_requests is not None:
            self.stats.max_requests = self.num_requests

        bucket = self.weight_locusts(spawn_count, stop_timeout)
        spawn_count = len(bucket)
        if self.state == STATE_INIT or self.state == STATE_STOPPED:
            self.state = STATE_HATCHING
            self.num_clients = spawn_count
        else:
            self.num_clients += spawn_count

        logger.info("Hatching and swarming %i clients at the rate %g clients/s..." % (spawn_count, self.hatch_rate))
        occurence_count = dict([(l.__name__, 0) for l in self.locust_classes])
        
        def hatch():
            sleep_time = 1.0 / self.hatch_rate
            while True:
                if not bucket:
                    logger.info("All locusts hatched: %s" % ", ".join(["%s: %d" % (name, count) for name, count in occurence_count.iteritems()]))
                    events.hatch_complete.fire(self.num_clients)
                    return

                locust = bucket.pop(random.randint(0, len(bucket)-1))
                occurence_count[locust.__name__] += 1
                def start_locust(_):
                    try:
                        locust().run()
                    except GreenletExit:
                        pass
                new_locust = self.locusts.spawn(start_locust, locust)
                if len(self.locusts) % 10 == 0:
                    logger.debug("%i locusts hatched" % len(self.locusts))
                gevent.sleep(sleep_time)
        
        hatch()
        if wait:
            self.locusts.join()
            logger.info("All locusts dead\n")

    def kill_locusts(self, kill_count):
        """
        Kill kill_count weighted locusts from the Group() object in self.locusts
        """
        bucket = self.weight_locusts(kill_count)
        kill_count = len(bucket)
        self.num_clients -= kill_count
        logger.info("Killing %i locusts" % kill_count)
        dying = []
        for g in self.locusts:
            for l in bucket:
                if l == g.args[0]:
                    dying.append(g)
                    bucket.remove(l)
                    break
        for g in dying:
            self.locusts.killone(g)
        events.hatch_complete.fire(self.num_clients)

    def start_hatching(self, locust_count=None, hatch_rate=None, wait=False):
        if self.state != STATE_RUNNING and self.state != STATE_HATCHING:
            self.stats.clear_all()
            self.stats.start_time = time()
            self.exceptions = {}

        # Dynamically changing the locust count
        if self.state != STATE_INIT and self.state != STATE_STOPPED:
            self.state = STATE_HATCHING
            if self.num_clients > locust_count:
                # Kill some locusts
                kill_count = self.num_clients - locust_count
                self.kill_locusts(kill_count)
            elif self.num_clients < locust_count:
                # Spawn some locusts
                if hatch_rate:
                    self.hatch_rate = hatch_rate
                spawn_count = locust_count - self.num_clients
                self.spawn_locusts(spawn_count=spawn_count)
        else:
            if hatch_rate:
                self.hatch_rate = hatch_rate
            if locust_count:
                self.spawn_locusts(locust_count, wait=wait)
            else:
                self.spawn_locusts(wait=wait)

    def stop(self):
        # if we are currently hatching locusts we need to kill the hatching greenlet first
        if self.hatching_greenlet and not self.hatching_greenlet.ready():
            self.hatching_greenlet.kill(block=True)
        self.locusts.kill(block=True)
        self.state = STATE_STOPPED

    def log_exception(self, node_id, msg, formatted_tb):
        key = hash(formatted_tb)
        row = self.exceptions.setdefault(key, {"count": 0, "msg": msg, "traceback": formatted_tb, "nodes": set()})
        row["count"] += 1
        row["nodes"].add(node_id)
        self.exceptions[key] = row
Example #53
class MasterLocustRunner(DistributedLocustRunner):
    def __init__(self, *args, **kwargs):
        super(MasterLocustRunner, self).__init__(*args, **kwargs)
        
        class SlaveNodesDict(dict):
            def get_by_state(self, state):
                return [c for c in self.itervalues() if c.state == state]
            
            @property
            def ready(self):
                return self.get_by_state(STATE_INIT)
            
            @property
            def hatching(self):
                return self.get_by_state(STATE_HATCHING)
            
            @property
            def running(self):
                return self.get_by_state(STATE_RUNNING)
        
        self.clients = SlaveNodesDict()
        
        self.client_stats = {}
        self.client_errors = {}
        self._request_stats = {}

        self.server = rpc.Server()
        self.greenlet = Group()
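        # Spawn the slave-message listener in a background greenlet; any
        # exception it raises is routed to the no-op handler below.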
        self.greenlet.spawn(self.client_listener).link_exception(receiver=self.noop)
        
        # listener that gathers info on how many locust users the slaves have spawned
        def on_slave_report(client_id, data):
            self.clients[client_id].user_count = data["user_count"]
        events.slave_report += on_slave_report
        
        # register listener that sends quit message to slave nodes
        def on_quitting():
            self.quit()
        events.quitting += on_quitting
    
    def noop(self, *args, **kw):
        pass
    
    @property
    def user_count(self):
        return sum(c.user_count for c in self.clients.itervalues())
    
    def start_hatching(self, locust_count, hatch_rate):
        self.num_clients = locust_count
        slave_num_clients = locust_count / ((len(self.clients.ready) + len(self.clients.running)) or 1)
        slave_hatch_rate = float(hatch_rate) / ((len(self.clients.ready) + len(self.clients.running)) or 1)

        logger.info("Sending hatch jobs to %i ready clients" % (len(self.clients.ready) + len(self.clients.running)))
        if not (len(self.clients.ready)+len(self.clients.running)):
            logger.warning("You are running in distributed mode but have no slave servers connected. Please connect slaves prior to swarming.")
            return
        
        if self.state != STATE_RUNNING and self.state != STATE_HATCHING:
            self.stats.clear_all()
            self.exceptions = {}
        
        for client in self.clients.itervalues():
            data = {"hatch_rate":slave_hatch_rate, "num_clients":slave_num_clients, "num_requests": self.num_requests, "host":self.host, "stop_timeout":None}
            self.server.send(Message("hatch", data, None))
        
        self.stats.start_time = time()
        self.state = STATE_HATCHING

    def stop(self):
        for client in self.clients.hatching + self.clients.running:
            self.server.send(Message("stop", None, None))
    
    def quit(self):
        for client in self.clients.itervalues():
            self.server.send(Message("quit", None, None))
        self.greenlet.kill(block=True)
    
    def client_listener(self):
        while True:
            msg = self.server.recv()
            if msg.type == "client_ready":
                id = msg.node_id
                self.clients[id] = SlaveNode(id)
                logger.info("Client %r reported as ready. Currently %i clients ready to swarm." % (id, len(self.clients.ready)))
                ## emit a warning if the slave's clock seems to be out of sync with our clock
                #if abs(time() - msg.data["time"]) > 5.0:
                #    warnings.warn("The slave node's clock seems to be out of sync. For the statistics to be correct the different locust servers need to have synchronized clocks.")
            elif msg.type == "client_stopped":
                del self.clients[msg.node_id]
                if len(self.clients.hatching + self.clients.running) == 0:
                    self.state = STATE_STOPPED
                logger.info("Removing %s client from running clients" % (msg.node_id))
            elif msg.type == "stats":
                events.slave_report.fire(msg.node_id, msg.data)
            elif msg.type == "hatching":
                self.clients[msg.node_id].state = STATE_HATCHING
            elif msg.type == "hatch_complete":
                self.clients[msg.node_id].state = STATE_RUNNING
                self.clients[msg.node_id].user_count = msg.data["count"]
                if len(self.clients.hatching) == 0:
                    count = sum(c.user_count for c in self.clients.itervalues())
                    events.hatch_complete.fire(count)
            elif msg.type == "quit":
                if msg.node_id in self.clients:
                    del self.clients[msg.node_id]
                    logger.info("Client %r quit. Currently %i clients connected." % (msg.node_id, len(self.clients.ready)))
            elif msg.type == "exception":
                self.log_exception(msg.node_id, msg.data["msg"], msg.data["traceback"])

    @property
    def slave_count(self):
        return len(self.clients.ready) + len(self.clients.hatching) + len(self.clients.running)
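
The client_listener() loop above is a plain type-based dispatcher. A rough, queue-driven sketch of the same pattern (the Message tuple mirrors the (type, data, node_id) shape used above; the "slave-1" traffic is faked for illustration, whereas the real messages arrive over the rpc server):

from collections import namedtuple
from queue import Queue

Message = namedtuple("Message", ["type", "data", "node_id"])

inbox = Queue()
clients = {}

# Fake the traffic a slave would produce on connect and after hatching.
inbox.put(Message("client_ready", None, "slave-1"))
inbox.put(Message("hatch_complete", {"count": 10}, "slave-1"))

while not inbox.empty():
    msg = inbox.get()
    if msg.type == "client_ready":
        clients[msg.node_id] = {"state": "ready", "user_count": 0}
    elif msg.type == "hatch_complete":
        clients[msg.node_id]["state"] = "running"
        clients[msg.node_id]["user_count"] = msg.data["count"]

print(clients)  # {'slave-1': {'state': 'running', 'user_count': 10}}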
Exemple #54
0
class MasterLocustRunner(DistributedLocustRunner):
    def __init__(self, *args, **kwargs):
        super(MasterLocustRunner, self).__init__(*args, **kwargs)
        
        class SlaveNodesDict(dict):
            def get_by_state(self, state):
                return [c for c in self.itervalues() if c.state == state]
            
            @property
            def ready(self):
                return self.get_by_state(STATE_INIT)
            
            @property
            def hatching(self):
                return self.get_by_state(STATE_HATCHING)
            
            @property
            def running(self):
                return self.get_by_state(STATE_RUNNING)
        
        self.clients = SlaveNodesDict()
        
        self.client_stats = {}
        self.client_errors = {}
        self._request_stats = {}
        
        self.server = zmqrpc.Server()
        self.greenlet = Group()
        self.greenlet.spawn(self.client_listener).link_exception()
        
        # listener that gathers info on how many locust users the slaves have spawned
        def on_slave_report(client_id, data):
            self.clients[client_id].user_count = data["user_count"]
        events.slave_report += on_slave_report
    
    @property
    def user_count(self):
        return sum(c.user_count for c in self.clients.itervalues())
    
    def start_hatching(self, locust_count, hatch_rate):
        self.num_clients = locust_count
        slave_num_clients = locust_count / ((len(self.clients.ready) + len(self.clients.running)) or 1)
        slave_hatch_rate = float(hatch_rate) / ((len(self.clients.ready) + len(self.clients.running)) or 1)

        print "Sending hatch jobs to %i ready clients" % (len(self.clients.ready) + len(self.clients.running))
        if not (len(self.clients.ready)+len(self.clients.running)):
            print "WARNING: You are running in distributed mode but have no slave servers connected."
            print "Please connect slaves prior to swarming."
        
        if self.state != STATE_RUNNING and self.state != STATE_HATCHING:
            RequestStats.clear_all()
        
        for client in self.clients.itervalues():
            msg = {"hatch_rate":slave_hatch_rate, "num_clients":slave_num_clients, "num_requests": self.num_requests, "host":self.host, "stop_timeout":None}
            self.server.send({"type":"hatch", "data":msg})
        
        RequestStats.global_start_time = time()
        self.state = STATE_HATCHING

    def stop(self):
        for client in self.clients.hatching + self.clients.running:
            self.server.send({"type":"stop", "data":{}})
    
    def client_listener(self):
        while True:
            msg = self.server.recv()
            if msg["type"] == "client_ready":
                id = msg["data"]
                self.clients[id] = SlaveNode(id)
                print "Client %r reported as ready. Currently %i clients ready to swarm." % (id, len(self.clients.ready))
            elif msg["type"] == "client_stopped":
                del self.clients[msg["data"]]
                if len(self.clients.hatching + self.clients.running) == 0:
                    self.state = STATE_STOPPED
                print "Removing %s client from running clients" % (msg["data"])
            elif msg["type"] == "stats":
                report = msg["data"]
                events.slave_report.fire(report["client_id"], report["data"])
            elif msg["type"] == "hatching":
                id = msg["data"]
                self.clients[id].state = STATE_HATCHING
            elif msg["type"] == "hatch_complete":
                id = msg["data"]["client_id"]
                self.clients[id].state = STATE_RUNNING
                self.clients[id].user_count = msg["data"]["count"]
                if len(self.clients.hatching) == 0:
                    count = sum(c.user_count for c in self.clients.itervalues())
                    events.hatch_complete.fire(count)

    @property
    def slave_count(self):
        return len(self.clients.ready) + len(self.clients.hatching) + len(self.clients.running)
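
For reference, a small worked sketch of the per-slave split computed in start_hatching() above (the slave counts are hypothetical). Note that the snippet's "/" floors under Python 2, so any remainder of users is silently dropped; "//" below makes that explicit:

locust_count = 100
hatch_rate = 10.0
num_ready, num_running = 3, 0  # hypothetical slave counts

num_slaves = (num_ready + num_running) or 1        # "or 1" guards against division by zero
slave_num_clients = locust_count // num_slaves     # 33 per slave -> 99 users total
slave_hatch_rate = float(hatch_rate) / num_slaves  # ~3.33 users/s per slave

print(slave_num_clients, slave_hatch_rate)  # 33 3.333...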