Example #1
def run(interval, algalon_client, app):
    from daemonutils import node_polling
    from daemonutils.cluster_task import TaskPoller

    TaskPoller(app, interval).start()

    NODES_EACH_THREAD = 20
    with handlers.base.app.app_context():
        while True:
            poll = file_ipc.read_poll()
            nodes = _load_from(node_polling.RedisNodeStatus, poll['nodes'])
            proxies = _load_from(node_polling.ProxyStatus, poll['proxies'])
            models.base.db.session.commit()

            all_nodes = nodes + proxies
            random.shuffle(all_nodes)
            pollers = [
                Poller(all_nodes[i: i + NODES_EACH_THREAD], algalon_client)
                for i in xrange(0, len(all_nodes), NODES_EACH_THREAD)]
            for p in pollers:
                p.start()
            time.sleep(interval)

            for p in pollers:
                p.join()

            logging.info('Total %d nodes, %d proxies', len(nodes),
                         len(proxies))

            try:
                file_ipc.write_details({n.addr: n.details for n in nodes},
                                       {p.addr: p.details for p in proxies})
            except StandardError, e:
                logging.exception(e)
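Example #1 treats file_ipc as a small helper module that exchanges polling targets and results through files on disk: read_poll() returns a dict with 'nodes' and 'proxies' lists, and write_details() persists per-address detail mappings. The implementation is not part of the example; the following is only a minimal sketch under that assumption, with POLL_FILE and DETAILS_FILE as hypothetical paths.

import json

POLL_FILE = 'poll.json'        # hypothetical path
DETAILS_FILE = 'details.json'  # hypothetical path


def read_poll():
    # Assumed shape: {'nodes': [...], 'proxies': [...]}
    with open(POLL_FILE) as f:
        return json.load(f)


def write_details(node_details, proxy_details):
    # node_details / proxy_details are {addr: details} mappings,
    # as built at the end of Example #1.
    with open(DETAILS_FILE, 'w') as f:
        json.dump({'nodes': node_details, 'proxies': proxy_details}, f)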
Example #2
    def _shot(self):
        poll = file_ipc.read_poll()
        nodes = _load_from(RedisNodeStatus, poll['nodes'])
        proxies = _load_from(ProxyStatus, poll['proxies'])
        # commit because `get_by` may create new nodes
        # to reattach session they must be persisted
        commit_session()

        all_nodes = nodes + proxies
        random.shuffle(all_nodes)
        pollers = [
            Poller(all_nodes[i:i + NODES_EACH_THREAD], self.algalon_client)
            for i in xrange(0, len(all_nodes), NODES_EACH_THREAD)
        ]
        for p in pollers:
            p.start()
        time.sleep(self.interval)

        for p in pollers:
            p.join()
        for p in pollers:
            for n in p.nodes:
                n.add_to_db()

        save_polling_stat(nodes, proxies)
        commit_session()
        logging.debug('Total %d nodes, %d proxies', len(nodes), len(proxies))

        try:
            file_ipc.write_details({n.addr: n.details
                                    for n in nodes},
                                   {p.addr: p.details
                                    for p in proxies})
        except StandardError, e:
            logging.exception(e)
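The _load_from helper itself is not shown in any of these examples. The comment in Example #2 ("get_by may create new nodes") suggests it resolves each polled address to a status model through a get-or-create lookup; the sketch below is an assumption along those lines, taking get_by(host, port) as the model's classmethod.

def _load_from(cls, nodes):
    # Assumed behaviour: one status object per polled address,
    # fetched or lazily created by the model's get_by classmethod.
    return [cls.get_by(n['host'], n['port']) for n in nodes]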
Example #3
    def _shot(self):
        poll = file_ipc.read_poll()
        nodes = _load_from(RedisNodeStatus, poll['nodes'])
        proxies = _load_from(ProxyStatus, poll['proxies'])
        # commit because `get_by` may create new nodes
        # to reattach session they must be persisted
        db.session.commit()

        all_nodes = nodes + proxies
        random.shuffle(all_nodes)
        pollers = [Poller(all_nodes[i: i + NODES_EACH_THREAD],
                          self.algalon_client)
                   for i in xrange(0, len(all_nodes), NODES_EACH_THREAD)]
        for p in pollers:
            p.start()
        time.sleep(self.interval)

        for p in pollers:
            p.join()
        for p in pollers:
            for n in p.nodes:
                n.add_to_db()

        save_polling_stat(nodes, proxies)
        db.session.commit()
        logging.debug('Total %d nodes, %d proxies', len(nodes), len(proxies))

        try:
            file_ipc.write_details({n.addr: n.details for n in nodes},
                                   {p.addr: p.details for p in proxies})
        except StandardError, e:
            logging.exception(e)
Example #4
def run(interval, algalon_client, app):
    TaskPoller(app, interval).start()

    NODES_EACH_THREAD = 20
    while True:
        poll = file_ipc.read_poll()
        nodes = _load_from(node_polling.RedisNode, poll['nodes'])
        proxies = _load_from(node_polling.Proxy, poll['proxies'])

        all_nodes = nodes + proxies
        random.shuffle(all_nodes)
        pollers = [Poller(all_nodes[i: i + NODES_EACH_THREAD], algalon_client)
                   for i in xrange(0, len(all_nodes), NODES_EACH_THREAD)]
        for p in pollers:
            p.start()

        time.sleep(interval)

        for p in pollers:
            p.join()

        logging.info('Total %d nodes, %d proxies', len(nodes), len(proxies))
        node_polling.flush_to_db()
        try:
            file_ipc.write([n.details for n in nodes],
                           [p.details for p in proxies])
        except StandardError, e:
            logging.exception(e)
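Every run loop above slices the shuffled node list into chunks of NODES_EACH_THREAD and starts one Poller per chunk, joining them after the sleep. Poller is therefore presumably a threading.Thread subclass that probes its assigned targets; a minimal sketch under that assumption, where collect_stats() is a hypothetical stand-in for the real per-node probe:

import threading


class Poller(threading.Thread):
    def __init__(self, nodes, algalon_client=None):
        threading.Thread.__init__(self)
        self.nodes = nodes
        self.algalon_client = algalon_client

    def run(self):
        for node in self.nodes:
            # collect_stats() is illustrative only; the real class would
            # gather Redis/proxy info and update the node's status here.
            node.collect_stats()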
Example #5
def run():
    NODES_EACH_THREAD = 20
    while True:
        poll = file_ipc.read_poll()
        nodes = _load_from(RedisNode, poll['nodes'])
        proxies = _load_from(Proxy, poll['proxies'])

        all_nodes = nodes + proxies
        random.shuffle(all_nodes)
        pollers = [Poller(all_nodes[i: i + NODES_EACH_THREAD])
                   for i in xrange(0, len(all_nodes), NODES_EACH_THREAD)]
        for p in pollers:
            p.start()

        time.sleep(INTERVAL)

        for p in pollers:
            p.join()

        logging.info('Total %d nodes, %d proxies', len(nodes), len(proxies))
        _flush_to_db()
        try:
            file_ipc.write([n.details for n in nodes],
                           [p.details for p in proxies])
        except StandardError, e:
            logging.exception(e)
Example #6
def run():
    NODES_EACH_THREAD = 20
    while True:
        poll = file_ipc.read_poll()
        nodes = _load_from(RedisNode, poll['nodes'])
        proxies = _load_from(Proxy, poll['proxies'])

        all_nodes = nodes + proxies
        random.shuffle(all_nodes)
        pollers = [
            Poller(all_nodes[i:i + NODES_EACH_THREAD])
            for i in xrange(0, len(all_nodes), NODES_EACH_THREAD)
        ]
        for p in pollers:
            p.start()

        time.sleep(INTERVAL)

        for p in pollers:
            p.join()

        logging.info('Total %d nodes, %d proxies', len(nodes), len(proxies))
        _flush_to_db()
        try:
            file_ipc.write([n.details for n in nodes],
                           [p.details for p in proxies])
        except StandardError, e:
            logging.exception(e)
Example #7
def run():
    while True:
        poll = file_ipc.read_poll()
        nodes = _load_from(RedisNode, poll['nodes'])
        for node in nodes:
            try:
                i = _info_node(node.details['host'], node.details['port'])
                node.update(i)
                node.set_available(i['response_time'])
            except (ReplyError, SocketError, StandardError), e:
                logging.error('Fail to retrieve info of %s:%d',
                              node.details['host'], node.details['port'])
                logging.exception(e)
                node.set_unavailable()
                if algalon_client is not None:
                    algalon_client.send_alarm(
                        'Fail to retrieve info of {0}:{1}'.format(
                            node.details['host'], node.details['port']),
                        traceback.format_exc())
            else:
                _send_to_influxdb(node)
            session.add(node)

        proxies = _load_from(Proxy, poll['proxies'])
        for p in proxies:
            try:
                i = _info_proxy(p.details['host'], p.details['port'])
                p.update(i)
                p.set_available(i['response_time'])
            except (ReplyError, SocketError, StandardError), e:
                logging.error('Fail to retrieve info of %s:%d',
                              p.details['host'], p.details['port'])
                logging.exception(e)
                p.set_unavailable()
                if algalon_client is not None:
                    algalon_client.send_alarm(
                        'Fail to retrieve info of {0}:{1}'.format(
                            p.details['host'], p.details['port']),
                        traceback.format_exc())
            else:
                _send_proxy_to_influxdb(p)
            session.add(p)
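Examples #7 and #8 expect _info_node(host, port) to return a dict that includes a 'response_time' key, which set_available() then records. A plausible sketch, assuming redis-py is available and that response_time is simply the wall-clock duration of the INFO call (the actual probe may collect more than this):

import time

from redis import StrictRedis


def _info_node(host, port):
    client = StrictRedis(host=host, port=port, socket_timeout=5)
    start = time.time()
    info = client.info()
    # Attach the call's duration so set_available(i['response_time']) works.
    info['response_time'] = time.time() - start
    return info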
Example #8
def run():
    while True:
        poll = file_ipc.read_poll()
        nodes = _load_from(RedisNode, poll['nodes'])
        for node in nodes:
            try:
                i = _info_node(node.details['host'], node.details['port'])
                node.update(i)
                node.set_available(i['response_time'])
            except (ReplyError, SocketError, StandardError), e:
                logging.error('Fail to retrieve info of %s:%d',
                              node.details['host'], node.details['port'])
                logging.exception(e)
                node.set_unavailable()
                if algalon_client is not None:
                    algalon_client.send_alarm(
                        'Fail to retrieve info of {0}:{1}'.format(
                            node.details['host'], node.details['port']),
                        traceback.format_exc())
            else:
                _send_to_influxdb(node)
            session.add(node)

        proxies = _load_from(Proxy, poll['proxies'])
        for p in proxies:
            try:
                i = _info_proxy(p.details['host'], p.details['port'])
                p.update(i)
                p.set_available(i['response_time'])
            except (ReplyError, SocketError, StandardError), e:
                logging.error('Fail to retrieve info of %s:%d',
                              p.details['host'], p.details['port'])
                logging.exception(e)
                p.set_unavailable()
                if algalon_client is not None:
                    algalon_client.send_alarm(
                        'Fail to retrieve info of {0}:{1}'.format(
                            p.details['host'], p.details['port']),
                        traceback.format_exc())
            else:
                _send_proxy_to_influxdb(p)
            session.add(p)
Example #9
def run(interval, algalon_client, app):
    from daemonutils import node_polling
    from daemonutils.cluster_task import TaskPoller

    TaskPoller(app, interval).start()

    NODES_EACH_THREAD = 20
    with handlers.base.app.app_context():
        while True:
            poll = file_ipc.read_poll()
            nodes = _load_from(node_polling.RedisNodeStatus, poll['nodes'])
            proxies = _load_from(node_polling.ProxyStatus, poll['proxies'])
            models.base.db.session.commit()

            all_nodes = nodes + proxies
            random.shuffle(all_nodes)
            pollers = [
                Poller(all_nodes[i:i + NODES_EACH_THREAD], algalon_client)
                for i in xrange(0, len(all_nodes), NODES_EACH_THREAD)
            ]
            for p in pollers:
                p.start()
            time.sleep(interval)

            for p in pollers:
                p.join()

            logging.info('Total %d nodes, %d proxies', len(nodes),
                         len(proxies))

            try:
                file_ipc.write_details({n.addr: n.details
                                        for n in nodes},
                                       {p.addr: p.details
                                        for p in proxies})
            except StandardError, e:
                logging.exception(e)
Example #10
    def polling_targets(self):
        return file_ipc.read_poll()
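Example #10 only wraps file_ipc.read_poll() behind a polling_targets() method, presumably so the enclosing daemon can swap in a different target source. A hedged sketch of such a consumer, where the class and method names other than polling_targets() are illustrative rather than taken from the project:

class PollingDaemon(object):
    def polling_targets(self):
        return file_ipc.read_poll()

    def _load_targets(self):
        # Consume the returned dict the same way the run() loops above do.
        poll = self.polling_targets()
        nodes = _load_from(RedisNode, poll['nodes'])
        proxies = _load_from(Proxy, poll['proxies'])
        return nodes, proxies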