Example #1
class Lock:
    """ UNIX-specific exclusive file locks (released when the process ends).

    Based on
    http://blog.vmfarms.com/2011/03/cross-process-locking-and.html,
    adapted for context managers (the 'with' statement).

    Modified to be gevent-safe! Locks held by a given Greenlet may not be
    taken by other Greenlets until released, _as long as you only create one
    Lock object per lockfile_. THIS IS VERY IMPORTANT. *Make sure* that you're
    not creating multiple locks on the same file from the same process,
    otherwise you'll bypass the gevent lock!

    Parameters
    ----------
    f : file or str
        File handle or filename to use as the lock.
    block : bool
        Whether to block, or raise IOError if the lock is already held.
    """
    TIMEOUT = 60

    def __init__(self, f, block=True):
        if isinstance(f, file):
            self.filename = f.name
            self.handle = f if not f.closed else open(f.name, 'w')
        else:
            self.filename = f
            mkdirp(os.path.dirname(f))
            self.handle = open(f, 'w')
        if block:
            self.lock_op = fcntl.LOCK_EX
        else:
            self.lock_op = fcntl.LOCK_EX | fcntl.LOCK_NB
        self.block = block
        self.gevent_lock = BoundedSemaphore(1)

    def acquire(self):
        got_gevent_lock = self.gevent_lock.acquire(blocking=self.block)
        if not got_gevent_lock:
            raise IOError("cannot acquire gevent lock")
        fcntl.flock(self.handle, self.lock_op)

    def release(self):
        fcntl.flock(self.handle, fcntl.LOCK_UN)
        self.gevent_lock.release()

    def locked(self):
        return self.gevent_lock.locked()

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, type, value, traceback):
        self.release()

    def __del__(self):
        self.handle.close()
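
A quick usage sketch for the class above (hedged: the lock path is illustrative, and `mkdirp` is assumed to exist as in the code; create only one Lock object per lockfile, as the docstring warns):

    # Non-blocking attempt; IOError means another holder has the lock.
    lock = Lock('/tmp/myapp/worker.lock', block=False)
    try:
        with lock:
            pass  # critical section, exclusive across processes and greenlets
    except IOError:
        print "lock is already held elsewhere"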
Example #2
    def __init__(self, module, server_list):
        # logging
        logger = logging.getLogger(module)
        logger.setLevel(logging.INFO)
        try:
            handler = logging.handlers.RotatingFileHandler(
                '/var/log/contrail/' + module + '-zk.log',
                maxBytes=10 * 1024 * 1024,
                backupCount=5)
        except IOError:
            print "Cannot open log file in /var/log/contrail/"
        else:
            log_format = logging.Formatter(
                '%(asctime)s [%(name)s]: %(message)s',
                datefmt='%m/%d/%Y %I:%M:%S %p')
            handler.setFormatter(log_format)
            logger.addHandler(handler)

        self._zk_client = \
            kazoo.client.KazooClient(
                server_list,
                handler=kazoo.handlers.gevent.SequentialGeventHandler(),
                logger=logger)

        self._logger = logger
        self._election = None
        self._zk_sem = BoundedSemaphore(1)
        self.connect()
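
A hedged sketch of what the `connect()` call above typically does; Examples #22 and #33 further down inline the same retry loop, so this mirrors them rather than this project's exact code (assumes `gevent` is imported):

    def connect(self):
        while True:
            try:
                self._zk_client.start()
                break
            except Exception as e:
                # ZooKeeper may be unreachable or still electing a master
                self._logger.error('zookeeper connect failed (%s), retrying' % e)
                gevent.sleep(1)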
Example #3
class LiberateProtocol(protocols.BaseProtocol):
    """协议"""

    buff = ""

    def connectionMade(self):
        """连接建立处理
        """
        address = self.transport.getAddress()
        logger.info('Client %d logged in. [%s,%d]' % (self.transport.sessionno,
                                                    address[0], address[1]))
        self.factory.connmanager.addConnection(self)
        self.factory.doConnectionMade(self)
        self.sem = BoundedSemaphore(1)

    def connectionLost(self, reason):
        """连接断开处理
        """
        logger.info('Client %d logged out (%s).' % (self.transport.sessionno,
                                                  reason))
        self.factory.doConnectionLost(self)
        self.factory.connmanager.dropConnectionByID(self.transport.sessionno)

    def safeToWriteData(self, data, command):
        """线程安全的向客户端发送数据
        @param data: str 要向客户端写的数据
        """
        if data is None:
            return
        senddata = self.factory.produceResult(data, command)
        self.sem.acquire()
        self.transport.sendall(senddata)
        self.sem.release()

    def dataReceived(self, data):
        """数据到达处理
        @param data: str 客户端传送过来的数据
        """
        length = self.factory.dataprotocl.getHeadlength()  # get the protocol header length
        self.buff += data
        while len(self.buff) >= length:
            unpackdata = self.factory.dataprotocl.unpack(self.buff[:length])
            if not unpackdata.get('result'):
                logger.info('illegal data package --')
                self.connectionLost('illegal data package')
                break
            command = unpackdata.get('command')
            rlength = unpackdata.get('length')
            request = self.buff[length:length + rlength]
            if len(request) < rlength:
                logger.info('some data lost')
                break
            self.buff = self.buff[length + rlength:]
            response = self.factory.doDataReceived(self, command, request)

            # if not response:
            #     continue

            self.safeToWriteData(response, command)
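
The receive loop above assumes a fixed-size header carrying a command id and a body length. A module-level sketch of one framing that would satisfy it (hedged: the struct layout and field names are hypothetical; the real `dataprotocl` may differ):

    import struct

    HEAD_LENGTH = 8  # hypothetical header: two unsigned 32-bit big-endian ints

    def getHeadlength():
        return HEAD_LENGTH

    def unpack(head):
        try:
            command, length = struct.unpack('!II', head)
        except struct.error:
            return {'result': False}
        return {'result': True, 'command': command, 'length': length}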
Example #4
 def connectionMade(self):
     """连接建立处理
     """
     address = self.transport.getAddress()
     logger.info('Client %d logged in. [%s,%d]' % (self.transport.sessionno,
                                                 address[0], address[1]))
     self.factory.connmanager.addConnection(self)
     self.factory.doConnectionMade(self)
     self.sem = BoundedSemaphore(1)
Example #5
 def __init__(self, redis_uve_server, logger):
     self._local_redis_uve = redis_uve_server
     self._redis_uve_list = []
     self._logger = logger
     self._sem = BoundedSemaphore(1)
     self._redis = None
     if self._local_redis_uve:
         self._redis = redis.StrictRedis(self._local_redis_uve[0],
                                         self._local_redis_uve[1], db=0)
Example #6
 def __init__(self, redis_uve_server, logger, redis_password=None):
     self._local_redis_uve = redis_uve_server
     self._redis_uve_list = []
     self._logger = logger
     self._sem = BoundedSemaphore(1)
     self._redis = None
     self._redis_password = redis_password
     if self._local_redis_uve:
         self._redis = redis.StrictRedis(self._local_redis_uve[0],
                                         self._local_redis_uve[1],
                                         password=self._redis_password,
                                         db=1)
Example #7
    def __init__(self,
                 discServer,
                 zk_srv_ip='127.0.0.1',
                 zk_srv_port='2181',
                 reset_config=False):
        self._reset_config = reset_config
        self._service_id_to_type = {}
        self._ds = discServer
        self._zk_sem = BoundedSemaphore(1)
        self._election = None
        self._restarting = False

        zk_endpts = []
        for ip in zk_srv_ip.split(','):
            zk_endpts.append('%s:%s' % (ip, zk_srv_port))

        # logging
        logger = logging.getLogger('discovery-service')
        logger.setLevel(logging.WARNING)
        handler = logging.handlers.RotatingFileHandler(
            '/var/log/contrail/discovery_zk.log',
            maxBytes=1024 * 1024,
            backupCount=10)
        log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s',
                                       datefmt='%m/%d/%Y %I:%M:%S %p')
        handler.setFormatter(log_format)
        logger.addHandler(handler)

        self._zk = kazoo.client.KazooClient(
            hosts=','.join(zk_endpts),
            handler=kazoo.handlers.gevent.SequentialGeventHandler(),
            logger=logger)
        self._logger = logger

        # connect
        self.connect()

        if reset_config:
            self.delete_node("/services", recursive=True)
            self.delete_node("/clients", recursive=True)
            self.delete_node("/election", recursive=True)

        # create default paths
        self.create_node("/services")
        self.create_node("/clients")
        self.create_node("/election")

        self._debug = {
            'subscription_expires': 0,
            'oos_delete': 0,
            'db_excepts': 0,
        }
Example #8
    def __init__(self, localServer):
        Greenlet.__init__(self)
        self.callinfos = {}
        self.localServer = localServer
        self.recv_queue = Queue()
        self.lock = BoundedSemaphore(1)

        self.handle_stoping = False
        self.recv_stop_evt = Event()
        self.timeout_stop_evt = Event()

        self.timeout_handle_greenlet = gevent.spawn(self.on_timeout_handle)
        self.timeout_handle_greenlet.start()
Example #9
 def __init__(self, f, block=True):
     if isinstance(f, file):
         self.filename = f.name
          self.handle = f if not f.closed else open(f.name, 'w')
     else:
         self.filename = f
         mkdirp(os.path.dirname(f))
         self.handle = open(f, 'w')
     if block:
         self.lock_op = fcntl.LOCK_EX
     else:
         self.lock_op = fcntl.LOCK_EX | fcntl.LOCK_NB
     self.block = block
     self.gevent_lock = BoundedSemaphore(1)
Example #10
    def __init__(self, args):

        self.thread = None
        self.socket = None
        self.cli_args = args
        self.force_debug = args['debug']

        # timers, counters and triggers
        self.pokemon_caught = 0
        self._error_counter = 0
        self._error_threshold = 10
        self.start_time = time()
        self.exp_start = None
        self._heartbeat_number = 1  # setting this back to one because we already parse a full heartbeat during login!
        self._heartbeat_frequency = 3  # 1 = always
        self._full_heartbeat_frequency = 15  # 10 = as before (every 10th heartbeat)
        self._farm_mode_triggered = False

        # objects, order is important!
        self.config = None
        self._load_config()

        self.log = create_logger(__name__,
                                 self.config.log_colors["poketrainer".upper()])

        self._open_socket()

        self.player = Player({})
        self.player_stats = PlayerStats({})
        self.inventory = Inventory(self, [])
        self.fort_walker = FortWalker(self)
        self.map_objects = MapObjects(self)
        self.poke_catcher = PokeCatcher(self)
        self.incubate = Incubate(self)
        self.evolve = Evolve(self)
        self.release = Release(self)
        self.sniper = Sniper(self)

        self._origPosF = (0, 0, 0)
        self.api = None
        self._load_api()

        # config values that might be changed during runtime
        self.step_size = self.config.step_size
        self.should_catch_pokemon = self.config.should_catch_pokemon

        # threading / locking
        self.sem = BoundedSemaphore(1)  # gevent
        self.persist_lock = False
        self.locker = None
Example #11
 def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None):
     self.size = size
     self.conn = deque()
     self.lock = BoundedSemaphore(size)
     self.keepalive = keepalive
     # Exceptions list must be in tuple form to be caught properly
     self.exc_classes = tuple(exc_classes)
     for i in xrange(size):
         self.lock.acquire()
     for i in xrange(size):
         greenlet = TestRunGreenlet(self._addOne)
         greenlet.start_later(self.SPAWN_FREQUENCY * i)
     if self.keepalive:
         greenlet = TestRunGreenlet(self._keepalive_periodic)
         greenlet.start_later()
Example #12
    def __init__(self, amqp_info):
        Greenlet.__init__(self)
        ExampleConsumer.__init__(self, amqp_info)
        self.callinfos = {}
        self.send_queue = Queue()
        self.lock = BoundedSemaphore(1)
        self.send_greenlet = None

        self.handle_stoping = False
        self.send_stop_evt = Event()

        self.timeout_stop_evt = Event()

        self.timeout_handle_greenlet = gevent.spawn(self.on_timeout_handle)
        self.timeout_handle_greenlet.start()
Example #13
 def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None):
     """
      :param exc_classes: tuple of exceptions on which the connection should be dropped
     """
     self.size = size
     self.connections = deque()
     self.lock = BoundedSemaphore(size)
     self.keepalive = keepalive
     # Exceptions list must be in tuple form to be caught properly
     self.exc_classes = tuple(exc_classes)
     for i in xrange(size):
         self.lock.acquire()
     for i in xrange(size):
         gevent.spawn_later(self.SPAWN_FREQUENCY * i, self._add_one)
     if self.keepalive:
         gevent.spawn(self._keepalive_periodic)
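
The constructor above drains the semaphore once per slot and lets `_add_one` release it as each connection comes up. The checkout half of the pattern might look like this sketch (hedged: modeled on geventconnpool's design, not necessarily this project's code; assumes `from contextlib import contextmanager`):

    @contextmanager
    def get(self):
        self.lock.acquire()  # wait until a connection is available
        conn = self.connections.popleft()
        try:
            yield conn
        except self.exc_classes:
            # Connection looks broken: drop it and schedule a replacement.
            # The semaphore slot stays taken until _add_one releases it.
            gevent.spawn_later(self.SPAWN_FREQUENCY, self._add_one)
            raise
        except Exception:
            self.connections.append(conn)
            self.lock.release()
            raise
        else:
            self.connections.append(conn)
            self.lock.release()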
Example #14
    def __init__(self,
                 account,
                 heartbeat=1,
                 refresh_frequency=30,
                 poll_frequency=30,
                 retry_fail_classes=[],
                 refresh_flags_max=2000):
        self.refresh_frequency = refresh_frequency
        self.poll_frequency = poll_frequency
        self.syncmanager_lock = BoundedSemaphore(1)
        self.refresh_flags_max = refresh_flags_max
        self.saved_remote_folders = None

        provider_supports_condstore = account.provider_info.get(
            'condstore', False)
        account_supports_condstore = getattr(account, 'supports_condstore',
                                             False)
        if provider_supports_condstore or account_supports_condstore:
            self.sync_engine_class = CondstoreFolderSyncEngine
        else:
            self.sync_engine_class = FolderSyncEngine

        self.folder_monitors = Group()

        BaseMailSyncMonitor.__init__(self, account, heartbeat,
                                     retry_fail_classes)
Example #15
File: gmail.py Project: wmv/inbox
    def sync(self):
        sync_folder_names_ids = self.prepare_sync()
        thread_download_lock = BoundedSemaphore(1)
        for folder_name, folder_id in sync_folder_names_ids:
            log.info('initializing folder sync')
            thread = GmailFolderSyncEngine(
                thread_download_lock, self.account_id, folder_name, folder_id,
                self.email_address, self.provider_name, self.poll_frequency,
                self.syncmanager_lock, self.refresh_flags_max,
                self.retry_fail_classes)
            thread.start()
            self.folder_monitors.add(thread)
            if thread.should_block:
                while not self._thread_polling(thread) and \
                        not self._thread_finished(thread) and \
                        not thread.ready():
                    sleep(self.heartbeat)

            # Allow individual folder sync monitors to shut themselves down
            # after completing the initial sync.
            if self._thread_finished(thread) or thread.ready():
                log.info('folder sync finished/killed',
                         folder_name=thread.folder_name)
                # NOTE: Greenlet is automatically removed from the group.

        self.folder_monitors.join()
Example #16
 def _get_greenlet_lock(self):
     if not hasattr(self._thread_local, 'greenlet_lock'):
         greenlet_lock = self._thread_local.greenlet_lock = BoundedSemaphore(
             1)
     else:
         greenlet_lock = self._thread_local.greenlet_lock
     return greenlet_lock
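
A hedged usage sketch for the helper above (assumes `self._thread_local = threading.local()` was created in `__init__`; `_do_send` is a hypothetical helper). Because the semaphore lives in thread-local storage, greenlets within one OS thread serialize on it while separate threads never contend:

    def send(self, data):
        with self._get_greenlet_lock():
            self._do_send(data)  # hypothetical: the actual I/O call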
Example #17
    def __init__(
        self,
        syncback_id,
        process_number,
        total_processes,
        poll_interval=1,
        retry_interval=120,
        num_workers=NUM_PARALLEL_ACCOUNTS,
        batch_size=20,
        fetch_batch_size=100,
    ):
        self.process_number = process_number
        self.total_processes = total_processes
        self.poll_interval = poll_interval
        self.retry_interval = retry_interval

        # Number of log entries to fetch before merging/de-duplication to
        # determine which records need to be processed.
        self.fetch_batch_size = fetch_batch_size

        # Number of log entries to process in a batch.
        self.batch_size = batch_size

        self.keep_running = True
        self.workers = gevent.pool.Group()
        # Dictionary account_id -> semaphore to serialize action syncback for
        # any particular account.
        # TODO(emfree): We really only need to serialize actions that operate
        # on any given object. But IMAP actions are already effectively
        # serialized by using an IMAP connection pool of size 1, so it doesn't
        # matter too much.
        self.account_semaphores = defaultdict(lambda: BoundedSemaphore(1))
        # This SyncbackService performs syncback for only and all the accounts
        # on shards it is responsible for; shards are divided up between
        # running SyncbackServices.
        self.log = logger.new(component="syncback")
        syncback_assignments = {
            int(k): v
            for k, v in config.get("SYNCBACK_ASSIGNMENTS", {}).items()
        }
        if syncback_id in syncback_assignments:
            self.keys = [
                key for key in engine_manager.engines
                if key in syncback_assignments[syncback_id] and key %
                total_processes == process_number
            ]
        else:
            self.log.warn("No shards assigned to syncback server",
                          syncback_id=syncback_id)
            self.keys = []

        self.log = logger.new(component="syncback")
        self.num_workers = num_workers
        self.num_idle_workers = 0
        self.worker_did_finish = gevent.event.Event()
        self.worker_did_finish.clear()
        self.task_queue = Queue()
        self.running_action_ids = set()
        gevent.Greenlet.__init__(self)
Example #18
    def __init__(self, account, heartbeat=1, refresh_frequency=30):
        self.refresh_frequency = refresh_frequency
        self.syncmanager_lock = BoundedSemaphore(1)
        self.saved_remote_folders = None
        self.sync_engine_class = FolderSyncEngine

        self.folder_monitors = Group()

        BaseMailSyncMonitor.__init__(self, account, heartbeat)
Example #19
 def __init__(self, redis_uve_server, logger):
     self._local_redis_uve = redis_uve_server
     self._redis_uve_list = []
     self._logger = logger
     self._sem = BoundedSemaphore(1)
     self._redis = None
     if self._local_redis_uve:
         self._redis = redis.StrictRedis(self._local_redis_uve[0],
                                         self._local_redis_uve[1], db=1)
Example #20
 def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None):
     self.size = size
     self.conn = deque()
     self.lock = BoundedSemaphore(size)
     self.keepalive = keepalive
     # Exceptions list must be in tuple form to be caught properly
     self.exc_classes = tuple(exc_classes)
     # http://stackoverflow.com/a/31136897/357578
     try:
         xrange
     except NameError:
         xrange = range
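      # (works on Python 2 and 3 alike: the assignment makes `xrange` local,
      # so the bare reference raises UnboundLocalError, a NameError subclass)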
     for i in xrange(size):
         self.lock.acquire()
     for i in xrange(size):
         gevent.spawn_later(self.SPAWN_FREQUENCY * i, self._addOne)
     if self.keepalive:
         gevent.spawn(self._keepalive_periodic)
Example #21
 def __init__(self, socket, client_class):
     self.socket = None
     self.mutex = BoundedSemaphore(1)
     self.socket = TSocket.TSocket(unix_socket=socket)
     self.transport = TTransport.TBufferedTransport(self.socket)
     self.protocol = TBinaryProtocol.TBinaryProtocolAccelerated(
         self.transport)
     self.client = client_class(self.protocol)
     self.connected = False
Example #22
    def __init__(self,
                 discServer,
                 zk_srv_ip='127.0.0.1',
                 zk_srv_port='2181',
                 reset_config=False):
        self._reset_config = reset_config
        self._service_id_to_type = {}
        self._ds = discServer
        self._zk_sem = BoundedSemaphore(1)

        self._zk = kazoo.client.KazooClient(
            hosts='%s:%s' % (zk_srv_ip, zk_srv_port),
            timeout=120,
            handler=kazoo.handlers.gevent.SequentialGeventHandler())

        # connect
        while True:
            try:
                self._zk.start()
                break
            except gevent.event.Timeout as e:
                self.syslog(
                    'Failed to connect with Zookeeper - will retry in a second'
                )
                gevent.sleep(1)
        self.syslog('Connected to ZooKeeper!')

        if reset_config:
            self._zk.delete("/services", recursive=True)
            self._zk.delete("/clients", recursive=True)
            self._zk.delete("/publishers", recursive=True)
            self._zk.delete("/election", recursive=True)

        # create default paths
        self.create_node("/services")
        self.create_node("/clients")
        self.create_node("/publishers")
        self.create_node("/election")

        self._debug = {
            'subscription_expires': 0,
            'oos_delete': 0,
            'db_excepts': 0,
        }
Example #23
 def __init__(self, poll_interval=1, chunk_size=100):
     semaphore_factory = lambda: BoundedSemaphore(CONCURRENCY_LIMIT)
     self.semaphore_map = defaultdict(semaphore_factory)
     self.keep_running = True
     self.running = False
     self.log = logger.new(component='syncback')
     self.poll_interval = poll_interval
     self.chunk_size = chunk_size
     self._scheduled_actions = set()
     gevent.Greenlet.__init__(self)
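
A hedged sketch of how such a per-account semaphore map is usually consumed (`account_id` and `action` are illustrative names, not this project's API):

    def _execute(self, account_id, action):
        # defaultdict creates the semaphore on first access for this account
        with self.semaphore_map[account_id]:
            action()  # at most CONCURRENCY_LIMIT actions per account at once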
Example #24
 def load(self, spider):
     redis_args = dict(host=self.settings.REDIS_URL,
                       port=self.settings.REDIS_PORT,
                       db=self.settings.REDIS_DB)
     if hasattr(self.settings, 'NAMESPACE'):
         redis_args['namespace'] = self.settings.NAMESPACE
     else:
         redis_args['namespace'] = spider.name
     self.url_set = redisds.Set('urlset', **redis_args)
     self.url_queue = redisds.Queue('urlqueue',
                                    serializer=Pickle(),
                                    **redis_args)
     self.runner = redisds.Lock("runner:%s" % uuid4().hex, **redis_args)
     self.runners = redisds.Dict("runner:*", **redis_args)
     self.stats = redisds.Dict("stats:*", **redis_args)
     self.lock = BoundedSemaphore(1)
     self.running_count = 0
     self.allowed_urls_regex = self.get_regex(spider.allowed_domains)
     self.spider = spider
     self.start()
Example #25
    def __init__(self, account_id, num_connections, readonly):
        logger.info('Creating Crispin connection pool for account {} with {} '
                    'connections'.format(account_id, num_connections))
        self.account_id = account_id
        self.readonly = readonly
        self._new_conn_lock = BoundedSemaphore(1)
        self._set_account_info()

        # 1200s == 20min
        geventconnpool.ConnectionPool.__init__(
            self, num_connections, keepalive=1200,
            exc_classes=CONN_DISCARD_EXC_CLASSES)
Example #26
 def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None):
     self.size = size
     self.conn = deque()
     self.lock = BoundedSemaphore(size)
     self.keepalive = keepalive
     # Exceptions list must be in tuple form to be caught properly
     self.exc_classes = tuple(exc_classes)
     for i in xrange(size):
         self.lock.acquire()
     for i in xrange(size):
         gevent.spawn_later(self.SPAWN_FREQUENCY*i, self._addOne)
     if self.keepalive:
         gevent.spawn(self._keepalive_periodic)
Example #27
 def __init__(self, poll_interval=1, retry_interval=30):
     self.log = logger.new(component='syncback')
     self.keep_running = True
     self.poll_interval = poll_interval
     self.retry_interval = retry_interval
     self.workers = gevent.pool.Group()
     # Dictionary account_id -> semaphore to serialize action syncback for
     # any particular account.
     # TODO(emfree): We really only need to serialize actions that operate
     # on any given object. But IMAP actions are already effectively
     # serialized by using an IMAP connection pool of size 1, so it doesn't
     # matter too much.
     self.account_semaphores = defaultdict(lambda: BoundedSemaphore(1))
     gevent.Greenlet.__init__(self)
Example #28
class Cache(dict):

    semaphore = BoundedSemaphore()

    def __setattr__(self, key, value):
        with self.semaphore:
            self[key] = value

    def __delattr__(self, key):
        with self.semaphore:
            try:
                del self[key]
            except KeyError as k:
                raise AttributeError(k)
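
A short usage sketch for the class above; note that `semaphore` is a class attribute, so every Cache instance shares the same lock:

    cache = Cache()
    cache.greeting = 'hello'   # __setattr__ stores the item under the semaphore
    print cache['greeting']    # plain dict reads stay lock-free
    del cache.greeting         # __delattr__ deletes under the semaphore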
Example #29
    def __init__(self, args):

        self.thread = None
        self.socket = None
        self.cli_args = args
        self.force_debug = args['debug']

        # timers, counters and triggers
        self.pokemon_caught = 0
        self._error_counter = 0
        self._error_threshold = 10
        self.start_time = time()
        self.exp_start = None
        self._heartbeat_number = 1  # setting this back to one because we already parse a full heartbeat during login!
        self._heartbeat_frequency = 3  # 1 = always
        self._full_heartbeat_frequency = 15  # 10 = as before (every 10th heartbeat)
        self._farm_mode_triggered = False

        # objects, order is important!
        self.config = None
        self._load_config()

        self.log = create_logger(__name__, self.config.log_colors["poketrainer".upper()])

        self._open_socket()

        self.player = Player({})
        self.player_stats = PlayerStats({})
        self.inventory = Inventory(self, [])
        self.fort_walker = FortWalker(self)
        self.map_objects = MapObjects(self)
        self.poke_catcher = PokeCatcher(self)
        self.incubate = Incubate(self)
        self.evolve = Evolve(self)
        self.release = Release(self)
        self.sniper = Sniper(self)

        self._origPosF = (0, 0, 0)
        self.api = None
        self._load_api()

        # config values that might be changed during runtime
        self.step_size = self.config.step_size
        self.should_catch_pokemon = self.config.should_catch_pokemon

        # threading / locking
        self.sem = BoundedSemaphore(1)  # gevent
        self.persist_lock = False
        self.locker = None
Example #30
    def __init__(self,
                 cluster,
                 nodes,
                 pool_size=10,
                 retries=10,
                 retry_back_off_multiplier=2,
                 retry_interval_sec=2):
        # type: (str, Dict[str, Tuple[str, int]], int, int, int, int) -> None
        """
        Initializes the client
        :param cluster: Identifier of the cluster
        :type cluster: str
        :param nodes: Dict with all node sockets. {name of the node: (ip of node, port of node)}
        :type nodes: dict
        :param pool_size: Number of clients to keep in the pool
        :type pool_size: int
        :param retries: Number of retries to do
        :type retries: int
        :param retry_back_off_multiplier: Back off multiplier. Multiplies the retry_interval_sec with this number ** retry
        :type retry_back_off_multiplier: int
        :param retry_interval_sec: Seconds to wait before retrying. Exponentially increases with every retry.
        :type retry_interval_sec: int
        """
        self.pool_size = pool_size
        self._pyrakoon_args = (cluster, nodes, retries,
                               retry_back_off_multiplier, retry_interval_sec)
        self._sequences = {}

        self._lock = BoundedSemaphore(pool_size)

        self._clients = deque()
        for i in xrange(pool_size):
            # No clients as of yet. Decrease the count
            self._lock.acquire()
        for i in xrange(pool_size):
            gevent.spawn_later(self.SPAWN_FREQUENCY * i, self._add_client)
Example #31
 def __init__(self, redis_uve_server, logger, redis_password=None):
     self._local_redis_uve = redis_uve_server
     self._redis_uve_list = []
     self._logger = logger
     self._sem = BoundedSemaphore(1)
     self._redis = None
     self._redis_password = redis_password
     if self._local_redis_uve:
         self._redis = redis.StrictRedis(self._local_redis_uve[0],
                                         self._local_redis_uve[1],
                                         password=self._redis_password,
                                         db=1)
     self._uve_reverse_map = {}
     for h,m in UVE_MAP.iteritems():
         self._uve_reverse_map[m] = h
Example #32
    def __init__(self, discServer, zk_srv_ip='127.0.0.1',
                 zk_srv_port='2181', reset_config=False):
        self._reset_config = reset_config
        self._service_id_to_type = {}
        self._ds = discServer
        self._zk_sem = BoundedSemaphore(1)
        self._election = None
        self._restarting = False

        zk_endpts = []
        for ip in zk_srv_ip.split(','):
            zk_endpts.append('%s:%s' %(ip, zk_srv_port))

        # logging
        logger = logging.getLogger('discovery-service')
        logger.setLevel(logging.WARNING)
        handler = logging.handlers.RotatingFileHandler('/var/log/contrail/discovery_zk.log', maxBytes=1024*1024, backupCount=10)
        log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
        handler.setFormatter(log_format)
        logger.addHandler(handler)

        self._zk = kazoo.client.KazooClient(
            hosts=','.join(zk_endpts),
            handler=kazoo.handlers.gevent.SequentialGeventHandler(),
            logger=logger)
        self._logger = logger

        # connect
        self.connect()

        if reset_config:
            self.delete_node("/services", recursive=True)
            self.delete_node("/clients", recursive=True)
            self.delete_node("/election", recursive=True)

        # create default paths
        self.create_node("/services")
        self.create_node("/clients")
        self.create_node("/election")

        self._debug = {
            'subscription_expires': 0,
            'oos_delete': 0,
            'db_excepts': 0,
        }
Example #33
    def __init__(self, discServer, zk_srv_ip='127.0.0.1',
                 zk_srv_port='2181', reset_config=False):
        self._reset_config = reset_config
        self._service_id_to_type = {}
        self._ds = discServer
        self._zk_sem = BoundedSemaphore(1)

        zk_endpts = []
        for ip in zk_srv_ip.split(','):
            zk_endpts.append('%s:%s' %(ip, zk_srv_port))

        self._zk = kazoo.client.KazooClient(
            hosts=','.join(zk_endpts), timeout=120,
            handler=kazoo.handlers.gevent.SequentialGeventHandler())

        # connect
        while True:
            try:
                self._zk.start()
                break
            except gevent.event.Timeout as e:
                self.syslog(
                    'Failed to connect with Zookeeper - will retry in a second')
                gevent.sleep(1)
            # ZooKeeper may also throw an exception due to delays in master election
            except Exception as e:
                self.syslog('%s - will retry in a second' % (str(e)))
                gevent.sleep(1)
        self.syslog('Connected to ZooKeeper!')

        if reset_config:
            self._zk.delete("/services", recursive=True)
            self._zk.delete("/clients", recursive=True)
            self._zk.delete("/election", recursive=True)

        # create default paths
        self.create_node("/services")
        self.create_node("/clients")
        self.create_node("/election")

        self._debug = {
            'subscription_expires': 0,
            'oos_delete': 0,
            'db_excepts': 0,
        }
Example #34
    def process_parallely(self, year, last_page):
        """
            Process the pages of a particular year in parallel

            @param year (Number): year
            @param last_page (Number): Last page number
        """

        threads = []
        params = dict(PARAMS)
        params["year"] = year
        for page in xrange(1, last_page):
            params["page"] = page
            url = "https://www.codechef.com/submissions?" + urlencode(params)
            threads.append(gevent.spawn(self.process_page, year, page, url))

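        # NOTE: gevent.spawn only schedules the greenlets; they don't run
        # until this greenlet yields (inside joinall below), so assigning
        # the semaphore here is still early enough for them to see it.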
        self.semaphore = BoundedSemaphore(len(threads))
        gevent.joinall(threads)
Example #35
 def load(self, spider):
     redis_args = dict(host=self.settings.REDIS_URL,
                       port=self.settings.REDIS_PORT,
                       db=self.settings.REDIS_DB)
     if hasattr(self.settings, 'NAMESPACE'):
         redis_args['namespace'] = self.settings.NAMESPACE
     else:
         redis_args['namespace'] = spider.name
     self.url_set = redisds.Set('urlset', **redis_args)
     self.url_queue = redisds.Queue('urlqueue', serializer=Pickle(),
                                    **redis_args)
     self.runner = redisds.Lock("runner:%s" % uuid4().hex, **redis_args)
     self.runners = redisds.Dict("runner:*", **redis_args)
     self.stats = redisds.Dict("stats:*", **redis_args)
     self.lock = BoundedSemaphore(1)
     self.running_count = 0
     self.allowed_urls_regex = self.get_regex(spider.allowed_domains)
     self.spider = spider
     self.start()
Example #36
    def __init__(self, input, output, func, nthreads=800, maxcnt=None):
        """@todo: to be defined

        :param input: @todo
        :param output: @todo
        :param func: @todo
        :param qname: @todo

        """
        self._func = func
        self._input = input
        self._output = output
        self._lock = BoundedSemaphore(1)
        self._pool = Pool(nthreads)
        self._nthreads = nthreads
        self._true = 0
        self._false = 0
        self._nogeo = 0
        self._notruth = 0
        self.maxcnt = maxcnt
Example #37
    def __init__(self,
                 account,
                 heartbeat=1,
                 refresh_frequency=30,
                 syncback_frequency=5):
        # DEPRECATED.
        # TODO[k]: Remove after sync-syncback integration deploy is complete.
        self.refresh_frequency = refresh_frequency
        self.syncmanager_lock = BoundedSemaphore(1)
        self.saved_remote_folders = None
        self.sync_engine_class = FolderSyncEngine
        self.folder_monitors = Group()

        self.delete_handler = None

        self.syncback_handler = None
        self.folder_sync_signals = {}
        self.syncback_timestamp = None
        self.syncback_frequency = syncback_frequency

        BaseMailSyncMonitor.__init__(self, account, heartbeat)
Example #38
 def __init__(self, local_ip, local_port, redis_sentinel_client, service):
     self._redis_sentinel_client = redis_sentinel_client
     self._local_ip = local_ip
     self._local_port = int(local_port)
     self._service = service
     self._uve_server_task = None
     self._redis = None
     self._redis_master_info = None
     self._master_last_updated = None
     self._num_mastership_changes = 0
     self._sem = BoundedSemaphore(1)
     if redis_sentinel_client is not None:
         self._redis_master_info = \
             redis_sentinel_client.get_redis_master(service)
         if self._redis_master_info is not None:
             self._num_mastership_changes += 1
             self._master_last_updated = UTCTimestampUsec()
             self._redis = redis.StrictRedis(self._redis_master_info[0],
                                             self._redis_master_info[1],
                                             db=0)
             self._uve_server_task = gevent.spawn(self.run)
Example #39
    def __init__(self, discServer, zk_srv_ip='127.0.0.1',
                 zk_srv_port='2181', reset_config=False):
        self._reset_config = reset_config
        self._service_id_to_type = {}
        self._ds = discServer
        self._zk_sem = BoundedSemaphore(1)

        self._zk = kazoo.client.KazooClient(
            hosts='%s:%s' % (zk_srv_ip, zk_srv_port), timeout=120,
            handler=kazoo.handlers.gevent.SequentialGeventHandler())

        # connect
        while True:
            try:
                self._zk.start()
                break
            except gevent.event.Timeout as e:
                self.syslog(
                    'Failed to connect with Zookeeper - will retry in a second')
                gevent.sleep(1)
        self.syslog('Connected to ZooKeeper!')

        if reset_config:
            self._zk.delete("/services", recursive=True)
            self._zk.delete("/clients", recursive=True)
            self._zk.delete("/publishers", recursive=True)
            self._zk.delete("/election", recursive=True)

        # create default paths
        self.create_node("/services")
        self.create_node("/clients")
        self.create_node("/publishers")
        self.create_node("/election")

        self._debug = {
            'subscription_expires': 0,
            'oos_delete': 0,
            'db_excepts': 0,
        }
Example #40
    def __init__(self, module, server_list):
        # logging
        logger = logging.getLogger(module)
        logger.setLevel(logging.INFO)
        try:
            handler = logging.handlers.RotatingFileHandler('/var/log/contrail/' + module + '-zk.log', maxBytes=10*1024*1024, backupCount=5)
        except IOError:
            print "Cannot open log file in /var/log/contrail/"
        else:
            log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
            handler.setFormatter(log_format)
            logger.addHandler(handler)

        self._zk_client = \
            kazoo.client.KazooClient(
                server_list,
                handler=kazoo.handlers.gevent.SequentialGeventHandler(),
                logger=logger)

        self._logger = logger
        self._election = None
        self._zk_sem = BoundedSemaphore(1)
        self.connect()
Example #41
class UVEServer(object):

    def __init__(self, redis_uve_server, logger, redis_password=None):
        self._local_redis_uve = redis_uve_server
        self._redis_uve_list = []
        self._logger = logger
        self._sem = BoundedSemaphore(1)
        self._redis = None
        self._redis_password = redis_password
        if self._local_redis_uve:
            self._redis = redis.StrictRedis(self._local_redis_uve[0],
                                            self._local_redis_uve[1],
                                            password=self._redis_password,
                                            db=1)
        self._uve_reverse_map = {}
        for h,m in UVE_MAP.iteritems():
            self._uve_reverse_map[m] = h

    #end __init__

    def update_redis_uve_list(self, redis_uve_list):
        self._redis_uve_list = redis_uve_list
    # end update_redis_uve_list

    def fill_redis_uve_info(self, redis_uve_info):
        redis_uve_info.ip = self._local_redis_uve[0]
        redis_uve_info.port = self._local_redis_uve[1]
        try:
            self._redis.ping()
        except redis.exceptions.ConnectionError:
            redis_uve_info.status = 'DisConnected'
        else:
            redis_uve_info.status = 'Connected'
    #end fill_redis_uve_info

    @staticmethod
    def merge_previous(state, key, typ, attr, prevdict):
        print "%s New    val is %s" % (attr, prevdict)
        nstate = copy.deepcopy(state)
        if UVEServer._is_agg_item(prevdict):
            count = int(state[key][typ][attr]['previous']['#text'])
            count += int(prevdict['#text'])
            nstate[key][typ][attr]['previous']['#text'] = str(count)

        if UVEServer._is_agg_list(prevdict):
            sname = ParallelAggregator.get_list_name(
                state[key][typ][attr]['previous'])
            count = len(prevdict['list'][sname]) + \
                len(state[key][typ][attr]['previous']['list'][sname])
            nstate[key][typ][attr]['previous']['list'][sname].extend(
                prevdict['list'][sname])
            nstate[key][typ][attr]['previous']['list']['@size'] = \
                str(count)

            tstate = {}
            tstate[typ] = {}
            tstate[typ][attr] = copy.deepcopy(
                nstate[key][typ][attr]['previous'])
            nstate[key][typ][attr]['previous'] =\
                ParallelAggregator.consolidate_list(tstate, typ, attr)

        print "%s Merged val is %s"\
            % (attr, nstate[key][typ][attr]['previous'])
        return nstate

    def run(self):
        lck = False
        while True:
            try:
                k, value = self._redis.brpop("DELETED")
                self._sem.acquire()
                lck = True
                self._logger.debug("%s del received for " % value)
                # value is of the format: 
                # DEL:<key>:<src>:<node-type>:<module>:<instance-id>:<message-type>:<seqno>
                info = value.rsplit(":", 6)
                key = info[0].split(":", 1)[1]
                typ = info[5]

                existing = self._redis.hgetall("PREVIOUS:" + key + ":" + typ)
                tstate = {}
                tstate[key] = {}
                tstate[key][typ] = {}
                state = UVEServer.convert_previous(existing, tstate, key, typ)

                for attr, hval in self._redis.hgetall(value).iteritems():
                    snhdict = xmltodict.parse(hval)

                    if UVEServer._is_agg_list(snhdict[attr]):
                        if snhdict[attr]['list']['@size'] == "0":
                            continue
                        if snhdict[attr]['list']['@size'] == "1":
                            sname = ParallelAggregator.get_list_name(
                                snhdict[attr])
                            if not isinstance(
                                    snhdict[attr]['list'][sname], list):
                                snhdict[attr]['list'][sname] = \
                                    [snhdict[attr]['list'][sname]]

                    if (attr not in state[key][typ]):
                        # There is no existing entry for the UVE
                        vstr = json.dumps(snhdict[attr])
                    else:
                        # There is an existing entry
                        # Merge the new entry with the existing one
                        state = UVEServer.merge_previous(
                            state, key, typ, attr, snhdict[attr])
                        vstr = json.dumps(state[key][typ][attr]['previous'])

                    # Store the merged result back in the database
                    self._redis.sadd("PUVES:" + typ, key)
                    self._redis.sadd("PTYPES:" + key, typ)
                    self._redis.hset("PREVIOUS:" + key + ":" + typ, attr, vstr)

                self._redis.delete(value)
            except redis.exceptions.ResponseError:
                # send redis connection-down msg; could be because of authentication
                ConnectionState.update(conn_type = ConnectionType.REDIS,
                    name = 'UVE', status = ConnectionStatus.DOWN,
                    message = 'UVE result : Connection Error',
                    server_addrs = ['%s:%d' % (self._local_redis_uve[0],
                    self._local_redis_uve[1])])
                sys.exit()
            except redis.exceptions.ConnectionError:
                if lck:
                    self._sem.release()
                    lck = False
                gevent.sleep(5)
            else:
                if lck:
                    self._sem.release()
                    lck = False
                self._logger.debug("Deleted %s" % value)
                self._logger.debug("UVE %s Type %s" % (key, typ))

    @staticmethod
    def _is_agg_item(attr):
        if attr['@type'] in ['i8', 'i16', 'i32', 'i64', 'byte',
                             'u8', 'u16', 'u32', 'u64']:
            if '@aggtype' in attr:
                if attr['@aggtype'] == "counter":
                    return True
        return False

    @staticmethod
    def _is_agg_list(attr):
        if attr['@type'] in ['list']:
            if '@aggtype' in attr:
                if attr['@aggtype'] == "append":
                    return True
        return False

    @staticmethod
    def convert_previous(existing, state, key, typ, afilter=None):
        # Take the existing delete record, and load it into the state dict
        for attr, hval in existing.iteritems():
            hdict = json.loads(hval)

            if afilter is not None and len(afilter):
                if attr not in afilter:
                    continue

            # When recording deleted attributes, only record those
            # for which delete-time aggregation is needed
            if UVEServer._is_agg_item(hdict):
                if (typ not in state[key]):
                    state[key][typ] = {}
                if (attr not in state[key][typ]):
                    state[key][typ][attr] = {}
                state[key][typ][attr]["previous"] = hdict

            # For lists that require delete-time aggregation, we need
            # to normalize lists of size 1, and ignore those of size 0
            if UVEServer._is_agg_list(hdict):
                if hdict['list']['@size'] != "0":
                    if (typ not in state[key]):
                        state[key][typ] = {}
                    if (attr not in state[key][typ]):
                        state[key][typ][attr] = {}
                    state[key][typ][attr]["previous"] = hdict
                if hdict['list']['@size'] == "1":
                    sname = ParallelAggregator.get_list_name(hdict)
                    if not isinstance(hdict['list'][sname], list):
                        hdict['list'][sname] = [hdict['list'][sname]]

        return state

    def get_part(self, part):
        uves = {}
        for redis_uve in self._redis_uve_list:
            gen_uves = {}
            redish = redis.StrictRedis(host=redis_uve[0],
                                       port=redis_uve[1], db=1)
            for elems in redish.smembers("PART2KEY:" + str(part)): 
                info = elems.split(":", 5)
                gen = info[0] + ":" + info[1] + ":" + info[2] + ":" + info[3]
                key = info[5]
                if gen not in gen_uves:
                    gen_uves[gen] = {}
                gen_uves[gen][key] = 0
            uves[redis_uve[0] + ":" + str(redis_uve[1])] = gen_uves
        return uves
        
    def get_uve(self, key, flat, filters=None, multi=False, is_alarm=False, base_url=None):
        filters = filters or {}
        sfilter = filters.get('sfilt')
        mfilter = filters.get('mfilt')
        tfilter = filters.get('cfilt')
        ackfilter = filters.get('ackfilt')
        state = {}
        state[key] = {}
        statdict = {}
        for redis_uve in self._redis_uve_list:
            redish = redis.StrictRedis(host=redis_uve[0],
                                       port=redis_uve[1],
                                       password=self._redis_password, db=1)
            try:
                qmap = {}
                origins = redish.smembers("ALARM_ORIGINS:" + key)
                if not is_alarm:
                    origins = origins.union(redish.smembers("ORIGINS:" + key))
                for origs in origins:
                    info = origs.rsplit(":", 1)
                    sm = info[0].split(":", 1)
                    source = sm[0]
                    if sfilter is not None:
                        if sfilter != source:
                            continue
                    mdule = sm[1]
                    if mfilter is not None:
                        if mfilter != mdule:
                            continue
                    dsource = source + ":" + mdule

                    typ = info[1]
                    if tfilter is not None:
                        if typ not in tfilter:
                            continue

                    odict = redish.hgetall("VALUES:" + key + ":" + origs)

                    afilter_list = set()
                    if tfilter is not None:
                        afilter_list = tfilter[typ]
                    for attr, value in odict.iteritems():
                        if len(afilter_list):
                            if attr not in afilter_list:
                                continue

                        if typ not in state[key]:
                            state[key][typ] = {}

                        if value[0] == '<':
                            snhdict = xmltodict.parse(value)
                            if snhdict[attr]['@type'] == 'list':
                                sname = ParallelAggregator.get_list_name(
                                        snhdict[attr])
                                if snhdict[attr]['list']['@size'] == '0':
                                    continue
                                elif snhdict[attr]['list']['@size'] == '1':
                                    if not isinstance(
                                        snhdict[attr]['list'][sname], list):
                                        snhdict[attr]['list'][sname] = [
                                            snhdict[attr]['list'][sname]]
                                if typ == 'UVEAlarms' and attr == 'alarms' and \
                                    ackfilter is not None:
                                    alarms = []
                                    for alarm in snhdict[attr]['list'][sname]:
                                        ack_attr = alarm.get('ack')
                                        if ack_attr:
                                            ack = ack_attr['#text']
                                        else:
                                            ack = 'false'
                                        if ack == ackfilter:
                                            alarms.append(alarm)
                                    if not len(alarms):
                                        continue
                                    snhdict[attr]['list'][sname] = alarms
                                    snhdict[attr]['list']['@size'] = \
                                        str(len(alarms))
                        else:
                            if not flat:
                                continue
                            if typ not in statdict:
                                statdict[typ] = {}
                            statdict[typ][attr] = []
                            statsattr = json.loads(value)
                            for elem in statsattr:
                                edict = {}
                                if elem["rtype"] == "list":
                                    elist = redish.lrange(elem["href"], 0, -1)
                                    for eelem in elist:
                                        jj = json.loads(eelem).items()
                                        edict[jj[0][0]] = jj[0][1]
                                elif elem["rtype"] == "zset":
                                    elist = redish.zrange(
                                        elem["href"], 0, -1, withscores=True)
                                    for eelem in elist:
                                        tdict = json.loads(eelem[0])
                                        tval = long(tdict["ts"])
                                        dt = datetime.datetime.utcfromtimestamp(
                                            float(tval) / 1000000)
                                        tms = (tval % 1000000) / 1000
                                        tstr = dt.strftime('%Y %b %d %H:%M:%S')
                                        edict[tstr + "." + str(tms)] = eelem[1]
                                elif elem["rtype"] == "hash":
                                    elist = redish.hgetall(elem["href"])
                                    edict = elist

                                statdict[typ][attr].append(
                                    {elem["aggtype"]: edict})
                            continue

                        # print "Attr %s Value %s" % (attr, snhdict)
                        if attr not in state[key][typ]:
                            state[key][typ][attr] = {}
                        if dsource in state[key][typ][attr]:
                            print "Found Dup %s:%s:%s:%s:%s = %s" % \
                                (key, typ, attr, source, mdule, state[
                                key][typ][attr][dsource])
                        state[key][typ][attr][dsource] = snhdict[attr]

                if sfilter is None and mfilter is None:
                    for ptyp in redish.smembers("PTYPES:" + key):
                        afilter = None
                        if tfilter is not None:
                            if ptyp not in tfilter:
                                continue
                            afilter = tfilter[ptyp]
                        existing = redish.hgetall("PREVIOUS:" + key + ":" + ptyp)
                        nstate = UVEServer.convert_previous(
                            existing, state, key, ptyp, afilter)
                        state = copy.deepcopy(nstate)

                pa = ParallelAggregator(state, self._uve_reverse_map)
                rsp = pa.aggregate(key, flat, base_url)
            except redis.exceptions.ConnectionError:
                self._logger.error("Failed to connect to redis-uve: %s:%d" \
                                   % (redis_uve[0], redis_uve[1]))
            except Exception as e:
                self._logger.error("Exception: %s" % e)
                return {}
            else:
                self._logger.debug("Computed %s" % key)

        for k, v in statdict.iteritems():               
            if k in rsp:                
                mp = dict(v.items() + rsp[k].items())           
                statdict[k] = mp

        return dict(rsp.items() + statdict.items())
    # end get_uve

    def get_uve_regex(self, key):
        regex = ''
        if key[0] != '*':
            regex += '^'
        regex += key.replace('*', '.*?')
        if key[-1] != '*':
            regex += '$'
        return re.compile(regex)
    # end get_uve_regex

    def multi_uve_get(self, table, flat, filters=None, is_alarm=False, base_url=None):
        # get_uve_list cannot handle attribute names very efficiently,
        # so we don't pass them here
        uve_list = self.get_uve_list(table, filters, False, is_alarm)
        for uve_name in uve_list:
            uve_val = self.get_uve(
                table + ':' + uve_name, flat, filters, True, is_alarm, base_url)
            if uve_val == {}:
                continue
            else:
                uve = {'name': uve_name, 'value': uve_val}
                yield uve
    # end multi_uve_get

    def get_uve_list(self, table, filters=None, parse_afilter=False,
                     is_alarm=False):
        filters = filters or {}
        uve_list = set()
        kfilter = filters.get('kfilt')
        if kfilter is not None:
            patterns = set()
            for filt in kfilter:
                patterns.add(self.get_uve_regex(filt))
        for redis_uve in self._redis_uve_list:
            redish = redis.StrictRedis(host=redis_uve[0],
                                       port=redis_uve[1],
                                       password=self._redis_password, db=1)
            try:
                # For UVE queries, we want to read both the UVE and Alarm tables
                entries = redish.smembers('ALARM_TABLE:' + table)
                if not is_alarm:
                    entries = entries.union(redish.smembers('TABLE:' + table))
                for entry in entries:
                    info = (entry.split(':', 1)[1]).rsplit(':', 5)
                    uve_key = info[0]
                    if kfilter is not None:
                        kfilter_match = False
                        for pattern in patterns:
                            if pattern.match(uve_key):
                                kfilter_match = True
                                break
                        if not kfilter_match:
                            continue
                    src = info[1]
                    sfilter = filters.get('sfilt')
                    if sfilter is not None:
                        if sfilter != src:
                            continue
                    module = info[2]+':'+info[3]+':'+info[4]
                    mfilter = filters.get('mfilt')
                    if mfilter is not None:
                        if mfilter != module:
                            continue
                    typ = info[5]
                    tfilter = filters.get('cfilt')
                    if tfilter is not None:
                        if typ not in tfilter:
                            continue
                    if parse_afilter:
                        if tfilter is not None and len(tfilter[typ]):
                            valkey = "VALUES:" + table + ":" + uve_key + \
                                ":" + src + ":" + module + ":" + typ
                            for afilter in tfilter[typ]:
                                attrval = redish.hget(valkey, afilter)
                                if attrval is not None:
                                    break
                            if attrval is None:
                                continue
                    uve_list.add(uve_key)
            except redis.exceptions.ConnectionError:
                self._logger.error('Failed to connect to redis-uve: %s:%d' \
                                   % (redis_uve[0], redis_uve[1]))
            except Exception as e:
                self._logger.error('Exception: %s' % e)
                return set()
        return uve_list
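
The index entries consumed above are colon-separated Redis set members. A small sketch, on a made-up entry string, of how the split(':', 1) / rsplit(':', 5) pair recovers the fields while keeping any ':' inside the UVE key intact in info[0]:

# Hypothetical member layout; the real format comes from the collector.
entry = ('ObjectVRouterTable:host1:10.0.0.1:'
         'contrail-vrouter:0:Agent:VrouterStatsAgent')

# Drop the leading table name, then peel the last five fields off the
# right so that a UVE key containing ':' stays whole in info[0].
info = entry.split(':', 1)[1].rsplit(':', 5)
uve_key = info[0]                                  # 'host1'
src = info[1]                                      # '10.0.0.1'
module = info[2] + ':' + info[3] + ':' + info[4]   # 'contrail-vrouter:0:Agent'
typ = info[5]                                      # 'VrouterStatsAgent'
print(uve_key + ' | ' + src + ' | ' + module + ' | ' + typ)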
Example #42
0
class UVEServer(object):

    def __init__(self, local_ip, local_port, redis_sentinel_client, service):
        self._redis_sentinel_client = redis_sentinel_client
        self._local_ip = local_ip
        self._local_port = int(local_port)
        self._service = service
        self._uve_server_task = None
        self._redis = None
        self._redis_master_info = None
        self._master_last_updated = None
        self._num_mastership_changes = 0
        self._sem = BoundedSemaphore(1)
        if redis_sentinel_client is not None:
            self._redis_master_info = \
                redis_sentinel_client.get_redis_master(service)
            if self._redis_master_info is not None:
                self._num_mastership_changes += 1
                self._master_last_updated = UTCTimestampUsec()
                self._redis = redis.StrictRedis(self._redis_master_info[0],
                                                self._redis_master_info[1],
                                                db=0)
                self._uve_server_task = gevent.spawn(self.run)
    #end __init__

    def set_redis_master(self, redis_master):
        if self._redis_master_info != redis_master:
            try:
                self._sem.acquire()
                if self._redis_master_info is not None:
                    gevent.kill(self._uve_server_task)
                self._redis_master_info = redis_master
                self._num_mastership_changes += 1
                self._master_last_updated = UTCTimestampUsec()
                self._redis = redis.StrictRedis(self._redis_master_info[0],
                                                self._redis_master_info[1],
                                                db=0)
                self._uve_server_task = gevent.spawn(self.run)
            except Exception as e:
                print "Failed to set_redis_master: %s" % e
                raise
            finally:
                self._sem.release()

    #end set_redis_master

    def reset_redis_master(self):
        if self._redis_master_info is not None:
            try:
                self._sem.acquire()
                self._redis_master_info = None
                gevent.kill(self._uve_server_task)
                self._redis = None
            except Exception as e:
                print "Failed to reset_redis_master: %s" % e
                raise
            finally:
                self._sem.release()
    #end reset_redis_master

    def fill_redis_uve_master_info(self, uve_master_info):
        if self._redis_master_info is not None:
            uve_master_info.ip = self._redis_master_info[0]
            uve_master_info.port = int(self._redis_master_info[1])
            try:
                self._redis.ping()
            except redis.exceptions.ConnectionError:
                uve_master_info.status = 'DisConnected'
            else:
                uve_master_info.status = 'Connected'
        uve_master_info.master_last_updated = self._master_last_updated
        uve_master_info.num_of_mastership_changes = self._num_mastership_changes
    #end fill_redis_uve_master_info

    @staticmethod
    def merge_previous(state, key, typ, attr, prevdict):
        print "%s New    val is %s" % (attr, prevdict)
        nstate = copy.deepcopy(state)
        if UVEServer._is_agg_item(prevdict):
            count = int(state[key][typ][attr]['previous']['#text'])
            count += int(prevdict['#text'])
            nstate[key][typ][attr]['previous']['#text'] = str(count)

        if UVEServer._is_agg_list(prevdict):
            sname = ParallelAggregator.get_list_name(
                state[key][typ][attr]['previous'])
            count = len(prevdict['list'][sname]) + \
                len(state[key][typ][attr]['previous']['list'][sname])
            nstate[key][typ][attr]['previous']['list'][sname].extend(
                prevdict['list'][sname])
            nstate[key][typ][attr]['previous']['list']['@size'] = \
                str(count)

            tstate = {}
            tstate[typ] = {}
            tstate[typ][attr] = copy.deepcopy(
                nstate[key][typ][attr]['previous'])
            nstate[key][typ][attr]['previous'] =\
                ParallelAggregator.consolidate_list(tstate, typ, attr)

        print "%s Merged val is %s"\
            % (attr, nstate[key][typ][attr]['previous'])
        return nstate
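
For counter attributes the merge above just adds the xmltodict '#text' payloads of the stored and incoming snapshots. A tiny sketch with hypothetical values:

# xmltodict renders <counter type="u64" aggtype="counter">5</counter>
# roughly like this (values are made up for illustration):
previous = {'@type': 'u64', '@aggtype': 'counter', '#text': '5'}
incoming = {'@type': 'u64', '@aggtype': 'counter', '#text': '7'}

merged = dict(previous)
merged['#text'] = str(int(previous['#text']) + int(incoming['#text']))
print(merged['#text'])  # '12'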

    def run(self):
        lck = False
        while True:
            try:
                k, value = self._redis.brpop("DELETED")
                self._sem.acquire()
                lck = True
                print "%s del received for " % value
                info = value.rsplit(":", 4)
                key = info[0].split(":", 1)[1]
                typ = info[3]

                existing = self._redis.hgetall("PREVIOUS:" + key + ":" + typ)
                tstate = {}
                tstate[key] = {}
                tstate[key][typ] = {}
                state = UVEServer.convert_previous(existing, tstate, key, typ)

                for attr, hval in self._redis.hgetall(value).iteritems():
                    snhdict = xmltodict.parse(hval)

                    if UVEServer._is_agg_list(snhdict[attr]):
                        if snhdict[attr]['list']['@size'] == "0":
                            continue
                        if snhdict[attr]['list']['@size'] == "1":
                            sname = ParallelAggregator.get_list_name(
                                snhdict[attr])
                            if not isinstance(
                                    snhdict[attr]['list'][sname], list):
                                snhdict[attr]['list'][sname] = \
                                    [snhdict[attr]['list'][sname]]

                    if (attr not in state[key][typ]):
                        # There is no existing entry for the UVE
                        vstr = json.dumps(snhdict[attr])
                    else:
                        # There is an existing entry
                        # Merge the new entry with the existing one
                        state = UVEServer.merge_previous(
                            state, key, typ, attr, snhdict[attr])
                        vstr = json.dumps(state[key][typ][attr]['previous'])

                    # Store the merged result back in the database
                    self._redis.sadd("PUVES:" + typ, key)
                    self._redis.sadd("PTYPES:" + key, typ)
                    self._redis.hset("PREVIOUS:" + key + ":" + typ, attr, vstr)

                self._redis.delete(value)
            except redis.exceptions.ConnectionError:
                if lck:
                    self._sem.release()
                    lck = False
                gevent.sleep(5)
            else:
                if lck:
                    self._sem.release()
                    lck = False
                print "Deleted %s" % value
                print "UVE %s Type %s" % (key, typ)

    @staticmethod
    def _is_agg_item(attr):
        if attr['@type'] in ['i8', 'i16', 'i32', 'i64', 'byte',
                             'u8', 'u16', 'u32', 'u64']:
            if '@aggtype' in attr:
                if attr['@aggtype'] == "counter":
                    return True
        return False

    @staticmethod
    def _is_agg_list(attr):
        if attr['@type'] in ['list']:
            if '@aggtype' in attr:
                if attr['@aggtype'] == "append":
                    return True
        return False
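
A standalone sketch of the two checks on hand-built attribute dicts ('@type' and '@aggtype' are how xmltodict exposes XML attributes):

def is_agg_item(attr):
    # Integer counters need delete-time aggregation.
    return (attr['@type'] in ('i8', 'i16', 'i32', 'i64', 'byte',
                              'u8', 'u16', 'u32', 'u64')
            and attr.get('@aggtype') == 'counter')

def is_agg_list(attr):
    # Append-type lists are concatenated across snapshots.
    return attr['@type'] == 'list' and attr.get('@aggtype') == 'append'

print(is_agg_item({'@type': 'u32', '@aggtype': 'counter'}))  # True
print(is_agg_list({'@type': 'list', '@aggtype': 'append'}))  # True
print(is_agg_item({'@type': 'string'}))                      # False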

    @staticmethod
    def convert_previous(existing, state, key, typ, afilter=None):
        # Take the existing delete record, and load it into the state dict
        for attr, hval in existing.iteritems():
            hdict = json.loads(hval)

            if afilter is not None and len(afilter):
                if attr not in afilter:
                    continue

            # When recording deleted attributes, only record those
            # for which delete-time aggregation is needed
            if UVEServer._is_agg_item(hdict):
                if (typ not in state[key]):
                    state[key][typ] = {}
                if (attr not in state[key][typ]):
                    state[key][typ][attr] = {}
                state[key][typ][attr]["previous"] = hdict

            # For lists that require delete-time aggregation, we need
            # to normalize lists of size 1, and ignore those of size 0
            if UVEServer._is_agg_list(hdict):
                if hdict['list']['@size'] != "0":
                    if (typ not in state[key]):
                        state[key][typ] = {}
                    if (attr not in state[key][typ]):
                        state[key][typ][attr] = {}
                    state[key][typ][attr]["previous"] = hdict
                if hdict['list']['@size'] == "1":
                    sname = ParallelAggregator.get_list_name(hdict)
                    if not isinstance(hdict['list'][sname], list):
                        hdict['list'][sname] = [hdict['list'][sname]]

        return state
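
The size-1 normalization above exists because xmltodict collapses a single-element list into a bare dict. A sketch on a hand-built value (the element-name lookup mimics what ParallelAggregator.get_list_name presumably does):

# Hypothetical shape of a one-element list as parsed by xmltodict.
hdict = {'@type': 'list',
         'list': {'@type': 'struct', '@size': '1',
                  'ElemType': {'field': 'value'}}}

# The lone non-attribute key names the element type.
sname = [k for k in hdict['list'] if not k.startswith('@')][0]
if not isinstance(hdict['list'][sname], list):
    # Wrap the single element so later code can always iterate.
    hdict['list'][sname] = [hdict['list'][sname]]
print(hdict['list']['ElemType'])  # [{'field': 'value'}]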

    def get_uve(self, key, flat, sfilter=None, mfilter=None, tfilter=None):
        try:
            state = {}
            state[key] = {}

            redish = redis.StrictRedis(host=self._local_ip,
                                       port=self._local_port, db=0)
            statdict = {}
            for origs in redish.smembers("ORIGINS:" + key):
                info = origs.rsplit(":", 2)
                source = info[0]
                if sfilter is not None:
                    if sfilter != source:
                        continue
                mdule = info[1]
                if mfilter is not None:
                    if mfilter != mdule:
                        continue
                dsource = source + ":" + mdule

                typ = info[2]
                if tfilter is not None:
                    if typ not in tfilter:
                        continue

                odict = redish.hgetall("VALUES:" + key + ":" + origs)

                empty = True
                afilter_list = set()
                if tfilter is not None:
                    afilter_list = tfilter[typ]
                for attr, value in odict.iteritems():
                    if len(afilter_list):
                        if attr not in afilter_list:
                            continue

                    if empty:
                        empty = False
                        # print "Src %s, Mod %s, Typ %s" % (source, mdule, typ)
                        if typ not in state[key]:
                            state[key][typ] = {}

                    if value[0] == '<':
                        snhdict = xmltodict.parse(value)
                        if snhdict[attr]['@type'] == 'list':
                            if snhdict[attr]['list']['@size'] == '0':
                                continue
                            elif snhdict[attr]['list']['@size'] == '1':
                                sname = ParallelAggregator.get_list_name(
                                    snhdict[attr])
                                if not isinstance(
                                        snhdict[attr]['list'][sname], list):
                                    snhdict[attr]['list'][sname] = [
                                        snhdict[attr]['list'][sname]]
                    else:
                        if not flat:
                            continue

                        if typ not in statdict:
                            statdict[typ] = {}
                        statdict[typ][attr] = []
                        statsattr = json.loads(value)
                        for elem in statsattr:
                            #import pdb; pdb.set_trace()
                            edict = {}
                            if elem["rtype"] == "list":
                                elist = redish.lrange(elem["href"], 0, -1)
                                for eelem in elist:
                                    jj = json.loads(eelem).items()
                                    edict[jj[0][0]] = jj[0][1]
                            elif elem["rtype"] == "zset":
                                elist = redish.zrange(
                                    elem["href"], 0, -1, withscores=True)
                                for eelem in elist:
                                    tdict = json.loads(eelem[0])
                                    tval = long(tdict["ts"])
                                    dt = datetime.datetime.utcfromtimestamp(
                                        float(tval) / 1000000)
                                    tms = (tval % 1000000) / 1000
                                    tstr = dt.strftime('%Y %b %d %H:%M:%S')
                                    edict[tstr + "." + str(tms)] = eelem[1]
                            elif elem["rtype"] == "hash":
                                elist = redish.hgetall(elem["href"])
                                edict = elist
                            statdict[typ][attr].append(
                                {elem["aggtype"]: edict})
                        continue

                    # print "Attr %s Value %s" % (attr, snhdict)
                    if attr not in state[key][typ]:
                        state[key][typ][attr] = {}
                    if dsource in state[key][typ][attr]:
                        print "Found Dup %s:%s:%s:%s:%s = %s" % \
                            (key, typ, attr, source, mdule, state[
                             key][typ][attr][dsource])
                    state[key][typ][attr][dsource] = snhdict[attr]

            if sfilter is None and mfilter is None:
                for ptyp in redish.smembers("PTYPES:" + key):
                    afilter = None
                    if tfilter is not None:
                        if ptyp not in tfilter:
                            continue
                        afilter = tfilter[ptyp]
                    existing = redish.hgetall("PREVIOUS:" + key + ":" + ptyp)
                    nstate = UVEServer.convert_previous(
                        existing, state, key, ptyp, afilter)
                    state = copy.deepcopy(nstate)

            # print
            # print "Result is as follows"
            # print json.dumps(state, indent = 4, sort_keys = True)
            pa = ParallelAggregator(state)
            rsp = pa.aggregate(key, flat)
        except Exception as e:
            print e
            return {}
        else:
            print "Computed %s" % key

        for k, v in statdict.iteritems():
            if k in rsp:
                mp = dict(v.items() + rsp[k].items())
                statdict[k] = mp

        return dict(rsp.items() + statdict.items())
    # end get_uve

    def get_uve_regex(self, key):
        regex = ''
        if key[0] != '*':
            regex += '^'
        regex += key.replace('*', '.*?')
        if key[-1] != '*':
            regex += '$'
        return re.compile(regex)
    # end get_uve_regex

    def multi_uve_get(self, key, flat, kfilter, sfilter, mfilter, tfilter):
        tbl_uve = key.split(':', 1)
        table = tbl_uve[0]

        # get_uve_list cannot handle attribute names very efficiently,
        # so we don't pass them here
        k1_filter = [tbl_uve[1]]
        uve_list = self.get_uve_list(table, k1_filter, sfilter,
                                     mfilter, tfilter, False)
        if kfilter is not None:
            patterns = set()
            for filt in kfilter:
                patterns.add(self.get_uve_regex(filt))
        for uve_name in uve_list:
            if kfilter is not None:
                kfilter_match = False
                for pattern in patterns:
                    if pattern.match(uve_name):
                        kfilter_match = True
                        break
                if not kfilter_match:
                    continue
            uve_val = self.get_uve(
                table + ':' + uve_name, flat,
                sfilter, mfilter, tfilter)
            if uve_val == {}:
                continue
            else:
                uve = {'name': uve_name, 'value': uve_val}
                yield uve
    # end multi_uve_get

    def get_uve_list(self, key, kfilter, sfilter,
                     mfilter, tfilter, parse_afilter):
        uve_list = set()
        if kfilter is not None:
            patterns = set()
            for filt in kfilter:
                patterns.add(self.get_uve_regex(filt))
        try:
            redish = redis.StrictRedis(host=self._local_ip,
                                       port=self._local_port, db=0)
            for entry in redish.smembers("TABLE:" + key):
                info = (entry.split(':', 1)[1]).rsplit(':', 3)
                uve_key = info[0]
                if kfilter is not None:
                    kfilter_match = False
                    for pattern in patterns:
                        if pattern.match(uve_key):
                            kfilter_match = True
                            break
                    if not kfilter_match:
                        continue
                src = info[1]
                if sfilter is not None:
                    if sfilter != src:
                        continue
                mdule = info[2]
                if mfilter is not None:
                    if mfilter != mdule:
                        continue
                typ = info[3]
                if tfilter is not None:
                    if typ not in tfilter:
                        continue
                if parse_afilter:
                    if tfilter is not None and len(tfilter[typ]):
                        valkey = "VALUES:" + key + ":" + uve_key + ":" + \
                                 src + ":" + mdule + ":" + typ
                        for afilter in tfilter[typ]:
                            attrval = redish.hget(valkey, afilter)
                            if attrval is not None:
                                break
                        if attrval is None:
                            continue

                uve_list.add(uve_key)
        except Exception as e:
            print e
            return set()
        else:
            return uve_list
Example #43
0
class DiscoveryZkClient(object):

    def __init__(self, discServer, zk_srv_ip='127.0.0.1',
                 zk_srv_port='2181', reset_config=False):
        self._reset_config = reset_config
        self._service_id_to_type = {}
        self._ds = discServer
        self._zk_sem = BoundedSemaphore(1)

        self._zk = kazoo.client.KazooClient(
            hosts='%s:%s' % (zk_srv_ip, zk_srv_port), timeout=120,
            handler=kazoo.handlers.gevent.SequentialGeventHandler())

        # connect
        while True:
            try:
                self._zk.start()
                break
            except gevent.event.Timeout as e:
                self.syslog(
                    'Failed to connect with ZooKeeper - will retry in a second')
                gevent.sleep(1)
        self.syslog('Connected to ZooKeeper!')

        if reset_config:
            self._zk.delete("/services", recursive=True)
            self._zk.delete("/clients", recursive=True)
            self._zk.delete("/election", recursive=True)

        # create default paths
        self.create_node("/services")
        self.create_node("/clients")
        self.create_node("/election")

        self._debug = {
            'subscription_expires': 0,
            'oos_delete': 0,
            'db_excepts': 0,
        }
    # end __init__

    def start_background_tasks(self):
        # spawn loop to expire subscriptions
        gevent.Greenlet.spawn(self.inuse_loop)

        # spawn loop to expire services
        gevent.Greenlet.spawn(self.service_oos_loop)
    # end

    def syslog(self, log_msg):
        self._ds.syslog(log_msg)
    # end

    def get_debug_stats(self):
        return self._debug
    # end

    def create_node(self, path, value='', makepath=False):
        try:
            self._zk.set(path, value)
        except kazoo.exceptions.NoNodeException:
            self._zk.create(path, value, makepath=makepath)
            self.syslog('create %s' % (path))
    # end create_node
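
create_node is effectively an upsert: try set() first and fall back to create() only when the node is missing, so existing data is overwritten instead of raising NodeExistsError. A hedged usage sketch (the server address and path are hypothetical):

import kazoo.client
import kazoo.exceptions

zk = kazoo.client.KazooClient(hosts='127.0.0.1:2181')
zk.start()
try:
    zk.set('/services/demo', b'payload')
except kazoo.exceptions.NoNodeException:
    zk.create('/services/demo', b'payload', makepath=True)
zk.stop()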

    def service_entries(self):
        service_types = self._zk.get_children('/services')
        for service_type in service_types:
            services = self._zk.get_children('/services/%s' % (service_type))
            for service_id in services:
                data, stat = self._zk.get(
                    '/services/%s/%s' % (service_type, service_id))
                entry = json.loads(data)
                yield(entry)

    def subscriber_entries(self):
        service_types = self._zk.get_children('/clients')
        for service_type in service_types:
            subscribers = self._zk.get_children('/clients/%s' % (service_type))
            for client_id in subscribers:
                data, stat = self._zk.get(
                    '/clients/%s/%s' % (service_type, client_id))
                cl_entry = json.loads(data)
                yield((client_id, service_type))
    # end

    def update_service(self, service_type, service_id, data):
        path = '/services/%s/%s' % (service_type, service_id)
        self.create_node(path, value=json.dumps(data), makepath=True)
    # end

    def insert_service(self, service_type, service_id, data):

        # ensure election path for service type exists
        path = '/election/%s' % (service_type)
        self.create_node(path)

        # preclude duplicate service entry
        sid_set = set()

        # prevent background task from deleting node under our nose
        self._zk_sem.acquire()
        seq_list = self._zk.get_children(path)
        for sequence in seq_list:
            sid, stat = self._zk.get(
                '/election/%s/%s' % (service_type, sequence))
            sid_set.add(sid)
        self._zk_sem.release()
        if service_id not in sid_set:
            path = '/election/%s/node-' % (service_type)
            pp = self._zk.create(
                path, service_id, makepath=True, sequence=True)
            pat = path + "(?P<id>.*$)"
            mch = re.match(pat, pp)
            seq = mch.group('id')
            data['sequence'] = seq
            self.syslog('ST %s, SID %s not found! Added with sequence %s' %
                        (service_type, service_id, seq))

        self.update_service(service_type, service_id, data)
    # end insert_service
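
ZooKeeper appends a monotonic suffix to sequential nodes and returns the full path from create(); the sequence is then recovered with a regex, as above. A standalone sketch using a made-up return value:

import re

path = '/election/ifmap-server/node-'
pp = path + '0000000007'   # hypothetical path returned by a sequential create()

mch = re.match(path + '(?P<id>.*$)', pp)
seq = mch.group('id')
print(seq)  # '0000000007'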

    def delete_service(self, service_type, service_id):
        if self.lookup_subscribers(service_type, service_id):
            return

        path = '/services/%s/%s' %(service_type, service_id)
        self._zk.delete(path)

        # purge in-memory cache - ideally we are not supposed to know about this
        self._ds.delete_pub_data(service_id)

        # delete service node if all services gone
        path = '/services/%s' %(service_type)
        if self._zk.get_children(path):
            return
        self._zk.delete(path)
    #end delete_service

    def lookup_service(self, service_type, service_id=None):
        if not self._zk.exists('/services/%s' % (service_type)):
            return None
        if service_id:
            try:
                data, stat = self._zk.get(
                    '/services/%s/%s' % (service_type, service_id))
                return json.loads(data)
            except kazoo.exceptions.NoNodeException:
                return None
        else:
            r = []
            services = self._zk.get_children('/services/%s' % (service_type))
            for service_id in services:
                entry = self.lookup_service(service_type, service_id)
                r.append(entry)
            return r
    # end lookup_service

    def query_service(self, service_type):
        path = '/election/%s' % (service_type)
        if not self._zk.exists(path):
            return None
        seq_list = self._zk.get_children(path)
        seq_list = sorted(seq_list)

        r = []
        for sequence in seq_list:
            service_id, stat = self._zk.get(
                '/election/%s/%s' % (service_type, sequence))
            entry = self.lookup_service(service_type, service_id)
            r.append(entry)
        return r
    # end

    # TODO use include_data available in new versions of kazoo
    # tree structure /services/<service-type>/<service-id>
    def get_all_services(self):
        r = []
        service_types = self._zk.get_children('/services')
        for service_type in service_types:
            services = self.lookup_service(service_type)
            r.extend(services)
        return r
    # end

    def insert_client(self, service_type, service_id, client_id, blob, ttl):
        data = {'ttl': ttl, 'blob': blob}

        path = '/services/%s/%s/%s' % (service_type, service_id, client_id)
        self.create_node(path, value=json.dumps(data))

        path = '/clients/%s/%s/%s' % (service_type, client_id, service_id)
        self.create_node(path, value=json.dumps(data), makepath=True)
    # end insert_client

    def lookup_subscribers(self, service_type, service_id):
        path = '/services/%s/%s' % (service_type, service_id)
        if not self._zk.exists(path):
            return None
        clients = self._zk.get_children(path)
        return clients
    # end lookup_subscribers

    def lookup_client(self, service_type, client_id):
        try:
            datastr, stat = self._zk.get(
                '/clients/%s/%s' % (service_type, client_id))
            data = json.loads(datastr)
            return data
        except kazoo.exceptions.NoNodeException:
            return None
    # end lookup_client

    def insert_client_data(self, service_type, client_id, cldata):
        path = '/clients/%s/%s' % (service_type, client_id)
        self.create_node(path, value=json.dumps(cldata), makepath=True)
    # end insert_client_data

    def lookup_subscription(self, service_type, client_id=None,
                            service_id=None, include_meta=False):
        if not self._zk.exists('/clients/%s' % (service_type)):
            return None
        if client_id and service_id:
            try:
                datastr, stat = self._zk.get(
                    '/clients/%s/%s/%s'
                    % (service_type, client_id, service_id))
                data = json.loads(datastr)
                blob = data['blob']
                if include_meta:
                    return (blob, stat, data['ttl'])
                else:
                    return blob
            except kazoo.exceptions.NoNodeException:
                return None
        elif client_id:
            # our version of Kazoo doesn't support include_data :-(
            try:
                services = self._zk.get_children(
                    '/clients/%s/%s' % (service_type, client_id))
                r = []
                for service_id in services:
                    datastr, stat = self._zk.get(
                        '/clients/%s/%s/%s'
                        % (service_type, client_id, service_id))
                    data = json.loads(datastr)
                    blob = data['blob']
                    r.append((service_id, blob, stat))
                # sort services in the order of assignment to this client
                # (based on modification time)
                rr = sorted(r, key=lambda entry: entry[2].last_modified)
                return [(service_id, blob) for service_id, blob, stat in rr]
            except kazoo.exceptions.NoNodeException:
                return None
        else:
            clients = self._zk.get_children('/clients/%s' % (service_type))
            return clients
    # end lookup_subscription

    # delete client subscription. Cleanup path if possible
    def delete_subscription(self, service_type, client_id, service_id):
        path = '/clients/%s/%s/%s' % (service_type, client_id, service_id)
        self._zk.delete(path)

        # delete client node if all subscriptions gone
        path = '/clients/%s/%s' % (service_type, client_id)
        if self._zk.get_children(path):
            return
        self._zk.delete(path)

        # purge in-memory cache - ideally we are not supposed to know about
        # this
        self._ds.delete_sub_data(client_id, service_type)

        # delete service node if all clients gone
        path = '/clients/%s' % (service_type)
        if self._zk.get_children(path):
            return
        self._zk.delete(path)

        path = '/services/%s/%s/%s' % (service_type, service_id, client_id)
        self._zk.delete(path)
    # end

    # TODO use include_data available in new versions of kazoo
    # tree structure /clients/<service-type>/<client-id>/<service-id>
    # return tuple (service_type, client_id, service_id)
    def get_all_clients(self):
        r = []
        service_types = self._zk.get_children('/clients')
        for service_type in service_types:
            clients = self._zk.get_children('/clients/%s' % (service_type))
            for client_id in clients:
                services = self._zk.get_children(
                    '/clients/%s/%s' % (service_type, client_id))
                rr = []
                for service_id in services:
                    (datastr, stat, ttl) = self.lookup_subscription(
                        service_type, client_id, service_id, include_meta=True)
                    rr.append(
                        (service_type, client_id, service_id,
                         stat.last_modified, ttl))
                rr = sorted(rr, key=lambda entry: entry[3])
                r.extend(rr)
        return r
    # end get_all_clients

    # reset in-use count of clients for each service
    def inuse_loop(self):
        while True:
            service_types = self._zk.get_children('/clients')
            for service_type in service_types:
                clients = self._zk.get_children('/clients/%s' % (service_type))
                for client_id in clients:
                    services = self._zk.get_children(
                        '/clients/%s/%s' % (service_type, client_id))
                    for service_id in services:
                        path = '/clients/%s/%s/%s' % (
                            service_type, client_id, service_id)
                        datastr, stat = self._zk.get(path)
                        data = json.loads(datastr)
                        now = time.time()
                        exp_t = stat.last_modified + data['ttl'] +\
                            disc_consts.TTL_EXPIRY_DELTA
                        if now > exp_t:
                            self.delete_subscription(
                                service_type, client_id, service_id)
                            svc_info = self.lookup_service(
                                service_type, service_id)
                            self.syslog(
                                'Expiring st:%s sid:%s cid:%s inuse:%d blob:%s'
                                % (service_type, service_id, client_id,
                                   svc_info['in_use'], data['blob']))
                            svc_info['in_use'] -= 1
                            self.update_service(
                                service_type, service_id, svc_info)
                            self._debug['subscription_expires'] += 1
            gevent.sleep(10)

    def service_oos_loop(self):
        while True:
            for entry in self.service_entries():
                if not self._ds.service_expired(entry, include_down=False):
                    continue
                service_type = entry['service_type']
                service_id   = entry['service_id']
                path = '/election/%s/node-%s' % (
                    service_type, entry['sequence'])
                if not self._zk.exists(path):
                    continue
                self.syslog('Deleting sequence node %s for service %s:%s' %
                        (path, service_type, service_id))
                self._zk_sem.acquire()
                self._zk.delete(path)
                self._zk_sem.release()
                self._debug['oos_delete'] += 1
            gevent.sleep(self._ds._args.hc_interval)

class DiscoveryServer():

    def __init__(self, args_str = None):
        self._homepage_links = []
        self._args = None
        self._debug = {
            'hb_stray':0, 
            'msg_pubs':0, 
            'msg_subs':0, 
            'msg_query':0, 
            'heartbeats':0, 
            'ttl_short':0,
            'policy_rr':0,
            'policy_lb':0,
            'policy_fi':0,
        }
        self._ts_use = 1
        self.short_ttl_map = {}
        self._sem = BoundedSemaphore(1)
        if not args_str:
            args_str = ' '.join(sys.argv[1:])
        self._parse_args(args_str)

        self._base_url = "http://%s:%s" %(self._args.listen_ip_addr,
                                          self._args.listen_port)
        self._pipe_start_app = None

        bottle.route('/', 'GET', self.homepage_http_get)

        # publish service
        bottle.route('/publish', 'POST', self.api_publish)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/publish', 'publish service'))

        # subscribe service
        bottle.route('/subscribe',  'POST', self.api_subscribe)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/subscribe', 'subscribe service'))

        # query service
        bottle.route('/query',  'POST', self.api_query)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/query', 'query service'))

        # collection - services
        bottle.route('/services', 'GET', self.show_all_services)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/services', 'show published services'))
        bottle.route('/services.json', 'GET', self.services_json)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/services.json', 'List published services in JSON format'))
        # show a specific service type
        bottle.route('/services/<service_type>', 'GET', self.show_all_services)

        # update service
        bottle.route('/service/<id>', 'PUT', self.service_http_put)

        # get service info
        bottle.route('/service/<id>', 'GET',  self.service_http_get)
        bottle.route('/service/<id>/brief', 'GET', self.service_brief_http_get)

        # delete (un-publish) service
        bottle.route('/service/<id>', 'DELETE', self.service_http_delete)

        # collection - clients
        bottle.route('/clients', 'GET', self.show_all_clients)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/clients', 'list all subscribers'))
        bottle.route('/clients.json', 'GET', self.clients_json)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/clients.json', 'list all subscribers in JSON format'))

        # show config
        bottle.route('/config', 'GET', self.config_http_get)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/config', 'show discovery service config'))

        # show debug
        bottle.route('/stats', 'GET', self.show_stats)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/stats', 'show discovery service stats'))

        if not self._pipe_start_app:
            self._pipe_start_app = bottle.app()

        # sandesh init
        collectors = None
        if self._args.collector and self._args.collector_port:
            collectors = [(self._args.collector, int(self._args.collector_port))]
        self._sandesh = Sandesh()
        self._sandesh.init_generator(ModuleNames[Module.DISCOVERY_SERVICE], 
                socket.gethostname(), collectors, 'discovery_context', 
                int(self._args.http_server_port), ['sandesh', 'uve'])
        self._sandesh.set_logging_params(enable_local_log = self._args.log_local,
                                         category = self._args.log_category,
                                         level = self._args.log_level,
                                         file = self._args.log_file)
        self._sandesh.trace_buffer_create(name = "dsHeartBeatTraceBuf", size = 1000)

        # DB interface initialization
        self._db_connect(self._args.reset_config)

        # build in-memory publisher data
        self._pub_data = {}
        for entry in self._db_conn.service_entries():
            self.create_pub_data(entry['service_id'], entry['service_type'])

        # build in-memory subscriber data
        self._sub_data = {}
        for (client_id, service_type) in self._db_conn.subscriber_entries():
            self.create_sub_data(client_id, service_type)

        # must be done after we have built in-memory publisher data from db.
        self._db_conn.start_background_tasks()
    #end __init__

    def create_pub_data(self, service_id, service_type):
        self._pub_data[service_id] = {
            'service_type': service_type,
            'hbcount'     : 0,
            'heartbeat'   : int(time.time()),
        }

    def create_sub_data(self, client_id, service_type):
        if not client_id in self._sub_data:
            self._sub_data[client_id] = {}
        if not service_type in self._sub_data[client_id]:
            sdata = {
                'ttl_expires' : 0,
                'heartbeat'   : int(time.time()),
            }
            self._sub_data[client_id][service_type] = sdata
        return self._sub_data[client_id][service_type]
    #end

    def delete_sub_data(self, client_id, service_type):
        if client_id in self._sub_data and service_type in self._sub_data[client_id]:
            del self._sub_data[client_id][service_type]
            if len(self._sub_data[client_id]) == 0:
                del self._sub_data[client_id]
    #end

    def get_pub_data(self, id):
        return self._pub_data.get(id, None)
    #end 

    def get_sub_data(self, id, service_type):
        if id in self._sub_data:
            return self._sub_data[id].get(service_type, None)
        return None
    #end 

    # Public Methods
    def get_args(self):
        return self._args
    #end get_args

    def get_ip_addr(self):
        return self._args.listen_ip_addr
    #end get_ip_addr

    def get_port(self):
        return self._args.listen_port
    #end get_port

    def get_pipe_start_app(self):
        return self._pipe_start_app
    #end get_pipe_start_app

    def homepage_http_get(self):
        json_links = []
        for link in self._homepage_links:
            json_links.append({'link': link.to_dict()})

        json_body = \
            { "href": self._base_url,
              "links": json_links
            }

        return json_body
    #end homepage_http_get

    # Private Methods
    def _parse_args(self, args_str):
        '''
        Eg. python discovery.py 

                     --zk_server_ip 10.1.2.3
                     --zk_server_port 9160
                     --listen_ip_addr 127.0.0.1
                     --listen_port 5998
        '''

        # Source any specified config/ini file
        # Turn off help, so we print all options in response to -h
        conf_parser = argparse.ArgumentParser(add_help = False)

        conf_parser.add_argument("-c", "--conf_file",
                                 help="Specify config file", metavar="FILE")
        args, remaining_argv = conf_parser.parse_known_args(args_str.split())

        defaults = {
            'reset_config'     : False,
            'listen_ip_addr'   : disc_consts._WEB_HOST,
            'listen_port'      : disc_consts._WEB_PORT,
            'zk_server_ip'     : disc_consts._ZK_HOST,
            'zk_server_port'   : disc_consts._ZK_PORT,
            'ttl_min'          : disc_consts._TTL_MIN,
            'ttl_max'          : disc_consts._TTL_MAX,
            'ttl_short'        : 0,
            'hc_interval'      : disc_consts.HC_INTERVAL,
            'hc_max_miss'      : disc_consts.HC_MAX_MISS,
            'collector'        : '127.0.0.1',
            'collector_port'   : '8086',
            'http_server_port' : '5997',
            'log_local'        : False,
            'log_level'        : SandeshLevel.SYS_DEBUG,
            'log_category'     : '',
            'log_file'         : Sandesh._DEFAULT_LOG_FILE
            }

        # per service options
        self.default_service_opts = {
            'policy': None,
        }
        self.service_config = {}

        if args.conf_file:
            config = ConfigParser.SafeConfigParser()
            config.read([args.conf_file])
            defaults.update(dict(config.items("DEFAULTS")))
            for section in config.sections():
                if section == "DEFAULTS":
                    continue
                self.service_config[section.lower()] = self.default_service_opts.copy()
                self.service_config[section.lower()].update(dict(config.items(section)))

        # Override with CLI options
        # Don't suppress add_help here so it will handle -h
        parser = argparse.ArgumentParser(
            # Inherit options from config_parser
            parents=[conf_parser],
            # print script description with -h/--help
            description=__doc__,
            # Don't mess with format of description
            formatter_class=argparse.RawDescriptionHelpFormatter,
            )
        parser.set_defaults(**defaults)

        parser.add_argument("--zk_server_ip",
                            help = "IP address of zk server")
        parser.add_argument("--zk_server_port", type=int,
                            help = "Port of zk server")
        parser.add_argument("--reset_config", action = "store_true",
                            help = "Warning! Destroy previous configuration and start clean")
        parser.add_argument("--listen_ip_addr",
                            help = "IP address to provide service on, default %s" %(disc_consts._WEB_HOST))
        parser.add_argument("--listen_port", type=int,
                            help = "Port to provide service on, default %s" %(disc_consts._WEB_PORT))
        parser.add_argument("--ttl_min", type=int,
                            help = "Minimum time to cache service information, default %d" %(disc_consts._TTL_MIN))
        parser.add_argument("--ttl_max", type=int,
                            help = "Maximum time to cache service information, default %d" %(disc_consts._TTL_MAX))
        parser.add_argument("--ttl_short", type=int,
                            help = "Short TTL for agressively subscription schedule")
        parser.add_argument("--hc_interval", type=int,
                            help = "Heartbeat interval, default %d seconds" %(disc_consts.HC_INTERVAL))
        parser.add_argument("--hc_max_miss", type=int,
                            help = "Maximum heartbeats to miss before declaring out-of-service, default %d" %(disc_consts.HC_MAX_MISS))
        parser.add_argument("--collector",
                            help = "IP address of VNC collector server")
        parser.add_argument("--collector_port",
                            help = "Port of VNC collector server")
        parser.add_argument("--http_server_port",
                            help = "Port of local HTTP server")
        parser.add_argument("--log_local", action = "store_true",
                            help = "Enable local logging of sandesh messages")
        parser.add_argument("--log_level",
                            help = "Severity level for local logging of sandesh messages")
        parser.add_argument("--log_category",
                            help = "Category filter for local logging of sandesh messages")
        parser.add_argument("--log_file",
                            help = "Filename for the logs to be written to")
        self._args = parser.parse_args(remaining_argv)
        self._args.conf_file = args.conf_file

    #end _parse_args

    def get_service_config(self, service_type, item):
        service = service_type.lower()
        if service in self.service_config and item in self.service_config[service]:
            return self.service_config[service][item]
        elif item in self._args.__dict__:    
            return self._args.__dict__[item]
        else:
            return None
    #end

    def _db_connect(self, reset_config):
        zk_ip = self._args.zk_server_ip
        zk_port = self._args.zk_server_port

        self._db_conn = DiscoveryZkClient(self, zk_ip, zk_port, reset_config)
    #end _db_connect

    def cleanup(self):
        pass
    #end cleanup

    def syslog(self, log_msg):
        log = sandesh.discServiceLog(log_msg = log_msg, sandesh = self._sandesh)
        log.send(sandesh = self._sandesh)

    def get_ttl_short(self, client_id, service_type, default):
        ttl = default
        if not client_id in self.short_ttl_map:
            self.short_ttl_map[client_id] = {}
        if service_type in self.short_ttl_map[client_id]:
            # keep doubling till we land in normal range
            ttl = self.short_ttl_map[client_id][service_type] * 2
            if ttl >= 32:
                ttl = 32

        self.short_ttl_map[client_id][service_type] = ttl
        return ttl
    #end
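
The short-TTL path doubles the previously handed-out value on each repeat request and caps it at 32 seconds, so a subscriber whose request could not be satisfied backs off geometrically. A sketch of the progression (the cap matches the code above; the seed is a hypothetical ttl_short of 2):

def next_short_ttl(prev, seed):
    # First request gets the configured seed; repeats double, capped at 32.
    if prev is None:
        return seed
    return min(prev * 2, 32)

ttl = None
for _ in range(6):
    ttl = next_short_ttl(ttl, 2)
    print(ttl)  # 2, 4, 8, 16, 32, 32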

    # check if service has expired (optionally return color as well)
    def service_expired(self, entry, include_color = False, include_down = True):
        pdata = self.get_pub_data(entry['service_id'])
        timedelta = datetime.timedelta(seconds = (int(time.time()) - pdata['heartbeat']))
        if timedelta.seconds <= self._args.hc_interval:
            color = "#00FF00"   # green - all good
            expired = False
        elif timedelta.seconds > self._args.hc_interval*self._args.hc_max_miss:
            color = "#FF0000"   # red - publication expired
            expired = True
        else:
            color = "#FFFF00"   # yellow - missed some heartbeats
            expired = False

        if include_down and entry['admin_state'] != 'up':
            color = "#FF0000"   # red - publication expired
            expired = True

        if include_color:
            return (expired, color, timedelta)
        else:
            return expired
    #end service_expired
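
The health check is plain elapsed-time bucketing against hc_interval and hc_max_miss. A standalone sketch with hypothetical settings:

import time

def classify(last_heartbeat, hc_interval=5, hc_max_miss=3):
    elapsed = int(time.time()) - last_heartbeat
    if elapsed <= hc_interval:
        return (False, '#00FF00')   # green: heartbeat is fresh
    if elapsed > hc_interval * hc_max_miss:
        return (True, '#FF0000')    # red: publication expired
    return (False, '#FFFF00')       # yellow: missed some heartbeats

print(classify(int(time.time()) - 2))   # (False, '#00FF00')
print(classify(int(time.time()) - 60))  # (True, '#FF0000')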

    def heartbeat(self, sig):
        self._debug['heartbeats'] += 1
        pdata = self.get_pub_data(sig)
        if not pdata:
            self.syslog('Received stray heartbeat with cookie %s' %(sig))
            self._debug['hb_stray'] += 1
            # resource not found
            return '404 Not Found'

        pdata['hbcount'] += 1
        pdata['heartbeat'] = int(time.time())

        m = sandesh.dsHeartBeat(publisher_id=sig, service_type=pdata['service_type'], sandesh=self._sandesh)
        m.trace_msg(name='dsHeartBeatTraceBuf', sandesh=self._sandesh)
        return '200 OK'
        #print 'heartbeat service "%s", sid=%s' %(entry['service_type'], sig)
    #end heartbeat

    def handle_heartbeat(self):
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.bind((self.get_ip_addr(), self.get_port()))
        while True:
            data, addr = sock.recvfrom(1024)
            """
            print ''
            print 'addr = ', addr
            print 'data = ', data
            """
            data = xmltodict.parse(data)
            status = self.heartbeat(data['cookie'])

            # send status back to publisher
            sock.sendto(status, addr)
    #end handle_heartbeat

    def api_publish(self):
        self._debug['msg_pubs'] += 1
        ctype = bottle.request.headers['content-type']
        json_req = {}
        if ctype == 'application/json':
            data = bottle.request.json
            for service_type, info in data.items():
                json_req['name'] = service_type
                json_req['info'] = info
        elif ctype == 'application/xml':
            data = xmltodict.parse(bottle.request.body.read())
            for service_type, info in data.items():
                json_req['name'] = service_type
                json_req['info'] = dict(info)
        else:
            bottle.abort(400, 'Unsupported content type')

        sig = publisher_id(bottle.request.environ['REMOTE_ADDR'], json.dumps(json_req))

        # Rx {'name': u'ifmap-server', 'info': {u'ip_addr': u'10.84.7.1', u'port': u'8443'}}
        info = json_req['info']
        service_type = json_req['name']

        entry = self._db_conn.lookup_service(service_type, service_id = sig)
        if not entry:
            entry = {
                'service_type': service_type,
                'service_id':sig, 
                'in_use'    :0, 
                'ts_use'    :0, 
                'ts_created': int(time.time()),
                'prov_state':'new', 
                'remote'    : bottle.request.environ.get('REMOTE_ADDR'),
                'info'      :info,
            }
            self.create_pub_data(sig, service_type)

        entry['admin_state'] = 'up'
        self._db_conn.insert_service(service_type, sig, entry)

        response = {'cookie': sig}
        if ctype != 'application/json':
            response = xmltodict.unparse({'response':response})

        self.syslog('publish service "%s", sid=%s, info=%s' \
            %(service_type, sig, info))

        if not service_type.lower() in self.service_config:
            self.service_config[service_type.lower()] = self.default_service_opts.copy()

        return response
    #end api_publish

    # find least loaded service instances - sort by subscriber count
    def service_list_round_robin(self, pubs):
        self._debug['policy_rr'] += 1
        return sorted(pubs, key=lambda service: service['in_use'])
    #end

    # most recently used on top of round robin - MRU first
    def service_list_load_balance(self, pubs):
        self._debug['policy_lb'] += 1
        temp = sorted(pubs, key=lambda service: service['ts_use'], reverse=True)
        return sorted(temp, key=lambda service: service['in_use'])
    #end

    # master election
    def service_list_fixed(self, pubs):
        self._debug['policy_fi'] += 1
        return sorted(pubs, key=lambda service: service['sequence'])
    #end

    def service_list(self, service_type, pubs):
        policy = self.get_service_config(service_type, 'policy')

        if policy == 'load-balance':
            f = self.service_list_load_balance
        elif policy == 'fixed':
            f = self.service_list_fixed
        else:
            f = self.service_list_round_robin

        return f(pubs)
    #end
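
A sketch of the three policies on hypothetical publisher records, showing how the sort keys differ (load-balance sorts most-recently-used first, then lets the stable sort on in_use break ties):

pubs = [
    {'service_id': 'a', 'in_use': 2, 'ts_use': 5, 'sequence': '0000000002'},
    {'service_id': 'b', 'in_use': 1, 'ts_use': 3, 'sequence': '0000000001'},
    {'service_id': 'c', 'in_use': 1, 'ts_use': 9, 'sequence': '0000000003'},
]

rr = sorted(pubs, key=lambda s: s['in_use'])              # round robin
lb = sorted(sorted(pubs, key=lambda s: s['ts_use'], reverse=True),
            key=lambda s: s['in_use'])                    # load balance
fi = sorted(pubs, key=lambda s: s['sequence'])            # fixed (election order)

print([s['service_id'] for s in rr])  # ['b', 'c', 'a']
print([s['service_id'] for s in lb])  # ['c', 'b', 'a']
print([s['service_id'] for s in fi])  # ['b', 'a', 'c']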


    def api_subscribe(self):
        self._debug['msg_subs'] += 1
        ctype = bottle.request.headers['content-type']
        if ctype == 'application/json':
            json_req = bottle.request.json
        elif ctype == 'application/xml':
            data = xmltodict.parse(bottle.request.body.read())
            json_req = {}
            for service_type, info in data.items():
                json_req['service'] = service_type
                json_req.update(dict(info))
        else:
            bottle.abort(400, 'Unsupported content type')

        service_type = json_req['service']
        client_id = json_req['client']
        count = reqcnt = int(json_req['instances'])
        client_type = json_req.get('client-type', '')

        assigned_sid = set()
        r = []
        ttl = randint(self._args.ttl_min, self._args.ttl_max)

        cl_entry = self._db_conn.lookup_client(service_type, client_id)
        if not cl_entry:
            cl_entry = {
                'instances': count,
                'remote': bottle.request.environ.get('REMOTE_ADDR'),
                'client_type': client_type,
            }
            self.create_sub_data(client_id, service_type)
            self._db_conn.insert_client_data(service_type, client_id, cl_entry)
            self.syslog('subscribe: service type=%s, client=%s:%s, ttl=%d, asked=%d' \
                %(service_type, client_type, client_id, ttl, count))

        sdata = self.get_sub_data(client_id, service_type)
        sdata['ttl_expires'] += 1

        # check existing subscriptions 
        subs = self._db_conn.lookup_subscription(service_type, client_id)
        if subs:
            for service_id, result in subs:
                entry = self._db_conn.lookup_service(service_type, service_id = service_id)
                if self.service_expired(entry):
                    #self.syslog('skipping expired service %s, info %s' %(service_id, entry['info']))
                    continue
                self._db_conn.insert_client(service_type, service_id, client_id, result, ttl)
                #self.syslog(' refresh subscription for service %s' %(service_id))
                r.append(result)
                assigned_sid.add(service_id)
                count -= 1
                if count == 0:
                    response = {'ttl': ttl, service_type: r}
                    if ctype == 'application/xml':
                        response = xmltodict.unparse({'response':response})
                    return response

        # acquire lock to update use count and TS
        self._sem.acquire()

        # lookup publishers of the service
        pubs = self._db_conn.lookup_service(service_type)
        if not pubs:
            # force client to come back soon if service expectation is not met
            if len(r) < reqcnt:
                ttl_short = self.get_service_config(service_type, 'ttl_short')
                if ttl_short:
                    ttl = self.get_ttl_short(client_id, service_type, ttl_short)
                    self._debug['ttl_short'] += 1
                    #self.syslog(' sending short ttl %d to %s' %(ttl, client_id))

            response = {'ttl': ttl, service_type: r}
            if ctype == 'application/xml':
                response = xmltodict.unparse({'response':response})
            self._sem.release()
            return response

        # eliminate inactive services
        pubs_active = [item for item in pubs if not self.service_expired(item)]
        #self.syslog(' Found %s publishers, %d active, need %d' %(len(pubs), len(pubs_active), count))

        # find least loaded instances
        pubs = self.service_list(service_type, pubs_active)

        # prepare response - send all if count 0
        for index in range(min(count, len(pubs)) if count else len(pubs)):
            entry = pubs[index]

            # skip duplicates - could happen if some publishers have quit and
            # we have already picked up others from cached information above
            if entry['service_id'] in assigned_sid:
                continue
            assigned_sid.add(entry['service_id'])

            result = entry['info']
            r.append(result)

            self.syslog(' assign service=%s, info=%s' %(entry['service_id'], json.dumps(result)))

            # don't update pubsub data if we are sending entire list
            if count == 0:
                continue

            # create client entry
            self._db_conn.insert_client(service_type, entry['service_id'], client_id, result, ttl)

            # update publisher entry 
            entry['in_use'] += 1
            entry['ts_use'] = self._ts_use
            self._ts_use += 1
            self._db_conn.update_service(service_type, entry['service_id'], entry)

        self._sem.release()

        # force client to come back soon if service expectation is not met
        if len(r) < reqcnt:
            ttl_short = self.get_service_config(service_type, 'ttl_short')
            if ttl_short:
                ttl = self.get_ttl_short(client_id, service_type, ttl_short)
                self._debug['ttl_short'] += 1
                #self.syslog(' sending short ttl %d to %s' %(ttl, client_id))

        response = {'ttl': ttl, service_type: r}
        if ctype == 'application/xml':
            response = xmltodict.unparse({'response':response})
        return response
    #end api_subscribe
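    # Reviewer sketch, not in the original source: api_subscribe above
    # releases self._sem by hand on every return path. If an exception were
    # raised between acquire() and release(), the semaphore would leak and
    # block all later subscribe calls. A try/finally form of the same
    # critical section would be safer:
    #
    #     self._sem.acquire()
    #     try:
    #         ... update in_use / ts_use and build the response ...
    #     finally:
    #         self._sem.release()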

    def api_query(self):
        self._debug['msg_query'] += 1
        ctype = bottle.request.headers['content-type']
        if ctype == 'application/json':
            json_req = bottle.request.json
        elif ctype == 'application/xml':
            data = xmltodict.parse(bottle.request.body.read())
            json_req = {}
            for service_type, info in data.items():
                json_req['service'] = service_type
                json_req.update(dict(info))
        else:
            bottle.abort(400, 'Unsupported content type %s' % ctype)

        service_type = json_req['service']
        count = int(json_req['instances'])

        r = []

        # lookup publishers of the service
        pubs = self._db_conn.query_service(service_type)
        if not pubs:
            return {service_type: r}

        # eliminate inactive services
        pubs_active = [item for item in pubs if not self.service_expired(item)]
        self.syslog(' query: Found %s publishers, %d active, need %d' %(len(pubs), len(pubs_active), count))

        # use the active publishers as-is (no load-based ordering for queries)
        pubs = pubs_active

        # prepare response - send all if count 0
        for index in range(min(count, len(pubs)) if count else len(pubs)):
            entry = pubs[index]

            result = entry['info']
            r.append(result)

            self.syslog(' assign service=%s, info=%s' %(entry['service_id'], json.dumps(result)))

            # don't update pubsub data if we are sending entire list
            if count == 0:
                continue

        response = {service_type: r}
        if ctype == 'application/xml':
            response = xmltodict.unparse({'response':response})
        return response
    #end api_query

    def show_all_services(self, service_type = None):

        rsp = output.display_user_menu()
        rsp += ' <table border="1" cellpadding="1" cellspacing="0">\n'
        rsp += '    <tr>\n'
        rsp += '        <td>Service Type</td>\n'
        rsp += '        <td>Remote IP</td>\n'
        rsp += '        <td>Service Id</td>\n'
        rsp += '        <td>Provision State</td>\n'
        rsp += '        <td>Admin State</td>\n'
        rsp += '        <td>In Use</td>\n'
        rsp += '        <td>Heartbeats</td>\n'
        rsp += '        <td>Time since last Heartbeat</td>\n'
        rsp += '    </tr>\n'

        # lookup publishers of the service
        if service_type:
            pubs = self._db_conn.lookup_service(service_type)
        else:
            pubs = self._db_conn.get_all_services()

        if not pubs:
            return rsp

        for pub in pubs:
            info = pub['info']
            pdata = self.get_pub_data(pub['service_id'])
            rsp += '    <tr>\n'
            if service_type:
                rsp += '        <td>' + pub['service_type'] + '</td>\n'
            else:
                link = do_html_url("/services/%s"%(pub['service_type']), pub['service_type'])
                rsp += '        <td>' + link + '</td>\n'
            rsp += '        <td>' + pub['remote'] + '</td>\n'
            link = do_html_url("/service/%s/brief"%(pub['service_id']), pub['service_id'])
            rsp += '        <td>' + link + '</td>\n'
            rsp += '        <td>' + pub['prov_state'] + '</td>\n'
            rsp += '        <td>' + pub['admin_state'] + '</td>\n'
            rsp += '        <td>' + str(pub['in_use']) + '</td>\n'
            rsp += '        <td>' + str(pdata['hbcount']) + '</td>\n'
            (expired, color, timedelta) = self.service_expired(pub, include_color = True)
            #status = "down" if expired else "up"
            rsp += '        <td bgcolor=%s>' %(color) + str(timedelta) + '</td>\n'
            rsp += '    </tr>\n'
        rsp += ' </table>\n'

        return rsp
    #end show_all_services

    def services_json(self, service_type = None):
        rsp = []

        # lookup publishers of the service
        if service_type:
            pubs = self._db_conn.lookup_service(service_type)
        else:
            pubs = self._db_conn.get_all_services()

        if not pubs:
            return {'services': rsp}

        for pub in pubs:
            entry = pub.copy()
            pdata = self.get_pub_data(pub['service_id'])
            entry['hbcount'] = pdata['hbcount']
            entry['status'] = "down" if self.service_expired(entry) else "up"
            entry['heartbeat'] = pdata['heartbeat']
            rsp.append(entry)
        return {'services': rsp}
    #end services_json

    def service_http_put(self, id):
        self.syslog('Update service %s' %(id))
        try:
            json_req = bottle.request.json
            service_type = json_req['service_type']
            self.syslog('Entry %s' %(json_req))
        except (ValueError, KeyError, TypeError) as e:
            bottle.abort(400, e)

        entry = self._db_conn.lookup_service(service_type, service_id = id)
        if not entry: 
            bottle.abort(405, 'Unknown service')

        if 'admin_state' in json_req:
            entry['admin_state'] = json_req['admin_state']
        self._db_conn.update_service(service_type, id, entry)

        self.syslog('update service=%s, sid=%s, info=%s' \
            %(service_type, id, entry))

        return {}
    #end service_http_put

    def service_http_delete(self, id):
        self.syslog('Delete service %s' %(id))
        pdata = self.get_pub_data(id)
        entry = self._db_conn.lookup_service(pdata['service_type'], id)
        if not entry: 
            bottle.abort(405, 'Unknown service')
        service_type = entry['service_type']

        entry['admin_state'] = 'down'
        self._db_conn.update_service(service_type, id, entry)

        self.syslog('delete service=%s, sid=%s, info=%s' \
            %(service_type, id, entry))

        return {}
    #end service_http_delete

    # return service info - meta as well as published data
    def service_http_get(self, id):
        entry = {}
        pdata = self.get_pub_data(id)
        pub = self._db_conn.lookup_service(pdata['service_type'], id)
        if pub:
            entry = pub.copy()
            entry['hbcount'] = pdata['hbcount']
            entry['status'] = "down" if self.service_expired(entry) else "up"
            entry['heartbeat'] = pdata['heartbeat']

        return entry
    #end service_http_get

    # return service info - only published data
    def service_brief_http_get(self, id):
        pdata = self.get_pub_data(id)
        entry = self._db_conn.lookup_service(pdata['service_type'], id)
        return entry['info']
    #end service_brief_http_get

    def show_all_clients(self):

        rsp = output.display_user_menu()
        rsp += ' <table border="1" cellpadding="1" cellspacing="0">\n'
        rsp += '    <tr>\n'
        rsp += '        <td>Client IP</td>\n'
        rsp += '        <td>Client Type</td>\n'
        rsp += '        <td>Client Id</td>\n'
        rsp += '        <td>Service Type</td>\n'
        rsp += '        <td>Service Id</td>\n'
        rsp += '        <td>TTL (sec)</td>\n'
        rsp += '        <td>TTL Refreshes</td>\n'
        rsp += '        <td>Last refreshed</td>\n'
        rsp += '    </tr>\n'

        # lookup subscribers of the service
        clients = self._db_conn.get_all_clients()

        if not clients:
            return rsp

        for client in clients:
            (service_type, client_id, service_id, mtime, ttl) = client
            cl_entry = self._db_conn.lookup_client(service_type, client_id)
            if cl_entry is None:
                continue
            sdata = self.get_sub_data(client_id, service_type)
            if sdata is None:
                self.syslog('Missing sdata for client %s, service %s' %(client_id, service_type))
                continue
            rsp += '    <tr>\n'
            rsp += '        <td>' + cl_entry['remote'] + '</td>\n'
            client_type = cl_entry.get('client_type', '')
            rsp += '        <td>' + client_type  + '</td>\n'
            rsp += '        <td>' + client_id    + '</td>\n'
            rsp += '        <td>' + service_type + '</td>\n'
            link = do_html_url("service/%s/brief"%(service_id), service_id)
            rsp += '        <td>' + link   + '</td>\n'
            rsp += '        <td>' + str(ttl) + '</td>\n'
            rsp += '        <td>' + str(sdata['ttl_expires']) + '</td>\n'
            rsp += '        <td>' + time.ctime(mtime) + '</td>\n'
            rsp += '    </tr>\n'
        rsp += ' </table>\n'

        return rsp
    #end show_all_clients

    def clients_json(self):

        rsp = []
        clients = self._db_conn.get_all_clients()

        if not clients:
            return {'clients': rsp}

        for client in clients:
            (service_type, client_id, service_id, mtime, ttl) = client
            cl_entry = self._db_conn.lookup_client(service_type, client_id)
            sdata = self.get_sub_data(client_id, service_type)
            # guard against stale entries, as show_all_clients does above
            if cl_entry is None or sdata is None:
                continue
            entry = cl_entry.copy()
            entry.update(sdata)

            entry['client_id'] = client_id
            entry['service_type'] = service_type
            entry['service_id'] = service_id
            entry['ttl'] = ttl
            rsp.append(entry)

        return {'clients': rsp}
    #end clients_json

    def config_http_get(self):
        """
        r = {}
        r['global'] = self._args.__dict__
        for service, config in self.service_config.items():
            r[service] = config
        return r
        """

        rsp = output.display_user_menu()

        #rsp += '<h4>Defaults:</h4>'
        rsp += '<table border="1" cellpadding="1" cellspacing="0">\n'
        rsp += '<tr><th colspan="2">Defaults</th></tr>'
        for k, v in self._args.__dict__.items():
            rsp += '<tr><td>%s</td><td>%s</td></tr>' %(k, v)
        rsp += '</table>'
        rsp += '<br>'

        for service, config in self.service_config.items():
            #rsp += '<h4>%s:</h4>' %(service)
            rsp += '<table border="1" cellpadding="1" cellspacing="0">\n'
            rsp += '<tr><th colspan="2">%s</th></tr>' %(service)
            for k, v in config.items():
                rsp += '<tr><td>%s</td><td>%s</td></tr>' %(k, v)
            rsp += '</table>'
            rsp += '<br>'
        return rsp
    #end config_http_get

    def show_stats(self):
        stats = self._debug
        stats.update(self._db_conn.get_debug_stats())

        rsp = output.display_user_menu()
        rsp += ' <table border="1" cellpadding="1" cellspacing="0">\n'
        rsp += '    <tr>\n'
        rsp += '        <td>Publishers</td>\n'
        rsp += '        <td>%s</td>\n' % len(self._pub_data)
        rsp += '    </tr>\n'
        rsp += '    <tr>\n'
        rsp += '        <td>Subscribers</td>\n'
        rsp += '        <td>%s</td>\n' % len(self._sub_data)
        rsp += '    </tr>\n'
        for k, v in stats.items():
            rsp += '    <tr>\n'
            rsp += '        <td>%s</td>\n' %(k)
            rsp += '        <td>%s</td>\n' %(v)
            rsp += '    </tr>\n'
        return rsp
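
# Hedged usage sketch, not part of the example above: a client for the /query
# endpoint this server serves. The request body mirrors exactly what
# api_query() reads from bottle.request.json ('service' and 'instances');
# the discovery host/port below are hypothetical.
import json
import urllib2

def query_service_instances(service_type, count, host='127.0.0.1', port=5998):
    body = json.dumps({'service': service_type, 'instances': count})
    req = urllib2.Request('http://%s:%s/query' % (host, port), body,
                          {'Content-Type': 'application/json'})
    # response shape, per api_query(): {<service_type>: [<info>, ...]}
    return json.loads(urllib2.urlopen(req).read())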
Example #45
0
class QueueManager(object):
    
    def __init__(self, clear_interval=DEFAULT_CLEAR_INTERVAL):
        self.__updates = {}
        self.__updates_lock = BoundedSemaphore()
        # Start clearing daemon thread.
        spawn(self._daemon_clear, interval=clear_interval)

    def _load(self, queue_id):
        """Load and return queue update tracker for queue_id."""
        self.__updates_lock.acquire()
        # Hit.
        if queue_id in self.__updates:
            self.__updates_lock.release()
            return self.__updates[queue_id].fresh()
        # Miss.
        self.__updates[queue_id] = QueueUpdate(queue_id)
        self.__updates_lock.release()
        return self.__updates[queue_id]

    def _clear(self):
        """Clear the in-memory update tracking dictionary"""
        self.__updates_lock.acquire()
        print 'Clearing'
        # Make sure anyone currently waiting reloads.
        for queue_id in self.__updates:
            self.__updates[queue_id].event.set()
            self.__updates[queue_id].event.clear()
        self.__updates = {}
        print 'Clear'
        self.__updates_lock.release()

    def _daemon_clear(self, interval):
        """Clear the update tracking dictionary every interval seconds."""
        while True:
            sleep(interval)
            self._clear()

    def edit(self, user, queue_id, room_idlist, draw):
        # Put together the list of Room objects.
        rooms = []
        print 'edit', room_idlist
        for roomid in room_idlist:
            # Room.objects.get raises DoesNotExist rather than returning None
            try:
                room = Room.objects.get(pk=roomid)
            except Room.DoesNotExist:
                return {'error':'bad room/draw'}
            if draw not in room.building.draw.all():
                return {'error':'bad room/draw'}
            rooms.append(room)
        
        update = self._load(queue_id)
        # Clear out the old list.
        queue = update.queue
        queue.queuetoroom_set.all().delete()
        # Put in new relationships
        for i, room in enumerate(rooms):
            qtr = QueueToRoom(queue=queue, room=room, ranking=i)
            qtr.save()
        # Store the update information on the queue.
        queue.version += 1
        queue.update_kind = Queue.EDIT
        queue.update_user = user
        queue.save()
        # Notify others of the update.
        update.event.set()
        update.event.clear()
        # Assemble and return response.
        room_list = []
        for room in rooms:
            room_list.append({'id':room.id, 'number':room.number,
                              'building':room.building.name})
        return {'rooms':room_list}

    def check(self, user, queue_id, last_version):
        update = self._load(queue_id)
        print user, update.queue, last_version
        if last_version == update.queue.version:
            print 'going to wait'
            print update.queue.version
            update.event.wait()
            update = self._load(queue_id)
        print 'past wait'
        queueToRooms = QueueToRoom.objects.filter(queue=update.queue).order_by('ranking')
        if not queueToRooms:
            return {'id':update.queue.version, 'rooms':[]}
        room_list = []
        if update.queue.update_user:
            netid = update.queue.update_user.netid
        else:
            netid = ''
        for qtr in queueToRooms:
            room_list.append({'id':qtr.room.id, 'number':qtr.room.number,
                              'building':qtr.room.building.name})
        return {'id':update.queue.version,
                'kind':Queue.UPDATE_KINDS[update.queue.update_kind][1],
                'netid':netid,
                'rooms':room_list}
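
# Hedged sketch, not from the original source: the event.set()/event.clear()
# pair used by edit() and _clear() above is a broadcast to long-poll waiters.
# Every greenlet blocked in event.wait() (as check() is) gets released, and
# the event is immediately re-armed for the next update. A minimal
# standalone illustration of that pattern:
from gevent import sleep, spawn
from gevent.event import Event

def demo_broadcast():
    event = Event()

    def waiter(n):
        event.wait()   # blocks, like check() waiting for a queue update
        print 'waiter %d woke up' % n

    for n in range(3):
        spawn(waiter, n)
    sleep(0)           # yield so the waiters can block on the event
    event.set()        # wake all waiters, as edit() does after saving
    event.clear()      # re-arm immediately so later waiters block again
    sleep(0)           # yield so the woken waiters can run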
Example #46
0
class ZookeeperClient(object):

    def __init__(self, module, server_list):
        # logging
        logger = logging.getLogger(module)
        logger.setLevel(logging.INFO)
        try:
            handler = logging.handlers.RotatingFileHandler(
                '/var/log/contrail/' + module + '-zk.log',
                maxBytes=10 * 1024 * 1024,
                backupCount=5)
        except IOError:
            print "Cannot open log file in /var/log/contrail/"
        else:
            log_format = logging.Formatter(
                '%(asctime)s [%(name)s]: %(message)s',
                datefmt='%m/%d/%Y %I:%M:%S %p')
            handler.setFormatter(log_format)
            logger.addHandler(handler)

        self._zk_client = \
            kazoo.client.KazooClient(
                server_list,
                handler=kazoo.handlers.gevent.SequentialGeventHandler(),
                logger=logger)

        self._logger = logger
        self._election = None
        self._zk_sem = BoundedSemaphore(1)
        self.connect()
    # end __init__

    # reconnect
    def reconnect(self):
        self._zk_sem.acquire()
        self.syslog("restart: acquired lock; state %s " % self._zk_client.state)
        # initiate restart if our state is suspended or lost
        if self._zk_client.state != "CONNECTED":
            self.syslog("restart: starting ...")
            try:
                self._zk_client.stop()
                self._zk_client.close()
                self._zk_client.start()
                self.syslog("restart: done")
            except gevent.Timeout:
                self.syslog("restart: timeout!")
            except Exception as e:
                self.syslog('restart: exception %s' % str(e))
        self._zk_sem.release()

    # start 
    def connect(self):
        while True:
            try:
                self._zk_client.start()
                break
            except gevent.Timeout:
                self.syslog(
                    'Failed to connect with Zookeeper - will retry in a second')
                gevent.sleep(1)
            # Zookeeper is also throwing exception due to delay in master election
            except Exception as e:
                self.syslog('%s - will retry in a second' % (str(e)))
                gevent.sleep(1)
        self.syslog('Connected to ZooKeeper!')
    # end

    def syslog(self, msg):
        if not self._logger:
            return
        self._logger.info(msg)
    # end syslog

    def _zk_listener(self, state):
        if state == "CONNECTED":
            self._election.cancel()
    # end

    def _zk_election_callback(self, func, *args, **kwargs):
        self._zk_client.remove_listener(self._zk_listener)
        func(*args, **kwargs)
        # Exit if running master encounters error or exception
        exit(1)
    # end

    def master_election(self, path, identifier, func, *args, **kwargs):
        self._zk_client.add_listener(self._zk_listener)
        while True:
            self._election = self._zk_client.Election(path, identifier)
            self._election.run(self._zk_election_callback, func, *args, **kwargs)
    # end master_election

    def create_node(self, path, value=None):
        try:
            if value is None:
                value = uuid.uuid4()
            self._zk_client.create(path, str(value), makepath=True)
        except (kazoo.exceptions.SessionExpiredError,
                kazoo.exceptions.ConnectionLoss):
            self.reconnect()
            return self.create_node(path, value)
        except kazoo.exceptions.NodeExistsError:
            current_value = self.read_node(path)
            if current_value == value:
                return True
            raise ResourceExistsError(path, str(current_value))
    # end create_node

    def delete_node(self, path, recursive=False):
        try:
            self._zk_client.delete(path, recursive=recursive)
        except (kazoo.exceptions.SessionExpiredError,
                kazoo.exceptions.ConnectionLoss):
            self.reconnect()
            self.delete_node(path, recursive=recursive)
        except kazoo.exceptions.NoNodeError:
            pass
    # end delete_node

    def read_node(self, path):
        try:
            value = self._zk_client.get(path)
            return value[0]
        except (kazoo.exceptions.SessionExpiredError,
                kazoo.exceptions.ConnectionLoss):
            self.reconnect()
            return self.read_node(path)
        except Exception:
            return None
    # end read_node

    def get_children(self, path):
        try:
            return self._zk_client.get_children(path)
        except (kazoo.exceptions.SessionExpiredError,
                kazoo.exceptions.ConnectionLoss):
            self.reconnect()
            return self.get_children(path)
        except Exception:
            return []
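
# Hedged usage sketch, not part of the example above: running a task under
# ZooKeeper leader election with this client. The module name, election path
# and callback below are hypothetical.
import socket
import gevent

def run_as_master():
    # only the elected master reaches this point; never return, since
    # _zk_election_callback calls exit(1) once func() returns
    while True:
        gevent.sleep(10)

zk = ZookeeperClient('my-module', '127.0.0.1:2181')
zk.master_election('/election/my-module', socket.gethostname(), run_as_master)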
Example #47
0
    def __init__(self, args_str = None):
        self._homepage_links = []
        self._args = None
        self._debug = {
            'hb_stray':0, 
            'msg_pubs':0, 
            'msg_subs':0, 
            'msg_query':0, 
            'heartbeats':0, 
            'ttl_short':0,
            'policy_rr':0,
            'policy_lb':0,
            'policy_fi':0,
        }
        self._ts_use = 1
        self.short_ttl_map = {}
        self._sem = BoundedSemaphore(1)
        if not args_str:
            args_str = ' '.join(sys.argv[1:])
        self._parse_args(args_str)

        self._base_url = "http://%s:%s" %(self._args.listen_ip_addr,
                                          self._args.listen_port)
        self._pipe_start_app = None

        bottle.route('/', 'GET', self.homepage_http_get)

        # publish service
        bottle.route('/publish', 'POST', self.api_publish)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/publish', 'publish service'))

        # subscribe service
        bottle.route('/subscribe',  'POST', self.api_subscribe)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/subscribe', 'subscribe service'))

        # query service
        bottle.route('/query',  'POST', self.api_query)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/query', 'query service'))

        # collection - services
        bottle.route('/services', 'GET', self.show_all_services)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/services', 'show published services'))
        bottle.route('/services.json', 'GET', self.services_json)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/services.json', 'List published services in JSON format'))
        # show a specific service type
        bottle.route('/services/<service_type>', 'GET', self.show_all_services)

        # update service
        bottle.route('/service/<id>', 'PUT', self.service_http_put)

        # get service info
        bottle.route('/service/<id>', 'GET',  self.service_http_get)
        bottle.route('/service/<id>/brief', 'GET', self.service_brief_http_get)

        # delete (un-publish) service
        bottle.route('/service/<id>', 'DELETE', self.service_http_delete)

        # collection - clients
        bottle.route('/clients', 'GET', self.show_all_clients)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/clients', 'list all subscribers'))
        bottle.route('/clients.json', 'GET', self.clients_json)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/clients.json', 'list all subscribers in JSON format'))

        # show config
        bottle.route('/config', 'GET', self.config_http_get)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/config', 'show discovery service config'))

        # show debug
        bottle.route('/stats', 'GET', self.show_stats)
        self._homepage_links.append(LinkObject('action',
            self._base_url + '/stats', 'show discovery service stats'))

        if not self._pipe_start_app:
            self._pipe_start_app = bottle.app()

        # sandesh init
        collectors = None
        if self._args.collector and self._args.collector_port:
            collectors = [(self._args.collector, int(self._args.collector_port))]
        self._sandesh = Sandesh()
        self._sandesh.init_generator(ModuleNames[Module.DISCOVERY_SERVICE], 
                socket.gethostname(), collectors, 'discovery_context', 
                int(self._args.http_server_port), ['sandesh', 'uve'])
        self._sandesh.set_logging_params(enable_local_log = self._args.log_local,
                                         category = self._args.log_category,
                                         level = self._args.log_level,
                                         file = self._args.log_file)
        self._sandesh.trace_buffer_create(name = "dsHeartBeatTraceBuf", size = 1000)

        # DB interface initialization
        self._db_connect(self._args.reset_config)

        # build in-memory publisher data
        self._pub_data = {}
        for entry in self._db_conn.service_entries():
            self.create_pub_data(entry['service_id'], entry['service_type'])

        # build in-memory subscriber data
        self._sub_data = {}
        for (client_id, service_type) in self._db_conn.subscriber_entries():
            self.create_sub_data(client_id, service_type)

        # must be done after we have built in-memory publisher data from db.
        self._db_conn.start_background_tasks()
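
# Hedged usage sketch, not part of the example above: driving the
# PUT /service/<id> route registered in __init__ to flip a publisher's
# admin_state. The body carries the 'service_type' and 'admin_state' keys
# that the service_http_put handler reads; host/port are hypothetical, and
# urllib2 needs get_method overridden to issue a PUT.
import json
import urllib2

def set_admin_state(service_id, service_type, state,
                    host='127.0.0.1', port=5998):
    body = json.dumps({'service_type': service_type, 'admin_state': state})
    req = urllib2.Request('http://%s:%s/service/%s' % (host, port, service_id),
                          body, {'Content-Type': 'application/json'})
    req.get_method = lambda: 'PUT'
    return urllib2.urlopen(req).read()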
Example #48
0
class UVEServer(object):

    def __init__(self, redis_uve_server, logger):
        self._local_redis_uve = redis_uve_server
        self._redis_uve_list = []
        self._logger = logger
        self._sem = BoundedSemaphore(1)
        self._redis = None
        if self._local_redis_uve:
            self._redis = redis.StrictRedis(self._local_redis_uve[0],
                                            self._local_redis_uve[1], db=1)
    #end __init__

    def update_redis_uve_list(self, redis_uve_list):
        self._redis_uve_list = redis_uve_list
    # end update_redis_uve_list 

    def fill_redis_uve_info(self, redis_uve_info):
        redis_uve_info.ip = self._local_redis_uve[0]
        redis_uve_info.port = self._local_redis_uve[1]
        try:
            self._redis.ping()
        except redis.exceptions.ConnectionError:
            redis_uve_info.status = 'DisConnected'
        else:
            redis_uve_info.status = 'Connected'
    #end fill_redis_uve_info

    @staticmethod
    def merge_previous(state, key, typ, attr, prevdict):
        print "%s New    val is %s" % (attr, prevdict)
        nstate = copy.deepcopy(state)
        if UVEServer._is_agg_item(prevdict):
            count = int(state[key][typ][attr]['previous']['#text'])
            count += int(prevdict['#text'])
            nstate[key][typ][attr]['previous']['#text'] = str(count)

        if UVEServer._is_agg_list(prevdict):
            sname = ParallelAggregator.get_list_name(
                state[key][typ][attr]['previous'])
            count = len(prevdict['list'][sname]) + \
                len(state[key][typ][attr]['previous']['list'][sname])
            nstate[key][typ][attr]['previous']['list'][sname].extend(
                prevdict['list'][sname])
            nstate[key][typ][attr]['previous']['list']['@size'] = \
                str(count)

            tstate = {}
            tstate[typ] = {}
            tstate[typ][attr] = copy.deepcopy(
                nstate[key][typ][attr]['previous'])
            nstate[key][typ][attr]['previous'] =\
                ParallelAggregator.consolidate_list(tstate, typ, attr)

        print "%s Merged val is %s"\
            % (attr, nstate[key][typ][attr]['previous'])
        return nstate

    def run(self):
        lck = False
        while True:
            try:
                k, value = self._redis.brpop("DELETED")
                self._sem.acquire()
                lck = True
                self._logger.debug("%s del received for " % value)
                # value is of the format: 
                # DEL:<key>:<src>:<node-type>:<module>:<instance-id>:<message-type>:<seqno>
                info = value.rsplit(":", 6)
                key = info[0].split(":", 1)[1]
                typ = info[5]

                existing = self._redis.hgetall("PREVIOUS:" + key + ":" + typ)
                tstate = {}
                tstate[key] = {}
                tstate[key][typ] = {}
                state = UVEServer.convert_previous(existing, tstate, key, typ)

                for attr, hval in self._redis.hgetall(value).iteritems():
                    snhdict = xmltodict.parse(hval)

                    if UVEServer._is_agg_list(snhdict[attr]):
                        if snhdict[attr]['list']['@size'] == "0":
                            continue
                        if snhdict[attr]['list']['@size'] == "1":
                            sname = ParallelAggregator.get_list_name(
                                snhdict[attr])
                            if not isinstance(
                                    snhdict[attr]['list'][sname], list):
                                snhdict[attr]['list'][sname] = \
                                    [snhdict[attr]['list'][sname]]

                    if (attr not in state[key][typ]):
                        # There is no existing entry for the UVE
                        vstr = json.dumps(snhdict[attr])
                    else:
                        # There is an existing entry
                        # Merge the new entry with the existing one
                        state = UVEServer.merge_previous(
                            state, key, typ, attr, snhdict[attr])
                        vstr = json.dumps(state[key][typ][attr]['previous'])

                    # store the (possibly merged) result back in the database
                    self._redis.sadd("PUVES:" + typ, key)
                    self._redis.sadd("PTYPES:" + key, typ)
                    self._redis.hset("PREVIOUS:" + key + ":" + typ, attr, vstr)

                self._redis.delete(value)
            except redis.exceptions.ConnectionError:
                if lck:
                    self._sem.release()
                    lck = False
                gevent.sleep(5)
            else:
                if lck:
                    self._sem.release()
                    lck = False
                self._logger.debug("Deleted %s" % value)
                self._logger.debug("UVE %s Type %s" % (key, typ))

    @staticmethod
    def _is_agg_item(attr):
        if attr['@type'] in ['i8', 'i16', 'i32', 'i64', 'byte',
                             'u8', 'u16', 'u32', 'u64']:
            if '@aggtype' in attr:
                if attr['@aggtype'] == "counter":
                    return True
        return False

    @staticmethod
    def _is_agg_list(attr):
        if attr['@type'] in ['list']:
            if '@aggtype' in attr:
                if attr['@aggtype'] == "append":
                    return True
        return False

    @staticmethod
    def convert_previous(existing, state, key, typ, afilter=None):
        # Take the existing delete record, and load it into the state dict
        for attr, hval in existing.iteritems():
            hdict = json.loads(hval)

            if afilter is not None and len(afilter):
                if attr not in afilter:
                    continue

            # When recording deleted attributes, only record those
            # for which delete-time aggregation is needed
            if UVEServer._is_agg_item(hdict):
                if (typ not in state[key]):
                    state[key][typ] = {}
                if (attr not in state[key][typ]):
                    state[key][typ][attr] = {}
                state[key][typ][attr]["previous"] = hdict

            # For lists that require delete-time aggregation, we need
            # to normailize lists of size 1, and ignore those of size 0
            if UVEServer._is_agg_list(hdict):
                if hdict['list']['@size'] != "0":
                    if (typ not in state[key]):
                        state[key][typ] = {}
                    if (attr not in state[key][typ]):
                        state[key][typ][attr] = {}
                    state[key][typ][attr]["previous"] = hdict
                if hdict['list']['@size'] == "1":
                    sname = ParallelAggregator.get_list_name(hdict)
                    if not isinstance(hdict['list'][sname], list):
                        hdict['list'][sname] = [hdict['list'][sname]]

        return state

    def get_uve(self, key, flat, sfilter=None, mfilter=None, tfilter=None, multi=False):
        state = {}
        state[key] = {}
        statdict = {}
        rsp = {}  # stays empty if no redis-uve server could be reached
        for redis_uve in self._redis_uve_list:
            redish = redis.StrictRedis(host=redis_uve[0],
                                       port=redis_uve[1], db=1)
            try:
                qmap = {}
                for origs in redish.smembers("ORIGINS:" + key):
                    info = origs.rsplit(":", 1)
                    sm = info[0].split(":", 1)
                    source = sm[0]
                    if sfilter is not None:
                        if sfilter != source:
                            continue
                    mdule = sm[1]
                    if mfilter is not None:
                        if mfilter != mdule:
                            continue
                    dsource = source + ":" + mdule

                    typ = info[1]
                    if tfilter is not None:
                        if typ not in tfilter:
                            continue

                    odict = redish.hgetall("VALUES:" + key + ":" + origs)

                    afilter_list = set()
                    if tfilter is not None:
                        afilter_list = tfilter[typ]
                    for attr, value in odict.iteritems():
                        if len(afilter_list):
                            if attr not in afilter_list:
                                continue

                        if typ not in state[key]:
                            state[key][typ] = {}

                        if value[0] == '<':
                            snhdict = xmltodict.parse(value)
                            if snhdict[attr]['@type'] == 'list':
                                if snhdict[attr]['list']['@size'] == '0':
                                    continue
                                elif snhdict[attr]['list']['@size'] == '1':
                                    sname = ParallelAggregator.get_list_name(
                                        snhdict[attr])
                                    if not isinstance(
                                        snhdict[attr]['list'][sname], list):
                                        snhdict[attr]['list'][sname] = [
                                            snhdict[attr]['list'][sname]]
                        else:
                            if not flat:
                                continue
                            if typ not in statdict:
                                statdict[typ] = {}
                            statdict[typ][attr] = []
                            statsattr = json.loads(value)
                            for elem in statsattr:
                                #import pdb; pdb.set_trace()
                                edict = {}
                                if elem["rtype"] == "list":
                                    elist = redish.lrange(elem["href"], 0, -1)
                                    for eelem in elist:
                                        jj = json.loads(eelem).items()
                                        edict[jj[0][0]] = jj[0][1]
                                elif elem["rtype"] == "zset":
                                    elist = redish.zrange(
                                        elem["href"], 0, -1, withscores=True)
                                    for eelem in elist:
                                        tdict = json.loads(eelem[0])
                                        tval = long(tdict["ts"])
                                        dt = datetime.datetime.utcfromtimestamp(
                                            float(tval) / 1000000)
                                        tms = (tval % 1000000) / 1000
                                        tstr = dt.strftime('%Y %b %d %H:%M:%S')
                                        edict[tstr + "." + str(tms)] = eelem[1]
                                elif elem["rtype"] == "hash":
                                    elist = redish.hgetall(elem["href"])
                                    edict = elist
                                elif elem["rtype"] == "query":
                                    if sfilter is None and mfilter is None and not multi:
                                        qdict = {}
                                        qdict["table"] = elem["aggtype"]
                                        qdict["select_fields"] = elem["select"]
                                        qdict["where"] =[[{"name":"name",
                                            "value":key.split(":",1)[1],
                                            "op":1}]]
                                        qmap[elem["aggtype"]] = {"query":qdict,
                                            "type":typ, "attr":attr}
                                    # For the stats query case, defer processing
                                    continue

                                statdict[typ][attr].append(
                                    {elem["aggtype"]: edict})
                            continue

                        # print "Attr %s Value %s" % (attr, snhdict)
                        if attr not in state[key][typ]:
                            state[key][typ][attr] = {}
                        if dsource in state[key][typ][attr]:
                            print "Found Dup %s:%s:%s:%s:%s = %s" % \
                                (key, typ, attr, source, mdule, state[
                                key][typ][attr][dsource])
                        state[key][typ][attr][dsource] = snhdict[attr]
                
                if len(qmap):
                    url = OpServerUtils.opserver_query_url(
                        self._local_redis_uve[0],
                        str(8081))
                    for t,q in qmap.iteritems():
                        try:
                            q["query"]["end_time"] = OpServerUtils.utc_timestamp_usec()
                            q["query"]["start_time"] = qdict["end_time"] - (3600 * 1000000)
                            json_str = json.dumps(q["query"])
                            resp = OpServerUtils.post_url_http(url, json_str, True)
                            if resp is not None:
                                edict = json.loads(resp)
                                edict = edict['value']
                                statdict[q["type"]][q["attr"]].append(
                                    {t: edict})
                        except Exception as e:
                            print "Stats Query Exception:" + str(e)
                        
                if sfilter is None and mfilter is None:
                    for ptyp in redish.smembers("PTYPES:" + key):
                        afilter = None
                        if tfilter is not None:
                            if ptyp not in tfilter:
                                continue
                            afilter = tfilter[ptyp]
                        existing = redish.hgetall("PREVIOUS:" + key + ":" + ptyp)
                        nstate = UVEServer.convert_previous(
                            existing, state, key, ptyp, afilter)
                        state = copy.deepcopy(nstate)

                pa = ParallelAggregator(state)
                rsp = pa.aggregate(key, flat)
            except redis.exceptions.ConnectionError:
                self._logger.error("Failed to connect to redis-uve: %s:%d" \
                                   % (redis_uve[0], redis_uve[1]))
            except Exception as e:
                self._logger.error("Exception: %s" % e)
                return {}
            else:
                self._logger.debug("Computed %s" % key)

        for k, v in statdict.iteritems():
            if k in rsp:
                mp = dict(v.items() + rsp[k].items())
                statdict[k] = mp

        return dict(rsp.items() + statdict.items())
    # end get_uve

    def get_uve_regex(self, key):
        regex = ''
        if key[0] != '*':
            regex += '^'
        regex += key.replace('*', '.*?')
        if key[-1] != '*':
            regex += '$'
        return re.compile(regex)
    # end get_uve_regex

    def multi_uve_get(self, key, flat, kfilter, sfilter, mfilter, tfilter):
        tbl_uve = key.split(':', 1)
        table = tbl_uve[0]

        # get_uve_list cannot handle attribute names very efficiently,
        # so we don't pass them here
        k1_filter = [tbl_uve[1]]
        uve_list = self.get_uve_list(table, k1_filter, sfilter,
                                     mfilter, tfilter, False)
        if kfilter is not None:
            patterns = set()
            for filt in kfilter:
                patterns.add(self.get_uve_regex(filt))
        for uve_name in uve_list:
            if kfilter is not None:
                kfilter_match = False
                for pattern in patterns:
                    if pattern.match(uve_name):
                        kfilter_match = True
                        break
                if not kfilter_match:
                    continue
            uve_val = self.get_uve(
                table + ':' + uve_name, flat,
                sfilter, mfilter, tfilter, True)
            if uve_val == {}:
                continue
            else:
                uve = {'name': uve_name, 'value': uve_val}
                yield uve
    # end multi_uve_get

    def get_uve_list(self, key, kfilter, sfilter,
                     mfilter, tfilter, parse_afilter):
        uve_list = set()
        if kfilter is not None:
            patterns = set()
            for filt in kfilter:
                patterns.add(self.get_uve_regex(filt))
        for redis_uve in self._redis_uve_list:
            redish = redis.StrictRedis(host=redis_uve[0],
                                       port=redis_uve[1], db=1)
            try:
                for entry in redish.smembers("TABLE:" + key):
                    info = (entry.split(':', 1)[1]).rsplit(':', 5)
                    uve_key = info[0]
                    if kfilter is not None:
                        kfilter_match = False
                        for pattern in patterns:
                            if pattern.match(uve_key):
                                kfilter_match = True
                                break
                        if not kfilter_match:
                            continue
                    src = info[1]
                    if sfilter is not None:
                        if sfilter != src:
                            continue
                    node_type = info[2]
                    mdule = info[3]
                    if mfilter is not None:
                        if mfilter != mdule:
                            continue
                    inst = info[4] 
                    typ = info[5]
                    if tfilter is not None:
                        if typ not in tfilter:
                            continue
                    if parse_afilter:
                        if tfilter is not None and len(tfilter[typ]):
                            valkey = "VALUES:" + key + ":" + uve_key + ":" + \
                                 src + ":" + node_type + ":" + mdule + \
                                 ":" + inst + ":" + typ
                            for afilter in tfilter[typ]:
                                attrval = redish.hget(valkey, afilter)
                                if attrval is not None:
                                    break
                            if attrval is None:
                                continue
                    uve_list.add(uve_key)
            except redis.exceptions.ConnectionError:
                self._logger.error('Failed to connect to redis-uve: %s:%d' \
                                   % (redis_uve[0], redis_uve[1]))
            except Exception as e:
                self._logger.error('Exception: %s' % e)
                return set()
        return uve_list
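
# Hedged illustration, not from the original source: the translation that
# get_uve_regex() performs on the wildcard keys used by multi_uve_get() and
# get_uve_list(), reproduced standalone so it can be sanity-checked.
import re

def wildcard_to_regex(key):
    regex = ''
    if key[0] != '*':
        regex += '^'          # anchor unless the key starts with a wildcard
    regex += key.replace('*', '.*?')
    if key[-1] != '*':
        regex += '$'          # anchor unless the key ends with a wildcard
    return re.compile(regex)

assert wildcard_to_regex('vrouter*').match('vrouter-node1')
assert wildcard_to_regex('*node1').match('vrouter-node1')
assert not wildcard_to_regex('vrouter').match('vrouter-node1')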
Example #49
0
class DiscoveryZkClient(object):

    def __init__(self, discServer, zk_srv_ip='127.0.0.1',
                 zk_srv_port='2181', reset_config=False):
        self._reset_config = reset_config
        self._service_id_to_type = {}
        self._ds = discServer
        self._zk_sem = BoundedSemaphore(1)
        self._election = None
        self._restarting = False

        zk_endpts = []
        for ip in zk_srv_ip.split(','):
            zk_endpts.append('%s:%s' %(ip, zk_srv_port))

        # logging
        logger = logging.getLogger('discovery-service')
        logger.setLevel(logging.WARNING)
        handler = logging.handlers.RotatingFileHandler(
            '/var/log/contrail/discovery_zk.log',
            maxBytes=1024 * 1024, backupCount=10)
        log_format = logging.Formatter(
            '%(asctime)s [%(name)s]: %(message)s',
            datefmt='%m/%d/%Y %I:%M:%S %p')
        handler.setFormatter(log_format)
        logger.addHandler(handler)

        self._zk = kazoo.client.KazooClient(
            hosts=','.join(zk_endpts),
            handler=kazoo.handlers.gevent.SequentialGeventHandler(),
            logger=logger)
        self._logger = logger

        # connect
        self.connect()

        if reset_config:
            self.delete_node("/services", recursive=True)
            self.delete_node("/clients", recursive=True)
            self.delete_node("/election", recursive=True)

        # create default paths
        self.create_node("/services")
        self.create_node("/clients")
        self.create_node("/election")

        self._debug = {
            'subscription_expires': 0,
            'oos_delete': 0,
            'db_excepts': 0,
        }
    # end __init__

    # Discovery server used for syslog, cleanup etc
    def set_ds(self, discServer):
        self._ds = discServer
    # end set_ds

    def is_restarting(self):
        return self._restarting
    # end is_restarting

    # restart
    def restart(self):
        self._zk_sem.acquire()
        self._restarting = True
        self.syslog("restart: acquired lock; state %s " % self._zk.state)
        # initiate restart if our state is suspended or lost
        if self._zk.state != "CONNECTED":
            self.syslog("restart: starting ...")
            try:
                self._zk.stop()
                self._zk.close()
                self._zk.start()
                self.syslog("restart: done")
            except gevent.Timeout:
                self.syslog("restart: timeout!")
            except Exception as e:
                self.syslog('restart: exception %s' % str(e))
        self._restarting = False
        self._zk_sem.release()

    # start 
    def connect(self):
        while True:
            try:
                self._zk.start()
                break
            except gevent.Timeout:
                self.syslog(
                    'Failed to connect with Zookeeper - will retry in a second')
                gevent.sleep(1)
            # Zookeeper is also throwing exception due to delay in master election
            except Exception as e:
                self.syslog('%s - will retry in a second' % (str(e)))
                gevent.sleep(1)
        self.syslog('Connected to ZooKeeper!')
    # end

    def start_background_tasks(self):
        # spawn loop to expire subscriptions
        gevent.Greenlet.spawn(self.inuse_loop)

        # spawn loop to expire services
        gevent.Greenlet.spawn(self.service_oos_loop)
    # end

    def syslog(self, log_msg):
        if self._logger is None:
            return
        self._logger.info(log_msg)
    # end

    def get_debug_stats(self):
        return self._debug
    # end

    def _zk_listener(self, state):
        if state == "CONNECTED":
            self._election.cancel()
    # end

    def _zk_election_callback(self, func, *args, **kwargs):
        self._zk.remove_listener(self._zk_listener)
        func(*args, **kwargs)
    # end

    def master_election(self, path, identifier, func, *args, **kwargs):
        self._zk.add_listener(self._zk_listener)
        while True:
            self._election = self._zk.Election(path, identifier)
            self._election.run(self._zk_election_callback, func, *args, **kwargs)
    # end master_election

    def create_node(self, path, value='', makepath=True, sequence=False):
        # upsert semantics: set the node if it exists, create it otherwise
        value = str(value)
        while True:
            try:
                return self._zk.set(path, value)
            except kazoo.exceptions.NoNodeException:
                self.syslog('create %s' % (path))
                return self._zk.create(path, value, makepath=makepath, sequence=sequence)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
    # end create_node

    def get_children(self, path):
        while True:
            try:
                return self._zk.get_children(path)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
            except Exception:
                return []
    # end get_children

    def read_node(self, path):
        while True:
            try:
                data, stat = self._zk.get(path)
                return data,stat
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
            except kazoo.exceptions.NoNodeException:
                self.syslog('exc read: node %s does not exist' % path)
                return (None, None)
    # end read_node

    def delete_node(self, path, recursive=False):
        while True:
            try:
                return self._zk.delete(path, recursive=recursive)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
            except kazoo.exceptions.NoNodeException:
                self.syslog('exc delete: node %s does not exist' % path)
                return None
    # end delete_node

    def exists_node(self, path):
        while True:
            try:
                return self._zk.exists(path)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
    # end exists_node
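
    # Reviewer sketch, not in the original source: create_node, get_children,
    # read_node, delete_node and exists_node above all share the same retry
    # skeleton - loop forever, call kazoo, and on SessionExpiredError or
    # ConnectionLoss run restart() and try again. If the class grows more
    # accessors, the pattern could be factored out, e.g.:
    #
    #     def _retry(self, op, *args, **kwargs):
    #         while True:
    #             try:
    #                 return op(*args, **kwargs)
    #             except (kazoo.exceptions.SessionExpiredError,
    #                     kazoo.exceptions.ConnectionLoss):
    #                 self.restart()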

    def service_entries(self):
        service_types = self.get_children('/services')
        for service_type in service_types:
            services = self.get_children('/services/%s' % (service_type))
            for service_id in services:
                data, stat = self.read_node(
                    '/services/%s/%s' % (service_type, service_id))
                entry = json.loads(data)
                yield(entry)

    def subscriber_entries(self):
        service_types = self.get_children('/clients')
        for service_type in service_types:
            subscribers = self.get_children('/clients/%s' % (service_type))
            for client_id in subscribers:
                cl_entry = self.lookup_client(service_type, client_id)
                if cl_entry:
                    yield((client_id, service_type))
    # end

    def update_service(self, service_type, service_id, data):
        path = '/services/%s/%s' % (service_type, service_id)
        self.create_node(path, value=json.dumps(data), makepath=True)
    # end

    def insert_service(self, service_type, service_id, data):

        # ensure election path for service type exists
        path = '/election/%s' % (service_type)
        self.create_node(path)

        # preclude duplicate service entry
        sid_set = set()

        # prevent background task from deleting node under our nose
        seq_list = self.get_children(path)
        # data for election node is service ID
        for sequence in seq_list:
            sid, stat = self.read_node(
                '/election/%s/%s' % (service_type, sequence))
            if sid is not None:
                sid_set.add(sid)
        if service_id not in sid_set:
            path = '/election/%s/node-' % (service_type)
            pp = self.create_node(
                path, service_id, makepath=True, sequence=True)
            pat = path + "(?P<id>.*$)"
            mch = re.match(pat, pp)
            seq = mch.group('id')
            data['sequence'] = seq
            self.syslog('ST %s, SID %s not found! Added with sequence %s' %
                        (service_type, service_id, seq))
    # end insert_service
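
    # Illustration (reviewer note, not in the original source): ZooKeeper
    # appends a 10-digit sequence number to sequential nodes, so the
    # create_node() call above might return e.g.
    # '/election/xmpp-server/node-0000000012', from which the regex extracts
    # seq = '0000000012' for storage in the service entry.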

    # forget service and subscribers
    def delete_service(self, service_type, service_id, recursive = False):
        #if self.lookup_subscribers(service_type, service_id):
        #    return

        path = '/services/%s/%s' %(service_type, service_id)
        self.delete_node(path, recursive = recursive)

        # delete service node if all services gone
        path = '/services/%s' %(service_type)
        if self.get_children(path):
            return
        self.delete_node(path)
    #end delete_service

    def lookup_service(self, service_type, service_id=None):
        if not self.exists_node('/services/%s' % (service_type)):
            return None
        if service_id:
            data = None
            path = '/services/%s/%s' % (service_type, service_id)
            datastr, stat = self.read_node(path)
            if datastr:
                data = json.loads(datastr)
                clients = self.get_children(path)
                data['in_use'] = len(clients)
            return data
        else:
            r = []
            services = self.get_children('/services/%s' % (service_type))
            for service_id in services:
                entry = self.lookup_service(service_type, service_id)
                r.append(entry)
            return r
    # end lookup_service

    def query_service(self, service_type):
        path = '/election/%s' % (service_type)
        if not self.exists_node(path):
            return None
        seq_list = self.get_children(path)
        seq_list = sorted(seq_list)

        r = []
        for sequence in seq_list:
            service_id, stat = self.read_node(
                '/election/%s/%s' % (service_type, sequence))
            entry = self.lookup_service(service_type, service_id)
            r.append(entry)
        return r
    # end

    # TODO use include_data available in new versions of kazoo
    # tree structure /services/<service-type>/<service-id>
    def get_all_services(self):
        r = []
        service_types = self.get_children('/services')
        for service_type in service_types:
            services = self.lookup_service(service_type)
            r.extend(services)
        return r
    # end

    def insert_client(self, service_type, service_id, client_id, blob, ttl):
        data = {'ttl': ttl, 'blob': blob}

        path = '/services/%s/%s/%s' % (service_type, service_id, client_id)
        self.create_node(path, value=json.dumps(data))

        path = '/clients/%s/%s/%s' % (service_type, client_id, service_id)
        self.create_node(path, value=json.dumps(data), makepath=True)
    # end insert_client

    def lookup_subscribers(self, service_type, service_id):
        path = '/services/%s/%s' % (service_type, service_id)
        if not self.exists_node(path):
            return None
        clients = self.get_children(path)
        return clients
    # end lookup_subscribers

    def lookup_client(self, service_type, client_id):
        try:
            datastr, stat = self.read_node(
                '/clients/%s/%s' % (service_type, client_id))
            data = json.loads(datastr) if datastr else None
        except ValueError:
            self.syslog('raise ValueError st=%s, cid=%s' %(service_type, client_id))
            data = None
        return data
    # end lookup_client

    def insert_client_data(self, service_type, client_id, cldata):
        path = '/clients/%s/%s' % (service_type, client_id)
        self.create_node(path, value=json.dumps(cldata), makepath=True)
    # end insert_client_data

    def lookup_subscription(self, service_type, client_id=None,
                            service_id=None, include_meta=False):
        if not self.exists_node('/clients/%s' % (service_type)):
            return None
        if client_id and service_id:
            try:
                datastr, stat = self.read_node(
                    '/clients/%s/%s/%s'
                    % (service_type, client_id, service_id))
                data = json.loads(datastr)
                blob = data['blob']
                if include_meta:
                    return (blob, stat, data['ttl'])
                else:
                    return blob
            except kazoo.exceptions.NoNodeException:
                return None
        elif client_id:
            # our version of Kazoo doesn't support include_data :-(
            try:
                services = self.get_children(
                    '/clients/%s/%s' % (service_type, client_id))
                r = []
                for service_id in services:
                    datastr, stat = self.read_node(
                        '/clients/%s/%s/%s'
                        % (service_type, client_id, service_id))
                    if datastr:
                        data = json.loads(datastr)
                        blob = data['blob']
                        r.append((service_id, blob, stat))
                # sort services in the order of assignment to this client
                # (based on modification time)
                rr = sorted(r, key=lambda entry: entry[2].last_modified)
                return [(service_id, blob) for service_id, blob, stat in rr]
            except kazoo.exceptions.NoNodeException:
                return None
        else:
            clients = self.get_children('/clients/%s' % (service_type))
            return clients
    # end lookup_subscription

    # delete client subscription. Cleanup path if possible
    def delete_subscription(self, service_type, client_id, service_id):
        path = '/clients/%s/%s/%s' % (service_type, client_id, service_id)
        self.delete_node(path)

        path = '/services/%s/%s/%s' % (service_type, service_id, client_id)
        self.delete_node(path)

        # delete client node if all subscriptions gone
        path = '/clients/%s/%s' % (service_type, client_id)
        if self.get_children(path):
            return
        self.delete_node(path)

        # purge in-memory cache - ideally we are not supposed to know about
        # this
        self._ds.delete_sub_data(client_id, service_type)

        # delete service node if all clients gone
        path = '/clients/%s' % (service_type)
        if self.get_children(path):
            return
        self.delete_node(path)
    # end delete_subscription

    # TODO use include_data available in new versions of kazoo
    # tree structure /clients/<service-type>/<client-id>/<service-id>
    # return tuple (service_type, client_id, service_id)
    def get_all_clients(self):
        r = []
        service_types = self.get_children('/clients')
        for service_type in service_types:
            clients = self.get_children('/clients/%s' % (service_type))
            for client_id in clients:
                services = self.get_children(
                    '/clients/%s/%s' % (service_type, client_id))
                rr = []
                for service_id in services:
                    (datastr, stat, ttl) = self.lookup_subscription(
                        service_type, client_id, service_id, include_meta=True)
                    rr.append(
                        (service_type, client_id, service_id,
                         stat.last_modified, ttl))
                rr = sorted(rr, key=lambda entry: entry[3])
                r.extend(rr)
        return r
    # end get_all_clients

    # expire stale client subscriptions (TTL plus grace period)
    def inuse_loop(self):
        while True:
            service_types = self.get_children('/clients')
            for service_type in service_types:
                clients = self.get_children('/clients/%s' % (service_type))
                for client_id in clients:
                    services = self.get_children(
                        '/clients/%s/%s' % (service_type, client_id))
                    for service_id in services:
                        path = '/clients/%s/%s/%s' % (
                            service_type, client_id, service_id)
                        datastr, stat = self.read_node(path)
                        data = json.loads(datastr)
                        now = time.time()
                        exp_t = stat.last_modified + data['ttl'] +\
                            disc_consts.TTL_EXPIRY_DELTA
                        if now > exp_t:
                            self.delete_subscription(
                                service_type, client_id, service_id)
                            self.syslog(
                                'Expiring st:%s sid:%s cid:%s'
                                % (service_type, service_id, client_id))
                            self._debug['subscription_expires'] += 1
            gevent.sleep(10)

    def service_oos_loop(self):
        if self._ds._args.hc_interval <= 0:
            return

        while True:
            for entry in self.service_entries():
                if not self._ds.service_expired(entry, include_down=False):
                    continue
                service_type = entry['service_type']
                service_id   = entry['service_id']
                path = '/election/%s/node-%s' % (
                    service_type, entry['sequence'])
                if not self.exists_node(path):
                    continue
                self.syslog('Deleting sequence node %s for service %s:%s' %
                        (path, service_type, service_id))
                self.delete_node(path)
                entry['sequence'] = -1
                self.update_service(service_type, service_id, entry)
                self._debug['oos_delete'] += 1
            gevent.sleep(self._ds._args.hc_interval)
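A standalone sketch of the TTL rule used by inuse_loop above: a subscription is considered stale once its last write is older than its TTL plus a grace delta (the delta value here is an assumption; the real constant comes from disc_consts).

import time

TTL_EXPIRY_DELTA = 30  # assumption: the real value lives in disc_consts

def subscription_expired(last_modified, ttl, now=None):
    # mirrors the check in inuse_loop: last write older than ttl + grace
    now = now if now is not None else time.time()
    return now > last_modified + ttl + TTL_EXPIRY_DELTA

# e.g. a subscription written 100s ago with ttl=60 outlives the 30s grace:
assert subscription_expired(time.time() - 100, 60)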
Example #50
class ConnectionPool(object):
    """
    Generic TCP connection pool, with the following features:
        * Configurable pool size
        * Auto-reconnection when a broken socket is detected
        * Optional periodic keepalive
    """

    # Stagger, in seconds, between connection spawns when populating the pool at startup
    SPAWN_FREQUENCY = 0.1

    def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None):
        self.size = size
        self.conn = deque()
        self.lock = BoundedSemaphore(size)
        self.keepalive = keepalive
        # Exceptions list must be in tuple form to be caught properly
        self.exc_classes = tuple(exc_classes)
        # http://stackoverflow.com/a/31136897/357578
        # NB: probing and assigning `xrange` in the same scope would make it
        # a local name and always fall through to range(); bind through a
        # separate name instead
        try:
            range_fn = xrange  # Python 2
        except NameError:
            range_fn = range   # Python 3
        for i in range_fn(size):
            self.lock.acquire()
        for i in range_fn(size):
            gevent.spawn_later(self.SPAWN_FREQUENCY*i, self._addOne)
        if self.keepalive:
            gevent.spawn(self._keepalive_periodic)

    def _new_connection(self):
        """
        Establish a new connection (to be implemented in subclasses).
        """
        raise NotImplementedError

    def _keepalive(self, c):
        """
        Implement actual application-level keepalive (to be
        reimplemented in subclasses).

        :raise: socket.error if the connection has been closed or is broken.
        """
        raise NotImplementedError()

    def _keepalive_periodic(self):
        delay = float(self.keepalive) / self.size
        while 1:
            try:
                with self.get() as c:
                    self._keepalive(c)
            except self.exc_classes:
                # Nothing to do, the pool will generate a new connection later
                pass
            gevent.sleep(delay)

    def _addOne(self):
        stime = 0.1
        while 1:
            c = self._new_connection()
            if c:
                break
            gevent.sleep(stime)
            if stime < 400:
                stime *= 2

        self.conn.append(c)
        self.lock.release()

    @contextmanager
    def get(self):
        """
        Get a connection from the pool, to make and receive traffic.

        If the connection fails for any reason (socket.error), it is dropped
        and a new one is scheduled. Please use @retry as a way to automatically
        retry whatever operation you were performing.
        """
        self.lock.acquire()
        try:
            c = self.conn.popleft()
            yield c
        except self.exc_classes:
            # The current connection has failed, drop it and create a new one
            gevent.spawn_later(1, self._addOne)
            raise
        except:
            self.conn.append(c)
            self.lock.release()
            raise
        else:
            # NOTE: cannot use finally because MUST NOT reuse the connection
            # if it failed (socket.error)
            self.conn.append(c)
            self.lock.release()
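A minimal usage sketch for the pool above: EchoPool and the ('127.0.0.1', 7) endpoint are hypothetical; the contract is only that _new_connection returns a connection (or None, so _addOne retries with backoff) and _keepalive raises socket.error on a dead peer.

import socket

class EchoPool(ConnectionPool):
    def _new_connection(self):
        # return None on failure so _addOne retries with backoff
        try:
            return socket.create_connection(('127.0.0.1', 7), timeout=5)
        except socket.error:
            return None

    def _keepalive(self, c):
        # application-level ping; a closed peer surfaces as socket.error
        c.sendall(b'ping')
        if not c.recv(4):
            raise socket.error('connection closed by peer')

pool = EchoPool(size=5, keepalive=30)
with pool.get() as c:
    c.sendall(b'hello')
    reply = c.recv(1024)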
Example #51
    def __init__(self, clear_interval=DEFAULT_CLEAR_INTERVAL):
        self.__updates = {}
        self.__updates_lock = BoundedSemaphore()
        # Start clearing daemon thread.
        spawn(self._daemon_clear, interval=clear_interval)
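The snippet only spawns the clearing greenlet; a plausible body for it (hypothetical, since _daemon_clear is not shown, and assuming spawn/sleep come from gevent) drains the shared dict while holding the semaphore so concurrent writers cannot race:

    def _daemon_clear(self, interval):
        while True:
            sleep(interval)
            # gevent's BoundedSemaphore doubles as a context manager
            with self.__updates_lock:
                self.__updates.clear()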
Example #52
class UVEServer(object):

    def __init__(self, redis_uve_server, logger, redis_password=None):
        self._local_redis_uve = redis_uve_server
        self._redis_uve_map = {}
        self._logger = logger
        self._sem = BoundedSemaphore(1)
        self._redis = None
        self._redis_password = redis_password
        if self._local_redis_uve:
            self._redis = redis.StrictRedis(self._local_redis_uve[0],
                                            self._local_redis_uve[1],
                                            password=self._redis_password,
                                            db=1)
        self._uve_reverse_map = {}
        for h,m in UVE_MAP.iteritems():
            self._uve_reverse_map[m] = h

    #end __init__

    def update_redis_uve_list(self, redis_uve_list):
        newlist = set()
        for elem in redis_uve_list:
            newlist.add((elem[0],elem[1]))

        # if some redis instances are gone, remove them from our map
        for test_elem in self._redis_uve_map.keys():
            if test_elem not in newlist:
                del self._redis_uve_map[test_elem]
        
        # new redis instances need to be inserted into the map
        for test_elem in newlist:
            if test_elem not in self._redis_uve_map:
                (r_ip, r_port) = test_elem
                self._redis_uve_map[test_elem] = redis.StrictRedis(
                        r_ip, r_port, password=self._redis_password, db=1)
    # end update_redis_uve_list

    def fill_redis_uve_info(self, redis_uve_info):
        redis_uve_info.ip = self._local_redis_uve[0]
        redis_uve_info.port = self._local_redis_uve[1]
        try:
            self._redis.ping()
        except redis.exceptions.ConnectionError:
            redis_uve_info.status = 'DisConnected'
        else:
            redis_uve_info.status = 'Connected'
    #end fill_redis_uve_info

    @staticmethod
    def merge_previous(state, key, typ, attr, prevdict):
        print "%s New    val is %s" % (attr, prevdict)
        nstate = copy.deepcopy(state)
        if UVEServer._is_agg_item(prevdict):
            count = int(state[key][typ][attr]['previous']['#text'])
            count += int(prevdict['#text'])
            nstate[key][typ][attr]['previous']['#text'] = str(count)

        if UVEServer._is_agg_list(prevdict):
            sname = ParallelAggregator.get_list_name(
                state[key][typ][attr]['previous'])
            count = len(prevdict['list'][sname]) + \
                len(state[key][typ][attr]['previous']['list'][sname])
            nstate[key][typ][attr]['previous']['list'][sname].extend(
                prevdict['list'][sname])
            nstate[key][typ][attr]['previous']['list']['@size'] = \
                str(count)

            tstate = {}
            tstate[typ] = {}
            tstate[typ][attr] = copy.deepcopy(
                nstate[key][typ][attr]['previous'])
            nstate[key][typ][attr]['previous'] =\
                ParallelAggregator.consolidate_list(tstate, typ, attr)

        print "%s Merged val is %s"\
            % (attr, nstate[key][typ][attr]['previous'])
        return nstate

    def run(self):
        lck = False
        while True:
            try:
                k, value = self._redis.brpop("DELETED")
                self._sem.acquire()
                lck = True
                self._logger.debug("%s del received for " % value)
                # value is of the format: 
                # DEL:<key>:<src>:<node-type>:<module>:<instance-id>:<message-type>:<seqno>
                self._redis.delete(value)
            except redis.exceptions.ResponseError:
                # send redis connection down msg; could be because of authentication
                ConnectionState.update(conn_type = ConnectionType.REDIS,
                    name = 'UVE', status = ConnectionStatus.DOWN,
                    message = 'UVE result : Connection Error',
                    server_addrs = ['%s:%d' % (self._local_redis_uve[0],
                    self._local_redis_uve[1])])
                sys.exit()
            except redis.exceptions.ConnectionError:
                if lck:
                    self._sem.release()
                    lck = False
                gevent.sleep(5)
            else:
                if lck:
                    self._sem.release()
                    lck = False
                self._logger.debug("Deleted %s" % value)
                self._logger.debug("UVE %s Type %s" % (key, typ))

    @staticmethod
    def _is_agg_item(attr):
        if attr['@type'] in ['i8', 'i16', 'i32', 'i64', 'byte',
                             'u8', 'u16', 'u32', 'u64']:
            if '@aggtype' in attr:
                if attr['@aggtype'] == "counter":
                    return True
        return False

    @staticmethod
    def _is_agg_list(attr):
        if attr['@type'] in ['list']:
            if '@aggtype' in attr:
                if attr['@aggtype'] == "append":
                    return True
        return False

    def get_part(self, part):
        uves = {}
        for r_inst in self._redis_uve_map.keys():
            try:
                (r_ip,r_port) = r_inst
                if not self._redis_uve_map[r_inst]:
                    self._redis_uve_map[r_inst] = redis.StrictRedis(
                            host=r_ip, port=r_port,
                            password=self._redis_password, db=1)

                redish = self._redis_uve_map[r_inst]
                gen_uves = {}
                for elems in redish.smembers("PART2KEY:" + str(part)):
                    info = elems.split(":", 5)
                    gen = info[0] + ":" + info[1] + ":" + info[2] + ":" + info[3]
                    key = info[5]
                    if gen not in gen_uves:
                        gen_uves[gen] = {}
                    gen_uves[gen][key] = 0
                uves[r_ip + ":" + str(r_port)] = gen_uves
            except Exception as e:
                self._logger.error("get_part failed %s for : %s:%d tb %s" \
                                   % (str(e), r_ip, r_port, traceback.format_exc()))
                self._redis_uve_map[r_inst] = None
                raise e
        return uves

    def get_uve(self, key, flat, filters=None, is_alarm=False, base_url=None):
        filters = filters or {}
        sfilter = filters.get('sfilt')
        mfilter = filters.get('mfilt')
        tfilter = filters.get('cfilt')
        ackfilter = filters.get('ackfilt')
        state = {}
        state[key] = {}
        rsp = {}
        failures = False
       
        for r_inst in self._redis_uve_map.keys():
            try:
                (r_ip,r_port) = r_inst
                if not self._redis_uve_map[r_inst]:
                    self._redis_uve_map[r_inst] = redis.StrictRedis(
                            host=r_ip, port=r_port,
                            password=self._redis_password, db=1)

                redish = self._redis_uve_map[r_inst]
                qmap = {}

                ppe = redish.pipeline()
                ppe.smembers("ALARM_ORIGINS:" + key)
                if not is_alarm:
                    ppe.smembers("ORIGINS:" + key)
                pperes = ppe.execute()
                origins = set()
                for origset in pperes:
                    for smt in origset:
                        sm, tt = smt.rsplit(":", 1)
                        source, mdule = sm.split(":", 1)
                        if tfilter is not None:
                            if tt not in tfilter:
                                continue
                        if sfilter is not None:
                            if sfilter != source:
                                continue
                        if mfilter is not None:
                            if mfilter != mdule:
                                continue
                        origins.add(smt)

                ppeval = redish.pipeline()
                for origs in origins:
                    ppeval.hgetall("VALUES:" + key + ":" + origs)
                odictlist = ppeval.execute()

                idx = 0    
                for origs in origins:

                    odict = odictlist[idx]
                    idx = idx + 1

                    info = origs.rsplit(":", 1)
                    dsource = info[0]
                    typ = info[1]

                    afilter_list = set()
                    if tfilter is not None:
                        afilter_list = tfilter[typ]

                    for attr, value in odict.iteritems():
                        if len(afilter_list):
                            if attr not in afilter_list:
                                continue

                        if typ not in state[key]:
                            state[key][typ] = {}

                        if value[0] == '<':
                            snhdict = xmltodict.parse(value)
                            if snhdict[attr]['@type'] == 'list':
                                sname = ParallelAggregator.get_list_name(
                                        snhdict[attr])
                                if snhdict[attr]['list']['@size'] == '0':
                                    continue
                                elif snhdict[attr]['list']['@size'] == '1':
                                    if not isinstance(
                                        snhdict[attr]['list'][sname], list):
                                        snhdict[attr]['list'][sname] = [
                                            snhdict[attr]['list'][sname]]
                                if typ == 'UVEAlarms' and attr == 'alarms' and \
                                    ackfilter is not None:
                                    alarms = []
                                    for alarm in snhdict[attr]['list'][sname]:
                                        ack_attr = alarm.get('ack')
                                        if ack_attr:
                                            ack = ack_attr['#text']
                                        else:
                                            ack = 'false'
                                        if ack == ackfilter:
                                            alarms.append(alarm)
                                    if not len(alarms):
                                        continue
                                    snhdict[attr]['list'][sname] = alarms
                                    snhdict[attr]['list']['@size'] = \
                                        str(len(alarms))
                        else:
                            continue

                        # print "Attr %s Value %s" % (attr, snhdict)
                        if attr not in state[key][typ]:
                            state[key][typ][attr] = {}
                        if dsource in state[key][typ][attr]:
                            print "Found Dup %s:%s:%s:%s = %s" % \
                                (key, typ, attr, dsource,
                                 state[key][typ][attr][dsource])
                        state[key][typ][attr][dsource] = snhdict[attr]

                pa = ParallelAggregator(state, self._uve_reverse_map)
                rsp = pa.aggregate(key, flat, base_url)
            except Exception as e:
                self._logger.error("redis-uve failed %s for : %s:%d tb %s" \
                                   % (str(e), r_ip, r_port, traceback.format_exc()))
                self._redis_uve_map[r_inst] = None
                failures = True
            else:
                self._logger.debug("Computed %s as %s" % (key,str(rsp)))

        return failures, rsp
    # end get_uve

    def get_uve_regex(self, key):
        regex = ''
        if key[0] != '*':
            regex += '^'
        regex += key.replace('*', '.*?')
        if key[-1] != '*':
            regex += '$'
        return re.compile(regex)
    # end get_uve_regex

    def multi_uve_get(self, table, flat, filters=None, is_alarm=False, base_url=None):
        # get_uve_list cannot handle attribute names very efficiently,
        # so we don't pass them here
        uve_list = self.get_uve_list(table, filters, False, is_alarm)
        for uve_name in uve_list:
            _,uve_val = self.get_uve(
                table + ':' + uve_name, flat, filters, is_alarm, base_url)
            if uve_val == {}:
                continue
            else:
                uve = {'name': uve_name, 'value': uve_val}
                yield uve
    # end multi_uve_get

    def get_uve_list(self, table, filters=None, parse_afilter=False,
                     is_alarm=False):
        filters = filters or {}
        uve_list = set()
        kfilter = filters.get('kfilt')
        if kfilter is not None:
            patterns = set()
            for filt in kfilter:
                patterns.add(self.get_uve_regex(filt))

        for r_inst in self._redis_uve_map.keys():
            try:
                (r_ip,r_port) = r_inst
                if not self._redis_uve_map[r_inst]:
                    self._redis_uve_map[r_inst] = redis.StrictRedis(
                            host=r_ip, port=r_port,
                            password=self._redis_password, db=1)

                redish = self._redis_uve_map[r_inst]

                # For UVE queries, we want to read both the UVE and Alarm tables
                entries = redish.smembers('ALARM_TABLE:' + table)
                if not is_alarm:
                    entries = entries.union(redish.smembers('TABLE:' + table))
                for entry in entries:
                    info = (entry.split(':', 1)[1]).rsplit(':', 5)
                    uve_key = info[0]
                    if kfilter is not None:
                        kfilter_match = False
                        for pattern in patterns:
                            if pattern.match(uve_key):
                                kfilter_match = True
                                break
                        if not kfilter_match:
                            continue
                    src = info[1]
                    sfilter = filters.get('sfilt')
                    if sfilter is not None:
                        if sfilter != src:
                            continue
                    module = info[2]+':'+info[3]+':'+info[4]
                    mfilter = filters.get('mfilt')
                    if mfilter is not None:
                        if mfilter != module:
                            continue
                    typ = info[5]
                    tfilter = filters.get('cfilt')
                    if tfilter is not None:
                        if typ not in tfilter:
                            continue
                    if parse_afilter:
                        if tfilter is not None and len(tfilter[typ]):
                            valkey = "VALUES:" + table + ":" + uve_key + \
                                ":" + src + ":" + module + ":" + typ
                            for afilter in tfilter[typ]:
                                attrval = redish.hget(valkey, afilter)
                                if attrval is not None:
                                    break
                            if attrval is None:
                                continue
                    uve_list.add(uve_key)
            except Exception as e:
                self._logger.error("get_uve_list failed %s for : %s:%d tb %s" \
                                   % (str(e), r_ip, r_port, traceback.format_exc()))
                self._redis_uve_map[r_inst] = None
        return uve_list
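get_uve_list and multi_uve_get lean on get_uve_regex to turn the '*' globs in kfilt into anchored regular expressions; the translation, reduced to a standalone sketch:

import re

def glob_to_regex(key):
    # '*' is the only wildcard; the pattern is anchored at either end
    # unless the glob itself starts or ends with '*'
    regex = '' if key[0] == '*' else '^'
    regex += key.replace('*', '.*?')
    if key[-1] != '*':
        regex += '$'
    return re.compile(regex)

assert glob_to_regex('vrouter*').match('vrouter-a1b2')
assert not glob_to_regex('vrouter*').match('a-vrouter')
assert glob_to_regex('*:vn1').match('default-domain:vn1')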
Example #53
class Crawler:
    settings = CrawlSettings()

    def __init__(self, spider_class, settings):
        def get(value, default={}):
            try:
                return getattr(settings, value)
            except AttributeError:
                return default
        self.settings = CrawlSettings(get('CRAWL'))
        Request.settings = RequestSettings(get('REQUEST'))
        spider_settings = SpiderSettings(get('SPIDER'))
        spider = spider_class(spider_settings)
        log = LogSettings(get('LOGFORMATTERS'), get('LOGHANDLERS'),
                          get('LOGGERS'))
        spider.logger = log.getLogger(spider.name)
        self.logger = log.getLogger(spider.name)
        self.load(spider)
        Request.stats = self.stats

    def load(self, spider):
        redis_args = dict(host=self.settings.REDIS_URL,
                          port=self.settings.REDIS_PORT,
                          db=self.settings.REDIS_DB)
        if hasattr(self.settings, 'NAMESPACE'):
            redis_args['namespace'] = self.settings.NAMESPACE
        else:
            redis_args['namespace'] = spider.name
        self.url_set = redisds.Set('urlset', **redis_args)
        self.url_queue = redisds.Queue('urlqueue', serializer=Pickle(),
                                       **redis_args)
        self.runner = redisds.Lock("runner:%s" % uuid4().hex, **redis_args)
        self.runners = redisds.Dict("runner:*", **redis_args)
        self.stats = redisds.Dict("stats:*", **redis_args)
        self.lock = BoundedSemaphore(1)
        self.running_count = 0
        self.allowed_urls_regex = self.get_regex(spider.allowed_domains)
        self.spider = spider
        self.start()

    def get_regex(self, domains):
        default = (r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|'  # domain...
                   r'localhost|'  # localhost...
                   r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
                   r'(?::\d+)?')
        domain_regex = r'(%s)' % '|'.join(domains) if len(domains) else default
        url_regex = r'^https?://%s(?:/?|[/?]\S+)$' % domain_regex
        regex = re.compile(url_regex, re.IGNORECASE)
        return regex

    def current_time(self):
        tz = timezone(self.settings.TIME_ZONE)
        return datetime.now(tz).isoformat()

    def start(self):
        if not self.settings.RESUME and self.completed():
            self.url_queue.clear()
            self.url_set.clear()
        if self.url_queue.empty():
            self.stats.clear()
        if isinstance(self.spider.start, list):
            requests = self.spider.start
        else:
            requests = [self.spider.start]
        for request in requests:
            if isinstance(request, str):
                request = Request(request)
            if request.callback is None:
                request.callback = "parse"
            self.insert(request)
        self.stats['status'] = "running"
        self.stats['start_time'] = self.current_time()

    def clear(self, finished):
        self.runner.release()
        if finished:
            self.stats['status'] = 'finished'
            self.url_queue.clear()
            self.url_set.clear()
        elif self.completed():
            self.stats['end_time'] = self.current_time()
            self.stats['status'] = 'stopped'
        stats = dict(self.stats)
        stats['runners'] = len(self.runners)
        self.logger.info("%s", str(stats))

    def completed(self):
        return len(self.runners) == 0

    def inc_count(self):
        self.lock.acquire()
        if self.running_count == 0:
            self.runner.acquire()
        self.running_count += 1
        self.lock.release()

    def decr_count(self):
        self.lock.acquire()
        self.running_count -= 1
        if self.running_count == 0:
            self.runner.release()
        self.lock.release()

    def insert(self, request, check=True):
        if not isinstance(request, Request):
            return
        reqhash = request.get_unique_id()
        if check:
            if not self.allowed_urls_regex.match(request.url):
                return
            elif reqhash in self.url_set:
                return
        self.url_set.add(reqhash)
        self.url_queue.put(request)
        del request

    def process_url(self):
        while True:
            request = self.url_queue.get(timeout=2)
            if request:
                self.logger.info("Processing %s", request)
                self.inc_count()
                try:
                    response = request.send()
                    try:
                        callback = getattr(self.spider, request.callback)
                        requests = callback(response)
                    except KeyboardInterrupt:
                        raise
                    except:
                        self.logger.exception("Failed to execute callback")
                        requests = None
                    if requests:
                        for i in requests:
                            self.insert(i)
                except RequestError as e:
                    request.retry += 1
                    if request.retry >= self.settings.MAX_RETRY:
                        self.logger.warning("Rejecting %s", request)
                    else:
                        self.logger.debug("Retrying %s", request)
                        self.insert(request, False)
                # except Exception as e:
                # self.logger.exception('Failed to open the url %s', request)
                except KeyboardInterrupt:
                    self.insert(request, False)
                    raise
                else:
                    self.logger.info("Finished processing %s", request)
                finally:
                    self.decr_count()
            else:
                if self.completed():
                    break
                self.logger.info("Waiting for %s", self.running_count)
Example #54
class Poketrainer:
    """ Public functions (without _**) are callable by the webservice! """

    def __init__(self, args):

        self.thread = None
        self.socket = None
        self.cli_args = args
        self.force_debug = args['debug']

        self.log = logging.getLogger(__name__)

        # timers, counters and triggers
        self.pokemon_caught = 0
        self._error_counter = 0
        self._error_threshold = 10
        self.start_time = time()
        self.exp_start = None
        self._heartbeat_number = 1  # setting this back to one because we parse a full heartbeat during login!
        self._heartbeat_frequency = 3  # 1 = always
        self._full_heartbeat_frequency = 15  # 10 = as before (every 10th heartbeat)
        self._farm_mode_triggered = False

        # objects, order is important!
        self.config = None
        self._load_config()
        self._open_socket()

        self.player = Player({})
        self.player_stats = PlayerStats({})
        self.inventory = Inventory(self, [])
        self.fort_walker = FortWalker(self)
        self.map_objects = MapObjects(self)
        self.poke_catcher = PokeCatcher(self)
        self.incubate = Incubate(self)
        self.evolve = Evolve(self)
        self.release = Release(self)
        self.sniper = Sniper(self)

        self._origPosF = (0, 0, 0)
        self.api = None
        self._load_api()

        # config values that might be changed during runtime
        self.step_size = self.config.step_size
        self.should_catch_pokemon = self.config.should_catch_pokemon

        # threading / locking
        self.sem = BoundedSemaphore(1)  # gevent
        self.persist_lock = False
        self.locker = None

    def sleep(self, t):
        # eventlet.sleep(t * self.config.sleep_mult)
        gevent.sleep(t * self.config.sleep_mult)

    def _open_socket(self):
        desc_file = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), ".listeners")
        s = socket.socket()
        s.bind(("", 0))  # let the kernel find a free port
        sock_port = s.getsockname()[1]
        s.close()
        data = {}

        if os.path.isfile(desc_file):
            with open(desc_file, 'r+') as f:
                data = f.read()
                if PY2:
                    data = json.loads(data.encode() if len(data) > 0 else '{}')
                else:
                    data = json.loads(data if len(data) > 0 else '{}')
        data[self.config.username] = sock_port
        with open(desc_file, "w+") as f:
            f.write(json.dumps(data, indent=2))

        s = zerorpc.Server(self)
        s.bind("tcp://127.0.0.1:%i" % sock_port)  # the free port should still be the same
        self.socket = gevent.spawn(s.run)

        # zerorpc requires gevent, thus we would need a solution for eventlets
        # self.socket = self.thread_pool.spawn(wsgi.server, eventlet.listen(('127.0.0.1', sock_port)), self)
        # self.socket = self.thread_pool.spawn(eventlet.serve, eventlet.listen(('127.0.0.1', sock_port)), self)
        # alternative: GreenRPCService

    def _load_config(self):
        if self.config is None:
            config_file = "config.json"

            # If config file exists, load variables from json
            load = {}
            if os.path.isfile(config_file):
                with open(config_file) as data:
                    load.update(json.load(data))

            defaults = load.get('defaults', {})
            config = load.get('accounts', [])[self.cli_args['config_index']]

            if self.cli_args['debug'] or config.get('debug', False):
                logging.getLogger("requests").setLevel(logging.DEBUG)
                logging.getLogger("pgoapi").setLevel(logging.DEBUG)
                logging.getLogger("poketrainer").setLevel(logging.DEBUG)
                logging.getLogger("rpc_api").setLevel(logging.DEBUG)

            if config.get('auth_service', '') not in ['ptc', 'google']:
                logger.error("Invalid Auth service specified for account %s! ('ptc' or 'google')", config.get('username', 'NA'))
                return False

            # merge account section with defaults
            self.config = Config(dict_merge(defaults, config), self.cli_args)
        return True

    def reload_config(self):
        self.config = None
        return self._load_config()

    def _load_api(self, prev_location=None):
        if self.api is None:
            self.api = api.pgoapi.PGoApi()
            # set signature!
            self.api.activate_signature(
                os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), self.cli_args['encrypt_lib'])
            )

            # get position and set it in the API
            if self.cli_args['location']:
                position = get_location(self.cli_args['location'])
            else:
                position = get_location(self.config.location)
            self._origPosF = position
            if prev_location:
                position = prev_location
            self.api.set_position(*position)

            # retry login every 30 seconds if any errors
            self.log.info('Starting Login process...')
            login = False
            while not login:
                login = self.api.login(self.config.auth_service, self.config.username, self.config.get_password())
                if not login:
                    logger.error('Login error, retrying Login in 30 seconds')
                    self.sleep(30)
            self.log.info('Login successful')
            self._heartbeat(login, True)
        return True

    def reload_api(self, prev_location=None):
        self.api = None
        return self._load_api(prev_location)

    '''
    Blocking lock
        - only locks if current thread (greenlet) doesn't own the lock
        - persist=True will ensure the lock will not be released until the user
          explicitly sets self.persist_lock=False.
    '''
    def thread_lock(self, persist=False):
        if self.sem.locked():
            if self.locker == id(gevent.getcurrent()):
                self.log.debug("Locker is -- %s. No need to re-lock", id(gevent.getcurrent()))
                return False
            else:
                self.log.debug("Already locked by %s. Greenlet %s will wait...", self.locker, id(gevent.getcurrent()))
        self.sem.acquire()
        self.persist_lock = persist
        self.locker = id(gevent.getcurrent())
        self.log.debug("%s acquired lock (persist=%s)!", self.locker, persist)
        return True

    '''
    Releases the lock if needed and the user didn't persist it
    '''
    def thread_release(self):
        if self.sem.locked() and self.locker == id(gevent.getcurrent()) and not self.persist_lock:
            self.log.debug("%s is now releasing lock", id(gevent.getcurrent()))
            self.sem.release()

    def _callback(self, gt):
        try:
            if not gt.exception:
                result = gt.value
                logger.info('Thread finished with result: %s', result)
        except KeyboardInterrupt:
            return

        logger.exception('Error in main loop %s, restarting at location: %s',
                         gt.exception, self.get_position())
        # restart after sleep
        self.sleep(30)
        self.reload_config()
        self.reload_api(self.get_position())
        self.start()

    def start(self):
        self.thread = gevent.spawn(self._main_loop)

        self.thread.link(self._callback)

    def stop(self):
        if self.thread:
            self.thread.kill()

    def _main_loop(self):
        if self.config.enable_caching and self.config.experimental:
            if not self.config.use_cache:
                self.log.info('==== CACHING MODE: CACHE FORTS ====')
            else:
                self.log.info('==== CACHING MODE: ROUTE+SPIN CACHED FORTS ====')
            self.fort_walker.setup_cache()
        while True:
            # acquire lock for this thread
            if self.thread_lock(persist=True):
                try:
                    self._heartbeat()

                    self.poke_catcher.catch_all()
                    self.fort_walker.loop()
                    self.fort_walker.spin_nearest_fort()
                finally:
                    # after we're done, release lock
                    self.persist_lock = False
                    self.thread_release()
            self.sleep(1.0)

    def _heartbeat(self, res=False, login_response=False):
        if not isinstance(res, dict):
            # limit the amount of heartbeats, every second is just too much in my opinion!
            if (not self._heartbeat_number % self._heartbeat_frequency == 0 and
                    not self._heartbeat_number % self._full_heartbeat_frequency == 0):
                self._heartbeat_number += 1
                return

            # making a standard call to update position, etc
            req = self.api.create_request()
            req.get_player()
            if self._heartbeat_number % 10 == 0:
                req.check_awarded_badges()
                req.get_inventory()
            res = req.call()
            if not res or res.get("direction", -1) == 102:
                self.log.error("There were a problem responses for api call: %s. Restarting!!!", res)
                self.api.force_refresh_access_token()
                raise AuthException("Token probably expired?")

        self.log.debug(
            'Response dictionary: \n\r{}'.format(json.dumps(res, indent=2, default=lambda obj: obj.decode('utf8'))))

        responses = res.get('responses', {})
        if 'GET_PLAYER' in responses:
            self.player = Player(responses.get('GET_PLAYER', {}).get('player_data', {}))
            self.log.info("Player Info: {0}, Pokemon Caught in this run: {1}".format(self.player, self.pokemon_caught))

        if 'GET_INVENTORY' in responses:

            # update objects
            inventory_items = responses.get('GET_INVENTORY', {}).get('inventory_delta', {}).get('inventory_items', [])
            self.inventory = Inventory(self, inventory_items)
            for inventory_item in self.inventory.inventory_items:
                if "player_stats" in inventory_item['inventory_item_data']:
                    self.player_stats = PlayerStats(
                        inventory_item['inventory_item_data']['player_stats'],
                        self.pokemon_caught, self.start_time, self.exp_start
                    )
                    if self.exp_start is None:
                        self.exp_start = self.player_stats.run_exp_start
                    self.log.info("Player Stats: {}".format(self.player_stats))
            if self.config.list_inventory_before_cleanup:
                self.log.info("Player Inventory: %s", self.inventory)
            if not login_response:
                self.log.debug(self.inventory.cleanup_inventory())
                self.log.info("Player Inventory after cleanup: %s", self.inventory)
            if self.config.list_pokemon_before_cleanup:
                self.log.info(os.linesep.join(map(str, self.inventory.get_caught_pokemon())))

            if not login_response:
                # maintenance
                self.incubate.incubate_eggs()
                self.inventory.use_lucky_egg()
                self.evolve.attempt_evolve()
                self.release.cleanup_pokemon()

            # save data dump
            with open("data_dumps/%s.json" % self.config.username, "w") as f:
                posf = self.get_position()
                responses['lat'] = posf[0]
                responses['lng'] = posf[1]
                responses['GET_PLAYER']['player_data']['hourly_exp'] = self.player_stats.run_hourly_exp
                f.write(json.dumps(responses, indent=2, default=lambda obj: obj.decode('utf8')))

            # Farm precon
            if self.config.farm_items_enabled:
                pokeball_count = 0
                if not self.config.farm_ignore_pokeball_count:
                    pokeball_count += self.inventory.poke_balls
                if not self.config.farm_ignore_greatball_count:
                    pokeball_count += self.inventory.great_balls
                if not self.config.farm_ignore_ultraball_count:
                    pokeball_count += self.inventory.ultra_balls
                if not self.config.farm_ignore_masterball_count:
                    pokeball_count += self.inventory.master_balls
                if self.config.pokeball_farm_threshold > pokeball_count and not self._farm_mode_triggered:
                    self.should_catch_pokemon = False
                    self._farm_mode_triggered = True
                    self.log.info("Player only has %s Pokeballs, farming for more...", pokeball_count)
                    if self.config.farm_override_step_size != -1:
                        self.step_size = self.config.farm_override_step_size
                        self.log.info("Player has changed speed to %s", self.step_size)
                elif self.config.pokeball_continue_threshold <= pokeball_count and self._farm_mode_triggered:
                    self.should_catch_pokemon = self.config.should_catch_pokemon  # Restore catch pokemon setting from config file
                    self._farm_mode_triggered = False
                    self.log.info("Player has %s Pokeballs, continuing to catch more!", pokeball_count)
                    if self.config.farm_override_step_size != -1:
                        self.step_size = self.config.step_size
                        self.log.info("Player has returned to normal speed of %s", self.step_size)

        if 'DOWNLOAD_SETTINGS' in responses:
            settings = responses.get('DOWNLOAD_SETTINGS', {}).get('settings', {})
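            # NOTE: plain string comparison; adequate for 0.3x-style version
            # numbers but not a real semantic-version check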
            if settings.get('minimum_client_version', '0.0.0') > '0.33.0':
                self.log.error("Minimum client version has changed... the bot needs to be updated! Will now stop!")
                exit(0)
            map_settings = settings.get('map_settings', {})

            get_map_objects_min_refresh_seconds = map_settings.get('get_map_objects_min_refresh_seconds', 0.0)  # std. 5.0
            if get_map_objects_min_refresh_seconds != self.map_objects.get_api_rate_limit():
                self.map_objects.update_rate_limit(get_map_objects_min_refresh_seconds)

            """
            fort_settings = settings.get('fort_settings', {})
            inventory_settings = settings.get('inventory_settings', {})

            get_map_objects_max_refresh_seconds = map_settings.get('get_map_objects_max_refresh_seconds', 30.0)
            get_map_objects_min_distance_meters = map_settings.get('get_map_objects_min_distance_meters', 10.0)
            encounter_range_meters = map_settings.get('encounter_range_meters', 50.0)
            poke_nav_range_meters = map_settings.get('poke_nav_range_meters', 201.0)
            pokemon_visible_range = map_settings.get('pokemon_visible_range', 70.0)
            get_map_objects_min_refresh_seconds = map_settings.get('get_map_objects_min_refresh_seconds', 5.0)
            google_maps_api_key = map_settings.get('google_maps_api_key', '')

            self.log.info('complete settings: %s', responses.get('DOWNLOAD_SETTINGS', {}))

            self.log.info('minimum_client_version: %s', str(settings.get('minimum_client_version', '0.0.0')))

            self.log.info('poke_nav_range_meters: %s', str(poke_nav_range_meters))
            self.log.info('pokemon_visible_range: %s', str(pokemon_visible_range))

            self.log.info('get_map_objects_min_refresh_seconds: %s', str(get_map_objects_min_refresh_seconds))
            self.log.info('get_map_objects_max_refresh_seconds: %s', str(get_map_objects_max_refresh_seconds))
            self.log.info('get_map_objects_min_distance_meters: %s', str(get_map_objects_min_distance_meters))
            self.log.info('encounter_range_meters: %s', str(encounter_range_meters))
            """

        self._heartbeat_number += 1
        return res

    def set_position(self, *pos):
        return self.api.set_position(*pos)

    def get_position(self):
        return self.api.get_position()

    def get_orig_position(self):
        return self._origPosF

    """ FOLLOWING ARE FUNCTIONS FOR THE WEB LISTENER """

    def release_pokemon_by_id(self, p_id):
        # acquire lock for this thread
        if self.thread_lock(persist=True):
            try:
                return self.release.do_release_pokemon_by_id(p_id)
            finally:
                # after we're done, release lock
                self.persist_lock = False
                self.thread_release()
        else:
            return 'Only one Simultaneous request allowed'

    def current_location(self):
        self.log.info("Web got position: %s", self.get_position())
        return self.get_position()

    def get_caught_pokemons(self):
        return self.inventory.get_caught_pokemon_by_family(as_json=True)

    def get_inventory(self):
        return self.inventory.to_json()

    def get_player_info(self):
        return self.player.to_json()

    def snipe_pokemon(self, lat, lng):
        # acquire lock for this thread
        if self.thread_lock(persist=True):
            try:
                return self.sniper.snipe_pokemon(float(lat), float(lng))
            finally:
                # after we're done, release lock
                self.map_objects.wait_for_api_timer()
                self.persist_lock = False
                self.thread_release()
        else:
            return 'Only one Simultaneous request allowed'

    def ping(self):
        self.log.info("Responding to ping")
        return "pong"