Example #1
def api_delete_feed():
    feed_id = request.json['id']
    inbox = Queue()
    mail = Mail(inbox, {'type': 'delete-feed', 'feed_id': feed_id})
    deadlineManager.inbox.put(mail)
    resp = inbox.get()
    return jsonify({"msg": resp['success']})
Example #2
class FlowTests(TestCase):
    def create(self, conf={}, events=[]):
        self.input = Queue()
        self.output = Queue()

        context = DummyContext()
        with context:
            self.i = self.create_stage(**conf)
            self.input = self.i.setup(self.output)

        self.assertEqual(1, len(context.stages))

        self.i.start()
        for ev in events:
            self.input.put(ev)
        return self.i

    def wait(self, timeout=1.0, events=1):
        with gevent.Timeout(timeout):
            # wait for input to be consumed and output to be produced
            while self.input.qsize():
                gevent.sleep(0.0)
            while self.output.qsize() < events:
                gevent.sleep(0.0)

        self.i.stop()
        if events:
            return [self.output.get() for n in xrange(events)]
Example #3
    def test_kill_run(self, datetime, kill_pid_tree_mock):
        """
        Test :func:`.kill_run`.
        """
        event_queue = Mock()
        kill_request = Mock()
        kill_request.id = 1234
        kill_request.run.pid = 5678

        dts = datetime.now.return_value.isoformat.return_value

        kill_queue = Queue()
        kill_queue.put(kill_request)
        exit_queue = Mock()

        exit_queue_return = [Empty, None]

        def exit_queue_side_effect(*args, **kwargs):
            value = exit_queue_return.pop(0)
            if callable(value):
                raise value()

        exit_queue.get.side_effect = exit_queue_side_effect

        kill_run(kill_queue, event_queue, exit_queue)

        kill_pid_tree_mock.assert_called_with(5678)
        kill_request.patch.assert_called_with({
            'execute_dts': dts,
        })
        event_queue.put.assert_called_with((
            '{"kill_request_id": 1234, "kind": "kill_request", '
            '"event": "executed"}'
        ))
Example #4
    def __init__(self, config):
        super(ContractingDataBridge, self).__init__()
        self.config = config

        self.tenders_sync_client = TendersClientSync('',
            host_url=self.config_get('tenders_api_server'),
            api_version=self.config_get('tenders_api_version'),
        )

        self.client = TendersClient(
            self.config_get('api_token'),
            host_url=self.config_get('tenders_api_server'),
            api_version=self.config_get('tenders_api_version'),
        )

        self.contracting_client = ContractingClient(
            self.config_get('api_token'),
            host_url=self.config_get('tenders_api_server'),
            api_version=self.config_get('tenders_api_version')
        )

        self.initial_sync_point = {}
        self.tenders_queue = Queue(maxsize=500)
        self.handicap_contracts_queue = Queue(maxsize=500)
        self.contracts_put_queue = Queue(maxsize=500)
        self.contracts_retry_put_queue = Queue(maxsize=500)
Example #5
class CHubCallbackQueueBase(CHubCallbackBasicBase):
    def __init__(self,sHubId):
        CHubCallbackBasicBase.__init__(self,sHubId)
        self.__queue4Return = Queue() # current reply queue

    def PutCmdStrToReturnQueue(self, lsCmdStr):
        self.__queue4Return.put(lsCmdStr)

    def GetCmdStrFmReturnQueue(self):
        return self.__queue4Return.get() # (sClientIPPort,dwCmdId,CmdOStr)

    def HandleRequestCmd(self, sClientIPPort, dwCmdId, CmdIStr):
        # handle a client request command
        bDone = CHubCallbackBasicBase.HandleRequestCmd(self, sClientIPPort, dwCmdId, CmdIStr)
        if not bDone and CmdIStr[0].startswith(CMD0_ECHO_CMD):
            CmdOStr = ['OK','CHubCallbackQueueBase']
            CmdOStr.extend(CmdIStr)
            dwCmdId = GetCmdReplyFmRequest(dwCmdId)
            self.PutCmdStrToReturnQueue([sClientIPPort,dwCmdId,CmdOStr])
            bDone = True
        return bDone

    def DoHandleCheckAllLinkReply(self):
        # Handle reply/return messages (including notifications) from checking all links.
        # Once implemented in this class, subclasses normally need not override it.
        while not self.bQuitLoopFlag:
            return self.GetCmdStrFmReturnQueue()
Example #6
def joinall(greenlets, timeout=None, raise_error=False, count=None):
    from gevent.queue import Queue
    queue = Queue()
    put = queue.put
    if count is None:
        count = len(greenlets)
    timeout = Timeout.start_new(timeout)
    try:
        try:
            for greenlet in greenlets:
                greenlet.rawlink(put)
            if raise_error:
                for _ in xrange(count):
                    greenlet = queue.get()
                    if not greenlet.successful():
                        raise greenlet.exception
            else:
                for _ in xrange(count):
                    queue.get()
        except:
            if sys.exc_info()[1] is not timeout:
                raise
        finally:
            for greenlet in greenlets:
                greenlet.unlink(put)
    finally:
        timeout.cancel()
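A short usage sketch for the joinall above: spawn a few greenlets and wait for them all, letting raise_error surface the first failure.

import gevent

def work(n):
    gevent.sleep(0.01 * n)
    return n * n

greenlets = [gevent.spawn(work, n) for n in range(5)]
joinall(greenlets, timeout=2, raise_error=True)
print([g.value for g in greenlets])  # [0, 1, 4, 9, 16]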
Example #7
    def test_last_update_cache(self):
        handle = self.start_worker()
        queue = Queue()
        o_process = handle.process
        def new_process(msg):
            o_process(msg)
            queue.put(True)
        handle.process = new_process



        definition = SBE37_CDM_stream_definition()
        publisher = Publisher()

        stream_def_id = self.pubsub_cli.create_stream_definition(container=definition)
        stream_id = self.pubsub_cli.create_stream(stream_definition_id=stream_def_id)

        time = float(0.0)

        for granule in self.make_points(definition=definition, stream_id=stream_id, N=10):

            publisher.publish(granule, to_name=(self.XP, stream_id+'.data'))
            # Determinism sucks
            try:
                queue.get(timeout=5)
            except Empty:
                self.fail('Process never received the message.')

            doc = self.db.read(stream_id)
            ntime = doc.variables['time'].value
            self.assertTrue(ntime >= time, 'The documents did not sequentially get updated correctly.')
            time = ntime
Example #8
class __AudioNode(gevent.Greenlet):
    RATE = 44100
    CHUNK = 512
    PORT = 20000

    def __init__(self, is_log=True):
        gevent.Greenlet.__init__(self)
        self.is_log = is_log
        self.command = Queue()

    def is_quit(self):
        try:
            cmd = self.command.get_nowait()
            return cmd == "q"
        except Empty:
            return False

    def stop(self, msg=""):
        self.command.put("q")
        if self.is_log:
            print "%s - stopping %s" % (self.__class__.__name__, msg)

    def _run(self):
        if self.is_log:
            print "starting: %s" % self.__class__.__name__
        try:
            self.engine()
        finally:
            self.pa.close()
            self.sock.close()
Example #9
    def get_subscriber(self, last_event_id=None):
        """Obtain a new subscriber.

        The returned subscriber will receive all messages after the one
        with the given index (if they are still in the cache).

        last_event_id (unicode): the ID of the last message the client
            received, to request the ones generated since then to be
            sent again. If not given, no past messages will be sent.

        return (Subscriber): a new subscriber instance.

        """
        queue = Queue()
        # If a valid last_event_id is provided see if cache can supply
        # missed events.
        if last_event_id is not None and \
                re.match("^[0-9A-Fa-f]+$", last_event_id):
            last_event_key = int(last_event_id, 16)
            if len(self._cache) > 0 and last_event_key >= self._cache[0][0]:
                # All missed events are in cache.
                for key, msg in self._cache:
                    if key > last_event_key:
                        queue.put(msg)
            else:
                # Some events may be missing. Ask to reinit.
                queue.put(b"event:reinit\n\n")
        # Store the queue and return a subscriber bound to it.
        self._sub_queues.add(queue)
        return Subscriber(queue)
Example #10
class Qoorate(BrooklynCodeBrubeck):
    """Custom application class for Qoorate."""
    def __init__(self, settings_file=None, project_dir=None,
                 *args, **kwargs):
        """ Most of the parameters are dealt with by Brubeck,
            Additional functionality follow call to super
        """
        super(Qoorate, self).__init__(settings_file, project_dir, **kwargs)

        pool_size = 10

        if self.db_conn is None:
            # create our db_conn if we have the settings to
            if settings_file is not None:
                mysql_settings = self.get_settings('mysql')
                if mysql_settings is not None:
                    logging.debug("creating application db_conn pool")
                    self.db_conn = Queue()
                    for i in range(pool_size): 
                        self.db_conn.put_nowait(create_db_conn(mysql_settings)) 

        self.template_env.filters['sc'] = unsanitize_safe_htmlentities

    def determine_relevency(self, item):
        """schedule an indexing using concurrency"""
        logging.info("qoorate_determine_relevency, start: %s" % item)
        logging.info("qoorate_generate_relevency, star greenlet: %s" % item)
        qoorate_determine_relevency(item)
        logging.info("qoorate_generate_relevency, end: %s" % item)
Example #11
class Spider(object):
	"""docstring for Spider"""
	def __init__(self, route, RDB):
		
		self.route = route
		self.visited = RDB #must be a redis client
		self.todolst = Queue(100) 

	def put(self, item):
		self.todolst.put(item)

	def _fetch(self, timeout):
		todo = self.todolst
		route = self.route
		visited = self.visited
		try:
			while True:
				url = todo.get(timeout=timeout)
				handler = route.match(url)
				if not handler: continue
				hdl = handler(url)
				next_urls = hdl.get()
				visited.set(url,url)
				gevent.sleep(0.1)
				
				[todo.put(ul,timeout=timeout+10) for ul in next_urls if  not (visited.exists(ul) or todo.full())]
		except (Empty, Full):
		#except :
			#fix me
			traceback.print_exc()
			return 
Example #12
    def parallel_loop(func=None, elem=None, *args, **kwargs):
        import gevent
        from gevent.queue import Queue
        ret = []
        api.bui.cli.logger.debug('Using gevent')

        if not callable(func):
            api.abort(500, 'The provided \'func\' is not callable!')
        if not elem:
            return []

        output = Queue()

        processes = [
            gevent.spawn(
                func,
                e,
                output,
                *args,
                **kwargs
            ) for e in elem
        ]
        # wait for process termination
        gevent.joinall(processes)

        for p in processes:
            tmp = output.get()
            if isinstance(tmp, basestring):
                api.abort(500, tmp)
            elif tmp:
                ret.append(tmp)

        return ret
Example #13
class WebSocketHandler(ThreadedHandler):
    """WebSocket API for handlers

    This provides a socket-like interface similar to the browser
    WebSocket API for managing a WebSocket connection.
    """

    def __init__(self, sock, protocols, extensions, environ):
        ThreadedHandler.__init__(self, sock, protocols, extensions)

        self.environ = environ

        self._messages = Queue()
        self._lock = Lock()
        self._th = gevent.spawn(self._receive)

    def closed(self, code, reason=None):
        self._messages.put(StreamClosed(code, reason))

    def received_message(self, m):
        self._messages.put(copy.deepcopy(m))

    def receive(self, msg_obj=False):
        msg = self._messages.get()

        if isinstance(msg, StreamClosed):
            # Maybe we'll do something better
            return None

        if msg_obj:
            return msg
        else:
            return msg.data
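A consuming sketch for the handler above: receive() hands back message payloads until the StreamClosed marker is dequeued, at which point it returns None and the loop can exit (handler construction elided).

def drain(handler):
    # Read until the peer closes the stream.
    while True:
        data = handler.receive()
        if data is None:
            break  # closed() queued a StreamClosed marker
        print(data)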
Example #14
File: gactor.py Project: heckj/om
class Actor(gevent.Greenlet):

    def __init__(self):
        self.running = False
        self.inbox = Queue()
        gevent.Greenlet.__init__(self)

    def received(self, message):
        """
        Define your subclass to handle incoming messages here...
        """
        raise NotImplementedError()

    def cast(self, message):
        """ Send a message to the actor.

        If the actor is busy, the message will be enqueued for later
        consumption.  There is no return value.

        >>> a = Actor()
        >>> a.received = lambda msg: msg
        >>> a.cast("hello")
        """
        self.inbox.put_nowait(message)

    def _run(self):
        self.running = True

        while self.running:
            message = self.inbox.get()
            self.received(message)
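A usage sketch for this Actor: subclass it, override received(), start the greenlet, then cast messages into the inbox (the Echo subclass is illustrative).

class Echo(Actor):
    def received(self, message):
        print("got: %s" % message)

a = Echo()
a.start()          # runs _run in its own greenlet
a.cast("hello")
gevent.sleep(0)    # yield so the actor can drain its inbox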
Example #15
class WorkerPool(object):

    def __init__(self):
        self.pool_size = options.pool_size
        self.job_pool = Pool(size=self.pool_size)
        self.result = Queue()
        self.target_queue = Queue()

    def add_job(self, job_func, *args, **kwargs):
        job = self.job_pool.apply_async(
            job_func,
            args=args,
            kwds=kwargs,
            callback=self._call_func)
        self.job_pool.add(job)

    def run(self, timeout=None):
        self.job_pool.join(timeout=timeout, raise_error=False)

    def _call_func(self, job_ret):
        if job_ret:
            self.result.put(job_ret)

    def shutdown(self):
        self.job_pool.kill()
Example #16
def test_async_multi_publish_consume():
    with conn.channel() as channel:
        # first message
        message_body = 'test_async_multi_publish_consume message 1'
        channel.basic_publish(
            exchange='unit_test_room',
            routing_key='user1',
            body=message_body
        )

    recv_queue = Queue()
    rchannel = conn.allocate_channel()
    rchannel.basic_consume(queue='listener1', callback=recv_queue.put)

    resp = recv_queue.get()
    eq_(resp.body, message_body)
    resp.ack()

    assert recv_queue.empty()

    with conn.channel() as channel:
        # second message
        message_body = 'test_async_multi_publish_consume message 2'
        channel.basic_publish(
            exchange='unit_test_room',
            routing_key='user1',
            body=message_body
        )

    resp = recv_queue.get()
    eq_(resp.body, message_body)
    resp.ack()
Example #17
    def __init__(self, addr, game):
        self.hub  = gevent.get_hub()
        self.addr = addr
        self.sock = None
        self.game = game

        self.time_diff   = 0

        self.write_queue = Queue()
        self.write_tr    = None

        self.read_queue  = Queue()
        self.read_tr     = None
        self.dispatch_tr = None


        entype = Config.get("encrypt", None)
        if entype == "rc4":
            self.c2s_encrypt = RC4(Config["c2s_key"]).crypt
            self.s2c_encrypt = RC4(Config["s2c_key"]).crypt
        elif entype is None:
            self.c2s_encrypt = None
            self.s2c_encrypt = None
        else:
            raise ValueError("unsupported encrypt type %s" % entype)

        self._sessions = {}
Example #18
class LocalControlProgramDescriptor(object):
    def __init__(self, hc, agentUuid, program_id):
        self.log = logging.getLogger('LocalControlProgramDescriptor_{}'.format(program_id))
        self.agentUuid = agentUuid
        self.id = program_id
        self.hc = hc
        self.queue = Queue()

    def _receive_msg(self, msg):
        self.queue.put(msg)

    def recv(self, block=True, timeout=None):
        try:
            self.log.debug("Waiting for msg in blocking call")
            msg = self.queue.get(block=block, timeout=timeout)
            return msg
        except gevent.timeout.Timeout as e:
            return None
        except gevent.queue.Empty as e:
            return None

    def send(self, msg):
        return self.hc.send_msg_to_local_control_program(self.id, msg, self.agentUuid)

    def close(self):
        return self.hc.stop_local_control_program(self.id, self.agentUuid)
Example #19
class IMapUnordered(Greenlet):

    def __init__(self, spawn, func, iterable):
        from gevent.queue import Queue
        Greenlet.__init__(self)
        self.spawn = spawn
        self.func = func
        self.iterable = iterable
        self.queue = Queue()
        self.count = 0

    def __iter__(self):
        return self.queue

    def _run(self):
        try:
            func = self.func
            for item in self.iterable:
                self.count += 1
                self.spawn(func, item).rawlink(self._on_result)
        finally:
            self.__dict__.pop('spawn', None)
            self.__dict__.pop('func', None)
            self.__dict__.pop('iterable', None)

    def _on_result(self, greenlet):
        self.count -= 1
        if greenlet.successful():
            self.queue.put(greenlet.value)
        if self.ready() and self.count <= 0:
            self.queue.put(StopIteration)
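A driving sketch, assuming gevent.spawn as the spawn callable: results arrive in completion order, and iteration ends when the StopIteration sentinel is queued.

import gevent

def square(x):
    gevent.sleep(0.01 * (5 - x))  # later inputs finish first
    return x * x

im = IMapUnordered(gevent.spawn, square, range(5))
im.start()
print(sorted(im))  # [0, 1, 4, 9, 16], though arrival order differs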
Example #20
 def _run(self):
     utils.log("[%s] parsing site %s" % (self, self.base))
     
     queue = Queue()
     pool  = Pool(64)
     seed  = 'http://community.seattletimes.nwsource.com/entertainment/i_results.php?search=venue&type=Restaurant&page=1'
     
     pool.spawn(self._parseResultsPage, pool, queue, seed, '1', True)
     
     while True:
         items = []
         
         while not queue.empty():
             item = queue.get_nowait()
             items.append(item)
         
         if 0 == len(items) and 0 == len(pool):
             break
         
         for item in items:
             pool.spawn(item[0], pool, queue, item[1], item[2], item[3])
         
         time.sleep(0.01)
     
     pool.join()
     self._output.put(StopIteration)
Example #21
class SystemClient(object):
    def __init__(self, socket, address, gateway, server=False):
        self.socket = socket
        self.address = address
        self.gateway = gateway
        self.server = server
        self.queue = Queue(maxsize=100)

        self.receiver = Receiver(self)
        self.sender = Sender(self)

    def start(self):
        self.receiver.start()
        self.sender.start()

    def kill(self):
        if gevent.getcurrent() in [self.receiver, self.sender]:
            logger.error("SystemClient.kill() may not be called by the client's greenlets!")
            self.gateway.unregister(self)
            return

        self.receiver.kill()
        self.sender.kill()
        self.socket.close()

    def send(self, message, sender):
        self.queue.put(message)

    def close(self, message=""):
        try:
            logger.warning(u"Closing connection to {0} due to {1}".format(self.address, message))
            self.gateway.unregister(self)
        except Exception, e:
            traceback.print_exc()
            logger.error(u"Got {0} while trying to close and unregister a client!".format(e))
Example #22
class C2DMService(object):
    def __init__(self, source, email, password):
        self.source = source
        self.email = email
        self.password = password
        self._send_queue = Queue()
        self._send_queue_cleared = Event()
        self.log = logging.getLogger('pulsus.service.c2dm')

    def _send_loop(self):
        self._send_greenlet = gevent.getcurrent()
        try:
            self.log.info("C2DM service started")
            while True:
                notification = self._send_queue.get()
                try:
                    self._do_push(notification)
                except Exception, e:
                    self.log.exception("Error while pushing")
                    self._send_queue.put(notification)
                    gevent.sleep(5.0)
                finally:
                    if self._send_queue.qsize() < 1 and \
                            not self._send_queue_cleared.is_set():
                        self._send_queue_cleared.set()
Example #23
class WebSocketClient(ThreadedClient):
    def __init__(self, url, protocols=None, version='8'):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        ThreadedClient.__init__(self, url, protocols=protocols, version=version, sock=sock)
        
        self._lock = Semaphore()
        self._th = Greenlet(self._receive)
        self._messages = Queue()
        
        self.extensions = []

    def opened(self, protocols, extensions):
        self.protocols = protocols
        self.extensions = extensions
    
    def received_message(self, m):
        self._messages.put(copy.deepcopy(m))
    
    def write_to_connection(self, bytes):
        if not self.client_terminated:
            return self.sock.sendall(bytes)    
    
    def closed(self, code, reason=None):
        self._messages.put(StreamClosed(code, reason))
    
    def receive(self, msg_obj=False):
        msg = self._messages.get()
        
        if isinstance(msg, StreamClosed):
            return None
            
        if msg_obj:
            return msg
        else:
            return msg.data
Example #24
class Worker():
    def __init__(self,inputdict, timeout, outputmode, validation_func):
        self.threads = []
        self.queue = Queue()
        self.inputdict = inputdict
        self.timeout = timeout
        self.outputmode = outputmode
        self.validation_func = validation_func
 
    
    def infi(self, th, thm):
        k = 0
        while k < 10000:
            print 'I am in INFI ', th, thm
            time.sleep(.1)
        print "out while infi", thm
        self.queue.put_nowait(thm)

    def test(self, th, thm):
        print "inside test", thm
        self.queue.put_nowait(thm)

    def start(self, thm):
        print "Hii"
        self.threads.append(gevent.spawn(self.infi, 1, thm))
        self.threads.append(gevent.spawn(self.test, 2, thm))
        while self.queue.empty():
            print "queue is empty %s" % thm
            gevent.sleep(0)
        raise TaskComplete

    def stop(self):
        gevent.killall(self.threads)
Example #25
class NotifyingQueue(Event):
    """ A queue that follows the wait protocol. """

    def __init__(self):
        super(NotifyingQueue, self).__init__()
        self._queue = Queue()

    def put(self, item):
        """ Add new item to the queue. """
        self._queue.put(item)
        self.set()

    def empty(self):
        return self._queue.empty()

    def get(self, block=True, timeout=None):
        """ Removes and returns an item from the queue. """
        value = self._queue.get(block, timeout)
        if self._queue.empty():
            self.clear()
        return value

    def stop(self):
        """ Request a stop event. """
        self.set()
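Because NotifyingQueue is also an Event, a consumer can block on wait() until something arrives; a minimal sketch:

import gevent

nq = NotifyingQueue()
gevent.spawn_later(0.1, nq.put, 'item')
nq.wait()          # put() calls set(), waking the waiter
print(nq.get())    # 'item'; the event clears once the queue is empty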
Example #26
class ConnectionPool:
    def __init__(self, db_config, time_to_sleep=30, test_run=False):
        self.username = db_config.get('user')
        self.password = db_config.get('password')
        self.host = db_config.get('host')
        self.port = int(db_config.get('port'))
        self.max_pool_size = 20
        self.test_run = test_run
        self.pool = None
        self.time_to_sleep = time_to_sleep
        self._initialize_pool()

    def get_initialized_connection_pool(self):
        return self.pool

    def _initialize_pool(self):
        self.pool = Queue(maxsize=self.max_pool_size)
        current_pool_size = self.pool.qsize()
        if current_pool_size < self.max_pool_size:  # this is a redundant check, can be removed
            for _ in xrange(0, self.max_pool_size - current_pool_size):
                try:
                    conn = db.connect(host=self.host,
                                      user=self.username,
                                      passwd=self.password,
                                      port=self.port)
                    self.pool.put_nowait(conn)

                except db.OperationalError, e:
                    LOGGER.error("Cannot initialize connection pool - retrying in {} seconds".format(self.time_to_sleep))
                    LOGGER.exception(e)
                    break
        self._check_for_connection_loss()
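A checkout/checkin sketch for the pool above; the helper name is illustrative, and callers must return connections or the pool drains.

def with_connection(pool, fn):
    conn = pool.get_initialized_connection_pool().get()  # blocks when exhausted
    try:
        return fn(conn)
    finally:
        pool.get_initialized_connection_pool().put(conn)  # hand it back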
Example #27
class RemoteIterator(object):

    def __init__(self):
        self.queue = Queue()

    def __iter__(self):
        return self

    def send(self, value):
        if self.queue is None:
            raise StopIteration
        self.queue.put((True, value))

    def throw(self, exc):
        if self.queue is None:
            raise StopIteration
        self.queue.put((False, exc))

    def close(self):
        self.throw(StopIteration)

    def next(self):
        if self.queue is None:
            raise StopIteration
        yields, value = self.queue.get()
        if yields:
            return value
        else:
            self.queue = None
            raise value
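A usage sketch: one greenlet feeds the iterator while the consumer pulls values; close() ends iteration by routing StopIteration through the queue. (next() is the Python 2 protocol method, matching the class above.)

import gevent

it = RemoteIterator()

def produce():
    it.send('a')
    it.send('b')
    it.close()  # queues StopIteration for the consumer

gevent.spawn(produce)
print(it.next())  # 'a'
print(it.next())  # 'b'
# a further it.next() raises StopIteration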
Example #28
class BlackBerryPushService(object):
    def __init__(self, app_id, password, push_url):
        self.app_id = app_id
        self.password = password
        self.push_url = push_url
        self._send_queue = Queue()
        self._send_queue_cleared = Event()
        self.log = logging.getLogger('pulsus.service.bbp')

    def _send_loop(self):
        self._send_greenlet = gevent.getcurrent()
        try:
            self.log.info("BlackBerry Push service started")
            while True:
                notification = self._send_queue.get()
                try:
                    self._do_push(notification)
                except Exception, e:
                    print e
                    self._send_queue.put(notification)
                    gevent.sleep(5.0)
                finally:
                    if self._send_queue.qsize() < 1 and \
                            not self._send_queue_cleared.is_set():
                        self._send_queue_cleared.set()
Example #29
class Actor(Greenlet):
	__metaclass__ = MetaActor
	
	def __init__(self):
		Greenlet.__init__(self)
		self.inbox = Queue()
		Actor.actors.append(self)
		
	def send(self, actor, message):
		actor.inbox.put(message)
		
	def receive(self, message):
		raise NotImplementedError()
		
	@staticmethod
	def wait_actors():
		gevent.joinall(Actor.actors)
		
	def loop(self):
		if not self.inbox.empty():
			self.receive(self.inbox.get())
		gevent.sleep()
		
	def _run(self):
		while self.started:
			self.loop()
Example #30
 def _run(self):
     utils.log("[%s] parsing site %s" % (self, self.base))
     
     queue = Queue()
     pool  = Pool(64)
     seed  = 'http://www.nytimes.com/best-sellers-books/'
     
     pool.spawn(self._parseResultsPage, pool, queue, seed, 'current', True)
     
     while True:
         items = []
         
         while not queue.empty():
             item = queue.get_nowait()
             items.append(item)
         
         if 0 == len(items) and 0 == len(pool):
             break
         
         for item in items:
             pool.spawn(item[0], pool, queue, item[1], item[2], item[3])
         
         time.sleep(0.01)
     
     pool.join()
     self._output.put(StopIteration)
Example #31
def transfer_queue():
    """Transfer event queue used by the confirmation sender."""
    return Queue()
Example #32
class CoroutineEngine(object):
    """Coroutine acceleration component."""

    def __init__(self, core, power: int = None, **kwargs) -> None:
        """

        :param core: driver core
        :param power: coroutine concurrency
        :param progress_name: progress bar label
        """
        # initialize parameters
        self.max_queue_size = 0
        self.power = power
        # self.work_Q = Queue()

        # driver core
        self.core = core

        # extra parameters
        self.config_path = kwargs.get("config_path")  # config file
        self.docker = kwargs.get("docker")  # task list
        self.progress_name = kwargs.get('progress_name')  # progress bar label
        self.silence = kwargs.get('silence')  # launch selenium silently (headless)
        self.work_Q = Queue()

    def load_tasks(self, tasks: list) -> None:
        if isinstance(tasks, list):
            for task in tasks:
                self.work_Q.put_nowait(task)

    def flexible_power(self):
        """
        @todo optimize the elastic coroutine algorithm
        @return:
        """
        import psutil
        # If self.power is not specified, schedule resources with the elastic coroutine scheme
        if not self.power:
            self.max_queue_size = self.work_Q.qsize()
            # limit = round((psutil.cpu_count() / CRAWLER_SEQUENCE.__len__()))
            limit = psutil.cpu_count()
            self.power = limit if self.max_queue_size >= limit else self.max_queue_size

    def launch(self, ) -> None:
        while not self.work_Q.empty():
            task = self.work_Q.get_nowait()
            self.control_driver(task)

    def control_driver(self, task) -> None:
        """
        Override this method.
        :param task:
        :return:
        """

    def progress_manager(self, total, desc='Example', leave=True, ncols=100, unit='B', unit_scale=True) -> None:
        """
        Progress monitoring.
        :return:
        """
        from tqdm import tqdm
        import time
        # iterable: an iterable; not needed when updating manually
        # desc: string describing the bar, shown on the left
        # page_size: total number of items
        # leave: bool, whether to keep the bar after iteration finishes
        # file: output target; defaults to the terminal and usually needs no setting
        # ncols: bar width, auto-sized by default; 0 means no bar, only text output
        # unit: label text for the processed items

        with tqdm(total=total, desc=desc, leave=leave, ncols=ncols,
                  unit=unit, unit_scale=unit_scale) as progress_bar:
            progress_bar.update(self.power)
            # while self.max_queue_size != Middleware.hera.qsize():
            #     progress_bar.update(Middleware.hera.qsize())
            #     time.sleep(1)
            while not self.work_Q.empty():
                now_1 = self.work_Q.qsize()
                time.sleep(0.1)
                now_2 = self.work_Q.qsize() - now_1
                progress_bar.update(abs(now_2))

    def run(self, speed_up=True, use_bar=False) -> None:
        """
        Coroutine task entry point.
        :return:
        """
        task_list = []

        if isinstance(self.docker, list):
            # refresh the task queue
            self.load_tasks(tasks=self.docker)
        else:
            # hand over the work queue
            self.work_Q = self.docker

        # elastic coroutines
        if not speed_up:
            self.power = 1
        else:
            self.flexible_power()
        logger.info('Flexible Power:{}'.format(self.power))
        logger.info('Queue Capacity:{}'.format(self.max_queue_size))

        # start the progress bar
        if use_bar:
            import threading
            threading.Thread(target=self.progress_manager,
                             args=(self.max_queue_size, self.progress_name + '[{}]'.format(self.power))).start()

        for x in range(self.power):
            task = gevent.spawn(self.launch)
            task_list.append(task)
        gevent.joinall(task_list)
Example #33
def _queue():
    # queue
    q = Queue()
    q.put(1)
    while not q.empty():
        q.get()
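Beyond the busy loop above, get() can also block with a timeout; a small sketch of the Empty case:

from gevent.queue import Queue, Empty

q = Queue()
try:
    q.get(timeout=0.1)   # waits up to 0.1s, then raises Empty
except Empty:
    print('queue stayed empty')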
Example #34
 def __init__(self):
     self.queue = Queue()
Example #35
 def __init__(self, download_urls):
     super(DownloadByGevent, self).__init__()
     self.workQueue = Queue(1000)
     self.download_urls = download_urls
Example #36
 def _init():
     TaskResponseQueueManager._queue = Queue(0)
     return True
Example #37
from PIL import Image, ImageFile
from gevent.event import AsyncResult
from gevent.queue import Empty, Queue
from gevent.timeout import Timeout
import flask
from flask import send_from_directory

from app import app

DATA_DIR = 'app/src/data'
KEEP_ALIVE_DELAY = 25
MAX_IMAGE_SIZE = 800, 600
MAX_IMAGES = 10
MAX_DURATION = 300

BROADCAST_QUEUE = Queue()

try:  # Reset saved files on each start
    # rmtree(DATA_DIR, True)
    os.mkdir(DATA_DIR)
except OSError:
    pass


def broadcast(message):
    """Notify all waiting gthreads of the message."""
    waiting = []
    try:
        while True:
            waiting.append(BROADCAST_QUEUE.get(block=False))
    except Empty:
        pass  # queue drained; all waiting entries collected
Example #38
class Worker():
    def __init__(self, master_address, worker_address, debug):
        self.id = None
        self.master_address = master_address
        if (worker_address is None):
            self.worker_address = self.getMyAddress()
            self.is_remote = False
        else:
            self.worker_address = worker_address
            self.is_remote = True
        self.all_task_list = {}
        self.task_queue = Queue()
        self.debug = debug
        self.streaming_data = {}
        self.streaming_meta_data = {}
        self.event_queue = Queue()
        self.task_node_table = {}
        self.worker_list = None

    def getMyAddress(self):
        try:
            csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            csock.connect(('8.8.8.8', 80))
            (addr, port) = csock.getsockname()
            csock.close()
            return addr + ":" + port
        except socket.error:
            return "127.0.0.1"

    def get_partition_infor(self, partition_infor, job_id, worker_list):
        self.streaming_meta_data = partition_infor
        self.worker_list = worker_list
        if job_id not in self.streaming_data.keys():
            self.streaming_data[job_id] = {}
        for partition in partition_infor[self.id]:
            if partition not in self.streaming_data[job_id].keys():
                self.streaming_data[job_id][partition] = []
        # debug_print_by_name('wentao', str(self.streaming_data))

    def find_worker_in_metadata(self, partition_id, metadata):
        worker = []
        debug_print_by_name('wentao', str(metadata))
        for worker_id, partition_list in metadata.items():
            if partition_id in partition_list and worker_id != self.id:
                worker.append(worker_id)
        return worker

    def replicate(self, job_id, partition, value, timestamp):
        if job_id in self.streaming_data.keys():
            self.streaming_data[job_id][partition].append({
                'value':
                value,
                'timestamp':
                timestamp
            })
            #print self.streaming_data

    def get_streaming_message(self, message):
        """
        Function to get and store streaming message.

        :param value: spark streaming message
               spark streaming message is "job_id,value"
        """
        job_id, value = message.split(",")
        job_id = int(job_id)
        #print message
        try:
            if job_id in self.streaming_data.keys():
                p_id = None
                length = 9999999
                partitions = self.streaming_data[job_id]
                for partition_id, data in partitions.items():
                    if len(data) < length:
                        length = len(data)
                        p_id = partition_id
                timestamp = int(time.time())
                partitions[p_id].append({
                    'value': value,
                    'timestamp': timestamp
                })
                #todo replica
                worker_list = self.find_worker_in_metadata(
                    p_id, self.streaming_meta_data)
                debug_print_by_name('wentao', str(worker_list))
                for worker_id in worker_list:
                    client = get_client(self.worker_list[worker_id]['address'],
                                        1)
                    execute_command(client, client.replicate, job_id, p_id,
                                    value, timestamp)
                #print self.streaming_data
        except Exception:
            pass
        #self.streaming_data = {}

    def startRPCServer(self):
        master = zerorpc.Server(self)
        if self.is_remote:
            addr = self.worker_address
        else:
            addr = "0.0.0.0:" + self.worker_address.split(":")[1]
        # print "worker address is: %s at %s " % (addr, time.asctime(time.localtime(time.time())))
        # addr = "tcp://0.0.0.0:"+port
        master.bind('tcp://' + addr)
        master.run()

    def runPartition(self, task):
        job_id = task.job_id
        task_id = task.task_id
        # create job if not exist
        if not self.all_task_list.has_key(job_id):
            task_list = {}
            task_list[task_id] = {"status": Status.START, "data": None}
            self.all_task_list[job_id] = task_list
        else:
            self.all_task_list[job_id][task_id] = {
                "status": Status.START,
                "data": None
            }
        debug_print(
            "[Worker]Start task with job : %s task: %s at %s" %
            (job_id, task_id, time.asctime(time.localtime(time.time()))),
            self.debug)
        # try:
        debug_print_by_name('wentao', str(task.input_source))
        result = task.last_rdd.get(task.input_source)
        # debug_print("[Worker] Result of Task {0} is generated:{1}".format(task.task_id, result),self.debug)
        self.all_task_list[job_id][task_id] = {
            "status": Status.FINISH,
            "data": result
        }
        debug_print(
            "[Worker]Finish task with job : %s task: %s at %s" %
            (job_id, task_id, time.asctime(time.localtime(time.time()))),
            self.debug)
        # except Exception:
        #     print Exception.
        #     debug_print("[Worker] Result of Task {0} is failed".format(task.task_id), self.debug)
        #     self.all_task_list[job_id][task_id] = {"status": Status.FAIL, "data": None}

    def get_rdd_result(self, job_id, task_id, partition_id):
        data = None
        if self.all_task_list.has_key(
                job_id) and self.all_task_list[job_id].has_key(task_id):
            data = self.all_task_list[job_id][task_id]['data']
            # if data is not None:
            # debug_print(
            # "[Worker]Get RDD result val {0} with job : {1} task: {2} partition: {3} at {4}".format(data, job_id, task_id, partition_id, time.asctime(time.localtime(time.time()))),
            # self.debug)
            #print "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%data={0}, partition_id={1} isDict={2}".format(data, partition_id, isinstance(data, dict))
            if isinstance(data, dict):
                if data.has_key(int(partition_id)):
                    return data[int(partition_id)]
                else:
                    return []
        return data

    def register(self):
        while self.id is None:
            client = get_client(self.master_address)
            self.id = execute_command(client, client.registerWorker,
                                      self.worker_address)
            # self.id = client.registerWorker(self.worker_address)
            if self.id is not None:
                debug_print(
                    "[Worker] worker %d  %s registered at %s " %
                    (self.id, self.worker_address,
                     time.asctime(time.localtime(time.time()))), self.debug)
                break
            else:
                gevent.sleep(2)

    def filter_data(self, job_id, interval):
        cur_time = time.time()
        data = self.streaming_data[job_id]
        for partition_id, partition_data in data.items():
            #debug_print_by_name('kaijie', str(partition_data))
            data[partition_id] = filter(
                lambda a: cur_time - a['timestamp'] < interval, partition_data)

    def check_if_streaming(self, task):
        rdd_iter = task.last_rdd
        while True:
            if not isinstance(rdd_iter, NarrowRDD):
                return isinstance(rdd_iter, Streaming)
            rdd_iter = rdd_iter.parent

    def start_task(self, serialized_task, task_node_table):
        task = unpickle_object(serialized_task)
        for source in task.input_source:
            source['task_node_table'] = self.task_node_table
            if self.check_if_streaming(task):
                # source['streaming_data'] = self.streaming_data
                # debug_print_by_name('wentao', str(self.streaming_data))
                # debug_print_by_name('wentao', str(task.input_source[0]))
                self.filter_data(task.input_source[0]['job_id'],
                                 task.input_source[0]['interval'])
                s_data = []
                job_id = task.input_source[0]['job_id']
                parition_id = task.input_source[0]['partition_id']
                debug_print_by_name('wentao', str(self.streaming_data))
                debug_print_by_name('wentao', str(job_id))
                debug_print_by_name('wentao', str(parition_id))
                for data in self.streaming_data[job_id][parition_id]:
                    s_data.append(data['value'])
                source['streaming_data'] = s_data

        debug_print("[Worker] Received Task {0}".format(task.task_id),
                    self.debug)
        # event = {
        #     'type' : 'Update',
        #     'data' : task_node_table
        # }
        self.event_queue.put(task_node_table)
        self.task_queue.put(task)
        return 0

    def task_manager(self):
        while True:
            while not self.task_queue.empty():
                task = self.task_queue.get()
                # debug_print("Create thread: %s at %s" % (0, time.asctime(time.localtime(time.time()))))
                thread = gevent.spawn(self.runPartition, task)
                debug_print(
                    "[Worker] Task created: Key: {0} at {1}".format(
                        task.task_id,
                        time.asctime(time.localtime(time.time()))), self.debug)
            gevent.sleep(0)

    def update_task_node_table(self, task_node_table):
        try:
            self.event_queue.put(task_node_table)
        except:
            return 1
        return 0

    def event_handler(self):
        while True:
            while not self.event_queue.empty():
                task_node_table = self.event_queue.get()
                #update task_node_table
                self.task_node_table.update(task_node_table)
                # for job_task_id, worker_info in  task_node_table :
                #     self.task_node_table[job_task_id] = worker_info
            gevent.sleep(0)

    def heartbeat(self):
        while True:
            if self.id is not None:
                # traverse task list and report processing tasks
                task_status_list = {}
                for job_id, task_list in self.all_task_list.items():
                    task_status_list[job_id] = {}
                    for task_id, value in task_list.items():
                        if value['status'] != Status.FINISH_REPORTED:
                            task_status_list[job_id][task_id] = value['status']

                client = get_client(self.master_address)
                debug_print(
                    "[Worker] Worker update task status: worker_id: %s at %s" %
                    (self.id, time.asctime(time.localtime(time.time()))),
                    self.debug)
                print("task status list: ", str(task_status_list))
                ret = execute_command(client, client.updateWorkerStatus,
                                      self.id, task_status_list)
                # ret = client.updateWorkerStatus(self.id, task_status_list)
                if ret is not None:
                    # client.close()
                    if ret == 0:
                        # if already reported finish task, don't need to report finish again
                        for job_id, task_list in self.all_task_list.items():
                            for task_id, value in task_list.items():
                                if value['status'] == Status.FINISH:
                                    value['status'] = Status.FINISH_REPORTED
            debug_print_by_name('kaijie', str(self.all_task_list))
            gevent.sleep(2)

    def run(self):
        self.register()
        # self.startRPCServer()
        thread1 = gevent.spawn(self.heartbeat)
        thread2 = gevent.spawn(self.event_handler)
        thread3 = gevent.spawn(self.task_manager)
        thread4 = gevent.spawn(self.startRPCServer)
        # self.startRPCServer()
        gevent.joinall([thread1, thread2, thread3, thread4])
Example #39
class SequentialGeventHandler(object):
    """Gevent handler for sequentially executing callbacks.

    This handler executes callbacks in a sequential manner. A queue is
    created for each of the callback events, so that each type of event
    has its callback type run sequentially.

    Each queue type has a greenlet worker that pulls the callback event
    off the queue and runs it in the order the client sees it.

    This split helps ensure that watch callbacks won't block session
    re-establishment should the connection be lost during a Zookeeper
    client call.

    Watch callbacks should avoid blocking behavior as the next callback
    of that type won't be run until it completes. If you need to block,
    spawn a new greenlet and return immediately so callbacks can
    proceed.

    """
    name = "sequential_gevent_handler"
    sleep_func = staticmethod(gevent.sleep)

    def __init__(self):
        """Create a :class:`SequentialGeventHandler` instance"""
        self.callback_queue = Queue()
        self._running = False
        self._async = None
        self._state_change = Semaphore()
        self._workers = []

    class timeout_exception(gevent.event.Timeout):
        def __init__(self, msg):
            gevent.event.Timeout.__init__(self, exception=msg)

    def _create_greenlet_worker(self, queue):
        def greenlet_worker():
            while True:
                try:
                    func = queue.get()
                    if func is _STOP:
                        break
                    func()
                except Empty:
                    continue
                except Exception as exc:
                    log.warning("Exception in worker greenlet")
                    log.exception(exc)
        return gevent.spawn(greenlet_worker)

    def start(self):
        """Start the greenlet workers."""
        with self._state_change:
            if self._running:
                return

            self._running = True

            # Spawn our worker greenlets, we have
            # - A callback worker for watch events to be called
            for queue in (self.callback_queue,):
                w = self._create_greenlet_worker(queue)
                self._workers.append(w)
            atexit.register(self.stop)

    def stop(self):
        """Stop the greenlet workers and empty all queues."""
        with self._state_change:
            if not self._running:
                return

            self._running = False

            for queue in (self.callback_queue,):
                queue.put(_STOP)

            while self._workers:
                worker = self._workers.pop()
                worker.join()

            # Clear the queues
            self.callback_queue = Queue()  # pragma: nocover

            if hasattr(atexit, "unregister"):
                atexit.unregister(self.stop)

    def select(self, *args, **kwargs):
        return gevent.select.select(*args, **kwargs)

    def socket(self, *args, **kwargs):
        return create_tcp_socket(socket)

    def create_connection(self, *args, **kwargs):
        return create_tcp_connection(socket, *args, **kwargs)

    def event_object(self):
        """Create an appropriate Event object"""
        return gevent.event.Event()

    def lock_object(self):
        """Create an appropriate Lock object"""
        return gevent.thread.allocate_lock()

    def rlock_object(self):
        """Create an appropriate RLock object"""
        return RLock()

    def async_result(self):
        """Create a :class:`AsyncResult` instance

        The :class:`AsyncResult` instance will have its completion
        callbacks executed in the thread the
        :class:`SequentialGeventHandler` is created in (which should be
        the gevent/main thread).

        """
        return AsyncResult()

    def spawn(self, func, *args, **kwargs):
        """Spawn a function to run asynchronously"""
        return gevent.spawn(func, *args, **kwargs)

    def dispatch_callback(self, callback):
        """Dispatch to the callback object

        The callback is put on separate queues to run depending on the
        type as documented for the :class:`SequentialGeventHandler`.

        """
        self.callback_queue.put(lambda: callback.func(*callback.args))
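A dispatch sketch for the handler above, assuming a simple callback holder with func and args fields (the namedtuple is illustrative, not part of the source):

from collections import namedtuple

Callback = namedtuple('Callback', 'func args')

handler = SequentialGeventHandler()
handler.start()    # spawns the callback worker greenlet
handler.dispatch_callback(Callback(func=print, args=('watch fired',)))
handler.stop()     # sends _STOP and joins the workers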
Example #40
import gevent, time, requests
# import gevent, time, and requests
from gevent.queue import Queue
# import the queue module from the gevent library

start = time.time()

url_list = ['https://www.baidu.com/',
'https://www.sina.com.cn/',
'http://www.sohu.com/',
'https://www.qq.com/',
'https://www.163.com/',
'http://www.iqiyi.com/',
'https://www.tmall.com/',
'http://www.ifeng.com/']

work = Queue()
# create a Queue object and assign it to work
for url in url_list:
# iterate over url_list
    work.put_nowait(url)
    # put_nowait() puts each URL into the queue

def crawler():
    while not work.empty():
    # keep going while the queue is not empty
        url = work.get_nowait()
        # get_nowait() takes a URL out of the queue
        r = requests.get(url)
        # fetch the URL with requests.get()
        print(url,work.qsize(),r.status_code)
        # print the URL, the queue size, and the response status code
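The snippet stops before any workers start; the usual continuation of this pattern drives crawler with a few greenlets, roughly:

tasks_list = []
for x in range(2):
    task = gevent.spawn(crawler)
    tasks_list.append(task)
gevent.joinall(tasks_list)
print(time.time() - start)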
Example #41
'''
Created on 2015-10-23

@author: Shawn
'''

from gevent.monkey import patch_all

patch_all()
from socket import socket
from gevent.queue import Queue
import gevent.socket

import conf_server

queue = Queue()

queue.put('123')
queue.put('456')

print queue.peek()
print queue.peek()

import json

print json.loads('123')

# address = (conf_server.SERVER_IP, conf_server.SERVER_PORT)
# s = socket()
# s.connect(address)
#
Example #42
 def __init__(self, pool_size=5000):
     self.job_pool = Pool(size=pool_size)
     self.result = Queue()
     self.target_queue = Queue()
Example #43
 def get(self, *args, **kw):
     self.event.set()
     return Queue.get(self, *args, **kw)
Example #44
 def test_imap_unordered_no_stop(self):
     q = Queue()
     q.put(1234)
     gevent.spawn_later(0.1, q.put, StopIteration)
     result = list(self.pool.imap_unordered(lambda _: _, q))
     self.assertEqual(result, [1234])
Example #45
 def __init__(self, sock, addr):
     Endpoint.__init__(self, sock, addr)
     Greenlet.__init__(self)
     self.ctlcmds = Queue(0)
     self.userid = 0
     self.init_gamedata_mixin()
Example #46
# TODO: drill down vertically into STAFF airports
from gevent import monkey

monkey.patch_all()

import gevent
from gevent.queue import Queue
from spiderNest.preIntro import *

target_Q = Queue()
response_Q = Queue()
dataList = []


class sAirportSpider(object):
    def __init__(self):

        # freebie home page
        self.airHome = 'https://52bp.org'

    def get_vip_link(self):
        return self.slaver(self.airHome + '/vip-airport.html', 'url')

    def get_free_link(self):
        return self.slaver(self.airHome + '/free-airport.html', 'url')

    def get_all_link(self):
        return self.slaver(self.airHome + '/airport.html', 'url')

    @staticmethod
    def get_sAirHome(mode):
Example #47
 def __init__(self):
     super(NotifyingQueue, self).__init__()
     self._queue = Queue()
Example #48
def getUrl(oUrl,
           encoding='utf-8',
           headers=None,
           data=None,
           method=None,
           allowCache=True,
           usePool=True,
           pool=pool_getUrl):
    def _getUrl(result_queue, url_json, oUrl, encoding, headers, data, method,
                allowCache, callmethod):
        try:
            if requests and session:
                req = requests.Request(
                    method=method if method else "GET",
                    url=oUrl,
                    headers=headers if headers else fake_headers,
                    data=data)
                prepped = req.prepare()
                resp = session.send(prepped)
                if encoding == "raw":
                    html_text = resp.content
                else:
                    resp.encoding = encoding
                    html_text = resp.text
            else:
                # when the url contains Chinese characters: parse.quote_from_bytes(oUrl.encode('utf-8'), ':/&%?=+')
                req = urllib.request.Request(
                    oUrl,
                    headers=headers if headers else {},
                    data=data,
                    method=method)
                with urllib.request.urlopen(req) as response:
                    headers = response.info()
                    cType = headers.get('Content-Type', '')
                    match = re.search(r'charset\s*=\s*(\w+)', cType)
                    if match:
                        encoding = match.group(1)
                    blob = response.read()
                    if headers.get('Content-Encoding', '') == 'gzip':
                        data = gzip.decompress(blob)
                    else:
                        data = blob
                    if encoding == "raw":
                        html_text = data
                    else:
                        html_text = data.decode(encoding, 'ignore')
            if allowCache:
                urlcache[url_json] = html_text
            result_queue.put(html_text)
            return
        except socket.timeout:
            logging.warning(callmethod +
                            'request attempt %s timeout' % str(i + 1))
        except urllib.error.URLError:
            logging.warning(callmethod +
                            'request attempt %s URLError' % str(i + 1))
        except http.client.RemoteDisconnected:
            logging.warning(callmethod +
                            'request attempt %s RemoteDisconnected' %
                            str(i + 1))
        except http.client.IncompleteRead:
            logging.warning(callmethod +
                            'request attempt %s IncompleteRead' % str(i + 1))
        except:
            logging.exception(callmethod + "get url " + url_json + " fail")
        result_queue.put(None)
        return

    callmethod = get_caller_info()
    url_json = {
        "oUrl": oUrl,
        "encoding": encoding,
        "headers": headers,
        "data": data,
        "method": method
    }
    url_json = json.dumps(url_json, sort_keys=True, ensure_ascii=False)
    if allowCache:
        if url_json in urlcache:
            html_text = urlcache[url_json]
            logging.debug(callmethod + "cache get:" + url_json)
            return html_text
        logging.debug(callmethod + "normal get:" + url_json)
    else:
        logging.debug(callmethod + "nocache get:" + url_json)
        usePool = False

    if requests and session:
        retry_num = 1
    else:
        retry_num = 10

    for i in range(retry_num):
        queue = Queue(1)
        if usePool:
            pool.spawn(_getUrl, queue, url_json, oUrl, encoding, headers, data,
                       method, allowCache, callmethod)
        else:
            _getUrl(queue, url_json, oUrl, encoding, headers, data, method,
                    allowCache, callmethod)
        result = queue.get()
        if result is not None:
            return result
    return None
Example #49
from gevent import monkey
monkey.patch_all()
import gevent,requests, bs4, csv
from gevent.queue import Queue

work = Queue()
food_list=[]
# URLs of the food records on the first 3 pages of the first 3 common food categories:
url_1 = 'http://www.boohee.com/food/group/{type}?page={page}'
for x in range(1, 4):
    for y in range(1, 4):
        real_url = url_1.format(type=x, page=y)
        work.put_nowait(real_url)

# URLs of the food records on the first 3 pages of the 11th common food category:
url_2 = 'http://www.boohee.com/food/view_menu?page={page}'
for x in range(1,4):
    real_url = url_2.format(page=x)
    work.put_nowait(real_url)

# write the crawler function and the code that starts the coroutines:
def crawler():
    headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
    }
    while not work.empty():
        url = work.get_nowait()
        res = requests.get(url, headers=headers)
        bs_res = bs4.BeautifulSoup(res.text, 'html.parser')
        foods = bs_res.find_all('li', class_='item clearfix')
        temp_food = []
Example #50
class VncIfmapClient(object):

    # * Not all properties in an object need to be published
    #   to IfMap.
    # * In some properties, not all fields are relevant
    #   to be published to IfMap.
    # If a property is not relevant at all, define it
    # with None. If it is partially relevant, define the fn.
    # which handcrafts the generated xml for the object.
    IFMAP_PUBLISH_SKIP_LIST = {
        # Format - <prop_field> : None | <Handler_fn>
        u"perms2": None,
        u"id_perms": build_idperms_ifmap_obj
    }

    def handler(self, signum, frame):
        file = open("/tmp/api-server-ifmap-cache.txt", "w")
        file.write(pformat(self._id_to_metas))
        file.close()

    def __init__(self, db_client_mgr, ifmap_srv_ip, ifmap_srv_port, uname,
                 passwd, ssl_options):
        self._ifmap_srv_ip = ifmap_srv_ip
        self._ifmap_srv_port = ifmap_srv_port
        self._username = uname
        self._password = passwd
        self._ssl_options = ssl_options
        self._dequeue_greenlet = None
        self._CONTRAIL_XSD = "http://www.contrailsystems.com/vnc_cfg.xsd"
        self._IPERMS_NAME = "id-perms"
        self._NAMESPACES = {
            'env': "http://www.w3.org/2003/05/soap-envelope",
            'ifmap': "http://www.trustedcomputinggroup.org/2010/IFMAP/2",
            'meta':
            "http://www.trustedcomputinggroup.org/2010/IFMAP-METADATA/2",
            'contrail': self._CONTRAIL_XSD
        }

        self._db_client_mgr = db_client_mgr
        self._sandesh = db_client_mgr._sandesh

        ConnectionState.update(
            conn_type=ConnType.IFMAP,
            name='IfMap',
            status=ConnectionStatus.INIT,
            message='',
            server_addrs=["%s:%s" % (ifmap_srv_ip, ifmap_srv_port)])
        self._conn_state = ConnectionStatus.INIT
        self._is_ifmap_up = False
        self._queue = Queue(self._get_api_server()._args.ifmap_queue_size)

        self.reset()

        # Set the signal handler
        signal.signal(signal.SIGUSR2, self.handler)

        self._init_conn()
        self._publish_config_root()
        self._health_checker_greenlet =\
               vnc_greenlets.VncGreenlet('VNC IfMap Health Checker',
                                         self._health_checker)

    # end __init__

    @classmethod
    def object_alloc(cls, obj_class, parent_res_type, fq_name):
        res_type = obj_class.resource_type
        my_fqn = ':'.join(fq_name)
        parent_fqn = ':'.join(fq_name[:-1])

        my_imid = 'contrail:%s:%s' % (res_type, my_fqn)
        if parent_fqn:
            if parent_res_type is None:
                err_msg = "Parent: %s type is none for: %s" % (parent_fqn,
                                                               my_fqn)
                return False, (409, err_msg)
            parent_imid = 'contrail:' + parent_res_type + ':' + parent_fqn
        else:  # parent is config-root
            parent_imid = 'contrail:config-root:root'

        # Normalize/escape special chars
        my_imid = escape(my_imid)
        parent_imid = escape(parent_imid)

        return True, (my_imid, parent_imid)

    # end object_alloc

    def object_set(self, obj_class, my_imid, existing_metas, obj_dict):
        update = {}

        # Properties Meta
        for prop_field in obj_class.prop_fields:
            field = obj_dict.get(prop_field)
            if field is None:
                continue
            # construct object of xsd-type and get its xml repr
            # e.g. virtual_network_properties
            prop_field_types = obj_class.prop_field_types[prop_field]
            is_simple = not prop_field_types['is_complex']
            prop_type = prop_field_types['xsd_type']
            # e.g. virtual-network-properties
            prop_meta = obj_class.prop_field_metas[prop_field]

            if prop_field in VncIfmapClient.IFMAP_PUBLISH_SKIP_LIST:
                # Field not relevant, skip publishing to IfMap
                if not VncIfmapClient.IFMAP_PUBLISH_SKIP_LIST[prop_field]:
                    continue
                # Call the handler fn to generate the relevant fields.
                if callable(
                        VncIfmapClient.IFMAP_PUBLISH_SKIP_LIST[prop_field]):
                    prop_xml = VncIfmapClient.IFMAP_PUBLISH_SKIP_LIST[
                        prop_field](prop_field, field)
                    meta = Metadata(prop_meta,
                                    '', {'ifmap-cardinality': 'singleValue'},
                                    ns_prefix='contrail',
                                    elements=prop_xml)
                else:
                    log_str = ('%s is marked for partial publish to Ifmap '
                               'but handler not defined' % prop_field)
                    self.config_log(log_str, level=SandeshLevel.SYS_DEBUG)
                    continue
            elif is_simple:
                norm_str = escape(str(field))
                meta = Metadata(prop_meta,
                                norm_str, {'ifmap-cardinality': 'singleValue'},
                                ns_prefix='contrail')
            else:  # complex type
                prop_cls = str_to_class(prop_type, __name__)
                buf = cStringIO.StringIO()
                # perms might be inserted at server as obj.
                # obj construction diff from dict construction.
                if isinstance(field, dict):
                    prop_cls(**field).exportChildren(buf,
                                                     level=1,
                                                     name_=prop_meta,
                                                     pretty_print=False)
                elif isinstance(field, list):
                    for elem in field:
                        if isinstance(elem, dict):
                            prop_cls(**elem).exportChildren(buf,
                                                            level=1,
                                                            name_=prop_meta,
                                                            pretty_print=False)
                        else:
                            elem.exportChildren(buf,
                                                level=1,
                                                name_=prop_meta,
                                                pretty_print=False)
                else:  # object
                    field.exportChildren(buf,
                                         level=1,
                                         name_=prop_meta,
                                         pretty_print=False)
                prop_xml = buf.getvalue()
                buf.close()
                meta = Metadata(prop_meta,
                                '', {'ifmap-cardinality': 'singleValue'},
                                ns_prefix='contrail',
                                elements=prop_xml)

            # Update if the obj is new (existing_metas is None), if the obj
            # does not yet have this prop_meta, or if the prop_meta differs
            # from what we have currently.
            if (not existing_metas or not prop_meta in existing_metas
                    or ('' in existing_metas[prop_meta]
                        and str(meta) != str(existing_metas[prop_meta]['']))):
                self._update_id_self_meta(update, meta)
        # end for all property types

        # References Meta
        for ref_field in obj_class.ref_fields:
            refs = obj_dict.get(ref_field)
            if not refs:
                continue
            for ref in refs:
                ref_fq_name = ref['to']
                ref_fld_types_list = list(obj_class.ref_field_types[ref_field])
                ref_res_type = ref_fld_types_list[0]
                ref_link_type = ref_fld_types_list[1]
                ref_meta = obj_class.ref_field_metas[ref_field]
                ref_imid = get_ifmap_id_from_fq_name(ref_res_type, ref_fq_name)
                ref_data = ref.get('attr')
                if ref_data:
                    buf = cStringIO.StringIO()
                    attr_cls = str_to_class(ref_link_type, __name__)
                    attr_cls(**ref_data).exportChildren(buf,
                                                        level=1,
                                                        name_=ref_meta,
                                                        pretty_print=False)
                    ref_link_xml = buf.getvalue()
                    buf.close()
                else:
                    ref_link_xml = ''
                meta = Metadata(ref_meta,
                                '', {'ifmap-cardinality': 'singleValue'},
                                ns_prefix='contrail',
                                elements=ref_link_xml)
                self._update_id_pair_meta(update, ref_imid, meta)
        # end for all ref types

        self._publish_update(my_imid, update)
        return (True, '')

    # end object_set

    def object_create(self, obj_ids, obj_dict):
        obj_type = obj_ids['type']
        obj_class = self._db_client_mgr.get_resource_class(obj_type)
        if not 'parent_type' in obj_dict:
            # parent is config-root
            parent_type = 'config-root'
            parent_imid = 'contrail:config-root:root'
        else:
            parent_type = obj_dict['parent_type']
            parent_imid = obj_ids.get('parent_imid', None)

        # Parent Link Meta
        update = {}
        parent_cls = self._db_client_mgr.get_resource_class(parent_type)
        parent_link_meta = parent_cls.children_field_metas.get('%ss' %
                                                               (obj_type))
        if parent_link_meta:
            meta = Metadata(parent_link_meta,
                            '', {'ifmap-cardinality': 'singleValue'},
                            ns_prefix='contrail')
            self._update_id_pair_meta(update, obj_ids['imid'], meta)
            self._publish_update(parent_imid, update)

        (ok, result) = self.object_set(obj_class, obj_ids['imid'], None,
                                       obj_dict)
        return (ok, result)

    # end object_create

    def _object_read_to_meta_index(self, ifmap_id):
        # metas is a dict where key is meta-name and val is list of dict of
        # form [{'meta':meta}, {'id':id1, 'meta':meta}, {'id':id2, 'meta':meta}]
        metas = {}
        if ifmap_id in self._id_to_metas:
            metas = self._id_to_metas[ifmap_id].copy()
        return metas

    # end _object_read_to_meta_index

    def object_update(self, obj_cls, new_obj_dict):
        ifmap_id = get_ifmap_id_from_fq_name(obj_cls.resource_type,
                                             new_obj_dict['fq_name'])
        # read in refs from ifmap to determine which ones become inactive after update
        existing_metas = self._object_read_to_meta_index(ifmap_id)

        if not existing_metas:
            # UPDATE notify queued before CREATE notify, Skip publish to IFMAP.
            return (True, '')

        # remove properties that are no longer active
        props = obj_cls.prop_field_metas
        for prop, meta in props.items():
            if meta in existing_metas and new_obj_dict.get(prop) is None:
                self._delete_id_self_meta(ifmap_id, meta)

        # remove refs that are no longer active
        delete_list = []
        refs = dict(
            (obj_cls.ref_field_metas[rf], obj_cls.ref_field_types[rf][0])
            for rf in obj_cls.ref_fields)
        #refs = {'virtual-network-qos-forwarding-class': 'qos-forwarding-class',
        #        'virtual-network-network-ipam': 'network-ipam',
        #        'virtual-network-network-policy': 'network-policy',
        #        'virtual-network-route-table': 'route-table'}
        for meta, ref_res_type in refs.items():
            old_set = set(existing_metas.get(meta, {}).keys())
            new_set = set()
            ref_obj_type = self._db_client_mgr.get_resource_class(
                ref_res_type).object_type
            for ref in new_obj_dict.get(ref_obj_type + '_refs', []):
                to_imid = get_ifmap_id_from_fq_name(ref_res_type, ref['to'])
                new_set.add(to_imid)

            for inact_ref in old_set - new_set:
                delete_list.append((inact_ref, meta))

        if delete_list:
            self._delete_id_pair_meta_list(ifmap_id, delete_list)

        (ok, result) = self.object_set(obj_cls, ifmap_id, existing_metas,
                                       new_obj_dict)
        return (ok, result)

    # end object_update

    def object_delete(self, obj_ids):
        ifmap_id = obj_ids['imid']
        parent_imid = obj_ids.get('parent_imid')
        existing_metas = self._object_read_to_meta_index(ifmap_id)
        meta_list = []
        for meta_name, meta_infos in existing_metas.items():
            # Delete all refs/links in the object.
            # Refs are identified when the key is a non-empty string.
            meta_list.extend([(k, meta_name) for k in meta_infos if k != ''])

        if parent_imid:
            # Remove link from parent
            meta_list.append((parent_imid, None))

        if meta_list:
            self._delete_id_pair_meta_list(ifmap_id, meta_list)

        # Remove all property metadata associated with this ident
        self._delete_id_self_meta(ifmap_id, None)

        return (True, '')

    # end object_delete

    def _init_conn(self):
        self._mapclient = client(
            ("%s" % (self._ifmap_srv_ip), "%s" % (self._ifmap_srv_port)),
            self._username, self._password, self._NAMESPACES,
            self._ssl_options)

        connected = False
        while not connected:
            try:
                resp_xml = self._mapclient.call('newSession',
                                                NewSessionRequest())
            except socket.error as e:
                msg = 'Failed to establish IF-MAP connection: %s' % str(e)
                self.config_log(msg, level=SandeshLevel.SYS_WARN)
                time.sleep(3)
                continue

            resp_doc = etree.parse(StringIO.StringIO(resp_xml))
            err_codes = resp_doc.xpath(
                '/env:Envelope/env:Body/ifmap:response/errorResult/@errorCode',
                namespaces=self._NAMESPACES)
            if not err_codes:
                connected = True
            else:
                msg = "Failed to establish IF-MAP connection: %s" % err_codes
                self.config_log(msg, level=SandeshLevel.SYS_WARN)
                session_id = self._mapclient.get_session_id()
                try:
                    self._mapclient.call('endSession',
                                         EndSessionRequest(session_id))
                except socket.error as e:
                    msg = "Failed to end the IF-MAP session %s: %s" %\
                          (session_id, str(e))
                    self.config_log(msg, level=SandeshLevel.SYS_WARN)
                time.sleep(3)

        ConnectionState.update(conn_type=ConnType.IFMAP,
                               name='IfMap',
                               status=ConnectionStatus.UP,
                               message='',
                               server_addrs=[
                                   "%s:%s" %
                                   (self._ifmap_srv_ip, self._ifmap_srv_port)
                               ])
        self._conn_state = ConnectionStatus.UP
        msg = 'IFMAP connection ESTABLISHED'
        self.config_log(msg, level=SandeshLevel.SYS_NOTICE)

        self._mapclient.set_session_id(
            newSessionResult(resp_xml).get_session_id())
        self._mapclient.set_publisher_id(
            newSessionResult(resp_xml).get_publisher_id())

    # end _init_conn

    def _get_api_server(self):
        return self._db_client_mgr._api_svr_mgr

    # end _get_api_server

    def reset(self):
        self._id_to_metas = {}
        while not self._queue.empty():
            self._queue.get_nowait()

        if (self._dequeue_greenlet is not None
                and gevent.getcurrent() != self._dequeue_greenlet):
            self._dequeue_greenlet.kill()
        self._dequeue_greenlet =\
              vnc_greenlets.VncGreenlet("VNC IfMap Dequeue",
                                        self._ifmap_dequeue_task)

    # end reset

    def _publish_config_root(self):
        # Remove all resident data
        result = ifmap_wipe(self._mapclient)
        if result is None:
            msg = "Cannot purge the IF-MAP server before publishing root graph"
            self.config_log(msg, level=SandeshLevel.SYS_WARN)
        # Build default config-root
        buf = cStringIO.StringIO()
        perms = Provision.defaults.perms
        perms.exportChildren(buf, level=1, pretty_print=False)
        id_perms_xml = buf.getvalue()
        buf.close()
        update = {}
        meta = Metadata(self._IPERMS_NAME,
                        '', {'ifmap-cardinality': 'singleValue'},
                        ns_prefix='contrail',
                        elements=id_perms_xml)
        self._update_id_self_meta(update, meta)
        self._publish_update("contrail:config-root:root", update)

    # end _publish_config_root

    def config_log(self, msg, level):
        self._db_client_mgr.config_log(msg, level)

    # end config_log

    @ignore_exceptions
    def _generate_ifmap_trace(self, oper, body):
        req_id = get_trace_id()
        ifmap_trace = IfmapTrace(request_id=req_id)
        ifmap_trace.operation = oper
        ifmap_trace.body = body

        return ifmap_trace

    # end _generate_ifmap_trace

    def _publish_to_ifmap_enqueue(self, oper, oper_body, do_trace=True):
        # Safety check: an empty body would make the ifmap-server report an
        # error asking for update|delete in the publish request.
        if not oper_body:
            return
        self._queue.put((oper, oper_body, do_trace))

    # end _publish_to_ifmap_enqueue

    def _ifmap_dequeue_task(self):
        while True:
            try:
                self._publish_to_ifmap_dequeue()
            except Exception as e:
                tb = detailed_traceback()
                self.config_log(tb, level=SandeshLevel.SYS_ERR)

    def _publish_to_ifmap_dequeue(self):
        def _publish(requests, traces, publish_discovery=False):
            if not requests:
                return
            ok = False
            # Keep retrying until the publish succeeds. While we retry, the
            # queue keeps filling; once it is full, ifmap is fully resynced
            # from the db.
            while not ok:
                ok, err_msg = self._publish_to_ifmap(''.join(requests))
                if ok:
                    trace_msg(traces, 'IfmapTraceBuf', self._sandesh)
                else:
                    trace_msg(traces,
                              'IfmapTraceBuf',
                              self._sandesh,
                              error_msg=err_msg)
                if publish_discovery and ok:
                    self._get_api_server().publish_ifmap_to_discovery()
                    self._is_ifmap_up = True
                if not ok:
                    msg = ("%s. IF-MAP sending queue size: %d/%d" %
                           (err_msg, self._queue.qsize(),
                            self._get_api_server()._args.ifmap_queue_size))
                    self.config_log(msg, level=SandeshLevel.SYS_WARN)
                    gevent.sleep(1)

        # end _publish

        while True:
            # block until there is data in the queue
            (oper, oper_body, do_trace) = self._queue.get()
            requests = []
            requests_len = 0
            traces = []
            while True:
                # Drain the queue until it is empty, the max message size is
                # reached, or the oper changes (ifmap does not accept mixed
                # operations in one message).
                if oper == 'publish_discovery':
                    _publish(requests, traces, True)
                    break
                if do_trace:
                    trace = self._generate_ifmap_trace(oper, oper_body)
                    traces.append(trace)
                requests.append(oper_body)
                requests_len += len(oper_body)
                if (requests_len >
                        self._get_api_server()._args.ifmap_max_message_size):
                    _publish(requests, traces)
                    break
                old_oper = oper
                try:
                    (oper, oper_body, do_trace) = self._queue.get_nowait()
                    if oper != old_oper:
                        _publish(requests, traces)
                        requests = []
                        requests_len = 0
                        traces = []
                        continue
                except Empty:
                    _publish(requests, traces)
                    break

    # end _publish_to_ifmap_dequeue

    def _publish_to_ifmap(self, oper_body):
        try:
            not_published = True
            retry_count = 0
            resp_xml = None
            while not_published:
                sess_id = self._mapclient.get_session_id()
                req_xml = PublishRequest(sess_id, oper_body)
                resp_xml = self._mapclient.call('publish', req_xml)

                resp_doc = etree.parse(StringIO.StringIO(resp_xml))
                err_codes = resp_doc.xpath(
                    '/env:Envelope/env:Body/ifmap:response/errorResult/@errorCode',
                    namespaces=self._NAMESPACES)
                if err_codes:
                    if retry_count == 0:
                        log_str = 'Error publishing to ifmap, req: %s, resp: %s' \
                                  %(req_xml, resp_xml)
                        self.config_log(log_str, level=SandeshLevel.SYS_ERR)

                    ConnectionState.update(
                        conn_type=ConnType.IFMAP,
                        name='IfMap',
                        status=ConnectionStatus.INIT,
                        message='Session lost, renew it',
                        server_addrs=[
                            "%s:%s" %
                            (self._ifmap_srv_ip, self._ifmap_srv_port)
                        ])
                    self._conn_state = ConnectionStatus.INIT
                    self._is_ifmap_up = False
                    retry_count = retry_count + 1
                    self._init_conn()

                    if self._ifmap_restarted():
                        msg = "IF-MAP servers restarted, re-populate it"
                        self.config_log(msg, level=SandeshLevel.SYS_ERR)

                        self.reset()
                        self._get_api_server().publish_ifmap_to_discovery(
                            'down', msg)

                        self._publish_config_root()
                        self._db_client_mgr.db_resync()
                        self._publish_to_ifmap_enqueue('publish_discovery', 1)

                else:  # successful publish
                    not_published = False
                    break
            # end while not_published

            if retry_count:
                log_str = 'Success publishing to ifmap after %d tries' \
                          %(retry_count)
                self.config_log(log_str, level=SandeshLevel.SYS_ERR)

            return True, resp_xml
        except Exception as e:
            # Failed to publish the operation due to unknown error.
            # Probably a connection issue with the ifmap server.
            msg = "Failed to publish request %s: %s" % (oper_body, str(e))
            return False, msg

    # end _publish_to_ifmap

    def _build_request(self, id1_name, id2_name, meta_list, delete=False):
        request = ''
        id1 = unicode(
            Identity(name=id1_name, type="other", other_type="extended"))
        if id2_name != 'self':
            id2 = unicode(
                Identity(name=id2_name, type="other", other_type="extended"))
        else:
            id2 = None
        for m in meta_list:
            if delete:
                filter = unicode(m) if m else None
                op = PublishDeleteOperation(id1=id1, id2=id2, filter=filter)
            else:
                op = PublishUpdateOperation(id1=id1,
                                            id2=id2,
                                            metadata=unicode(m),
                                            lifetime='forever')
            request += unicode(op)
        return request

    def _delete_id_self_meta(self, self_imid, meta_name):
        contrail_metaname = 'contrail:' + meta_name if meta_name else None
        del_str = self._build_request(self_imid, 'self', [contrail_metaname],
                                      True)
        self._publish_to_ifmap_enqueue('delete', del_str)

        try:
            # del meta from cache and del id if this was last meta
            if meta_name:
                del self._id_to_metas[self_imid][meta_name]
                if not self._id_to_metas[self_imid]:
                    del self._id_to_metas[self_imid]
            else:
                del self._id_to_metas[self_imid]

        except KeyError:
            # Case of delete received for an id which we do not know about.
            # Could be a case of duplicate delete.
            # There is nothing for us to do here. Just log and proceed.
            msg = "Delete received for unknown imid(%s) meta_name(%s)." % \
                  (self_imid, meta_name)
            self.config_log(msg, level=SandeshLevel.SYS_DEBUG)

    # end _delete_id_self_meta

    def _delete_id_pair_meta_list(self, id1, meta_list):
        del_str = ''
        for id2, metadata in meta_list:
            contrail_metadata = 'contrail:' + metadata if metadata else None
            del_str += self._build_request(id1, id2, [contrail_metadata], True)

        self._publish_to_ifmap_enqueue('delete', del_str)

        # del meta,id2 from cache and del id if this was last meta
        def _id_to_metas_delete(id1, id2, meta_name):
            if id1 not in self._id_to_metas:
                return
            if meta_name not in self._id_to_metas[id1]:
                return
            if not self._id_to_metas[id1][meta_name]:
                del self._id_to_metas[id1][meta_name]
                if not self._id_to_metas[id1]:
                    del self._id_to_metas[id1]
                return

            # if meta is prop, noop
            if id2 in self._id_to_metas[id1][meta_name]:
                del self._id_to_metas[id1][meta_name][id2]

        #end _id_to_metas_delete

        for id2, metadata in meta_list:
            if metadata:
                # replace with remaining refs
                _id_to_metas_delete(id1, id2, metadata)
                _id_to_metas_delete(id2, id1, metadata)
            else:  # no meta specified remove all links from id1 to id2
                for meta_name in self._id_to_metas.get(id1, {}).keys():
                    _id_to_metas_delete(id1, id2, meta_name)
                for meta_name in self._id_to_metas.get(id2, {}).keys():
                    _id_to_metas_delete(id2, id1, meta_name)

    # end _delete_id_pair_meta_list

    def _update_id_self_meta(self, update, meta):
        """ update: dictionary of the type
                update[<id> | 'self'] = list(metadata)
        """
        mlist = update.setdefault('self', [])
        mlist.append(meta)

    # end _update_id_self_meta

    def _update_id_pair_meta(self, update, to_id, meta):
        mlist = update.setdefault(to_id, [])
        mlist.append(meta)

    # end _update_id_pair_meta

    def _publish_update(self, self_imid, update):
        requests = []
        self_metas = self._id_to_metas.setdefault(self_imid, {})
        for id2, metalist in update.items():
            request = self._build_request(self_imid, id2, metalist)

            # remember what we wrote for diffing during next update
            old_metalist = []
            for m in metalist:
                meta_name = m._Metadata__name[9:]

                # Objects have two types of members - Props and refs/links.
                # Props are cached in id_to_metas as
                #        id_to_metas[self_imid][meta_name]['']
                #        (with empty string as key)

                # Links are cached in id_to_metas as
                #        id_to_metas[self_imid][meta_name][id2]
                #        id2 is used as a key

                if id2 == 'self':
                    self_metas[meta_name] = {'': m}
                    continue

                if meta_name in self_metas:
                    old_metalist.append(self_metas[meta_name])
                    # Update the link/ref
                    self_metas[meta_name][id2] = m
                else:
                    # Create a new link/ref
                    self_metas[meta_name] = {id2: m}

                # Reverse linking from id2 to id1
                self._id_to_metas.setdefault(id2, {})

                if meta_name in self._id_to_metas[id2]:
                    self._id_to_metas[id2][meta_name][self_imid] = m
                else:
                    self._id_to_metas[id2][meta_name] = {self_imid: m}

            old_request = self._build_request(self_imid, id2, old_metalist)
            if request != old_request:
                requests.append(request)

        upd_str = ''.join(requests)
        self._publish_to_ifmap_enqueue('update', upd_str)

    # end _publish_update

    def _ifmap_restarted(self):
        return not entity_is_present(self._mapclient, 'config-root', ['root'])

    def _health_checker(self):
        while True:
            try:
                # do the healthcheck only if we are connected
                if self._conn_state == ConnectionStatus.DOWN:
                    continue
                meta = Metadata('display-name',
                                '', {'ifmap-cardinality': 'singleValue'},
                                ns_prefix='contrail',
                                elements='')
                request_str = self._build_request('healthcheck', 'self',
                                                  [meta])
                self._publish_to_ifmap_enqueue('update',
                                               request_str,
                                               do_trace=False)

                # Confirm the existence of the following default global entities in IFMAP.
                search_list = [
                    ('global-system-config', ['default-global-system-config']),
                ]
                for type, fq_name in search_list:
                    if not entity_is_present(self._mapclient, type, fq_name):
                        raise Exception("%s not found in IFMAP DB" %
                                        ':'.join(fq_name))

                # If we had earlier unpublished the IFMAP server from the
                # discovery service, publish it back now that it is healthy again.
                if not self._is_ifmap_up:
                    self._get_api_server().publish_ifmap_to_discovery('up', '')
                    self._is_ifmap_up = True
                    ConnectionState.update(
                        conn_type=ConnType.IFMAP,
                        name='IfMap',
                        status=ConnectionStatus.UP,
                        message='',
                        server_addrs=[
                            "%s:%s" %
                            (self._ifmap_srv_ip, self._ifmap_srv_port)
                        ])
            except Exception as e:
                log_str = 'IFMAP Healthcheck failed: %s' % (str(e))
                self.config_log(log_str, level=SandeshLevel.SYS_ERR)
                if self._is_ifmap_up:
                    self._get_api_server().publish_ifmap_to_discovery(
                        'down', 'IFMAP DB - Invalid state')
                    self._is_ifmap_up = False
                    ConnectionState.update(
                        conn_type=ConnType.IFMAP,
                        name='IfMap',
                        status=ConnectionStatus.DOWN,
                        message='Invalid IFMAP DB State',
                        server_addrs=[
                            "%s:%s" %
                            (self._ifmap_srv_ip, self._ifmap_srv_port)
                        ])
            finally:
                gevent.sleep(
                    self._get_api_server().get_ifmap_health_check_interval())
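Two patterns in the class above are worth isolating. First, the _id_to_metas cache described in _publish_update's comments: property metas live under the empty-string key, links under the peer's imid. With made-up identifiers, the layout looks like:

# illustration only: the _id_to_metas layout, identifiers invented
_id_to_metas = {
    'contrail:virtual-network:default-domain:vn1': {
        # a property meta: single entry under the empty-string key
        'virtual-network-properties': {'': '<metadata .../>'},
        # a link/ref meta: one entry per peer identity
        'virtual-network-network-ipam': {
            'contrail:network-ipam:default-domain:ipam1': '<metadata .../>',
        },
    },
}

Second, the dequeue task's drain-and-batch loop: block for the first item, then coalesce whatever is already queued via get_nowait() until the queue is empty or a size cap is hit. A minimal sketch of that loop on its own:

from gevent.queue import Queue, Empty

def next_batch(queue, max_len=3):
    batch = [queue.get()]              # block until at least one item arrives
    while len(batch) < max_len:
        try:
            batch.append(queue.get_nowait())
        except Empty:
            break                      # drained: ship what we have
    return batch

q = Queue()
for item in 'abcde':
    q.put(item)
print(next_batch(q))   # ['a', 'b', 'c']
print(next_batch(q))   # ['d', 'e']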
Example #51
0
class MuxSocketTransportSink(ClientMessageSink):
  """A transport sink for tmux servers.
  This sink supports concurrent requests over its transport.
  """

  SINK_LOG = ROOT_LOG.getChild('SocketTransportSink')
  _EMPTY_DCT = {}
  _CLOSE_INVOKED = "Close invoked"

  class Varz(VarzBase):
    """
    messages_sent - The number of messages sent over this sink.
    messages_recv - The number of messages received over this sink.
    active - 1 if the sink is open, else 0.
    send_queue_size - The length of the send queue.
    send_time - The aggregate amount of time spent sending data.
    recv_time - The aggregate amount of time spent receiving data.
    send_latency - The average amount of time taken to send a message.
    recv_latency - The average amount of time taken to receive a message
                   (once a response has reached the client).
    transport_latency - The average amount of time taken to perform a full
                        method call transaction (send data, wait for response,
                        read response).
    """
    _VARZ_BASE_NAME = 'scales.thriftmux.SocketTransportSink'
    _VARZ = {
      'messages_sent': Rate,
      'messages_recv': Rate,
      'active': Gauge,
      'send_queue_size': Gauge,
      'send_time': AggregateTimer,
      'recv_time': AggregateTimer,
      'send_latency': AverageTimer,
      'recv_latency': AverageTimer,
      'transport_latency': AverageTimer
    }

  def __init__(self, socket, service):
    super(MuxSocketTransportSink, self).__init__()
    self._socket = socket
    self._state = ChannelState.Idle
    self._log = self.SINK_LOG.getChild('[%s.%s:%d]' % (
      service, self._socket.host, self._socket.port))
    self._socket_source = '%s:%d' % (self._socket.host, self._socket.port)
    self._service = service
    self._open_result = None
    self._varz = self.Varz(Source(service=self._service,
      endpoint=self._socket_source))

  def _Init(self):
    self._tag_map = {}
    self._open_result = None
    self._tag_pool = TagPool((2 ** 24) - 1, self._service, self._socket_source)
    self._greenlets = []
    self._send_queue = Queue()

  @property
  def isActive(self):
    return self._state != ChannelState.Closed

  @property
  def state(self):
    return self._state

  def Open(self):
    """Initializes the dispatcher, opening a connection to the remote host.
    This method may only be called once.
    """
    if not self._open_result:
      self._Init()
      self._open_result = AsyncResult()
      self._open_result.SafeLink(self._OpenImpl)
    return self._open_result

  def _SpawnNamedGreenlet(self, name, *args, **kwargs):
    return NamedGreenlet.spawn(
      'Scales %s for %s [%s]' % (name, self._service, self._socket_source),
      *args,
      **kwargs)

  def _OpenImpl(self):
    try:
      self._log.debug('Opening transport.')
      self._socket.open()
      self._greenlets.append(self._SpawnNamedGreenlet('Recv Loop', self._RecvLoop))
      self._greenlets.append(self._SpawnNamedGreenlet('Send Loop', self._SendLoop))

      self._CheckInitialConnection()
      self._log.debug('Open successful')
      self._state = ChannelState.Open
      self._varz.active(1)
    except Exception as e:
      self._log.error('Exception opening socket')
      self._open_result.set_exception(e)
      self._Shutdown('Open failed')
      raise

  @abstractmethod
  def _CheckInitialConnection(self):
    raise NotImplementedError()

  def Close(self):
    self._Shutdown(self._CLOSE_INVOKED, False)

  def _Shutdown(self, reason, fault=True):
    if not self.isActive:
      return

    self._state = ChannelState.Closed

    if reason == self._CLOSE_INVOKED:
      log_fn = self._log.debug
    else:
      log_fn = self._log.warning
    log_fn('Shutting down transport [%s].' % str(reason))
    self._varz.active(0)
    self._socket.close()
    for g in self._greenlets:
      g.kill(block=False)
    self._greenlets = []

    if not isinstance(reason, Exception):
      reason = Exception(str(reason))
    if fault:
      self.on_faulted.Set(reason)
    msg = MethodReturnMessage(error=ClientError(reason))

    for sink_stack, _, _ in self._tag_map.values():
      sink_stack.AsyncProcessResponseMessage(msg)

    if (self._open_result
        and not self._open_result.ready()
        and isinstance(reason, Exception)):
      self._open_result.set_exception(reason)

    self._tag_map = {}
    self._open_result = None
    self._send_queue = Queue()

  @abstractmethod
  def _OnTimeout(self, tag):
    raise NotImplementedError()

  def _HandleTimeout(self, msg_properties):
    """Determine if a message has timed out yet (because it waited in the queue
    for too long).  If it hasn't, initialize the timeout handler to fire if the
    message times out in transit.

    Args:
      msg_properties - The properties of the message.
    """
    timeout_event = msg_properties.get(Deadline.EVENT_KEY, None)
    if timeout_event and timeout_event.Get():
      # The message has timed out before it got out of the send queue
      # In this case, we can discard it immediately without even sending it.
      tag = msg_properties.pop(Tag.KEY, 0)
      if tag != 0:
        self._ReleaseTag(tag)
      return True
    elif timeout_event:
      # The event exists but hasn't been signaled yet, hook up a
      # callback so we can be notified on a timeout.
      def timeout_proc():
        timeout_tag = msg_properties.pop(Tag.KEY, 0)
        if timeout_tag:
          self._OnTimeout(timeout_tag)

      timeout_event.Subscribe(lambda evt: timeout_proc(), True)
      return False
    else:
      # No event was created, so this will never timeout.
      return False

  def _SendLoop(self):
    """Dispatch messages from the send queue to the remote server.

    Note: Messages in the queue have already been serialized into wire format.
    """
    while self.isActive:
      try:
        payload, dct = self._send_queue.get()
        queue_len = self._send_queue.qsize()
        self._varz.send_queue_size(queue_len)
        # HandleTimeout sets up the transport level timeout handling
        # for this message.  If the message times out in transit, this
        # transport will handle sending a Tdiscarded to the server.
        if self._HandleTimeout(dct): continue

        with self._varz.send_time.Measure():
          with self._varz.send_latency.Measure():
            self._socket.write(payload)
        self._varz.messages_sent()
      except Exception as e:
        self._Shutdown(e)
        break

  def _RecvLoop(self):
    """Dispatch messages from the remote server to their recipient.

    Note: Deserialization and dispatch occur on a separate greenlet; this only
    reads the message off the wire.
    """
    while self.isActive:
      try:
        sz, = unpack('!i', self._socket.readAll(4))
        with self._varz.recv_time.Measure():
          with self._varz.recv_latency.Measure():
            buf = BytesIO(self._socket.readAll(sz))
        self._varz.messages_recv()
        gevent.spawn(self._ProcessReply, buf)
      except Exception as e:
        self._Shutdown(e)
        break

  @abstractmethod
  def _ProcessReply(self, stream):
    raise NotImplementedError()

  def _ProcessTaggedReply(self, tag, stream):
    tup = self._ReleaseTag(tag)
    if tup:
      reply_stack, start_time, props = tup
      props[Tag.KEY] = None
      self._varz.transport_latency(time.time() - start_time)
      stream.seek(0)
      reply_stack.AsyncProcessResponseStream(stream)

  def _ReleaseTag(self, tag):
    """Return a tag to the tag pool.

    Note: Tags are only returned when the server has ACK'd (or NACK'd) them
    with an Rdispatch message (or similar).  Client-initiated timeouts do NOT
    return tags to the pool.

    Args:
      tag - The tag to return.

    Returns:
      The ClientChannelSinkStack associated with the tag's response.
    """
    tup = self._tag_map.pop(tag, None)
    self._tag_pool.release(tag)
    return tup

  @abstractmethod
  def _BuildHeader(self, tag, msg_type, data_len):
    raise NotImplementedError()

  def AsyncProcessRequest(self, sink_stack, msg, stream, headers):
    if self._state == ChannelState.Idle and self._open_result:
      self._log.debug('Waiting for channel to be open')
      self._open_result.wait()

    if ((self._state == ChannelState.Idle and not self._open_result)
        or self._state == ChannelState.Closed) and sink_stack is not None:
      err_msg = MethodReturnMessage(error=Exception('Sink not open.'))
      sink_stack.AsyncProcessResponseMessage(err_msg)
      return

    if not msg.is_one_way:
      tag = self._tag_pool.get()
      msg.properties[Tag.KEY] = tag
      self._tag_map[tag] = (sink_stack, time.time(), msg.properties)
    else:
      tag = 0

    data_len = stream.tell()
    header = self._BuildHeader(tag, headers[TransportHeaders.MessageType], data_len)
    payload = header + stream.getvalue()
    self._send_queue.put((payload, msg.properties))

  def AsyncProcessResponse(self, sink_stack, context, stream, msg):
    pass
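Two helpers this sink leans on are not shown in the snippet. From the way _HandleTimeout uses them, the deadline events only need Get() (has the deadline passed?) and Subscribe() (fire a callback on expiry); a minimal gevent stand-in with assumed semantics, not the scales implementation:

import gevent

class TimeoutFlag(object):
  """Hypothetical stand-in for the deadline event consumed above."""
  def __init__(self, seconds):
    self._expired = False
    self._callbacks = []
    gevent.spawn_later(seconds, self._expire)

  def _expire(self):
    self._expired = True
    for cb in self._callbacks:
      cb(self)                  # notify subscribers of the expiry

  def Get(self):
    return self._expired        # True once the deadline has passed

  def Subscribe(self, callback, once=True):
    if self._expired:
      callback(self)            # already expired: fire immediately
    else:
      self._callbacks.append(callback)

Likewise, TagPool is only ever asked for get() and release(); a free-list sketch (again an assumption, not the actual scales class):

class SimpleTagPool(object):
  """Hands out unique integer tags and recycles released ones."""
  def __init__(self, max_tag):
    self._max = max_tag
    self._next = 1              # tag 0 is reserved for one-way messages
    self._free = []

  def get(self):
    if self._free:
      return self._free.pop()
    if self._next > self._max:
      raise Exception('tag pool (1..%d) exhausted' % self._max)
    tag = self._next
    self._next += 1
    return tag

  def release(self, tag):
    self._free.append(tag)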
Example #52
0
class gwhatweb(object):
    def __init__(self, url, webdata):
        self.tasks = Queue()
        self.url = url.rstrip("/")
        self.cmsdict = {}
        self.cmsname = None
        for i in webdata:
            self.tasks.put(i)

        print("webdata total:%d" % len(webdata))

    def _GetMd5(self, body):
        m2 = hashlib.md5()
        m2.update(body)
        return m2.hexdigest()

    def _clearQueue(self):
        while not self.tasks.empty():
            self.tasks.get()

    def _worker(self):
        data = self.tasks.get()
        test_url = "{0}{1}".format(self.url, data["url"])
        req = None
        try:
            req = requests.get(test_url, timeout=10)
        except requests.RequestException:
            # network failure: leave req as None and give up below
            pass

        if not req:
            # req is None on error; a Response is also falsy for 4xx/5xx
            return False

        result = checkcms(req, data)

        if result:

            if result > 100:
                logger.info('web is {0} finger: {1}'.format(
                    data['name'], data['url']))
                return data['name']

            if data['name'] not in self.cmsdict:
                logger.info('web look like {0}'.format(data['name']))
                self.cmsdict[data['name']] = data['weight']
                logger.info('cms weight:{}'.format(self.cmsdict[data['name']]))
            else:
                self.cmsdict[data['name']] += data['weight']
                logger.info('cms weight:{}'.format(self.cmsdict[data['name']]))
                if self.cmsdict[data['name']] > 100:
                    logger.info('web is {0} finger: {1}'.format(
                        data['name'], data['url']))

                    return data['name']
        return False

    def _boss(self):
        while not self.tasks.empty():
            flag = self._worker()
            if flag:
                self.cmsname = flag
                self._clearQueue()

    def whatweb(self, maxsize=5):
        allr = [gevent.spawn(self._boss) for i in range(maxsize)]
        gevent.joinall(allr)
        return self.cmsname
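The _boss/_clearQueue pair implements cooperative early exit: whichever worker finds a conclusive match drains the shared queue, so the sibling greenlets fall out of their while-not-empty loops on their own. The same trick in isolation, with an invented "match" condition:

import gevent
from gevent.queue import Queue

tasks = Queue()
for n in range(100):
    tasks.put(n)
hits = []

def scan():
    while not tasks.empty():
        n = tasks.get()
        if n == 42:                    # pretend this is a conclusive match
            hits.append(n)
            while not tasks.empty():   # drain: siblings exit their loops
                tasks.get()

gevent.joinall([gevent.spawn(scan) for _ in range(5)])
print(hits)   # [42]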
Example #54
0
def _Init(self):
  self._tag_map = {}
  self._open_result = None
  self._tag_pool = TagPool((2 ** 24) - 1, self._service, self._socket_source)
  self._greenlets = []
  self._send_queue = Queue()
Example #55
0
#!/usr/bin/env python
# encoding: utf-8

# SystemError: (libev) select: Unknown error
import socket
import gevent
from gevent.queue import Queue, Empty

tasks = Queue(maxsize=3)


def worker(name):
    try:
        while True:
            task = tasks.get(timeout=1)
            print(('Worker %s got task %s' % (name, task)))
            gevent.sleep(0)
    except Empty:
        print(('Quitting time of %s!' % name))


def boss():
    for i in range(1, 11):
        tasks.put(i)
    print('Assigned all work in iteration 1')
    for i in range(11, 21):
        tasks.put(i)
    print('Assigned all work in iteration 2')


gevent.joinall([
    gevent.spawn(boss),
    # worker names below are illustrative; the original listing was truncated
    gevent.spawn(worker, 'steve'),
    gevent.spawn(worker, 'john'),
    gevent.spawn(worker, 'bob'),
])
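Because tasks was created with maxsize=3, boss blocks on put() as soon as three items are waiting and yields to the workers; the interleaving of "got task" and "Assigned all work" output is exactly what this example demonstrates.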
Example #56
0
class PAService(TradeService):
    def __init__(self, name):
        super(PAService, self).__init__(name)

        self.active = False  # running flag
        self.queue = Queue()  # work queue
        self.thread = Thread(target=self._processData)  # worker thread
        self.logger = instance.getLogger()
        self.symbols = set()  # contracts already subscribed to
        # self.latest_ticks = {}
        self.contracts = {ProductClass.Future: {}, ProductClass.Stock: {}}
        self.generate_bars = []
        self.kline_symbols = []  # symbols with real-time K-line generation
        self.all_calc_mins = {}

    def init(self, cfgs, **kwargs):
        self.service_id = cfgs.get('id')
        self.service_type = ServiceType.DataPAServer
        super(PAService, self).init(cfgs)
        # SymbolBarManager().init(cfgs.get('generate_bars'))
        self.generate_bars = map(str.strip,
                                 cfgs.get('generate_bars', '').split(','))
        kline.mongodb_conn = instance.datasourceManager.get('mongodb').conn
        self.contracts[ProductClass.Future] = get_all_contracts(
            type_=ProductClass.Future)

        symbols = self.cfgs.get('kline_symbols', '').split(',')
        symbols = map(str.strip, symbols)
        self.kline_symbols = symbols

    def syncDownServiceConfig(self):
        TradeService.syncDownServiceConfig(self)

    def setupFanoutAndLogHandler(self):
        from mantis.trade.log import TradeServiceLogHandler
        self.initFanoutSwitchers(self.cfgs.get('fanout'))
        handler = TradeServiceLogHandler(self)
        self.logger.addHandler(handler)

    def start(self, block=False):
        self.setupFanoutAndLogHandler()
        # set up the logging engine
        super(PAService, self).start()
        self.active = True
        # self.thread.start()

        self.prepare_kline_calc_minutes()

        self.registerTimedTask(self.make_kline, timeout=1)

    def stop(self):
        super(PAService, self).stop()

        self.active = False

    def join(self):
        # self.thread.join()
        pass

    def onXminBar(self, scale, bar):
        """
        :param scale:
        :param bar: (VtBarData)
        :return:
        """
        symbol = bar.vtSymbol
        self.logger.debug('onXminBar: {} - {}'.format(symbol, scale))

        hashobj = copy.copy(bar.__dict__)

        # note on obj.__dict__: hashobj is a shallow copy, so mutating nested
        # values of hashobj also changes the corresponding attributes of bar

        hashobj['datetime'] = ''
        hashobj['scale'] = scale
        hashobj.pop('_id', None)  # drop mongodb's _id field if present
        jsondata = json.dumps(hashobj)
        self.dataFanout('switch0', jsondata, symbol=symbol, scale=scale)

    def onTick(self, symbol, tick):
        """

        :param symbol:
        :param tick: (VtTickData)
        :return:
        """
        # self.queue.put((symbol,tick))
        # self.latest_ticks[symbol] = tick
        pass

    def _processData(self):
        self.active = True
        while self.active:
            try:
                data = self.queue.get(block=True, timeout=1)
                if not data:
                    continue
                symbol, tick = data
                SymbolBarManager().onTick(symbol, tick)
            except Exception as e:
                # self.logger.error( str(e) )
                # traceback.print_exc()
                pass

    def _make_kline(self, timer):
        """ 定时执行所有合约的k线生成
            生成k线bar发布到redis待写入mongodb
            实时生成指定合约的k线数据,合约定义:  ' kline_symbols'
        """
        # symbols = self.contracts[ProductClass.Future].keys()
        for symbol in self.kline_symbols:

            # prefix = get_symbol_prefix(symbol)
            # periodically generate the contract's 1-minute K-line
            bar = kline.make_lastest_min_bar(symbol)
            if bar:
                self.onXminBar('1m', bar)

            for scale in self.generate_bars:
                bar = kline.make_latest_nmin_bar(symbol, scale)
                if bar:
                    self.onXminBar(scale, bar)

        timer.start()

    def prepare_kline_calc_minutes(self):
        """当天载入当日计算时间分钟点和跨日分钟点
            凌晨2:40停止程序运行
        """
        now = DateTime.now()
        if now.time() < Time(3, 0):  # started in the small hours: use the previous day's minute marks
            now = now - TimeDelta(days=1)

        for symbol in self.kline_symbols:
            self.all_calc_mins[symbol] = {}
            for k in (1, 5, 15, 30, 60):
                mins = kline.get_day_trade_calc_minutes_new(symbol, k, now)
                self.all_calc_mins[symbol][k] = mins

    def make_kline(self, timer):
        """ 定时执行所有合约的k线生成
            生成k线bar发布到redis待写入mongodb
            实时生成指定合约的k线数据,合约定义:  ' kline_symbols'
        """
        # symbols = self.contracts[ProductClass.Future].keys()
        for symbol in self.kline_symbols:

            # for scale in self.generate_bars:
            for k in (1, 5, 15, 30, 60):
                scale = '{}m'.format(k)
                calc_mins = self.all_calc_mins.get(symbol).get(k)

                # periodically generate the contract's latest bar for this scale
                bar = kline.make_lastest_min_bar(symbol, scale, calc_mins)
                if bar:
                    self.onXminBar(scale, bar)  # notify subscribers

        timer.start()
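make_kline re-arms its own timer (timer.start() at the end of each run) rather than relying on a recurring schedule, which keeps runs from overlapping. A generic re-arming sketch using gevent, with a hypothetical job in place of the K-line work:

import gevent

def tick():
    print('generating bars...')   # placeholder for the periodic job
    gevent.spawn_later(1, tick)   # re-arm only after the run completes

gevent.spawn_later(1, tick)
gevent.sleep(5)                   # let a few ticks fire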
Example #57
0
def queue(self):
    return Queue()
class CompetitiveDialogueDataBridge(object):
    """ Competitive Dialogue Data Bridge """
    copy_name_fields = (
        'title', 'title_ru', 'title_en', 'description', 'description_en', 'description_ru',
        'mode', 'procurementMethodDetails', 'submissionMethodDetails',
        'minimalStep', 'value',
        'procuringEntity', 'buyers',
    )
    rewrite_statuses = ['draft']
    allowed_statuses = ['active.tendering', 'active.pre-qualification', 'active.pre-qualification.stand-still',
                        'active.auction', 'active.qualification', 'active.awarded', 'complete', 'cancelled',
                        'unsuccessful', STAGE2_STATUS]

    def __init__(self, config):
        super(CompetitiveDialogueDataBridge, self).__init__()
        self.config = config

        self.tenders_sync_client = TendersClientSync(
            '',
            host_url=self.config_get('public_tenders_api_server') or self.config_get('tenders_api_server'),
            api_version=self.config_get('tenders_api_version'),
        )

        self.client = TendersClientSync(
            self.config_get('api_token'),
            host_url=self.config_get('tenders_api_server'),
            api_version=self.config_get('tenders_api_version'),
        )

        def_queue_size = 500
        def_watcher_delay = 15
        self.initial_sync_point = {}
        self.initialization_event = gevent.event.Event()
        self.competitive_dialogues_queue = Queue(maxsize=def_queue_size)  # Id tender which need to check
        self.handicap_competitive_dialogues_queue = Queue(maxsize=def_queue_size)
        self.dialogs_stage2_put_queue = Queue(maxsize=def_queue_size)  # queue with new tender data
        self.dialogs_stage2_retry_put_queue = Queue(maxsize=def_queue_size)

        self.dialog_stage2_id_queue = Queue(maxsize=def_queue_size)
        self.dialog_retry_stage2_id_queue = Queue(maxsize=def_queue_size)

        self.dialogs_stage2_patch_queue = Queue(maxsize=def_queue_size)
        self.dialogs_stage2_retry_patch_queue = Queue(maxsize=def_queue_size)

        self.dialog_set_complete_queue = Queue(maxsize=def_queue_size)
        self.dialog_retry_set_complete_queue = Queue(maxsize=def_queue_size)
        self.jobs_watcher_delay = self.config_get('jobs_watcher_delay') or def_watcher_delay

    def config_get(self, name):
        return self.config.get('main').get(name)

    @retry(stop_max_attempt_number=5, wait_exponential_multiplier=1000)
    def get_tender_credentials(self, tender_id):
        self.client.headers.update({'X-Client-Request-ID': generate_req_id()})
        logger.info("Getting credentials for tender {}".format(tender_id),
                    extra=journal_context({"MESSAGE_ID": DATABRIDGE_GET_CREDENTIALS},
                                          {"TENDER_ID": tender_id}))

        data = self.client.extract_credentials(tender_id)
        logger.info("Got tender {} credentials".format(tender_id),
                    extra=journal_context({"MESSAGE_ID": DATABRIDGE_GOT_CREDENTIALS},
                                          {"TENDER_ID": tender_id}))
        return data

    @retry(stop_max_attempt_number=5, wait_exponential_multiplier=1000)
    def initialize_sync(self, params=None, direction=None):
        # TODO use gevent.Event to wake up forward sync instead of checking
        # initial sync point
        if direction == "backward":
            assert params['descending']
            response = self.tenders_sync_client.sync_tenders(params, extra_headers={'X-Client-Request-ID': generate_req_id()})
            # set values in reverse order due to 'descending' option
            self.initial_sync_point = {'forward_offset': response.prev_page.offset,
                                       'backward_offset': response.next_page.offset}
            self.initialization_event.set()
            logger.info("Initial sync point {}".format(self.initial_sync_point))
            return response
        elif not self.initial_sync_point:
            raise ValueError
        else:
            assert 'descending' not in params
            gevent.wait([self.initialization_event])
            self.initialization_event.clear()
            params['offset'] = self.initial_sync_point['forward_offset']
            logger.info("Starting forward sync from offset {}".format(params['offset']))
            return self.tenders_sync_client.sync_tenders(params,
                                                         extra_headers={'X-Client-Request-ID': generate_req_id()})
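    # The backward/forward handoff above, in miniature: the descending pass
    # records where the ascending pass must start, then sets the event the
    # ascending pass is blocked on (names below are illustrative):
    #
    #     ready = gevent.event.Event()
    #     def backward(): sync_point['forward_offset'] = offset; ready.set()
    #     def forward():  ready.wait(); resume_from(sync_point['forward_offset'])
    #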

    def get_tenders(self, params, direction=""):
        response = self.initialize_sync(params=params, direction=direction)

        while not (params.get('descending') and not len(response.data) and params.get('offset') == response.next_page.offset):
            tenders_list = response.data
            params['offset'] = response.next_page.offset

            delay = 101  # idle: back off between polls
            if tenders_list:
                delay = 15  # busy: poll again soon
                logger.info("Client {} params: {}".format(direction, params))
            for tender in tenders_list:
                # Check, if we already work with this tender
                if tender['id'] in dialog_work:
                    logger.info('WORK with tender {}'.format(tender['id']))
                    continue
                if tender['procurementMethodType'] in [CD_UA_TYPE, CD_EU_TYPE] and tender['status'] == 'active.stage2.waiting':
                    logger.info('{0} sync: Found competitive dialogue (stage1), id={1} with status {2}'.format(direction.capitalize(), tender['id'], tender['status']),
                                extra=journal_context({"MESSAGE_ID": DATABRIDGE_FOUND_NOLOT},
                                                      {"TENDER_ID": tender['id']}))
                    yield tender
                else:
                    logger.debug('{0} sync: Skipping tender {1} in status {2} with procurementMethodType {3}'.format(direction.capitalize(), tender['id'], tender['status'], tender['procurementMethodType']),
                                 extra=journal_context(params={"TENDER_ID": tender['id']}))

            logger.info('Sleep {} sync...'.format(direction),
                        extra=journal_context({"MESSAGE_ID": DATABRIDGE_SYNC_SLEEP}))
            gevent.sleep(delay)
            logger.info('Restore {} sync'.format(direction),
                        extra=journal_context({"MESSAGE_ID": DATABRIDGE_SYNC_RESUME}))
            logger.debug('{} {}'.format(direction, params))
            response = self.tenders_sync_client.sync_tenders(params,
                                                             extra_headers={'X-Client-Request-ID': generate_req_id()})

    def get_competitive_dialogue_data(self):
        while True:
            try:
                tender_to_sync = self.competitive_dialogues_queue.peek()  # peek at the competitive dialogue we want to sync
                tender = self.tenders_sync_client.get_tender(tender_to_sync['id'])['data']  # try to fetch its data by tender id
            except Exception as e:
                # If anything goes wrong, put the tender back on the queue
                logger.exception(e)
                logger.info('Putting tender {} back to tenders queue...'.format(tender_to_sync['id']),
                            extra=journal_context(params={"TENDER_ID": tender_to_sync['id']}))

                self.competitive_dialogues_queue.get()  # remove the erroneous item so it can be re-added at the tail
                # Greenlets only switch on blocking calls, so nothing else runs between
                # this get() and the put() below unless put() blocks on a full queue;
                # the requeue is close to atomic, but not strictly so.
                self.competitive_dialogues_queue.put(tender_to_sync)

                logger.info('Tender {} put back to tenders queue. Queue size: {}'.format(
                    tender_to_sync['id'], self.competitive_dialogues_queue.qsize()))
            else:
                if 'stage2TenderID' in tender:
                    try:
                        tender_stage2 = self.tenders_sync_client.get_tender(tender['stage2TenderID'])['data']
                    except Exception:
                        logger.info('Tender stage 2 for dialogue id={0} does not exist, need to create a new one'.format(tender['id']),
                                    extra=journal_context({"MESSAGE_ID": DATABRIDGE_TENDER_STAGE2_NOT_EXIST},
                                                          {"TENDER_ID": tender['id']}))
                    else:
                        if tender_stage2.get('status') in self.allowed_statuses:
                            if tender.get('status') == 'complete':
                                logger.warn('Dialog {0} already has complete status, silently removing it from the initial queue.'.format(tender['id']),
                                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_ONLY_PATCH},
                                                                  {"TENDER_ID": tender['id']}))
                                self.competitive_dialogues_queue.get()  # Remove from the queue
                            else:
                                logger.info('Tender stage 2 already exists for dialog {0}, only a patch is needed'.format(tender['id']),
                                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_ONLY_PATCH},
                                                                  {"TENDER_ID": tender['id']}))
                                patch_data = {"id": tender['id'],
                                              "status": "complete"}
                                self.competitive_dialogues_queue.get()  # Remove from the queue
                                self.dialog_set_complete_queue.put(patch_data)

                            continue
                        elif tender_stage2.get('status') in self.rewrite_statuses:
                            logger.info('Tender stage 2 id={0} has a status that must be rewritten, need to create a new one'.format(tender['id']),
                                        extra=journal_context({"MESSAGE_ID": DATABRIDGE_CREATE_NEW_STAGE2},
                                                              {"TENDER_ID": tender['id']}))

                logger.info('Copying competitive dialogue data, id={}'.format(tender['id']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_COPY_TENDER_ITEMS},
                                                  {"TENDER_ID": tender['id']}))
                new_tender = dict(procurementMethod='selective', status='draft', dialogueID=tender['id'])

                for field_name in self.copy_name_fields:  # Copy fields from stage 1 competitive dialog
                    if field_name in tender:
                        new_tender[field_name] = tender[field_name]
                if tender['procurementMethodType'].endswith('EU'):
                    new_tender['procurementMethodType'] = STAGE_2_EU_TYPE
                else:
                    new_tender['procurementMethodType'] = STAGE_2_UA_TYPE
                new_tender['tenderID'] = '{}.2'.format(tender['tenderID']) # set tenderID as in stage1 + '.2'
                old_lots, items, short_listed_firms = dict(), list(), dict()
                for qualification in tender['qualifications']:
                    if qualification['status'] == 'active':  # check if qualification has status active
                        if qualification.get('lotID'):
                            if qualification['lotID'] not in old_lots:  # check if lot id in local dict with new lots
                                lot = prepare_lot(tender, qualification['lotID'], items)  # update lot with new id
                                if not lot:  # skip this qualification if the lot could not be prepared
                                    continue
                                old_lots[qualification['lotID']] = lot  # set new lot in local dict
                            bid = get_bid_by_id(tender['bids'], qualification['bidID'])
                            for bid_tender in bid['tenderers']:
                                if bid_tender['identifier']['id'] not in short_listed_firms:
                                    short_listed_firms[bid_tender['identifier']['id']] = {"name": bid_tender['name'],
                                                                                          "identifier": bid_tender['identifier'],
                                                                                          "lots": [{"id": old_lots[qualification['lotID']]['id']}]}
                                else:
                                    short_listed_firms[bid_tender['identifier']['id']]['lots'].append(
                                        {"id": old_lots[qualification['lotID']]['id']})
                        else:
                            new_tender['items'] = copy.deepcopy(tender['items'])  # no lot: copy all items as-is
                            bid = get_bid_by_id(tender['bids'], qualification['bidID'])
                            for bid_tender in bid['tenderers']:
                                if bid_tender['identifier']['id'] not in short_listed_firms:
                                    short_listed_firms[bid_tender['identifier']['id']] = {"name": bid_tender['name'],
                                                                                          "identifier": bid_tender['identifier'],
                                                                                          "lots": []}
                if items:  # if lots were prepared, keep only the items related to them
                    new_tender['items'] = items
                new_tender['lots'] = old_lots.values()
                if 'features' in tender:
                    new_tender['features'] = []
                    for feature in tender.get('features'):
                        if feature['featureOf'] == 'tenderer':  # features on tenderers are copied as-is
                            new_tender['features'].append(feature)
                        elif feature['featureOf'] == 'item':  # item features are kept only if the item survived
                            if feature['relatedItem'] in (item['id'] for item in new_tender['items']):
                                new_tender['features'].append(feature)
                        elif feature['featureOf'] == 'lot':  # lot features are kept only if the lot survived
                            if feature['relatedItem'] in old_lots.keys():
                                new_tender['features'].append(feature)
                new_tender['shortlistedFirms'] = short_listed_firms.values()
                self.competitive_dialogues_queue.get()
                self.handicap_competitive_dialogues_queue.put(new_tender)
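
    @staticmethod
    def _get_bid_by_id_sketch(bids, bid_id):
        # Hypothetical editorial sketch of the imported get_bid_by_id helper
        # used above, assuming bids is a list of dicts carrying an 'id' key;
        # the real helper may differ.
        for bid in bids:
            if bid['id'] == bid_id:
                return bid
        return None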

    def prepare_new_tender_data(self):
        while True:
            new_tender = self.handicap_competitive_dialogues_queue.get()
            try:
                logger.info("Getting extra info for competitive dialogue, id={0}".format(new_tender['dialogueID']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_GET_EXTRA_INFO},
                                                  {"TENDER_ID": new_tender['dialogueID']}))
                tender_data = self.get_tender_credentials(new_tender['dialogueID'])
            except Exception as e:
                logger.exception(e)
                logger.info("Can't get competitive dialogue credentials, id={0}".format(new_tender['dialogueID']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_MISSING_CREDENTIALS},
                                                  {"TENDER_ID": new_tender['dialogueID']}))
                self.handicap_competitive_dialogues_queue.put(new_tender)
            else:
                logger.debug("Got extra info for competitive dialogue, id={}".format(new_tender['dialogueID']),
                             extra=journal_context({"MESSAGE_ID": DATABRIDGE_GOT_EXTRA_INFO},
                                                   {"TENDER_ID": new_tender['dialogueID']}))
                data = tender_data.data
                new_tender['owner'] = data['owner']
                new_tender['dialogue_token'] = data['tender_token']
                self.dialogs_stage2_put_queue.put(new_tender)
            gevent.sleep(0)

    def put_tender_stage2(self):
        """
        Create tender for stage 2
        """
        while True:
            new_tender = self.dialogs_stage2_put_queue.get()
            logger.info("Creating tender stage2 from competitive dialogue id={}".format(new_tender['dialogueID']),
                        extra=journal_context({"MESSAGE_ID": DATABRIDGE_CREATE_NEW_TENDER},
                                              {"TENDER_ID": new_tender['dialogueID']}))
            data = {"data": new_tender}
            try:
                res = self.client.create_tender(data)
            except ResourceError as re:
                if re.status_int == 412:  # Update Cookie, and retry
                    self.client.headers['Cookie'] = re.response.headers['Set-Cookie']
                elif re.status_int == 422:  # WARNING and don't retry
                    logger.warn("Catch 422 status, stop create tender stage2",
                                extra=journal_context({"MESSAGE_ID": DATABRIDGE_UNSUCCESSFUL_CREATE},
                                                      {"TENDER_ID": new_tender['dialogueID']}))
                    logger.warn("Error response {}".format(re.message),
                                extra=journal_context({"MESSAGE_ID": DATABRIDGE_UNSUCCESSFUL_CREATE},
                                                      {"TENDER_ID": new_tender['dialogueID']}))
                    continue
                elif re.status_int == 404:  # WARNING and don't retry
                    logger.warn("Catch 404 status, stop create tender stage2",
                                extra=journal_context({"MESSAGE_ID": DATABRIDGE_UNSUCCESSFUL_CREATE},
                                                      {"TENDER_ID": new_tender['dialogueID']}))
                    continue
                logger.info("Unsuccessful put for tender stage2 of competitive dialogue id={0}".format(new_tender['dialogueID']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_UNSUCCESSFUL_CREATE},
                                                  {"TENDER_ID": new_tender['dialogueID']}))
                logger.info("Schedule retry for competitive dialogue id={0}".format(new_tender['dialogueID']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_RETRY_CREATE},
                                                  {"TENDER_ID": new_tender['dialogueID']}))
                self.dialogs_stage2_retry_put_queue.put(new_tender)
            except Exception as e:
                logger.info("Exception, schedule retry for competitive dialogue id={0}".format(new_tender['dialogueID']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_RETRY_CREATE},
                                                  {"TENDER_ID": new_tender['dialogueID']}))
                self.dialogs_stage2_retry_put_queue.put(new_tender)
                logger.exception(e)
            else:
                logger.info("Successfully created tender stage2 id={} from competitive dialogue id={}".format(res['data']['id'], res['data']['dialogueID']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_TENDER_CREATED},
                                                  {"DIALOGUE_ID": res['data']['dialogueID'],
                                                   "TENDER_ID": res['data']['id']}))
                # Put data in queue for patch dialog
                dialog = {"id": res['data']['dialogueID'],
                          "stage2TenderID": res['data']['id']}
                self.dialog_stage2_id_queue.put(dialog)
            gevent.sleep(0)

    def patch_dialog_add_stage2_id(self):
        """
        Patch the original competitive dialogue: set the stage 2 tender id (field stage2TenderID)
        """
        while True:
            dialog = self.dialog_stage2_id_queue.get()
            dialog_work.add(dialog['id'])
            logger.info("Patch competitive dialogue id={} with stage2 tender id".format(dialog['id']),
                        extra=journal_context({"MESSAGE_ID": DATABRIDGE_CD_PATCH_STAGE2_ID},
                                              {"TENDER_ID": dialog['id']}))

            patch_data = {"data": dialog}
            try:
                res_patch = self.client.patch_tender(patch_data)
            except Exception as e:
                logger.exception(e)
                logger.info("Unsuccessful patch competitive dialogue id={0} with stage2 tender id".format(dialog['id']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_CD_UNSUCCESSFUL_PATCH_STAGE2_ID},
                                                  {"TENDER_ID": dialog['id']}))
                logger.info("Schedule retry for competitive dialogue id={0}".format(dialog['id']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_CD_RETRY_PATCH_STAGE2_ID},
                                                  {"TENDER_ID": dialog['id']}))
                self.dialog_retry_stage2_id_queue.put(dialog)
            else:
                logger.info("Successful patch competitive dialogue id={0} with stage2 tender id".format(res_patch['data']['id']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_CD_PATCHED_STAGE2_ID},
                                                  {"DIALOGUE_ID": res_patch['data']['id'],
                                                   "TENDER_ID": res_patch['data']['stage2TenderID']}))
                data = {"id": dialog['stage2TenderID'],
                        "status": STAGE2_STATUS,
                        "dialogueID": dialog['id']}
                self.dialogs_stage2_patch_queue.put(data)
            gevent.sleep(0)

    @retry(stop_max_attempt_number=5, wait_exponential_multiplier=1000)
    def _patch_dialog_add_stage2_id_with_retry(self, dialog):
        try:
            data = {"data": dialog}
            logger.info("Patch competitive dialogue id={0}".format(dialog['id']),
                        extra=journal_context({"MESSAGE_ID": DATABRIDGE_PATCH_DIALOG},
                                              {"TENDER_ID": dialog['id']}))
            self.client.patch_tender(data)
        except Exception as e:
            logger.exception(e)
            raise

    def retry_patch_dialog_add_stage2_id(self):
        while True:
            try:
                dialog = self.dialog_retry_stage2_id_queue.get()
                self._patch_dialog_add_stage2_id_with_retry(dialog)
            except Exception:
                logger.warn("Can't patch competitive dialogue id={0}".format(dialog['id']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_CD_UNSUCCESSFUL_PATCH_STAGE2_ID},
                                                  {"TENDER_ID": dialog['id']}))
                self.competitive_dialogues_queue.put({"id": dialog['id']})
            else:
                data = {"id": dialog['stage2TenderID'],
                        "status": STAGE2_STATUS,
                        "dialogueID": dialog['id']}
                self.dialogs_stage2_patch_queue.put(data)
            gevent.sleep(0)

    def patch_new_tender_status(self):
        while True:
            patch_data = self.dialogs_stage2_patch_queue.get()
            logger.info("Patch tender stage2 id={0} with status {1}".format(patch_data['id'], patch_data['status']),
                        extra=journal_context({"MESSAGE_ID": DATABRIDGE_PATCH_NEW_TENDER_STATUS},
                                              {"TENDER_ID": patch_data["id"]}))
            try:
                res = self.client.patch_tender({"data": patch_data})
            except Exception as e:
                logger.exception(e)
                logger.info("Unsuccessful path tender stage2 id={0} with status {1}".format(patch_data['id'], patch_data['status']))
                logger.info("Schedule retry patch for tender stage2 {0}".format(patch_data['id']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_UNSUCCESSFUL_PATCH_NEW_TENDER_STATUS},
                                                  {"TENDER_ID": patch_data['id']}))
                self.dialogs_stage2_retry_patch_queue.put(patch_data)
            else:
                logger.info("Successful patch tender stage2 id={0} with status {1}".format(patch_data['id'], patch_data['status']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_SUCCESSFUL_PATCH_NEW_TENDER_STATUS}))
                patch_data = {"id": res['data']['dialogueID'],
                              "status": "complete"}
                self.dialog_set_complete_queue.put(patch_data)

    @retry(stop_max_attempt_number=5, wait_exponential_multiplier=1000)
    def _patch_new_tender_status_with_retry(self, new_tender):
        try:
            data = {"data": new_tender}
            logger.info("Patch new tender stage2 id={0} status".format(new_tender['id']),
                        extra=journal_context({"MESSAGE_ID": DATABRIDGE_PATCH_NEW_TENDER_STATUS},
                                              {"TENDER_ID": new_tender['id']}))
            self.client.patch_tender(data)
        except Exception as e:
            logger.exception(e)
            raise

    def patch_dialog_status(self):
        while True:
            patch_data = self.dialog_set_complete_queue.get()
            logger.info("Patch competitive dialogue id={0} with status {1}".format(patch_data['id'], patch_data['status']),
                        extra=journal_context({"MESSAGE_ID": DATABRIDGE_PATCH_DIALOG_STATUS},
                                              {"TENDER_ID": patch_data["id"]}))
            try:
                self.client.patch_tender({"data": patch_data})
            except Exception as e:
                logger.exception(e)
                logger.info("Unsuccessful path competitive dialogue id={0} with status {1}".format(patch_data['id'], patch_data['status']))
                logger.info("Schedule retry patch for competitive dialogue id={0}".format(patch_data['id']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_UNSUCCESSFUL_PATCH_DIALOG_STATUS},
                                                  {"TENDER_ID": patch_data['id']}))
                self.dialog_retry_set_complete_queue.put(patch_data)
            else:
                logger.info("Successful patch competitive dialogue id={0} with status {1}".format(patch_data['id'], patch_data['status']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_SUCCESSFUL_PATCH_DIALOG_STATUS}))
                try:
                    dialog_work.remove(patch_data['id'])
                except KeyError:
                    pass

    def retry_patch_dialog_status(self):
        while True:
            patch_data = self.dialog_retry_set_complete_queue.get()
            try:
                self._patch_dialog_status_with_retry(patch_data)
            except Exception:
                logger.warn("Can't patch competitive dialogue id={0} with status {1}".format(patch_data['id'], patch_data['status']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_UNSUCCESSFUL_PATCH_DIALOG_STATUS,
                                                   "TENDER_ID": patch_data['id']}))
                self.competitive_dialogues_queue.put({"id": patch_data['id']})
            gevent.sleep(0)

    @retry(stop_max_attempt_number=5, wait_exponential_multiplier=1000)
    def _patch_dialog_status_with_retry(self, patch_data):
        try:
            data = {"data": patch_data}
            logger.info("Patch competitive dialogue id={0} with status {1}".format(patch_data['id'], patch_data['status']),
                        extra=journal_context({"MESSAGE_ID": DATABRIDGE_PATCH_DIALOG_STATUS},
                                              {"TENDER_ID": patch_data['id']}))
            self.client.patch_tender(data)
        except Exception as e:
            logger.exception(e)
            raise

    def retry_patch_new_tender_status(self):
        while True:
            patch_data = self.dialogs_stage2_retry_patch_queue.get()
            try:
                self._patch_new_tender_status_with_retry(patch_data)
            except Exception:
                logger.warn("Can't patch tender stage2 id={0} with status {1}".format(patch_data['id'], patch_data['status']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_UNSUCCESSFUL_PATCH_NEW_TENDER_STATUS,
                                                   "TENDER_ID": patch_data['id']}))
                self.competitive_dialogues_queue.put({"id": patch_data['dialogueID']})
            gevent.sleep(0)

    @retry(stop_max_attempt_number=5, wait_exponential_multiplier=10000)
    def _put_with_retry(self, new_tender):
        data = {"data": new_tender}
        logger.info("Creating tender stage2 from competitive dialogue id={0}".format(new_tender['dialogueID']),
                    extra=journal_context({"MESSAGE_ID": DATABRIDGE_CREATE_NEW_TENDER},
                                          {"TENDER_ID": new_tender['dialogueID']}))
        try:
            res = self.client.create_tender(data)
        except ResourceError as re:
            if re.status_int == 412:  # Update Cookie, and retry
                self.client.headers['Cookie'] = re.response.headers['Set-Cookie']
            elif re.status_int == 422:  # WARNING and don't retry
                logger.warn("Catch 422 status, stop create tender stage2",
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_UNSUCCESSFUL_CREATE},
                                                  {"TENDER_ID": new_tender['dialogueID']}))
                logger.warn("Error response {}".format(re.message),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_UNSUCCESSFUL_CREATE},
                                                  {"TENDER_ID": new_tender['dialogueID']}))
            elif re.status_int == 404:  # WARNING and don't retry
                logger.warn("Catch 404 status, stop create tender stage2",
                            extra=journal_context(
                                {"MESSAGE_ID": DATABRIDGE_UNSUCCESSFUL_CREATE},
                                {"TENDER_ID": new_tender['dialogueID']}))
            else:
                logger.info("Unsuccessful put for tender stage2 of competitive dialogue id={0}".format(new_tender['dialogueID']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_UNSUCCESSFUL_CREATE},
                                                  {"TENDER_ID": new_tender['dialogueID']}))
            raise re
        except Exception as e:
            logger.exception(e)
            raise
        else:
            # Put data in queue for patch dialog
            dialog = {"id": res['data']['dialogueID'],
                      "stage2TenderID": res['data']['id']}
            self.dialog_stage2_id_queue.put(dialog)

    def retry_put_tender_stage2(self):
        while True:
            try:
                new_tender = self.dialogs_stage2_retry_put_queue.get()
                self._put_with_retry(new_tender)
            except Exception:
                new_tender.pop('dialogue_token', None)  # do not reveal tender credentials in logs
                logger.warn("Can't create tender stage2 from competitive dialogue id={0}".format(new_tender['dialogueID']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_CREATE_ERROR,
                                                   "TENDER_ID": new_tender['dialogueID']}))
                self.competitive_dialogues_queue.put({"id": new_tender['dialogueID']})
            else:
                # on success _put_with_retry has already queued the stage2 id
                # patch, so there is nothing left to do here
                pass
            gevent.sleep(0)

    def get_competitive_dialogue_forward(self):
        logger.info('Start forward data sync worker...')
        params = {'opt_fields': 'status,procurementMethodType', 'mode': '_all_'}
        try:
            for tender_data in self.get_tenders(params=params, direction="forward"):
                logger.info('Forward sync: Put competitive dialogue id={} to process...'.format(tender_data['id']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_TENDER_PROCESS},
                                                  {"TENDER_ID": tender_data['id']}))
                self.competitive_dialogues_queue.put(tender_data)
        except ResourceError as re:
            if re.status_int == 412:
                self.tenders_sync_client.headers['Cookie'] = re.response.headers['Set-Cookie']
                logger.warn('Forward caught 412, updating cookie and restarting worker', extra=journal_context({"MESSAGE_ID": DATABRIDGE_WORKER_DIED}, {}))
            logger.warn('Forward worker died!', extra=journal_context({"MESSAGE_ID": DATABRIDGE_WORKER_DIED}, {}))
            logger.error("Error response {}".format(re.message))
            raise re
        except Exception as e:
            # TODO reset queues and restart sync
            logger.warn('Forward worker died!', extra=journal_context({"MESSAGE_ID": DATABRIDGE_WORKER_DIED}, {}))
            logger.exception(e)
            raise e
        else:
            logger.warn('Forward data sync finished!')  # Should never happen!!!

    def get_competitive_dialogue_backward(self):
        logger.info('Start backward data sync worker...')
        params = {'opt_fields': 'status,procurementMethodType', 'descending': 1, 'mode': '_all_'}
        try:
            for tender_data in self.get_tenders(params=params, direction="backward"):
                logger.info('Backward sync: Put competitive dialogue id={} to process...'.format(tender_data['id']),
                            extra=journal_context({"MESSAGE_ID": DATABRIDGE_TENDER_PROCESS},
                                                  {"TENDER_ID": tender_data['id']}))
                self.competitive_dialogues_queue.put(tender_data)
        except ResourceError as re:
            if re.status_int == 412:
                self.tenders_sync_client.headers['Cookie'] = re.response.headers['Set-Cookie']
                logger.warn('Backward caught 412, updating cookie and restarting worker', extra=journal_context({"MESSAGE_ID": DATABRIDGE_WORKER_DIED}, {}))
            logger.warn('Backward worker died!', extra=journal_context({"MESSAGE_ID": DATABRIDGE_WORKER_DIED}, {}))
            logger.error("Error response {}".format(re.message))
            raise re
        except Exception as e:
            # TODO reset queues and restart sync
            logger.warn('Backward worker died!', extra=journal_context({"MESSAGE_ID": DATABRIDGE_WORKER_DIED}, {}))
            logger.exception(e)
            raise e
        else:
            logger.info('Backward data sync finished.')

    def catch_exception(self, exc, name):
        """Restarting job"""
        logger.warning('Worker died! Restarting {}.'.format(name), extra=journal_context({"MESSAGE_ID": DATABRIDGE_WORKER_DIED}, {}))
        if name == 'get_competitive_dialogue_data':
            tender = self.competitive_dialogues_queue.get()  # drop the invalid tender from the queue
            logger.info('Remove invalid tender {}'.format(tender['id']))
        self.immortal_jobs[name] = gevent.spawn(getattr(self, name))
        self.immortal_jobs[name].link_exception(partial(self.catch_exception, name=name))

    def _start_competitive_sculptors(self):
        logger.info('Start Competitive Dialogue Data Bridge')
        self.immortal_jobs = {
            'get_competitive_dialogue_data': gevent.spawn(self.get_competitive_dialogue_data),
            'prepare_new_tender_data': gevent.spawn(self.prepare_new_tender_data),
            'put_tender_stage2': gevent.spawn(self.put_tender_stage2),
            'retry_put_tender_stage2': gevent.spawn(self.retry_put_tender_stage2),
            'patch_dialog_add_stage2_id': gevent.spawn(self.patch_dialog_add_stage2_id),
            'retry_patch_dialog_add_stage2_id': gevent.spawn(self.retry_patch_dialog_add_stage2_id),
            'patch_new_tender_status': gevent.spawn(self.patch_new_tender_status),
            'retry_patch_new_tender_status': gevent.spawn(self.retry_patch_new_tender_status),
            'patch_dialog_status': gevent.spawn(self.patch_dialog_status),
            'retry_patch_dialog_status': gevent.spawn(self.retry_patch_dialog_status)
        }
        for name, job in self.immortal_jobs.items():
            job.link_exception(partial(self.catch_exception, name=name))

    def _start_competitive_workers(self):
        self.jobs = [
            gevent.spawn(self.get_competitive_dialogue_backward),
            gevent.spawn(self.get_competitive_dialogue_forward),
        ]

    def _restart_synchronization_workers(self):
        logger.warn("Restarting synchronization", extra=journal_context({"MESSAGE_ID": DATABRIDGE_RESTART}, {}))
        for j in self.jobs:
            j.kill(timeout=5)
        self._start_competitive_workers()

    def run(self):
        self._start_competitive_sculptors()
        self._start_competitive_workers()
        backward_worker, forward_worker = self.jobs
        counter = 0
        try:
            while True:
                gevent.sleep(self.jobs_watcher_delay)
                if counter == 20:
                    logger.info(
                        """Current state:
                                          First stages in processing {competitive_dialogues_queue};
                                          Prepared data for second stage {handicap_competitive_dialogues_queue};
                                          Prepared data with owner and token {dialogs_stage2_put_queue};
                                          Retry prepared data with owner and token {dialogs_stage2_retry_put_queue};
                                          Data with second stage ID {dialog_stage2_id_queue};
                                          Retry data with second stage ID {dialog_retry_stage2_id_queue};
                                          Data with new status and first stage ID {dialogs_stage2_patch_queue};
                                          Retry data with new status and first stage ID {dialogs_stage2_retry_patch_queue};
                                          Data with new status for first stage {dialog_set_complete_queue};
                                          Retry data with new status for first stage {dialog_retry_set_complete_queue}""".format(
                            competitive_dialogues_queue=self.competitive_dialogues_queue.qsize(),
                            handicap_competitive_dialogues_queue=self.handicap_competitive_dialogues_queue.qsize(),
                            dialogs_stage2_put_queue=self.dialogs_stage2_put_queue.qsize(),
                            dialogs_stage2_retry_put_queue=self.dialogs_stage2_retry_put_queue.qsize(),
                            dialog_stage2_id_queue=self.dialog_stage2_id_queue.qsize(),
                            dialog_retry_stage2_id_queue=self.dialog_retry_stage2_id_queue.qsize(),
                            dialogs_stage2_patch_queue=self.dialogs_stage2_patch_queue.qsize(),
                            dialogs_stage2_retry_patch_queue=self.dialogs_stage2_retry_patch_queue.qsize(),
                            dialog_set_complete_queue=self.dialog_set_complete_queue.qsize(),
                            dialog_retry_set_complete_queue=self.dialog_retry_set_complete_queue.qsize()),
                            extra={
                                'competitive_dialogues_queue': self.competitive_dialogues_queue.qsize(),
                                'handicap_competitive_dialogues_queue': self.handicap_competitive_dialogues_queue.qsize(),
                                'dialogs_stage2_put_queue': self.dialogs_stage2_put_queue.qsize(),
                                'dialogs_stage2_retry_put_queue': self.dialogs_stage2_retry_put_queue.qsize(),
                                'dialog_stage2_id_queue': self.dialog_stage2_id_queue.qsize(),
                                'dialog_retry_stage2_id_queue': self.dialog_retry_stage2_id_queue.qsize(),
                                'dialogs_stage2_patch_queue': self.dialogs_stage2_patch_queue.qsize(),
                                'dialogs_stage2_retry_patch_queue': self.dialogs_stage2_retry_patch_queue.qsize(),
                                'dialog_set_complete_queue': self.dialog_set_complete_queue.qsize(),
                                'dialog_retry_set_complete_queue': self.dialog_retry_set_complete_queue.qsize()})
                    counter = 0
                counter += 1
                if forward_worker.dead or (backward_worker.dead and not backward_worker.successful()):
                    self._restart_synchronization_workers()
                    backward_worker, forward_worker = self.jobs
        except KeyboardInterrupt:
            logger.info('Exiting...')
            gevent.killall(self.jobs, timeout=5)
            gevent.killall(self.immortal_jobs.values(), timeout=5)
        except Exception as e:
            logger.exception(e)
            logger.warn("Restarting synchronization", extra=journal_context({"MESSAGE_ID": DATABRIDGE_RESTART}))
Example #59
0
# Suppress warnings when SSL certificates are not verified
requests.packages.urllib3.disable_warnings()

# payloads for dict_mode
payloads.dict_mode_dict = set()
# payloads for crawl_mode
payloads.crawl_mode_dynamic_fuzz_temp_dict = set()
payloads.similar_urls_set = set()
payloads.crawl_mode_dynamic_fuzz_dict = list()
# payload for blast_mode
payloads.blast_mode_custom_charset_dict = list()
# payload for fuzz_mode
payloads.fuzz_mode_dict = list()

# Create the all_tasks queue
tasks.all_task = Queue()
tasks.task_length = 0
tasks.task_count = 0

# MD5 list of soft-404 (fake 404) pages
conf.autodiscriminator_md5 = set()

bar.log = progressbar.ProgressBar()


def saveResults(domain, msg):
    '''
    @description: save results to a file named "<domain>.txt", deduplicating URLs
    @param {domain: target domain, msg: the line to save}
    @return: null
    '''
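
The snippet above is truncated after the docstring. A hypothetical body
matching the documented behaviour (results written to "<domain>.txt", URLs
deduplicated) might look like this; the project's real implementation may
differ:

saved_urls = set()  # assumed module-level dedup cache


def saveResults(domain, msg):
    if msg in saved_urls:  # skip URLs that were already written
        return
    saved_urls.add(msg)
    with open('%s.txt' % domain, 'a') as f:
        f.write(msg + '\n')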
Example #60
0
import gevent
from gevent.queue import Queue

tasks = Queue()


def worker(user):
    while not tasks.empty():
        task = tasks.get()
        print('%s got task %s' % (user, task))
        gevent.sleep(0)
    print('Quitting worker!')


def boss():
    for i in xrange(4):
        tasks.put_nowait(i)


gevent.spawn(boss).join()

gevent.joinall([
    gevent.spawn(worker, 'steve'),
    gevent.spawn(worker, 'john'),
])