Example #1
class XCATMessager(utils.Messager):
    def __init__(self, sock):
        self.sock = sock
        self.sem = BoundedSemaphore(1)

    def _send(self, d):
        buf = json.dumps(d)
        # the one-permit semaphore is a mutex over the socket; 'with'
        # releases it even if sendall() raises
        with self.sem:
            self.sock.sendall(utils.int2bytes(len(buf)) + buf.encode('utf-8'))

    def info(self, msg):
        d = {'type': MSG_TYPE, 'msg': {'type': 'info', 'data': msg}}
        self._send(d)

    def warn(self, msg):
        d = {'type': MSG_TYPE, 'msg': {'type': 'warning', 'data': msg}}
        self._send(d)

    def error(self, msg, node=''):
        d = {'type': MSG_TYPE, 'msg': {'type': 'error', 'node': node, 'data': msg}}
        self._send(d)

    def syslog(self, msg):
        d = {'type': MSG_TYPE, 'msg': {'type': 'syslog', 'data': msg}}
        self._send(d)

    def info_with_host(self, msg):
        d = {'type': MSG_TYPE, 'msg': {'type': 'info_with_host', 'data': msg}}
        self._send(d)

    def update_node_attributes(self, attribute, node, data):
        d = {'type': DB_TYPE, 'attribute': {'name': attribute, 'method': 'set', 'type': 'node', 'node': node, 'value': data}}
        self._send(d)
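
The one-permit semaphore above serializes writes to the shared socket so that length-prefixed frames from concurrent greenlets never interleave. A minimal self-contained sketch of the same pattern, with a hypothetical 4-byte length prefix standing in for utils.int2bytes:

import json
import struct
from gevent.lock import BoundedSemaphore

class FramedSender(object):
    def __init__(self, sock):
        self.sock = sock
        self.sem = BoundedSemaphore(1)  # one permit == mutex over the socket

    def send(self, d):
        buf = json.dumps(d).encode('utf-8')
        # the permit is released even if sendall() raises
        with self.sem:
            self.sock.sendall(struct.pack('!I', len(buf)) + buf)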
Example #2
    def __init__(self, server_node, iface_cls, config):

        self._section_name = utils.get_module(__name__)
        self._logger = logging.getLogger(__name__)
        self._host = server_node.split(":")[0]
        self._port = int(server_node.split(":")[1])
        self._iface_cls = iface_cls

        self._get_conn_timeout = config.getint(self._section_name, "pool_timeout",
                                               default=settings.DEFAULT_POOL_TIMEOUT)
        self._socket_timeout = config.getint(self._section_name, "request_timeout",
                                             default=settings.DEFAULT_REQUEST_TIMEOUT) * 1000
        self._size = config.getint(self._section_name, "pool_size", default=settings.DEFAULT_POOL_SIZE)

        self._c_module_serialize = config.getboolean(self._section_name, "c_module_serialize",
                                                     default=settings.USE_C_MODULE_SERIALIZE)

        self._closed = False
        if ASYNC_TAG:
            from gevent.lock import BoundedSemaphore
            from gevent import queue as Queue
            self._semaphore = BoundedSemaphore(self._size)
            self._connection_queue = Queue.LifoQueue(self._size)
            self._QueueEmpty = Queue.Empty
        else:
            from threading import BoundedSemaphore
            import Queue
            self._semaphore = BoundedSemaphore(self._size)
            self._connection_queue = Queue.LifoQueue(self._size)
            self._QueueEmpty = Queue.Empty
Example #3
 def __init__(self, account_id, num_connections, readonly):
     log.info("Creating Crispin connection pool", account_id=account_id, num_connections=num_connections)
     self.account_id = account_id
     self.readonly = readonly
     self._queue = Queue(num_connections, items=num_connections * [None])
     self._sem = BoundedSemaphore(num_connections)
     self._set_account_info()
Example #4
 def __init__(self, serverIP, Plist):
     self.serverIP = serverIP
     self.baseUrl = "http://%s/" % serverIP
     self.crypt = Account(Plist.userID, Plist.devideToken)
     self.mlkLock = BoundedSemaphore(200)
     self.IID = Plist.iid
     self.VID = Plist.vid
Example #5
 def __init__( self ):
     self._canRead = Event()
     self._canWrite = Event()
     self._mutex = BoundedSemaphore( value = 1 )
     self._readers = 0
     self._isWriting = False
     self._canRead.set()
     self._canWrite.set()
Example #6
class _LeakySemaphore(object):
    def __init__(self, value = 1, maxSeconds = 10):
        self._semaphore = BoundedSemaphore(value)
        self._maxSeconds = maxSeconds
        self._timer = None
        self._leaked = 0
        self._stopped = False

    def _leak(self):
        sleep(self._maxSeconds)
        self._leaked += 1
        self._semaphore.release()

    @property
    def inUse(self):
        return self._semaphore._initial_value - self._semaphore.counter

    @property
    def waiting(self):
        return len(self._semaphore._links)

    def release(self):
        if self._stopped:
            return
        if self._leaked > 0:
            self._leaked -= 1
        else:
            self._semaphore.release()

    def stop(self):
        self._stopped = True

        if self._timer is not None:
            self._timer.kill(block = False)
            self._timer = None

        while self.waiting > 0:
            self._semaphore.release()
            sleep(0.1)

    def acquire(self):
        if self._stopped:
            return
        if self._semaphore.locked() and not self._timer:
            self._timer = spawn(self._leak)
        self._semaphore.acquire(blocking = True, timeout = None)
        if self._timer is not None:
            self._timer.kill(block = False)
            self._timer = None
            if self.waiting > 0:
                self._timer = spawn(self._leak)
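
The class above trades a strict bound for liveness: a greenlet blocked in acquire() is unblocked after maxSeconds by a "leaked" extra permit, and a later release() repays the leak instead of over-releasing the underlying BoundedSemaphore. A usage sketch of the intended sequence:

sem = _LeakySemaphore(value=1, maxSeconds=2)
sem.acquire()   # takes the only permit
sem.acquire()   # blocks; the timer greenlet leaks a permit after ~2 s
sem.release()   # absorbed: repays the leaked permit, semaphore untouched
sem.release()   # a real release of the underlying semaphore
sem.stop()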
Example #7
 def __init__(self, port, data_dir):
     gevent.spawn(self.controller)
     self.state = STATE_READY
     self.workers = {}
     self.jobs_tracker = {}
     self.port = port
     self.data_dir = data_dir
     self.mapper_queue = Queue()
     self.reducer_queue = Queue()
     self.jobs_tracker_lock = BoundedSemaphore(1)
     self.workers_lock = BoundedSemaphore(1)
Example #8
 def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None):
     self.size = size
     self.conn = deque()
     self.lock = BoundedSemaphore(size)
     self.keepalive = keepalive
     # Exceptions list must be in tuple form to be caught properly
     self.exc_classes = tuple(exc_classes)
     for i in range(size):
         self.lock.acquire()
     for i in range(size):
         gevent.spawn_later(self.SPAWN_FREQUENCY*i, self._addOne)
     if self.keepalive:
         gevent.spawn(self._keepalive_periodic)
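
Note how the constructor drains the semaphore completely, so callers block until a connection actually exists; each _addOne call (not shown in this excerpt) is then expected to create one connection and release one permit. A hedged sketch of that counterpart, assuming a _new_connection factory:

def _addOne(self):
    # hypothetical counterpart to the pre-acquire loop above: stock the
    # pool with one connection, then grant one permit to a waiting caller
    conn = self._new_connection()  # assumed factory; not in the excerpt
    self.conn.append(conn)
    self.lock.release()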
Example #9
    def test_concurrent_batching(self):
        lock = BoundedSemaphore(1)
        lock.acquire()  # now 0

        N_CALLS = [0]

        @batched()
        def fn(arg_list):
            N_CALLS[0] += 1
            lock.acquire()

        @batched()
        def fn2(arg_list):
            N_CALLS[0] += 1
            lock.release()

        @batch_context
        def test():
            a, b = spawn(fn), spawn(fn2)
            self.assertEqual(0, N_CALLS[0])
            a.get(), b.get()

        test()  # shouldn't hang.
Example #10
        def acquire(self, blocking=True, timeout=-1):
            # Transform the default -1 argument into the None that our
            # semaphore implementation expects, and raise the same error
            # the stdlib implementation does.
            if timeout == -1:
                timeout = None
            if not blocking and timeout is not None:
                raise ValueError("can't specify a timeout for a non-blocking call")
            if timeout is not None:
                if timeout < 0:
                    # in C: if(timeout < 0 && timeout != -1)
                    raise ValueError("timeout value must be strictly positive")
                if timeout > self._TIMEOUT_MAX:
                    raise OverflowError('timeout value is too large')

            return BoundedSemaphore.acquire(self, blocking, timeout)
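
A quick sketch of the semantics this wrapper enforces, mirroring threading.Semaphore.acquire (the class name here is hypothetical):

sem = CompatSemaphore(1)
sem.acquire(timeout=-1)   # -1 is normalized to None: wait forever
sem.release()
try:
    sem.acquire(blocking=False, timeout=5)  # rejected, as in the stdlib
except ValueError:
    pass
try:
    sem.acquire(timeout=-2)                 # negative and not -1: rejected
except ValueError:
    pass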
Example #11
def test_initial_sync(db, generic_account, inbox_folder, mock_imapclient):
    # We should really be using hypothesis.given() to generate lots of
    # different uid sets, but it's not trivial to ensure that no state is
    # carried over between runs. This will have to suffice for now as a way to
    # at least establish coverage.
    uid_dict = uids.example()
    mock_imapclient.add_folder_data(inbox_folder.name, uid_dict)

    folder_sync_engine = FolderSyncEngine(generic_account.id,
                                          generic_account.namespace.id,
                                          inbox_folder.name,
                                          generic_account.email_address,
                                          'custom', BoundedSemaphore(1))
    folder_sync_engine.initial_sync()

    saved_uids = db.session.query(ImapUid).filter(
        ImapUid.folder_id == inbox_folder.id)
    assert {u.msg_uid for u in saved_uids} == set(uid_dict)

    saved_message_hashes = {u.message.data_sha256 for u in saved_uids}
    assert saved_message_hashes == {
        sha256(v['BODY[]']).hexdigest()
        for v in uid_dict.values()
    }
Example #12
def test_handle_uidinvalid_loops(db, generic_account, inbox_folder,
                                 mock_imapclient, monkeypatch):

    import inbox.mailsync.backends.imap.generic as generic_import

    mock_imapclient.uidvalidity = 1

    # We're using a list here because of weird monkeypatching shenanigans.
    uidinvalid_count = []

    def fake_poll_function(self):
        uidinvalid_count.append(1)
        raise UidInvalid

    monkeypatch.setattr(
        "inbox.mailsync.backends.imap.generic.FolderSyncEngine.poll",
        fake_poll_function)

    uid_dict = uids.example()
    mock_imapclient.add_folder_data(inbox_folder.name, uid_dict)
    inbox_folder.imapfolderinfo = ImapFolderInfo(account=generic_account,
                                                 uidvalidity=1,
                                                 uidnext=1)
    db.session.commit()
    folder_sync_engine = generic_import.FolderSyncEngine(
        generic_account.id, generic_account.namespace.id, inbox_folder.name,
        generic_account.email_address, 'custom', BoundedSemaphore(1))

    folder_sync_engine.state = 'poll'

    db.session.expunge(inbox_folder.imapsyncstatus)

    with pytest.raises(MailsyncDone):
        folder_sync_engine._run_impl()

    assert len(uidinvalid_count) == MAX_UIDINVALID_RESYNCS + 1
Example #13
    def __init__(self):
        # Cached city information
        from gevent.lock import BoundedSemaphore

        self._city_cache = {}
        self._city_cache_lock = BoundedSemaphore(1)
Example #14
    def __init__(self, serviceName, originSecret, isTraceComms=False):
        '''Create a new Service.
        :param serviceName: name identifying this service.
        :param originSecret: shared secret with LimaCharlie to validate origin of requests.
        :param isTraceComms: if True, log all requests and responses (jwt omitted).
        '''
        self._serviceName = serviceName
        self._originSecret = originSecret
        self._startedAt = int(time.time())
        self._lock = BoundedSemaphore()
        self._backgroundStopEvent = gevent.event.Event()
        self._nCallsInProgress = 0
        self._threads = gevent.pool.Group()
        self._detectSubscribed = set()
        self._internalResources = {}
        self._supportedRequestParameters = {}
        self._isTraceComms = isTraceComms

        if self._originSecret is None:
            self.logCritical(
                'Origin verification disabled, this should not be in production.'
            )

        if isinstance(self._originSecret, str):
            self._originSecret = self._originSecret.encode()

        self._handlers = {
            'health': self._health,
            'org_install': self.onOrgInstalled,
            'org_uninstall': self.onOrgUninstalled,
            'detection': self.onDetection,
            'request': self.onRequest,
            'get_resource': self._onResourceAccess,
            'deployment_event': self.onDeploymentEvent,
            'log_event': self.onLogEvent,
            'org_per_1h': self.every1HourPerOrg,
            'org_per_3h': self.every3HourPerOrg,
            'org_per_12h': self.every12HourPerOrg,
            'org_per_24h': self.every24HourPerOrg,
            'org_per_7d': self.every7DayPerOrg,
            'org_per_30d': self.every30DayPerOrg,
            'once_per_1h': self.every1HourGlobally,
            'once_per_3h': self.every3HourGlobally,
            'once_per_12h': self.every12HourGlobally,
            'once_per_24h': self.every24HourGlobally,
            'once_per_7d': self.every7DayGlobally,
            'once_per_30d': self.every30DayGlobally,
            'new_sensor': self.onNewSensor,
            'sensor_per_1h': self.every1HourPerSensor,
            'sensor_per_3h': self.every3HourPerSensor,
            'sensor_per_12h': self.every12HourPerSensor,
            'sensor_per_24h': self.every24HourPerSensor,
            'sensor_per_7d': self.every7DayPerSensor,
            'sensor_per_30d': self.every30DayPerSensor,
            'service_error': self.onServiceError,
        }

        self.log("Starting lc-service v%s (SDK v%s)" %
                 (lcservice_version, limacharlie.__version__))

        self._onStartup()
Example #15
 def __init__(self, phrase=''):
     self.phrase = phrase.lower()
     self.pool = Pool()
     self.sem = BoundedSemaphore(3)
     self.result = None
     self.sql_saver = sqlsaver.SQLSaver()
Example #16
 def __init__(self, link):
     self._sem = Semaphore(1)
     self.link = link
Example #17
class EchoNode:
    def __init__(self, api, token_address):
        assert isinstance(api, RaidenAPI)
        self.ready = Event()

        self.api = api
        self.token_address = token_address

        existing_channels = self.api.get_channel_list(
            api.raiden.default_registry.address,
            self.token_address,
        )

        open_channels = [
            channel_state for channel_state in existing_channels
            if channel.get_status(channel_state) == CHANNEL_STATE_OPENED
        ]

        if len(open_channels) == 0:
            token = self.api.raiden.chain.token(self.token_address)
            if token.balance_of(self.api.raiden.address) <= 0:
                raise ValueError(
                    'not enough funds for echo node %s for token %s' % (
                        pex(self.api.raiden.address),
                        pex(self.token_address),
                    ))
            self.api.token_network_connect(
                self.api.raiden.default_registry.address,
                self.token_address,
                token.balance_of(self.api.raiden.address),
                initial_channel_target=10,
                joinable_funds_target=.5,
            )

        self.last_poll_offset = 0
        self.received_transfers = Queue()
        self.stop_signal = None  # used to signal REMOVE_CALLBACK and stop echo_workers
        self.greenlets = list()
        self.lock = BoundedSemaphore()
        self.seen_transfers = deque(list(), TRANSFER_MEMORY)
        self.num_handled_transfers = 0
        self.lottery_pool = Queue()
        # register ourselves with the raiden alarm task
        self.api.raiden.alarm.register_callback(self.echo_node_alarm_callback)
        self.echo_worker_greenlet = gevent.spawn(self.echo_worker)
        log.info('Echo node started')

    def echo_node_alarm_callback(self, block_number):
        """ This can be registered with the raiden AlarmTask.
        If `EchoNode.stop()` is called, it will give the return signal to be removed from
        the AlarmTask callbacks.
        """
        if not self.ready.is_set():
            self.ready.set()
        log.debug('echo_node callback', block_number=block_number)
        if self.stop_signal is not None:
            return REMOVE_CALLBACK
        else:
            self.greenlets.append(gevent.spawn(self.poll_all_received_events))
            return True

    def poll_all_received_events(self):
        """ This will be triggered once for each `echo_node_alarm_callback`.
        It polls all channels for `EventPaymentReceivedSuccess` events,
        adds all new events to the `self.received_transfers` queue and
        respawns `self.echo_node_worker`, if it died. """

        locked = False
        try:
            with Timeout(10):
                locked = self.lock.acquire(blocking=False)
                if not locked:
                    return
                else:
                    received_transfers = self.api.get_raiden_events_payment_history(
                        token_address=self.token_address,
                        offset=self.last_poll_offset,
                    )

                    # received transfer is a tuple of (block_number, event)
                    received_transfers = [
                        event for event in received_transfers
                        if type(event) == EventPaymentReceivedSuccess
                    ]

                    for event in received_transfers:
                        transfer = copy.deepcopy(event)
                        self.received_transfers.put(transfer)

                    # advance last_poll_offset only after events are enqueued (timeout safe)
                    if received_transfers:
                        self.last_poll_offset += len(received_transfers)

                    if not self.echo_worker_greenlet.started:
                        log.debug(
                            'restarting echo_worker_greenlet',
                            dead=self.echo_worker_greenlet.dead,
                            successful=self.echo_worker_greenlet.successful(),
                            exception=self.echo_worker_greenlet.exception,
                        )
                        self.echo_worker_greenlet = gevent.spawn(
                            self.echo_worker)
        except Timeout:
            log.info('timeout while polling for events')
        finally:
            if locked:
                self.lock.release()

    def echo_worker(self):
        """ The `echo_worker` works through the `self.received_transfers` queue and spawns
        `self.on_transfer` greenlets for all not-yet-seen transfers. """
        log.debug('echo worker', qsize=self.received_transfers.qsize())
        while self.stop_signal is None:
            if self.received_transfers.qsize() > 0:
                transfer = self.received_transfers.get()
                if transfer in self.seen_transfers:
                    log.debug(
                        'duplicate transfer ignored',
                        initiator=pex(transfer.initiator),
                        amount=transfer.amount,
                        identifier=transfer.identifier,
                    )
                else:
                    self.seen_transfers.append(transfer)
                    self.greenlets.append(
                        gevent.spawn(self.on_transfer, transfer))
            else:
                gevent.sleep(.5)

    def on_transfer(self, transfer):
        """ This handles the echo logic, as described in
        https://github.com/raiden-network/raiden/issues/651:

            - for transfers with an amount that satisfies `amount % 3 == 0`, it sends a transfer
            with an amount of `amount - 1` back to the initiator
            - for transfers with a "lucky number" amount `amount == 7` it does not send anything
            back immediately -- after having received "lucky number transfers" from 7 different
            addresses it sends a transfer with `amount = 49` to one randomly chosen one
            (from the 7 lucky addresses)
            - consecutive entries to the lucky lottery will receive the current pool size as the
            `echo_amount`
            - for all other transfers it sends a transfer with the same `amount` back to the
            initiator """
        echo_amount = 0
        if transfer.amount % 3 == 0:
            log.info(
                'ECHO amount - 1',
                initiator=pex(transfer.initiator),
                amount=transfer.amount,
                identifier=transfer.identifier,
            )
            echo_amount = transfer.amount - 1

        elif transfer.amount == 7:
            log.info(
                'ECHO lucky number draw',
                initiator=pex(transfer.initiator),
                amount=transfer.amount,
                identifier=transfer.identifier,
                poolsize=self.lottery_pool.qsize(),
            )

            # obtain a local copy of the pool
            pool = self.lottery_pool.copy()
            tickets = [pool.get() for _ in range(pool.qsize())]
            assert pool.empty()
            del pool

            if any(ticket.initiator == transfer.initiator
                   for ticket in tickets):
                assert transfer not in tickets
                log.debug(
                    'duplicate lottery entry',
                    initiator=pex(transfer.initiator),
                    identifier=transfer.identifier,
                    poolsize=len(tickets),
                )
                # signal the poolsize to the participant
                echo_amount = len(tickets)

            # payout
            elif len(tickets) == 6:
                log.info('payout!')
                # reset the pool
                assert self.lottery_pool.qsize() == 6
                self.lottery_pool = Queue()
                # add new participant
                tickets.append(transfer)
                # choose the winner
                transfer = random.choice(tickets)
                echo_amount = 49
            else:
                self.lottery_pool.put(transfer)

        else:
            log.debug(
                'echo transfer received',
                initiator=pex(transfer.initiator),
                amount=transfer.amount,
                identifier=transfer.identifier,
            )
            echo_amount = transfer.amount

        if echo_amount:
            log.debug(
                'sending echo transfer',
                target=pex(transfer.initiator),
                amount=echo_amount,
                orig_identifier=transfer.identifier,
                echo_identifier=transfer.identifier + echo_amount,
                token_address=pex(self.token_address),
                num_handled_transfers=self.num_handled_transfers + 1,
            )

            self.api.transfer(
                self.api.raiden.default_registry.address,
                self.token_address,
                echo_amount,
                transfer.initiator,
                identifier=transfer.identifier + echo_amount,
            )
        self.num_handled_transfers += 1

    def stop(self):
        self.stop_signal = True
        self.greenlets.append(self.echo_worker_greenlet)
        gevent.joinall(self.greenlets, raise_error=True)
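
Condensing the on_transfer rules into a single function makes the branching easier to see (a sketch that ignores the queue bookkeeping around the lottery pool):

def echo_amount_for(amount, tickets, initiator):
    if amount % 3 == 0:
        return amount - 1                # e.g. 9 is answered with 8
    if amount == 7:
        if any(t.initiator == initiator for t in tickets):
            return len(tickets)          # duplicate entrant: signal pool size
        if len(tickets) == 6:
            return 49                    # seventh distinct entrant: payout
        return 0                         # queued; nothing sent back yet
    return amount                        # everything else echoes unchanged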
Example #18
    class WorkloadRecord:
        def __init__(self, workload_id, workload):
            self.time_start = None
            self.time_last_report = None
            self.workload_id = workload_id
            self.workload = workload
            self.query_count = 0
            self.num_finished_jobs = 0
            self.ready_drivers = 0
            self.dispatch_completed = False
            self.counter_success = 0
            self.counter_failure = 0
            self.statistics = {}
            self.timeline_completion = {}
            self.timeline_failure = {}
            self.logger = logging.getLogger(__name__)
            self.statistics_lock = BoundedSemaphore(1)

        def start(self):
            self.logger.info("* jobs started")
            self.time_start = time.time()

        def get_workload(self):
            return self.workload

        def add_workload(self, num_queries):
            self.query_count += num_queries

        def completed_dispatch(self):
            self.logger.info("@ job dispatch completed")
            self.dispatch_completed = True

        def report_completion(self, report):
            self.time_last_report = time.time()
            self.num_finished_jobs += 1
            item_id = report['item']
            report.pop('item', None)
            # hold the lock only for the dict update; 'with' is exception-safe
            with self.statistics_lock:
                self.statistics[item_id] = report
            timeslot = int(self.time_last_report - self.time_start) + 1
            if report['success']:
                self.counter_success += 1
                if timeslot not in self.timeline_completion:
                    self.timeline_completion[timeslot] = 0
                self.timeline_completion[timeslot] += 1
            else:
                self.counter_failure += 1
                if timeslot not in self.timeline_failure:
                    self.timeline_failure[timeslot] = 0
                self.timeline_failure[timeslot] += 1

        def is_completed(self):
            """jobs may all complete before dispatch finish"""
            self.logger.info("# dispatch completed: %s",
                             self.dispatch_completed)
            self.logger.info("@ num_queries={}, num_finished_jobs={}".format(
                self.query_count, self.num_finished_jobs))
            return self.dispatch_completed and (self.query_count
                                                == self.num_finished_jobs)

        def is_started(self):
            return self.time_start is not None

        def get_report(self):
            return {
                "num_finished_jobs": self.num_finished_jobs,
                "num_successful_jobs": self.counter_success,
                "num_failed_jobs": self.counter_failure,
                "elapsed_time": (self.time_last_report - self.time_start
                                 if self.is_completed()
                                 else time.time() - self.time_start),
            }

        def get_statistics(self):
            self.logger.info("## total reported jobs: {}".format(
                len(self.statistics)))
            return self.statistics

        def get_timeline_completion(self):
            return self.timeline_completion

        def get_timeline_failure(self):
            return self.timeline_failure
Example #19
class LoggerMixin(object):
    def __init__(self):
        from gevent.lock import BoundedSemaphore

        self.__logger_sem = BoundedSemaphore(1)
        self.__logger = None

    def _get_logger(self):
        if not self.__logger:
            try:
                self.__logger_sem.acquire()
                if not self.__logger:
                    self.__logger = self.__init_logger()
            finally:
                self.__logger_sem.release()

        return self.__logger

    logger = property(_get_logger, doc="Get the logger of the engine")

    def log(self, msg, level=logging.INFO):
        self.logger.log(level, msg)

    def __init_logger(self):
        import argparse

        parser = argparse.ArgumentParser()
        parser.add_argument('--quiet', action='store_true')
        parser.add_argument('--log2file', action='store_true')
        parser.add_argument('--debug', action='store_true')
        parser.add_argument('--logpath', type=str)
        args, leftovers = parser.parse_known_args()

        if 'logging' not in dhaulagiri_settings:
            dhaulagiri_settings['logging'] = {}

        if args.log2file:
            dhaulagiri_settings['logging']['write_to_file'] = True
        if args.quiet:
            dhaulagiri_settings['logging']['write_to_stream'] = False
        if args.debug:
            dhaulagiri_settings['logging']['log_level'] = logging.DEBUG
        if args.logpath:
            dhaulagiri_settings['logging']['log_path'] = args.logpath

        import os
        from logging.handlers import TimedRotatingFileHandler
        from logging import StreamHandler, Formatter

        name = getattr(self, 'name', 'general_logger')

        # Set up a specific logger with our desired output level
        from hashlib import md5
        from random import randint
        import sys

        sig = md5('%d' % randint(0, sys.maxint)).hexdigest()[:8]
        logger = logging.getLogger('%s-%s' % (name, sig))

        handler_list = []
        if dhaulagiri_settings['logging']['write_to_stream']:
            handler_list.append(StreamHandler())
        if dhaulagiri_settings['logging']['write_to_file']:
            log_path = os.path.abspath(dhaulagiri_settings['logging']['log_path'])

            try:
                os.mkdir(log_path)
            except OSError:
                pass

            log_file = os.path.normpath(os.path.join(log_path, '%s.log' % name))
            handler = TimedRotatingFileHandler(log_file, when='D', interval=1, encoding='utf-8')
            handler_list.append(handler)

        log_level = dhaulagiri_settings['logging']['log_level']
        formatter = Formatter(fmt='%(asctime)s [%(name)s] [%(threadName)s] %(levelname)s: %(message)s',
                              datefmt='%Y-%m-%d %H:%M:%S%z')

        if not handler_list:
            handler_list.append(logging.NullHandler())
        for handler in handler_list:
            handler.setLevel(log_level)
            handler.setFormatter(formatter)
            logger.addHandler(handler)

        logger.setLevel(log_level)

        return logger
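
_get_logger above is double-checked locking: the unguarded check skips the semaphore on the hot path, and the second check inside the critical section ensures only one greenlet ever runs __init_logger. The same pattern in isolation (a sketch):

from gevent.lock import BoundedSemaphore

_lock = BoundedSemaphore(1)
_instance = None

def get_instance():
    global _instance
    if _instance is None:              # fast path: no locking once built
        with _lock:                    # slow path: serialize construction
            if _instance is None:      # re-check under the lock
                _instance = object()   # stand-in for the real constructor
    return _instance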
Example #20
class CrispinConnectionPool(object):
    """
    Connection pool for Crispin clients.

    Connections in a pool are specific to an IMAPAccount.

    Parameters
    ----------
    account_id : int
        Which IMAPAccount to open up a connection to.
    num_connections : int
        How many connections in the pool.
    readonly : bool
        Is the connection to the IMAP server read-only?
    """
    def __init__(self, account_id, num_connections, readonly):
        log.info('Creating Crispin connection pool for account {} with {} '
                 'connections'.format(account_id, num_connections))
        self.account_id = account_id
        self.readonly = readonly
        self._queue = Queue(num_connections, items=num_connections * [None])
        self._sem = BoundedSemaphore(num_connections)
        self._set_account_info()

    @contextlib.contextmanager
    def get(self):
        """ Get a connection from the pool, or instantiate a new one if needed.
        If `num_connections` connections are already in use, block until one is
        available.
        """
        # A gevent semaphore is granted in the order that greenlets tried to
        # acquire it, so we use a semaphore here to prevent potential
        # starvation of greenlets if there is high contention for the pool.
        # The queue implementation does not have that property; having
        # greenlets simply block on self._queue.get(block=True) could cause
        # individual greenlets to block for arbitrarily long.
        self._sem.acquire()
        client = self._queue.get()
        try:
            if client is None:
                client = self._new_connection()
            yield client
        except CONN_DISCARD_EXC_CLASSES as exc:
            # Discard the connection on socket or IMAP errors. Technically this
            # isn't always necessary, since if you got e.g. a FETCH failure you
            # could reuse the same connection. But for now it's the simplest
            # thing to do.
            log.info('IMAP connection error; discarding connection',
                     exc_info=True)
            if client is not None:
                try:
                    client.logout()
                except:
                    log.error('Error on IMAP logout', exc_info=True)
                client = None
            raise exc
        finally:
            self._queue.put(client)
            self._sem.release()

    def _set_account_info(self):
        with session_scope() as db_session:
            account = db_session.query(Account).get(self.account_id)
            self.sync_state = account.sync_state
            self.provider_info = account.provider_info
            self.email_address = account.email_address
            self.auth_handler = account.auth_handler
            if account.provider == 'gmail':
                self.client_cls = GmailCrispinClient
            elif (getattr(account, 'supports_condstore', None) or
                  account.provider_info.get('condstore')):
                self.client_cls = CondStoreCrispinClient
            else:
                self.client_cls = CrispinClient

    def _new_connection(self):
        try:
            with session_scope() as db_session:
                account = db_session.query(Account).get(self.account_id)
                conn = self.auth_handler.connect_account(account)
                # If we can connect the account, then we can set the state
                # to 'running' if it wasn't already
                if self.sync_state != 'running':
                    self.sync_state = account.sync_state = 'running'
            return self.client_cls(self.account_id, self.provider_info,
                                   self.email_address, conn,
                                   readonly=self.readonly)
        except ValidationError, e:
            log.error('Error validating',
                      account_id=self.account_id,
                      logstash_tag='mark_invalid')
            with session_scope() as db_session:
                account = db_session.query(Account).get(self.account_id)
                account.mark_invalid()
                account.update_sync_error(str(e))
            raise
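
Checking a connection out through the context manager then looks like this (a usage sketch; the account id and pool size are placeholders):

pool = CrispinConnectionPool(account_id=1, num_connections=5, readonly=True)
with pool.get() as client:
    # at most five greenlets hold a client at once, granted in FIFO order;
    # on exit the client goes back to the pool, or a None placeholder does
    # if a socket/IMAP error forced the connection to be discarded
    ...  # use the IMAP client here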
Example #21
namespace = "R:" #sys.argv[2]

prefix = namespace + (("{" + group + "}:") if group else "") + id + ":"
inbox = prefix + "inbox"
#print(inbox)
pipeline = prefix + "pipeline"
channelKey = "__keyspace@0__:" + inbox
resultKey = prefix + "asyncdictionary:"

lua = """
        local result = redis.call('RPOP', KEYS[1])
        -- Redis converts a nil RPOP reply to Lua false, so test truthiness
        if result then
            redis.call('HSET', KEYS[2], KEYS[3], result)
        end
        return result"""
concurrency_semaphore = BoundedSemaphore(max_concurrency)
notification_semaphore = BoundedSemaphore(1)

def release():
    #print("received notification")
    try:
        if concurrency_semaphore.counter == 0:
            concurrency_semaphore.release()
            #print("released semaphore")
    except:
        pass

def notify(m = None):
    try:
        if (m['data'] == 'lpush' or  m['data'] == 'rpush') and notification_semaphore.counter == 0:
            #print("received notification") #  + str(m))
Example #22
class DiscoveryZkClient(object):

    def __init__(self, discServer, zk_srv_ip='127.0.0.1',
                 zk_srv_port='2181', reset_config=False):
        self._reset_config = reset_config
        self._service_id_to_type = {}
        self._ds = discServer
        self._zk_sem = BoundedSemaphore(1)
        self._election = None
        self._restarting = False

        zk_endpts = []
        for ip in zk_srv_ip.split(','):
            zk_endpts.append('%s:%s' %(ip, zk_srv_port))

        # logging
        logger = logging.getLogger('discovery-service')
        logger.setLevel(logging.WARNING)
        handler = logging.handlers.RotatingFileHandler('/var/log/contrail/discovery_zk.log', maxBytes=1024*1024, backupCount=10)
        log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
        handler.setFormatter(log_format)
        logger.addHandler(handler)

        self._zk = kazoo.client.KazooClient(
            hosts=','.join(zk_endpts),
            handler=kazoo.handlers.gevent.SequentialGeventHandler(),
            logger=logger)
        self._logger = logger

        # connect
        self.connect()

        if reset_config:
            self.delete_node("/services", recursive=True)
            self.delete_node("/clients", recursive=True)
            self.delete_node("/election", recursive=True)

        # create default paths
        self.create_node("/services")
        self.create_node("/clients")
        self.create_node("/election")

        self._debug = {
            'subscription_expires': 0,
            'oos_delete': 0,
            'db_excepts': 0,
        }
    # end __init__

    # Discovery server used for syslog, cleanup etc
    def set_ds(self, discServer):
        self._ds = discServer
    # end set_ds

    def is_restarting(self):
        return self._restarting
    # end is_restarting

    # restart
    def restart(self):
        self._zk_sem.acquire()
        self._restarting = True
        self.syslog("restart: acquired lock; state %s " % self._zk.state)
        # initiate restart if our state is suspended or lost
        if self._zk.state != "CONNECTED":
            self.syslog("restart: starting ...")
            try:
                self._zk.stop() 
                self._zk.close() 
                self._zk.start() 
                self.syslog("restart: done")
            except:
                e = sys.exc_info()[0]
                self.syslog('restart: exception %s' % str(e))
        self._restarting = False
        self._zk_sem.release()

    # start 
    def connect(self):
        while True:
            try:
                self._zk.start()
                break
            except gevent.Timeout as e:
                self.syslog(
                    'Failed to connect with Zookeeper -will retry in a second')
                gevent.sleep(1)
            # Zookeeper is also throwing exception due to delay in master election
            except Exception as e:
                self.syslog('%s -will retry in a second' % (str(e)))
                gevent.sleep(1)
        self.syslog('Connected to ZooKeeper!')
    # end

    def start_background_tasks(self):
        # spawn loop to expire subscriptions
        gevent.Greenlet.spawn(self.inuse_loop)

        # spawn loop to expire services
        gevent.Greenlet.spawn(self.service_oos_loop)
    # end

    def syslog(self, log_msg):
        if self._logger is None:
            return
        self._logger.info(log_msg)
    # end

    def get_debug_stats(self):
        return self._debug
    # end

    def _zk_listener(self, state):
        if state == "CONNECTED":
            self._election.cancel()
    # end

    def _zk_election_callback(self, func, *args, **kwargs):
        self._zk.remove_listener(self._zk_listener)
        func(*args, **kwargs)
    # end

    def master_election(self, path, identifier, func, *args, **kwargs):
        self._zk.add_listener(self._zk_listener)
        while True:
            self._election = self._zk.Election(path, identifier)
            self._election.run(self._zk_election_callback, func, *args, **kwargs)
    # end master_election

    def create_node(self, path, value='', makepath=True, sequence=False):
        value = str(value)
        while True:
            try:
                return self._zk.set(path, value)
            except kazoo.exceptions.NoNodeException:
                self.syslog('create %s' % (path))
                return self._zk.create(path, value, makepath=makepath, sequence=sequence)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
    # end create_node

    def get_children(self, path):
        while True:
            try:
                return self._zk.get_children(path)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
            except Exception:
                return []
    # end get_children

    def read_node(self, path):
        while True:
            try:
                data, stat = self._zk.get(path)
                return data, stat
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
            except kazoo.exceptions.NoNodeException:
                self.syslog('exc read: node %s does not exist' % path)
                return (None, None)
    # end read_node

    def delete_node(self, path, recursive=False):
        while True:
            try:
                return self._zk.delete(path, recursive=recursive)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
            except kazoo.exceptions.NoNodeException:
                self.syslog('exc delete: node %s does not exist' % path)
                return None
    # end delete_node

    def exists_node(self, path):
        while True:
            try:
                return self._zk.exists(path)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
    # end exists_node

    def service_entries(self):
        service_types = self.get_children('/services')
        for service_type in service_types:
            services = self.get_children('/services/%s' % (service_type))
            for service_id in services:
                data, stat = self.read_node(
                    '/services/%s/%s' % (service_type, service_id))
                entry = json.loads(data)
                yield(entry)

    def subscriber_entries(self):
        service_types = self.get_children('/clients')
        for service_type in service_types:
            subscribers = self.get_children('/clients/%s' % (service_type))
            for client_id in subscribers:
                cl_entry = self.lookup_client(service_type, client_id)
                if cl_entry:
                    yield((client_id, service_type))
    # end

    def update_service(self, service_type, service_id, data):
        path = '/services/%s/%s' % (service_type, service_id)
        self.create_node(path, value=json.dumps(data), makepath=True)
    # end

    def insert_service(self, service_type, service_id, data):

        # ensure election path for service type exists
        path = '/election/%s' % (service_type)
        self.create_node(path)

        # preclude duplicate service entry
        sid_set = set()

        # prevent background task from deleting node under our nose
        seq_list = self.get_children(path)
        # data for election node is service ID
        for sequence in seq_list:
            sid, stat = self.read_node(
                '/election/%s/%s' % (service_type, sequence))
            if sid is not None:
                sid_set.add(sid)
        if service_id not in sid_set:
            path = '/election/%s/node-' % (service_type)
            pp = self.create_node(
                path, service_id, makepath=True, sequence=True)
            pat = path + "(?P<id>.*$)"
            mch = re.match(pat, pp)
            seq = mch.group('id')
            data['sequence'] = seq
            self.syslog('ST %s, SID %s not found! Added with sequence %s' %
                        (service_type, service_id, seq))
    # end insert_service

    # forget service and subscribers
    def delete_service(self, service_type, service_id, recursive = False):
        #if self.lookup_subscribers(service_type, service_id):
        #    return

        path = '/services/%s/%s' %(service_type, service_id)
        self.delete_node(path, recursive = recursive)

        # delete service node if all services gone
        path = '/services/%s' %(service_type)
        if self.get_children(path):
            return
        self.delete_node(path)
    # end delete_service

    def lookup_service(self, service_type, service_id=None):
        if not self.exists_node('/services/%s' % (service_type)):
            return None
        if service_id:
            data = None
            path = '/services/%s/%s' % (service_type, service_id)
            datastr, stat = self.read_node(path)
            if datastr:
                data = json.loads(datastr)
                clients = self.get_children(path)
                data['in_use'] = len(clients)
            return data
        else:
            r = []
            services = self.get_children('/services/%s' % (service_type))
            for service_id in services:
                entry = self.lookup_service(service_type, service_id)
                r.append(entry)
            return r
    # end lookup_service

    def query_service(self, service_type):
        path = '/election/%s' % (service_type)
        if not self.exists_node(path):
            return None
        seq_list = self.get_children(path)
        seq_list = sorted(seq_list)

        r = []
        for sequence in seq_list:
            service_id, stat = self.read_node(
                '/election/%s/%s' % (service_type, sequence))
            entry = self.lookup_service(service_type, service_id)
            r.append(entry)
        return r
    # end

    # TODO use include_data available in new versions of kazoo
    # tree structure /services/<service-type>/<service-id>
    def get_all_services(self):
        r = []
        service_types = self.get_children('/services')
        for service_type in service_types:
            services = self.lookup_service(service_type)
            r.extend(services)
        return r
    # end

    def insert_client(self, service_type, service_id, client_id, blob, ttl):
        data = {'ttl': ttl, 'blob': blob}

        path = '/services/%s/%s/%s' % (service_type, service_id, client_id)
        self.create_node(path, value=json.dumps(data))

        path = '/clients/%s/%s/%s' % (service_type, client_id, service_id)
        self.create_node(path, value=json.dumps(data), makepath=True)
    # end insert_client

    def lookup_subscribers(self, service_type, service_id):
        path = '/services/%s/%s' % (service_type, service_id)
        if not self.exists_node(path):
            return None
        clients = self.get_children(path)
        return clients
    # end lookup_subscribers

    def lookup_client(self, service_type, client_id):
        try:
            datastr, stat = self.read_node(
                '/clients/%s/%s' % (service_type, client_id))
            data = json.loads(datastr) if datastr else None
        except ValueError:
            self.syslog('raise ValueError st=%s, cid=%s' %(service_type, client_id))
            data = None
        return data
    # end lookup_client

    def insert_client_data(self, service_type, client_id, cldata):
        path = '/clients/%s/%s' % (service_type, client_id)
        self.create_node(path, value=json.dumps(cldata), makepath=True)
    # end insert_client_data

    def lookup_subscription(self, service_type, client_id=None,
                            service_id=None, include_meta=False):
        if not self.exists_node('/clients/%s' % (service_type)):
            return None
        if client_id and service_id:
            try:
                datastr, stat = self.read_node(
                    '/clients/%s/%s/%s'
                    % (service_type, client_id, service_id))
                data = json.loads(datastr)
                blob = data['blob']
                if include_meta:
                    return (blob, stat, data['ttl'])
                else:
                    return blob
            except kazoo.exceptions.NoNodeException:
                return None
        elif client_id:
            # our version of Kazoo doesn't support include_data :-(
            try:
                services = self.get_children(
                    '/clients/%s/%s' % (service_type, client_id))
                r = []
                for service_id in services:
                    datastr, stat = self.read_node(
                        '/clients/%s/%s/%s'
                        % (service_type, client_id, service_id))
                    if datastr:
                        data = json.loads(datastr)
                        blob = data['blob']
                        r.append((service_id, blob, stat))
                # sort services in the order of assignment to this client
                # (based on modification time)
                rr = sorted(r, key=lambda entry: entry[2].last_modified)
                return [(service_id, blob) for service_id, blob, stat in rr]
            except kazoo.exceptions.NoNodeException:
                return None
        else:
            clients = self.get_children('/clients/%s' % (service_type))
            return clients
    # end lookup_subscription

    # delete client subscription. Cleanup path if possible
    def delete_subscription(self, service_type, client_id, service_id):
        path = '/clients/%s/%s/%s' % (service_type, client_id, service_id)
        self.delete_node(path)

        path = '/services/%s/%s/%s' % (service_type, service_id, client_id)
        self.delete_node(path)

        # delete client node if all subscriptions gone
        path = '/clients/%s/%s' % (service_type, client_id)
        if self.get_children(path):
            return
        self.delete_node(path)

        # purge in-memory cache - ideally we are not supposed to know about
        # this
        self._ds.delete_sub_data(client_id, service_type)

        # delete service node if all clients gone
        path = '/clients/%s' % (service_type)
        if self.get_children(path):
            return
        self.delete_node(path)
    # end

    # TODO use include_data available in new versions of kazoo
    # tree structure /clients/<service-type>/<client-id>/<service-id>
    # return tuple (service_type, client_id, service_id)
    def get_all_clients(self):
        r = []
        service_types = self.get_children('/clients')
        for service_type in service_types:
            clients = self.get_children('/clients/%s' % (service_type))
            for client_id in clients:
                services = self.get_children(
                    '/clients/%s/%s' % (service_type, client_id))
                rr = []
                for service_id in services:
                    (datastr, stat, ttl) = self.lookup_subscription(
                        service_type, client_id, service_id, include_meta=True)
                    rr.append(
                        (service_type, client_id, service_id,
                         stat.last_modified, ttl))
                rr = sorted(rr, key=lambda entry: entry[3])
                r.extend(rr)
        return r
    # end get_all_clients

    # reset in-use count of clients for each service
    def inuse_loop(self):
        while True:
            service_types = self.get_children('/clients')
            for service_type in service_types:
                clients = self.get_children('/clients/%s' % (service_type))
                for client_id in clients:
                    services = self.get_children(
                        '/clients/%s/%s' % (service_type, client_id))
                    for service_id in services:
                        path = '/clients/%s/%s/%s' % (
                            service_type, client_id, service_id)
                        datastr, stat = self.read_node(path)
                        data = json.loads(datastr)
                        now = time.time()
                        exp_t = stat.last_modified + data['ttl'] +\
                            disc_consts.TTL_EXPIRY_DELTA
                        if now > exp_t:
                            self.delete_subscription(
                                service_type, client_id, service_id)
                            self.syslog(
                                'Expiring st:%s sid:%s cid:%s'
                                % (service_type, service_id, client_id))
                            self._debug['subscription_expires'] += 1
            gevent.sleep(10)

    def service_oos_loop(self):
        if self._ds._args.hc_interval <= 0:
            return

        while True:
            for entry in self.service_entries():
                if not self._ds.service_expired(entry, include_down=False):
                    continue
                service_type = entry['service_type']
                service_id   = entry['service_id']
                path = '/election/%s/node-%s' % (
                    service_type, entry['sequence'])
                if not self.exists_node(path):
                    continue
                self.syslog('Deleting sequence node %s for service %s:%s' %
                        (path, service_type, service_id))
                self.delete_node(path)
                entry['sequence'] = -1
                self.update_service(service_type, service_id, entry)
                self._debug['oos_delete'] += 1
            gevent.sleep(self._ds._args.hc_interval)
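
Every ZooKeeper accessor above repeats the same retry loop around SessionExpiredError/ConnectionLoss. The boilerplate could equally be factored into a decorator (a sketch, not part of the source):

import kazoo.exceptions

def zk_retry(method):
    # re-run the kazoo call after restarting the session, exactly as the
    # inline while-loops above do
    def wrapper(self, *args, **kwargs):
        while True:
            try:
                return method(self, *args, **kwargs)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
    return wrapper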
Example #23
                      dest="password",
                      type="string",
                      help="password to login on the OVC api")
    parser.add_option("-e",
                      "--env",
                      dest="environment",
                      type="string",
                      help="environment to login on the OVC api")
    parser.add_option("-n",
                      "--con",
                      dest="concurrency",
                      default=2,
                      type="int",
                      help="amount of concurrency to execute the job")
    parser.add_option("-a",
                      "--application_id",
                      dest="application_id",
                      help="itsyouonline Application Id")
    parser.add_option("-s",
                      "--secret",
                      dest="secret",
                      help="itsyouonline Secret")

    (options, args) = parser.parse_args()
    if not options.username or not options.environment:
        parser.print_usage()
    else:
        concurrency = BoundedSemaphore(options.concurrency)
        gevent.signal(signal.SIGQUIT, gevent.kill)
        main(options)
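
A minimal sketch of the same idea, assuming a hypothetical run_job task: the
semaphore caps how many greenlets do real work at once.

import gevent
from gevent.lock import BoundedSemaphore

concurrency = BoundedSemaphore(2)   # at most two jobs in flight

def run_job(i):                     # hypothetical job body
    with concurrency:               # blocks while two jobs are already running
        gevent.sleep(0.1)           # stand-in for real work
        print('job %d done' % i)

gevent.joinall([gevent.spawn(run_job, i) for i in range(10)])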
Пример #29
0
class ProcessorEngine(LoggerMixin):
    name = 'processor_engine'

    # Singleton
    __lock = BoundedSemaphore(1)

    __instance = None

    def _get_redis_cli(self):
        return self._redis_client

    redis_cli = property(_get_redis_cli)

    @classmethod
    def get_instance(cls):
        if not cls.__instance:
            cls.__lock.acquire()
            try:
                if not cls.__instance:
                    cls.__instance = ProcessorEngine()
            finally:
                cls.__lock.release()

        return cls.__instance

    @staticmethod
    def reg_processors(proc_dir=None):
        """
        将processors路径下的processor类进行注册
        """
        import os
        import imp
        import types

        if not proc_dir:
            root_dir = os.path.normpath(os.path.split(__file__)[0])
            proc_dir = os.path.normpath(os.path.join(root_dir, 'processors'))

        processor_dict = {}

        for cur, d_list, f_list in os.walk(proc_dir):
            for f in f_list:
                f = os.path.normpath(os.path.join(cur, f))
                tmp, ext = os.path.splitext(f)
                if ext != '.py':
                    continue
                p, fname = os.path.split(tmp)

                try:
                    ret = imp.find_module(fname, [p]) if p else imp.find_module(fname)
                    mod = imp.load_module(fname, *ret)

                    for attr_name in dir(mod):
                        try:
                            target_cls = getattr(mod, attr_name)
                            name = getattr(target_cls, 'name')
                            func = getattr(target_cls, 'run')
                            if isinstance(name, str) and isinstance(func, types.MethodType):
                                processor_dict[name] = target_cls
                            else:
                                continue
                        except (TypeError, AttributeError):
                            pass
                except ImportError:
                    print 'Import error: %s' % fname
                    raise

        return processor_dict

    @staticmethod
    def parse_tracking():
        # Base argument parser
        import argparse

        parser = argparse.ArgumentParser()
        parser.add_argument('--track', action='store_true')
        # Expiry period for task tracking. Supported formats: 1d, 1h, 1m, 1s
        parser.add_argument('--track-exp', default=None, type=str)
        args, leftover = parser.parse_known_args()

        tracker_settings = dhaulagiri_settings['task_tracker']
        tracker_settings['tracking'] = args.track
        if not tracker_settings['tracking']:
            return

        if args.track_exp:
            match = re.search(r'([\d\.]+)(\w)', args.track_exp)
            val = float(match.group(1))
            unit = match.group(2)

            expire = None
            if unit == 'd':
                expire = val * 3600 * 24
            elif unit == 'h':
                expire = val * 3600
            elif unit == 'm':
                expire = val * 60
            elif unit == 's':
                expire = val

            if expire:
                tracker_settings['expire'] = expire

        return TaskTrackerFactory.get_instance()

    def __init__(self):
        from utils import load_yaml

        self.settings = load_yaml()

        LoggerMixin.__init__(self)

        self._redis_client = RedisClient('engine')

        # Obtain the TaskTracker
        self.task_tracker = self.parse_tracking()

        self.request = RequestHelper.from_engine(self)
        self.middleware_manager = MiddlewareManager.from_engine(self)

        self.processor_store = self.reg_processors()
        self.processors = {}

        self.log('Engine init completed')

    @staticmethod
    def _init_redis():
        import redis
        from utils import load_yaml

        cfg = load_yaml()
        redis_conf = filter(lambda v: v['profile'] == 'task-track', cfg['redis'])[0]
        host = redis_conf['host']
        port = int(redis_conf['port'])

        return redis.StrictRedis(host=host, port=port, db=0)

    def add_processor(self, name):
        if name not in self.processor_store:
            self.logger.critical('Cannot find processor: %s' % name)
            return

        processor = self.processor_store[name].from_engine(self)

        if name not in self.processors:
            self.processors[name] = []
        self.processors[name].append(processor)
        self.log('Added processor %s' % name)

    def start(self):
        self.log('Starting engine...')

        for processor_list in self.processors.values():
            for processor in processor_list:
                self.log('Starting processor %s' % processor.name)
                processor.run()
                self.log('Cleaning up processor %s' % processor.name)

        self.log('Cleaning up engine...')
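
The get_instance above is the classic double-checked-locking singleton; a
standalone sketch of the pattern (the class name is illustrative):

from gevent.lock import BoundedSemaphore

class Engine(object):
    __lock = BoundedSemaphore(1)
    __instance = None

    @classmethod
    def get_instance(cls):
        if not cls.__instance:          # fast path, no lock taken
            with cls.__lock:            # slow path: serialize creation
                if not cls.__instance:  # re-check after acquiring
                    cls.__instance = cls()
        return cls.__instance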
Пример #30
0
class ConnectionPool(object):
    """dynamic service connection pool"""

    def __init__(self, server_node, iface_cls, config):

        self._section_name = utils.get_module(__name__)
        self._logger = logging.getLogger(__name__)
        self._host = server_node.split(":")[0]
        self._port = int(server_node.split(":")[1])
        self._iface_cls = iface_cls

        self._get_conn_timeout = config.getint(self._section_name, "pool_timeout",
                                               default=settings.DEFAULT_POOL_TIMEOUT)
        self._socket_timeout = config.getint(self._section_name, "request_timeout",
                                             default=settings.DEFAULT_REQUEST_TIMEOUT) * 1000
        self._size = config.getint(self._section_name, "pool_size", default=settings.DEFAULT_POOL_SIZE)

        self._c_module_serialize = config.getboolean(self._section_name, "c_module_serialize",
                                                     default=settings.USE_C_MODULE_SERIALIZE)

        self._closed = False
        if ASYNC_TAG:
            from gevent.lock import BoundedSemaphore
            from gevent import queue as Queue
            self._semaphore = BoundedSemaphore(self._size)
            self._connection_queue = Queue.LifoQueue(self._size)
            self._QueueEmpty = Queue.Empty
        else:
            from threading import BoundedSemaphore
            import Queue
            self._semaphore = BoundedSemaphore(self._size)
            self._connection_queue = Queue.LifoQueue(self._size)
            self._QueueEmpty = Queue.Empty

    def close(self):
        self._closed = True
        while not self._connection_queue.empty():
            try:
                conn = self._connection_queue.get(block=False)
                try:
                    self._close_connection(conn)
                except:
                    pass
            except self._QueueEmpty:
                pass

    def _create_connection(self):
        self._logger.debug("create a new connection ip:%s port:%s" % (self._host, self._port))
        socket_ = TSocket.TSocket(self._host, self._port)
        if self._socket_timeout > 0:
            socket_.setTimeout(self._socket_timeout)
        transport = TTransport.TBufferedTransport(socket_)
        if self._c_module_serialize:
            protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
        else:
            protocol = TBinaryProtocol.TBinaryProtocol(transport)
        connection = self._iface_cls(protocol)
        transport.open()
        return connection

    def _close_connection(self, conn):
        try:
            conn._iprot.trans.close()
        except:
            pass
        try:
            conn._oprot.trans.close()
        except:
            pass

    def get_connection(self):
        """ get a connection from the pool. This blocks until one is available."""
        self._semaphore.acquire()
        if self._closed:
            self._semaphore.release()
            raise RuntimeError('connection pool closed')
        try:
            return self._connection_queue.get(block=False)
        except self._QueueEmpty:
            try:
                return self._create_connection()
            except Exception:
                self._semaphore.release()
                raise

    def return_connection(self, conn):
        """ return a connection to the pool."""
        if self._closed:
            self._close_connection(conn)
            return
        self._connection_queue.put(conn)
        self._semaphore.release()

    def release_connection(self, conn):
        """ call when the connection is no longer usable"""
        try:
            self._close_connection(conn)
        except:
            pass
        if not self._closed:
            self._semaphore.release()

    def release_all_connection(self):
        """ call when all connections in the pool are no longer usable"""
        while not self._connection_queue.empty():
            try:
                conn = self._connection_queue.get(block=False)
                try:
                    self._close_connection(conn)
                except:
                    pass
            except self._QueueEmpty:
                pass
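
Callers of this pool must pair every get_connection with exactly one
return_connection or release_connection, or a semaphore slot leaks; a minimal
usage sketch, assuming a pool constructed as above (the ping() call is a
hypothetical RPC on the thrift client):

conn = pool.get_connection()
try:
    conn.ping()                    # hypothetical call on the checked-out client
except Exception:
    pool.release_connection(conn)  # connection broken: close it, free the slot
    raise
else:
    pool.return_connection(conn)   # healthy: hand it back for reuse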
Пример #31
0
    -h                  show help

Example:
    dirbuster.py http://sh3ll.me/www/ PHP.txt
    dirbuster.py http://sh3ll.me/www/ PHP.txt -t 30

"""
from gevent import monkey
from gevent.lock import BoundedSemaphore
from gevent.pool import Pool
monkey.patch_all()

import requests
from docopt import docopt

lock = BoundedSemaphore()
results = []


def judge(url, keyword=None):
    """
    判断是否存在文件
    :param url: 链接
    :type url: str
    :param keyword: 404 页面关键字
    :type keyword: str
    """
    headers = {
        "User-Agent":
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1)"
        " AppleWebKit/537.36 (KHTML, like Gecko) Chrome/"
Пример #32
0
class GearSheetPlugin(Plugin):
    session = ""
    conn = None
    WEAPON_TALENTS = 'weapontalents'
    PLAYER_TALENTS = 'playertalents'
    GEAR_TALENTS = 'geartalents'
    GEAR_SETS = 'gearsets'
    WEAPONS = 'weapons'
    WEAPON_MODS = 'weaponmods'
    EXOTIC_GEARS = 'exoticgears'
    GEAR_ATTRIBUTES = 'gearattributes'

    names = {}
    vendor_names = {}
    logger = None
    lock = None

    def __init__(self, bot, config):
        super().__init__(bot, config)

        print('Logging in to backend api...')
        login_params = json.dumps({'username': '******', 'password': '******', 'appcode': 'gearsheet'})
        conn = http.client.HTTPConnection("localhost:9000")

        conn.request('POST', '/login', login_params, {'Content-Type': 'application/json'})
        login_response = conn.getresponse()
        login_response = json.loads(login_response.read().decode('utf-8'))

        if login_response['result'] != 'ok':
            print("Login to baasbox failed")
            return

        print('Login successful.')
        self.session = login_response['data']["X-BB-SESSION"]

        # get a list of all indexed names
        params = urllib.parse.urlencode({'fields': 'name'})
        conn.request('GET', '/document/indexes?%s' % params, headers={'X-BB-SESSION': self.session})
        res = json.loads(conn.getresponse().read().decode('utf-8'))
        self.names = {i['name'] for i in res['data']}

        conn.close()

        vendors_param = { "fields": "name" }
        response = requests.get(BACKEND_HOST + '/document/vendors-indexes', params=vendors_param, headers={SESSION_HEADER: self.session})
        self.vendor_names = {i['name'] for i in response.json()['data']}

        # init logging
        self.logger = logging.getLogger('gearsheet_bot')
        self.logger.setLevel(logging.INFO)

        fh = logging.FileHandler('access.log')
        fh.setLevel(logging.INFO)

        formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')
        fh.setFormatter(formatter)

        self.logger.addHandler(fh)
        
        self.lock = BoundedSemaphore(1)

        #print(self.state.guilds)

    #in a plugin class
    @Plugin.listen('Ready')
    def on_ready(self, event):
        self.client.update_presence(Status.online, Game(type=GameType.default, name='!help for guide'))

    @Plugin.command('help')
    def command_help(self, event):
        skip = { 264445053596991498 : "discord bot list"}
                                
        if event.guild.id in skip.keys():
            return
        
        self.log_it(event, "", "help")
        event.msg.reply('Usage Guide <https://docs.google.com/document/d/1G1d1oj0qDbv6yf7EOEkGAHN40OqsdkXRrxCHo_3QC2w/view>')

    @Plugin.command('ping')
    def command_ping(self, event):
        event.msg.reply("Pong!")

    @Plugin.command('servers')
    def command_servers(self, event):
        if event.author.id in [195168390476726272]:
            event.msg.reply("I am currently installed on %s servers" % len(self.state.guilds))
    
    @Plugin.command('nicknames')
    def command_nicknames(self, event):
        self.log_it(event, "", "nicknames")
        event.msg.reply("<https://goo.gl/Brh1TF>")

    @Plugin.command('add')
    def command_add(self, event):
        pass

    def log_it(self, event, param, command):
        if event.author.id not in [195168390476726272]:
            self.logger.info("%s - %s - %s - %s - %s" % (command, str(event.author).replace(" ", "_"), event.guild.name.replace(" ", "_"), event.guild.id, param))

    @Plugin.command('g')
    @Plugin.command('s')
    @Plugin.command('sheet')
    @Plugin.command('gearsheet')
    def command_talents(self, event):
        if len(event.args) > 0:
            param = ' '.join(event.args).lower()

            if param == 'help':
                help_text = '''I can only perform simple searches for **The Division** related items\n
Example: to find out what *Responsive* talent does, use `!gearsheet responsive`\n
Popular community nicknames for items are also supported.\n
**PRO TIP**: `!sheet responsive` will also work.

My reddit thread: <https://goo.gl/638vpi>.

**Credit** to @Pfftman#6620 | /u/pfftman | PSN: pfftman'''

                self.log_it(event, param, "gearsheet")
                event.msg.reply(help_text)
                return

            if param in util.aliases.keys():
                param = util.aliases[param].lower()
            
            if param in scopes and param != 'weapons':
                self.log_it(event, param, "gearsheet")
                query = {
                    'fields': 'name',
                    'orderBy': 'name'
                }
                names = requests.get(BACKEND_HOST + "/document/%s" % param, query, headers={SESSION_HEADER: self.session}).json()
                # print(names)
                name_list = ['`' + i["name"] + '`' for i in names["data"]]

                event.msg.reply('there are **%s items**' % len(name_list))
                event.msg.reply(",  ".join(name_list))

                return
            
            # start_time = time.time()
            conn = http.client.HTTPConnection("localhost:9000")
            conn.request('GET', '/plugin/bot.index?%s' % (urllib.parse.urlencode({"param": param})),
                            headers={'X-BB-SESSION': self.session})

            response = conn.getresponse().read().decode('utf-8')
            conn.close()
            # time_diff = time.time() - start_time

            
            self.log_it(event, param, "gearsheet")

            response = json.loads(response)
            if response['result'] != 'ok':
                matches = [("**%s**" % i).title() for i in self.names if fuzz.partial_ratio(param, i) > 80]

                if len(matches) > 0:
                    match_str = "this %s" % ', '.join(matches) if len(matches) == 1 else \
                        "any of these %s" % ', '.join(matches)
                    text = "Did you mean to search for %s?" % \
                           match_str
                    event.msg.reply('%s' % text)
                else:
                    event.msg.reply('```item not found```')

                return

            for item in response['data']:
                collection_name = item['@class']

                embed = None
                if collection_name == self.WEAPON_TALENTS:
                    embed = self.render_weapon_talent(item)
                elif collection_name == self.PLAYER_TALENTS:
                    embed = self.render_player_talent(item)
                elif collection_name == self.GEAR_TALENTS:
                    embed = self.render_gear_talent(item)
                elif collection_name == self.GEAR_SETS:
                    embed = self.render_gearset(item)
                elif collection_name == self.WEAPONS:
                    embed = self.render_weapon(item)
                elif collection_name == self.WEAPON_MODS:
                    embed = self.render_weapon_mods(item)
                elif collection_name == self.EXOTIC_GEARS:
                    embed = self.render_exotic_gear(item)
                elif collection_name == self.GEAR_ATTRIBUTES:
                    embed = self.render_gear_attribute(item)

                event.msg.reply(embed=embed)

    @Plugin.command('v')
    @Plugin.command('vendors')
    @Plugin.command('vendor')
    def command_vendors(self, event):
        if len(event.args) > 0:
            param = ' '.join(event.args).lower()
            splitted = param.strip().split(" with ")

            # handle requests from weapons in plural form
            if splitted[0] in weapon_types:
                temp = splitted[0]
                splitted[0] = temp[:-1]
            
            if event.author.id not in [195168390476726272]:
                self.log_it(event, param, "vendors")

            # routines related to update
            if param.strip(' ') == 'update': #pfft              #ruben              #noots              #ahmad
                if event.author.id in { 195168390476726272, 177627571700105217, 196408555132289024, 99511296604520448 }:
                    self.handle_update(event)
                else:
                    event.msg.reply("Haha! no")
                return
            
            if param.strip(' ') == 'status':
                reply = self.render_status_command()
                event.msg.reply(embed=reply)
                return
            
            arg = None
            param_obj = None
            
            for i, item in enumerate(splitted): # check if there is already a nickname
                # start with the vendor aliases and fallback to the gearsheet aliases
                if item in util.vendor_aliases.keys():
                    splitted[i] = util.vendor_aliases[item].lower()
            
            # determine the kind of request to send to the server
            query = splitted[0]    
            if len(splitted) == 1: # this block takes care of args without 'with'
                param_obj = {
                    "param": splitted[0],
                    "has_arg": False
                }
            elif len(splitted) >= 2:
                arg = splitted[1]
                param_obj = {
                    "param": splitted[0],
                    "has_arg": True,
                    "arg": splitted[1] # take only one argument
                }
            else:
                event.msg.reply('```You shouldn\'t be able to get here. Yet.. ```')
                return

            header = {SESSION_HEADER: self.session}
            response = requests.get(BACKEND_HOST + '/plugin/vendors.index', params=param_obj, headers=header)

            if response.json()['result'] != 'ok': # item not found in vendors list
                # try to determine if it was a bad input from user or an item that doesn't exist
                self.reply_item_not_found(query, event)
                return
            
            data = remove_duplicates(response.json()["data"])
            data = sorted(data, key=lambda item: item['name'])
            embed = None

            if len(data) > 1:
                embed = self.render_multiple_items(data)
            else:
                for item in data:
                    collection = item['@class']

                    if collection == "vendors-%s" % VENDOR_WEAPONS:
                        embed = self.render_vendor_weapon(item)
                    elif collection == get_collection_name(VENDOR_GEAR):
                        embed = self.render_vendor_gear(item)
                    elif collection == get_collection_name(VENDOR_GEAR_MODS):
                        embed = self.render_vendor_gear_mod(item)
                    elif collection == get_collection_name(VENDOR_WEAPON_MODS):
                        embed = self.render_vendor_weapon_mod(item)
            
            if embed is not None:
                event.msg.reply(embed=embed)
        else:
            event.msg.reply('<http://rubenalamina.mx/the-division-weekly-vendor-reset/>')
    
    def render_status_command(self):
        param = {
            "orderBy": "time desc",
            "recordsPerPage": 1,
            "page": 0
        }

        res = requests.get(BACKEND_HOST + '/document/vendors-update', params=param, headers={SESSION_HEADER: self.session}).json()
        info = res['data'][0]
        
        today = arrow.utcnow()
        reset_text = ""

        if today.weekday() == 5:
            reset_text = "in 6 days"
        else:
            temp = today.shift(weekday=5)
            reset_text = temp.humanize()
        
        last_updated = arrow.Arrow.fromtimestamp(info['time'])

        embed = MessageEmbed()
        embed.title = "Last Updated %s" % last_updated.humanize()
        embed.description = "by %s" % info['updater']
        
        embed.add_field(name='Next Vendor Reset (in game)', value=reset_text, inline=True)

        return embed

    def reply_item_not_found(self, query, event):
        pieces = ["vest", "backpack", "mask", "gloves", "knee pads", "holster"]
        temp = [i for i in pieces if (" " + i) in query]

        if len(temp) > 0:
            gear_piece = query.replace(" " + temp[0], "").strip()
            if gear_piece in self.names:
                event.msg.reply('Sorry, no gearset or highend item like that this week')
            else:
                event.msg.reply('Are you sure %s exists?' % query)
        elif query in ["performance mod", "stamina mod", "electronics mod", "weapon mod"]:
            event.msg.reply('Sorry, no mod like that this week')

        elif query in util.aliases or query in self.names or query in self.vendor_names:
            event.msg.reply("Sorry, no item like that this week")

        else:
            event.msg.reply("Are you sure this item exists?")


    def handle_update(self, event):
        # acquire without blocking so concurrent updates are rejected instead
        # of queueing behind the running one (checking .locked() first races)
        if self.lock.acquire(blocking=False):
            start_time = time.time()
            try:
                event.msg.reply("Vendors update initiated by Master @%s" % (str(event.author)))
                vendors.update()

                # log the update in the db
                info = {
                    "updater": str(event.author),
                    "time": int(time.time()),
                    "server": event.guild.name,
                    "server_id": event.guild.id
                }

                requests.post(BACKEND_HOST + "/document/vendors-update", json=info, headers={SESSION_HEADER: self.session})
            finally:
                # release lock
                self.lock.release()

            duration = time.time() - start_time
            event.msg.reply("Update done. Duration: `{0:.2f}s`".format(duration))
        else:
            event.msg.reply("update is already running")


    def render_multiple_items(self, items):
        embed = MessageEmbed()
        embed.description = "found in %s items" % len(items)
        embed.color = 0xDA9513

        for item in items:
            collection = item["@class"]

            if collection == get_collection_name(VENDOR_WEAPONS):
                talents = " **-** ".join([ i for i in [item['talent1'], item['talent2'], item['talent3']] if i.strip() != "-"])
                body = '''`%s`  |  **%s** DMG  |  %s''' % (item["vendor"], item['dmg'], talents.strip())

                embed.add_field(name=item["name"], value=body)
            elif collection == get_collection_name(VENDOR_GEAR):
                major_attrs = item["major"].strip().strip("-").split("<br/>")
                minor_attrs = item["minor"].strip().strip("-").split("<br/>")

                main_stats = []
                if (item['fire'].strip().strip('-')):
                    main_stats.append("**Firearms:** %s" % item['fire'])
                if (item['stam'].strip().strip('-')):
                    main_stats.append("**Stamina:** %s" % item['stam'])
                if (item['elec'].strip().strip('-')):
                    main_stats.append("**Electronics:** %s" % item['elec'])

                all_attrs = "  **|**  ".join(main_stats + [i for i in major_attrs + minor_attrs if i != ""])
                
                body = "`%s`  |  %s" % (item["vendor"], all_attrs)

                embed.add_field(name=item["name"], value=body)
            elif collection == get_collection_name(VENDOR_GEAR_MODS):
                attr = item["attribute"]

                body = "`%s` | %s | %s" % (item["vendor"], item['stat'], attr)
                embed.add_field(name=item['name'], value=body)
            elif collection == get_collection_name(VENDOR_WEAPON_MODS):
                attrs = item['attributes'].split("<br/>")

                attrs_str = " **|** ".join([i for i in attrs[:3]])

                body = "`%s` | %s " % (item["vendor"], attrs_str)
                embed.add_field(name=item["name"], value=body)
            else: return None
        
        return embed

    def render_vendor_weapon(self, weapon):
        embed = MessageEmbed()

        embed.title = weapon['name']
        embed.description = weapon['vendor']
        # embed.add_field(name='Vendor', value=weapon['vendor'], inline=True)
        embed.add_field(name='Price', value=weapon['price'], inline=True)
        embed.add_field(name="Damage", value=weapon['dmg'], inline=True)
        embed.add_field(name='Bonus', value=weapon['bonus'], inline=True)
        
        talents = " **-** ".join([ i for i in [weapon['talent1'], weapon['talent2'], weapon['talent3']] if i.strip() != "-"])
        embed.add_field(name='Talents', value=talents)
        
        if weapon['type'] == 'exotic':
            embed.color = 0xCF5A2E
        else:
            embed.color = 0xDA9513

        return embed
    
    def render_vendor_gear_mod(self, gearmod):
        embed = MessageEmbed()

        embed.title = gearmod['name']
        embed.description = gearmod['vendor']

        embed.add_field(name='Price', value=gearmod['price'], inline=True)
        embed.add_field(name='Stat', value=gearmod['stat'], inline=True)
        embed.add_field(name='Attribute', value=gearmod['attribute'])

        if gearmod['type'] == 'purple-mod':
            embed.color = 0x993D78
        else:
            embed.color = 0xDA9513
        
        return embed
    
    def render_vendor_weapon_mod(self, weaponmod):
        embed = MessageEmbed()

        embed.title = weaponmod['name']
        embed.description = weaponmod['vendor']

        embed.add_field(name='Price', value=weaponmod['price'], inline=True)
        # embed.add_field(name='Stat', value=weaponmod[''], inline=True)
        attr = " **-** ".join(weaponmod["attributes"].split('<br/>'))
        embed.add_field(name='Attribute', value=attr)
        embed.color = 0xDA9513

        return embed

    def render_vendor_gear(self, gear):
        embed = MessageEmbed()

        embed.title = gear['name']
        embed.description = gear['vendor']

        embed.add_field(name='Price', value=gear['price'], inline=True)
        embed.add_field(name='Armor', value=gear['armor'], inline=True)
        embed.add_field(name="Gearscore", value=gear['score'], inline=True)

        if (gear['fire'].strip().strip('-')):
            embed.add_field(name='Firearms', value=gear['fire'], inline=True)
        if (gear['stam'].strip().strip('-')):
            embed.add_field(name='Stamina', value=gear['stam'], inline=True)
        if (gear['elec'].strip().strip('-')):
            embed.add_field(name='Electronics', value=gear['elec'], inline=True)
        
        major_attr = "  **|**  ".join(gear["major"].strip().strip("-").split("<br/>"))
        minor_attr = "  **|**  ".join(gear["minor"].strip().strip("-").split("<br/>"))

        if major_attr:
            embed.add_field(name='Major Attribute(s)', value=major_attr, inline=True)
        
        if minor_attr:
            embed.add_field(name='Minor Attribute(s)', value=minor_attr, inline=True)
        
        if gear['rarity'] == 'header-he':
            embed.color = 0xDA9513
        else:
            embed.color = 0x07C973
        
        return embed
    
    def render_weapon_talent(self, talent):
        embed = MessageEmbed()
        # embed.set_author(name='GearSheet')

        embed.title = talent['name']
        embed.description = talent['description']

        req = talent['requirements']['34']
        req_str = '**electronics**: %s, **firearms**: %s, **stamina**: %s' % \
                  ('none' if req['electronics'] == 0 else req['electronics'],
                   'none' if req['firearms'] == 0 else req['firearms'],
                   'none' if req['stamina'] == 0 else req['stamina'])

        embed.add_field(name='Rolls On', value=', '.join(talent['rollsOn']), inline=True)
        embed.add_field(name='Requirements', value=req_str, inline=True)

        if 'note' in talent.keys():
            embed.set_footer(text=talent['note'])

        embed.color = 0xDA9513

        return embed

    def render_player_talent(self, talent):
        embed = MessageEmbed()

        embed.title = talent['name']
        embed.description = talent['description']

        embed.add_field(name='Type', value=talent['type'], inline=True)
        embed.add_field(name='Benefits', value=talent['benefit'], inline=True)

        embed.color = 0xDA9513

        return embed

    def render_gear_talent(self, talent):
        embed = MessageEmbed()

        embed.title = talent['name']
        embed.description = talent['description']

        embed.set_footer(text='Rolls on %s' % talent['slot'])

        embed.color = 0xDA9513

        return embed

    def render_gearset(self, gearset):
        embed = MessageEmbed()

        embed.title = gearset['name']

        embed.add_field(name='2 piece bonus', value=gearset['2'], inline=True)
        embed.add_field(name='3 piece bonus', value=gearset['3'], inline=True)
        embed.add_field(name='4 piece bonus', value=gearset['4'])
        embed.add_field(name='5 piece bonus', value=gearset['5'])
        embed.add_field(name='6 piece bonus', value=gearset['6'])

        embed.set_footer(text="added in patch %s" % gearset['patch'])
        embed.color = 52377

        return embed

    def render_weapon(self, weapon):
        self.normalize(weapon)
        embed = MessageEmbed()

        embed.title = weapon['name']

        embed.add_field(name='Type', value=weapon['type'], inline=True)
        embed.add_field(name='Variant', value=weapon['variant'], inline=True)
        embed.add_field(name='RPM', value=weapon['rpm'], inline=True)

        embed.add_field(name='Scaling', value=weapon['scaling'], inline=True)
        embed.add_field(name='Magazine Size', value=weapon['MagSize'], inline=True)
        embed.add_field(name='Optimal Range(m)', value=weapon['OptimalRange'], inline=True)

        embed.add_field(name='Reload Speed(ms)', value=weapon['ReloadSpeed'], inline=True)
        embed.add_field(name='Headshot Multiplier', value=weapon['HeadshotMultiplier'], inline=True)
        embed.add_field(name='Native Bonus', value=weapon['WeaponBonus'], inline=True)

        embed.add_field(name='Bonus', value=weapon['Bonus'], inline=True)

        damageStr = "%s - %s" % (weapon['256']['min'], weapon['256']['max'])

        embed.add_field(name='Base Damage', value=damageStr, inline=True)

        if 'modCompat' in weapon.keys():
            compat = ', '.join(weapon['modCompat']['compat'])
            embed.add_field(name='Compatible Mods', value=compat)

            if 'note' in weapon['modCompat'].keys():
                embed.set_footer(text="%s" % weapon['modCompat']['note'])

        if 'talent' in weapon.keys():
            description = weapon['talent']['description']
            embed.description = description

        embed.color = 0xDA9513


        return embed

    def normalize(self, item):  # don't give empty params to bot embed
        for i in item.keys():
            if type(item[i]) is str and len(item[i]) == 0:
                item[i] = '-'

    def render_weapon_mods(self, mod):
        embed = MessageEmbed()
        key_names = {"Mod_Category", "name", "Primary_Attribute", "Mod_Type", "Crit_Chance",
                     "Crit_Damage", "Headshot_Damage", "Accuracy", "Stability", "Reload_Speed",
                    "Rate_Of_Fire", "Optimal_Range", "Magazine_Size", "Decreased_Threat", "Increased_Threat"}

        for key in mod.keys():
            if key == 'name':
                embed.title = mod['name']
            elif key in key_names:
                val_str = ", ".join(mod[key]) if type(mod[key]) is list else mod[key]
                embed.add_field(name=key.replace("_", " "), value=val_str, inline=True)

        embed.set_footer(text="All mods will roll their Primary Attributes, "
                              "as well as an additional 2 attributes")

        embed.color = 0xDA9513

        return embed

    def render_exotic_gear(self, exotic_gear):
        embed = MessageEmbed()

        embed.title = exotic_gear['name']
        embed.description = exotic_gear['description']

        embed.color = 0xCF5A2E
        return embed

    def render_gear_attribute(self, gear_attribute):
        embed = MessageEmbed()

        key_names = ["type", "name", "Minimum_Total", "Mask", "Body_Armor", "Backpack", "Gloves", "Knee_Pads", "Holster",
                     "Maximum_Total", "Max_With_Gear_Mods"]

        for key in gear_attribute.keys():
            if key == 'name':
                embed.title = gear_attribute[key]
            elif key in key_names:
                val = gear_attribute[key]
                embed.add_field(name=key.replace("_", " "), value=val, inline=True)
        
        embed.color = 0xDA9513

        return embed
Пример #33
0
 def __init__(self, sock):
     self.sock = sock
     self.sem = BoundedSemaphore(1)
Пример #34
0
from gevent.lock import BoundedSemaphore
from paramiko import SSHException

from pyinfra import logger
from pyinfra.api.util import (
    get_arg_value,
    log_host_command_error,
    make_hash,
    underscore,
)
from pyinfra.progress import progress_spinner

# Index of snake_case facts -> CamelCase classes
FACTS = {}
FACT_LOCK = BoundedSemaphore()


def is_fact(name):
    return name in FACTS


def get_fact_class(name):
    return FACTS[name]


def get_fact_names():
    '''
    Returns a list of available facts in snake_case format.
    '''
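
FACTS and FACT_LOCK above form a lock-guarded registry; a sketch of how an
entry might be added under the lock (a hypothetical helper, not pyinfra's
actual registration path):

def register_fact(cls):
    with FACT_LOCK:                          # guard the shared index
        FACTS[underscore(cls.__name__)] = cls
    return cls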
Пример #35
0
import traceback
import zipfile

import gevent.monkey
from gevent.lock import BoundedSemaphore

import VMStateManager.vbox_monitor
from manager_constants import CHECKOUT_TIME, VBOX_PROBETIME
from Workshop_Queue import Workshop_Queue
from Workshop_Unit import Workshop_Unit

gevent.monkey.patch_all()
aggregatedInfo = []
availableWorkshops = []
unitsOnHold = []
aggregatedInfoSem = BoundedSemaphore(1)


def cleanup():
    logging.info("Cleaning up webdata aggregator...")
    try:

        logging.info("Clean up complete. Exiting...")
    except Exception as e:
        logging.error("Error during cleanup" + str(e))


def getAvailableUnits():
    logging.info("webdata_aggregator: Getting available units.")
    availableUnits = []
    getGroupToVms = VMStateManager.vbox_monitor.getGroupToVms().copy()
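
aggregatedInfoSem above guards the module-level lists; a sketch of the
writer/reader discipline this implies (function names are assumptions):

def publish(new_info):
    global aggregatedInfo
    with aggregatedInfoSem:          # writer swaps in a fresh snapshot
        aggregatedInfo = new_info

def snapshot():
    with aggregatedInfoSem:          # reader copies under the same lock
        return list(aggregatedInfo)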
Пример #36
0
 def _get_greenlet_lock(self):
     if not hasattr(self._thread_local, 'greenlet_lock'):
         greenlet_lock = self._thread_local.greenlet_lock = BoundedSemaphore(1)
     else:
         greenlet_lock = self._thread_local.greenlet_lock
     return greenlet_lock
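
A standalone sketch of the same lazily-created per-thread lock; gevent
primitives belong to the hub of the thread that created them, so each OS
thread gets its own semaphore on first use:

import threading
from gevent.lock import BoundedSemaphore

_thread_local = threading.local()

def get_greenlet_lock():
    lock = getattr(_thread_local, 'greenlet_lock', None)
    if lock is None:
        lock = _thread_local.greenlet_lock = BoundedSemaphore(1)
    return lock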
Пример #37
0
def getLogger(name):
    _handler = logging.StreamHandler()
    _handler.setFormatter(
        logging.Formatter(
            '%(asctime)s %(module)s %(levelname)s:%(lineno)s %(message)s'))
    log = logging.getLogger(name)
    log.addHandler(_handler)
    log.setLevel(logging.DEBUG)
    return log


# Cap the number of concurrent accesses to the shared resource
sem = BoundedSemaphore(1)
log = getLogger(__name__)
signal_stop = False
local_stash = local()
evt = Event()
art = AsyncResult()
q = Queue()

# ------------------------------------------------------------------------
# Queue、spawn
# def init():
#     while True:
#         if signal_stop: break
#         art.set(False)
#         gevent.sleep(0.1)
#         log.info('init...')
Пример #38
0
    @Author:	b0ring
    @MySite:	https://unnamebao.github.io/
    @Date:		2019-09-19 16:11:06
    @Version:	1.0.0
'''

import nmap
import gevent
from gevent import monkey, pool
from gevent import sleep
import getopt
import sys
import csv
from gevent.lock import BoundedSemaphore

sem = BoundedSemaphore(1)
sem2 = BoundedSemaphore(0)
monkey.patch_all()
port_list = [
    1, 7, 19, 21, 22, 23, 25, 31, 42, 53, 67, 69, 79, 80, 99, 102, 109, 110,
    113, 119, 135, 137, 138, 139, 143, 161, 177, 389, 443, 456, 513, 544, 548,
    553, 555, 568, 569, 635, 636, 666, 993, 1001, 1011, 1024
]
port_list = [str(port) for port in port_list]
port_str = ",".join(port_list)
ip_port_dict = {}
flag = [
    1000,
]

Пример #39
0
    def __init__(self, args):
        self._homepage_links = []
        self._args = args
        self.service_config = args.service_config
        self._debug = {
            'hb_stray': 0,
            'msg_pubs': 0,
            'msg_subs': 0,
            'msg_query': 0,
            'msg_hbt': 0,
            'ttl_short': 0,
            'policy_rr': 0,
            'policy_lb': 0,
            'policy_fi': 0,
            'db_upd_hb': 0,
            'max_pend_pb':0,
            'max_pend_sb':0,
            'max_pend_hb':0,
            'cur_pend_pb':0,
            'cur_pend_sb':0,
            'cur_pend_hb':0,
            'throttle_subs':0,
            'restarting':0,
        }
        self._ts_use = 1
        self.short_ttl_map = {}
        self._sem = BoundedSemaphore(1)

        self._base_url = "http://%s:%s" % (self._args.listen_ip_addr,
                                           self._args.listen_port)
        self._pipe_start_app = None

        bottle.route('/', 'GET', self.homepage_http_get)

        # heartbeat
        bottle.route('/heartbeat', 'POST', self.api_heartbeat)

        # publish service
        bottle.route('/publish', 'POST', self.api_publish)
        self._homepage_links.append(
            LinkObject(
                'action',
                self._base_url , '/publish', 'publish service'))
        bottle.route('/publish/<end_point>', 'POST', self.api_publish)

        # subscribe service
        bottle.route('/subscribe',  'POST', self.api_subscribe)
        self._homepage_links.append(
            LinkObject(
                'action',
                self._base_url , '/subscribe', 'subscribe service'))

        # query service
        bottle.route('/query',  'POST', self.api_query)
        self._homepage_links.append(
            LinkObject(
                'action',
                self._base_url , '/query', 'query service'))

        # collection - services
        bottle.route('/services', 'GET', self.show_all_services)
        self._homepage_links.append(
            LinkObject(
                'action',
                self._base_url , '/services', 'show published services'))
        bottle.route('/services.json', 'GET', self.services_json)
        self._homepage_links.append(
            LinkObject(
                'action',
                self._base_url , '/services.json',
                'List published services in JSON format'))
        # show a specific service type
        bottle.route('/services/<service_type>', 'GET', self.show_all_services)

        # update service
        bottle.route('/service/<id>', 'PUT', self.service_http_put)

        # get service info
        bottle.route('/service/<id>', 'GET',  self.service_http_get)
        bottle.route('/service/<id>/brief', 'GET', self.service_brief_http_get)

        # delete (un-publish) service
        bottle.route('/service/<id>', 'DELETE', self.service_http_delete)

        # collection - clients
        bottle.route('/clients', 'GET', self.show_all_clients)
        self._homepage_links.append(
            LinkObject(
                'action',
                self._base_url , '/clients', 'list all subscribers'))
        bottle.route('/clients.json', 'GET', self.clients_json)
        self._homepage_links.append(
            LinkObject(
                'action',
                self._base_url , '/clients.json',
                'list all subscribers in JSON format'))

        # show config
        bottle.route('/config', 'GET', self.config_http_get)
        self._homepage_links.append(
            LinkObject(
                'action',
                self._base_url , '/config', 'show discovery service config'))

        # show debug
        bottle.route('/stats', 'GET', self.show_stats)
        self._homepage_links.append(
            LinkObject(
                'action',
                self._base_url , '/stats', 'show discovery service stats'))

        # cleanup 
        bottle.route('/cleanup', 'GET', self.cleanup_http_get)
        self._homepage_links.append(LinkObject('action',
            self._base_url , '/cleanup', 'Purge inactive publishers'))

        if not self._pipe_start_app:
            self._pipe_start_app = bottle.app()

        # sandesh init
        self._sandesh = Sandesh()
        # Reset the sandesh send rate limit value
        if self._args.sandesh_send_rate_limit is not None:
            SandeshSystem.set_sandesh_send_rate_limit( \
                self._args.sandesh_send_rate_limit)
        module = Module.DISCOVERY_SERVICE
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        instance_id = self._args.worker_id
        self._sandesh.init_generator(
            module_name, socket.gethostname(), node_type_name, instance_id,
            self._args.collectors, 'discovery_context', 
            int(self._args.http_server_port), ['sandesh', 'uve'])
        self._sandesh.set_logging_params(enable_local_log=self._args.log_local,
                                         category=self._args.log_category,
                                         level=self._args.log_level,
                                         file=self._args.log_file,
                                         enable_syslog=self._args.use_syslog,
                                         syslog_facility=self._args.syslog_facility)
        self._sandesh.trace_buffer_create(name="dsHeartBeatTraceBuf",
                                          size=1000)

        # DB interface initialization
        self._db_conn = self._args.zk
        self._db_conn.set_ds(self)

        # update services database (old db didn't keep HB)
        for entry in self._db_conn.service_entries():
            update = False
            if 'heartbeat' not in entry:
                entry['heartbeat'] = int(time.time())
                self._debug['db_upd_hb'] += 1
                update = True

            if update:
                self._db_conn.update_service(
                    entry['service_type'], entry['service_id'], entry)

        # build in-memory subscriber data
        self._sub_data = {}
        for (client_id, service_type) in self._db_conn.subscriber_entries():
            self.create_sub_data(client_id, service_type)

        # must be done after we have built in-memory publisher data from db.
        self._db_conn.start_background_tasks()
Пример #40
0
from gevent import monkey
monkey.patch_all()
from gevent.lock import BoundedSemaphore
import gevent
from parser import *
from queue import Queue
from pybloom import ScalableBloomFilter
import json
from redis import Redis
from utilities.util_conf import *
import requests

# Cap the number of concurrent greenlets
sem = BoundedSemaphore(BoundedSemaphoreNum)

# Connect to redis
redis = Redis()


def htmlrun(url, user_type='pc', proxies=None):

    sem.acquire()
    try:

        html = download_html(url, user_type, proxies)
        # if the redirected url is not an internal url
        # if not is_interior_url(redirectUrl, url):
        # 	return
    except:
        sem.release()
        return
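
The success path of htmlrun is cut off above; to avoid leaking a semaphore
slot, the release belongs in a finally block so it runs on success and failure
alike. A sketch, assuming download_html as used above:

def fetch(url):
    sem.acquire()                    # take a slot before any network I/O
    try:
        return download_html(url, 'pc', None)
    finally:
        sem.release()                # give the slot back either way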
Пример #41
0
class CrispinConnectionPool(object):
    """
    Connection pool for Crispin clients.

    Connections in a pool are specific to an IMAPAccount.

    Parameters
    ----------
    account_id : int
        Which IMAPAccount to open up a connection to.
    num_connections : int
        How many connections in the pool.
    readonly : bool
        Is the connection to the IMAP server read-only?
    """

    def __init__(self, account_id, num_connections, readonly):
        log.info('Creating Crispin connection pool for account {} with {} '
                 'connections'.format(account_id, num_connections))
        self.account_id = account_id
        self.readonly = readonly
        self._queue = Queue(num_connections, items=num_connections * [None])
        self._sem = BoundedSemaphore(num_connections)
        self._set_account_info()

    @contextlib.contextmanager
    def get(self):
        """ Get a connection from the pool, or instantiate a new one if needed.
        If `num_connections` connections are already in use, block until one is
        available.
        """
        # A gevent semaphore is granted in the order that greenlets tried to
        # acquire it, so we use a semaphore here to prevent potential
        # starvation of greenlets if there is high contention for the pool.
        # The queue implementation does not have that property; having
        # greenlets simply block on self._queue.get(block=True) could cause
        # individual greenlets to block for arbitrarily long.
        self._sem.acquire()
        client = self._queue.get()
        try:
            if client is None:
                client = self._new_connection()
            yield client
        except CONN_DISCARD_EXC_CLASSES as exc:
            # Discard the connection on socket or IMAP errors. Technically this
            # isn't always necessary, since if you got e.g. a FETCH failure you
            # could reuse the same connection. But for now it's the simplest
            # thing to do.
            log.info('IMAP connection error; discarding connection',
                     exc_info=True)
            if client is not None and \
               not isinstance(exc, CONN_UNUSABLE_EXC_CLASSES):
                try:
                    client.logout()
                except Exception:
                    log.info('Error on IMAP logout', exc_info=True)
            client = None
            raise exc
        except:
            raise
        finally:
            self._queue.put(client)
            self._sem.release()

    def _set_account_info(self):
        with session_scope(self.account_id) as db_session:
            account = db_session.query(ImapAccount).get(self.account_id)
            self.sync_state = account.sync_state
            self.provider = account.provider
            self.provider_info = account.provider_info
            self.email_address = account.email_address
            self.auth_handler = account.auth_handler
            if account.provider == 'gmail':
                self.client_cls = GmailCrispinClient
            else:
                self.client_cls = CrispinClient

    def _new_raw_connection(self):
        """Returns a new, authenticated IMAPClient instance for the account."""
        with session_scope(self.account_id) as db_session:
            if self.provider == 'gmail':
                account = db_session.query(GmailAccount).options(
                    joinedload(GmailAccount.auth_credentials)).get(
                    self.account_id)
            else:
                account = db_session.query(GenericAccount).options(
                    joinedload(GenericAccount.secret)).get(self.account_id)
            db_session.expunge(account)

        return self.auth_handler.connect_account(account)

    def _new_connection(self):
        conn = self._new_raw_connection()
        return self.client_cls(self.account_id, self.provider_info,
                               self.email_address, conn,
                               readonly=self.readonly)
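
Typical usage of the pool, assuming an account id and letting a hypothetical
sync_folder stand in for whatever the caller does with the checked-out client:

pool = CrispinConnectionPool(account_id=1, num_connections=3, readonly=True)
with pool.get() as crispin_client:
    sync_folder(crispin_client)      # hypothetical work with the client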
Пример #42
0
class DianpingProcessor(BaseProcessor):
    name = 'dianping-shop'

    def __init__(self, *args, **kwargs):
        BaseProcessor.__init__(self, *args, **kwargs)
        self.build_args()

        # Cached city information
        from gevent.lock import BoundedSemaphore

        self._city_cache = {}
        self._city_cache_lock = BoundedSemaphore(1)

    def build_args(self):
        """
        处理命令行参数
        """
        import argparse

        parser = argparse.ArgumentParser()
        parser.add_argument('--limit', type=int)
        parser.add_argument('--skip', type=int, default=0)
        parser.add_argument('--query', type=str)
        self.args, leftover = parser.parse_known_args()

    def build_cursor(self):
        col = get_mongodb('raw_dianping', 'Dining', 'mongo-raw')

        query = {}
        if self.args.query:
            exec 'from bson import ObjectId'  # make ObjectId usable inside eval below
            query = eval(self.args.query)

        cursor = col.find(query).skip(self.args.skip)
        if self.args.limit:
            cursor.limit(self.args.limit)
        return cursor

    def populate_tasks(self):
        for val in self.build_cursor():

            def task(entry=val):
                self.process_details(entry)

            self.add_task(task)

    def get_city(self, city_name, coords):
        """
        通过城市名称和坐标,获得城市详情
        """
        if city_name not in self._city_cache:
            self._city_cache_lock.acquire()
            try:
                if city_name not in self._city_cache:
                    col = get_mongodb('geo', 'Locality', 'mongo')
                    lat = coords['lat']
                    lng = coords['lng']
                    if not isinstance(lat, float) or not isinstance(
                            lng, float):
                        return
                    geo_json = {
                        'type': 'Point',
                        'coordinates': [coords['lng'], coords['lat']]
                    }
                    max_distance = 200000
                    city_list = list(
                        col.find({
                            'alias': city_name,
                            'location': {
                                '$near': {
                                    '$geometry': geo_json,
                                    '$maxDistance': max_distance
                                }
                            }
                        }))
                    if city_list:
                        city = city_list[0]
                        self._city_cache[city_name] = city
                    else:
                        self.log(
                            'Failed to find city: %s, lat=%f, lng=%f' %
                            (city_name, lat, lng), logging.WARN)
                        self._city_cache[city_name] = None
            finally:
                self._city_cache_lock.release()

        return self._city_cache[city_name]

    @staticmethod
    def calc_rating(entry):
        """
        计算店铺的rating
        """
        if 'review_stat' not in entry:
            return

        review = entry['review_stat']
        tmp = 0
        for idx in xrange(1, 6):
            key = 'reviewCountStar%d' % idx
            tmp += idx * review[key]
        total_cnt = review['reviewCountAllStar']
        if total_cnt == 0:
            return
        rating = float(tmp) / total_cnt

        return {'rating': rating, 'voteCnt': total_cnt}

    def process_details(self, entry):
        """
        处理店铺详情
        """
        city_info = self.get_city(entry['city_name'], {
            'lat': entry['lat'],
            'lng': entry['lng']
        })
        if not city_info:
            return

        country = {}
        for key in ('_id', 'zhName', 'enName'):
            if key in city_info['country']:
                country[key] = city_info['country'][key]

        locality = {}
        for key in ('_id', 'zhName', 'enName', 'location'):
            if key in city_info:
                locality[key] = city_info[key]

        shop = {
            'source': {
                'dianping': {
                    'id': entry['shop_id']
                }
            },
            'zhName': entry['title'],
            'alias': [entry['title']],
            'address': entry['addr'],
            'location': {
                'type': 'Point',
                'coordinates': [entry['lng'], entry['lat']]
            },
            'country': country,
            'locality': locality,
            'targets': [country['_id'], locality['_id']],
            'taoziEna': True,
            'lxpEna': True
        }

        tags = []
        if 'tags' in entry and entry['tags']:
            for t in entry['tags']:
                tags.append(t)
        if 'cat_name' in entry and entry['cat_name']:
            cat_name = entry['cat_name']
            tags.append(cat_name)
            entry['style'] = cat_name
        tags = list(set(tags))
        if tags:
            shop['tags'] = tags

        fields_map = {
            'mean_price': 'price',
            'tel': 'tel',
            'open_time': 'openTime',
            'cover_image': 'cover_image'
        }
        for key1, key2 in fields_map.items():
            if key1 in entry and entry[key1]:
                shop[key2] = entry[key1]

        score = self.calc_rating(entry)
        if score:
            shop['voteCnt'] = score['voteCnt']
            shop['rating'] = score['rating']

        self.update_shop(shop)

    @staticmethod
    def add_image(image_url):
        from hashlib import md5

        url_hash = md5(image_url).hexdigest()
        image = {'url_hash': url_hash, 'key': url_hash, 'url': image_url}
        col_im = get_mongodb('imagestore', 'Images', 'mongo')
        if not col_im.find_one({'key': image['key']}, {'_id': 1}):
            col = get_mongodb('imagestore', 'ImageCandidates', 'mongo')
            col.update({'key': image['key']}, {'$set': image}, upsert=True)
        return image['key']

    @staticmethod
    def update_shop(shop):
        """
        将店铺存储至数据库
        """
        if 'cover_image' in shop:
            cover = shop.pop('cover_image')
            image_key = DianpingProcessor.add_image(cover)
            shop['images'] = [{'key': image_key}]

        add_to_set = {}
        for key in ('tags', 'alias'):
            if key in shop:
                value_list = shop.pop(key)
                add_to_set[key] = {'$each': value_list}
        ops = {'$set': shop}
        if add_to_set:
            ops['$addToSet'] = add_to_set

        col = get_mongodb('raw_dianping', 'DiningProc', 'mongo-raw')
        col.update({'source.dianping.id': shop['source']['dianping']['id']},
                   ops,
                   upsert=True)
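
Note that get_city holds _city_cache_lock across the MongoDB query, so
concurrent misses for different cities serialize behind one another. A common
variant queries outside the lock and only publishes under it, at the cost of
occasional duplicate lookups; a sketch with a hypothetical lookup_city:

def get_city_relaxed(self, city_name, coords):
    if city_name not in self._city_cache:
        city = lookup_city(city_name, coords)      # slow query, lock not held
        with self._city_cache_lock:
            self._city_cache.setdefault(city_name, city)  # first writer wins
    return self._city_cache[city_name]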
Пример #43
0
class CrispinConnectionPool(object):
    """
    Connection pool for Crispin clients.

    Connections in a pool are specific to an IMAPAccount.

    Parameters
    ----------
    account_id : int
        Which IMAPAccount to open up a connection to.
    num_connections : int
        How many connections in the pool.
    readonly : bool
        Is the connection to the IMAP server read-only?
    """

    def __init__(self, account_id, num_connections, readonly):
        log.info('Creating Crispin connection pool for account {} with {} '
                 'connections'.format(account_id, num_connections))
        self.account_id = account_id
        self.readonly = readonly
        self._queue = Queue(num_connections, items=num_connections * [None])
        self._sem = BoundedSemaphore(num_connections)
        self._set_account_info()

    @contextlib.contextmanager
    def get(self):
        """ Get a connection from the pool, or instantiate a new one if needed.
        If `num_connections` connections are already in use, block until one is
        available.
        """
        # A gevent semaphore is granted in the order that greenlets tried to
        # acquire it, so we use a semaphore here to prevent potential
        # starvation of greenlets if there is high contention for the pool.
        # The queue implementation does not have that property; having
        # greenlets simply block on self._queue.get(block=True) could cause
        # individual greenlets to block for arbitrarily long.
        self._sem.acquire()
        client = self._queue.get()
        try:
            if client is None:
                client = self._new_connection()
            yield client
        except CONN_DISCARD_EXC_CLASSES as exc:
            # Discard the connection on socket or IMAP errors. Technically this
            # isn't always necessary, since if you got e.g. a FETCH failure you
            # could reuse the same connection. But for now it's the simplest
            # thing to do.
            log.info('IMAP connection error; discarding connection',
                     exc_info=True)
            if client is not None and \
               not isinstance(exc, CONN_UNUSABLE_EXC_CLASSES):
                try:
                    client.logout()
                except Exception:
                    log.info('Error on IMAP logout', exc_info=True)
            client = None
            raise exc
        finally:
            self._queue.put(client)
            self._sem.release()

    def _set_account_info(self):
        with session_scope(self.account_id) as db_session:
            account = db_session.query(ImapAccount).get(self.account_id)
            self.sync_state = account.sync_state
            self.provider = account.provider
            self.provider_info = account.provider_info
            self.email_address = account.email_address
            self.auth_handler = account.auth_handler
            if account.provider == 'gmail':
                self.client_cls = GmailCrispinClient
            else:
                self.client_cls = CrispinClient

    def _new_raw_connection(self):
        """Returns a new, authenticated IMAPClient instance for the account."""
        with session_scope(self.account_id) as db_session:
            if self.provider == 'gmail':
                account = db_session.query(GmailAccount).options(
                    joinedload(GmailAccount.auth_credentials)).get(
                    self.account_id)
            else:
                account = db_session.query(GenericAccount).options(
                    joinedload(GenericAccount.imap_secret)).get(self.account_id)
            db_session.expunge(account)

        return self.auth_handler.connect_account(account)

    def _new_connection(self):
        conn = self._new_raw_connection()
        return self.client_cls(self.account_id, self.provider_info,
                               self.email_address, conn,
                               readonly=self.readonly)
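Typical usage checks a client out through the context manager; because the bounded semaphore grants waiters in FIFO order, no greenlet starves under contention. A hedged sketch; the constructor arguments and the folder call are illustrative:

# Hedged usage sketch; arguments and the folder call are illustrative,
# not taken from the surrounding project.
pool = CrispinConnectionPool(account_id=1, num_connections=3, readonly=True)

def list_folders():
    with pool.get() as crispin_client:
        # The client is only valid inside this block; on socket/IMAP
        # errors the pool discards it and creates a fresh one next time.
        return crispin_client.folder_names()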
Example #44
0
 def __init__(self):
     """Cluster 抽象"""
     self.sem = BoundedSemaphore(1)
     self._clients = {}
     self.init_config()
Example #45
0
class RWLock( object ):
    def __init__( self ):
        self._canRead = Event()
        self._canWrite = Event()
        self._mutex = BoundedSemaphore( value = 1 )
        self._readers = 0
        self._isWriting = False
        self._canRead.set()
        self._canWrite.set()

    def rLock( self ):
        isReady = False
        while not isReady:
            self._canRead.wait()
            self._mutex.acquire( blocking = True, timeout = None )
            if not self._isWriting:
                self._canWrite.clear()
                self._readers += 1
                isReady = True
            self._mutex.release()

    def rUnlock( self ):
        self._mutex.acquire( blocking = True, timeout = None )
        self._readers -= 1
        if 0 == self._readers:
            self._canWrite.set()
        self._mutex.release()

    def wLock( self ):
        isReady = False
        while not isReady:
            self._canRead.clear()
            self._canWrite.wait()
            self._mutex.acquire( blocking = True, timeout = None )
            if not self._isWriting and 0 == self._readers:
                isReady = True
                self._isWriting = True
                self._canWrite.clear()
            self._mutex.release()

    def wUnlock( self ):
        self._mutex.acquire( blocking = True, timeout = None )
        self._isWriting = False
        self._canWrite.set()
        self._canRead.set()
        self._mutex.release()

    def writer( self ):
        return _rwlock_w( self )

    def reader( self ):
        return _rwlock_r( self )
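The `_rwlock_w` and `_rwlock_r` helpers returned by `writer()` and `reader()` are not part of this snippet; presumably they are thin context managers over the lock/unlock pairs. A hypothetical sketch:

# Hypothetical context-manager wrappers (not shown in the original snippet),
# assuming they simply pair rLock/rUnlock and wLock/wUnlock.
class _rwlock_r(object):
    def __init__(self, rwlock):
        self._rw = rwlock

    def __enter__(self):
        self._rw.rLock()
        return self._rw

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._rw.rUnlock()
        return False


class _rwlock_w(object):
    def __init__(self, rwlock):
        self._rw = rwlock

    def __enter__(self):
        self._rw.wLock()
        return self._rw

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._rw.wUnlock()
        return False


# Usage:
#   lock = RWLock()
#   with lock.reader():   # many readers may hold this at once
#       ...
#   with lock.writer():   # writers get exclusive access
#       ...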
Example #46
0
"""
Basic template for gevent web server
"""

import gevent
from gevent import monkey
monkey.patch_all()

from gevent.lock import BoundedSemaphore
from gevent.pywsgi import WSGIServer
from cgi import parse_qs, escape
from Cookie import SimpleCookie

import argparse, redis, json, logging.handlers, signal, sys, uuid, datetime

SEM = BoundedSemaphore(1)

LOG_LEVEL = logging.DEBUG
LOG_FORMAT = '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d] %(message)s'

SYSLOG = logging.handlers.SysLogHandler(address='/dev/log')
SYSLOG.setFormatter(logging.Formatter(LOG_FORMAT))

logging.basicConfig(format=LOG_FORMAT)
logging.getLogger().setLevel(LOG_LEVEL)

POOL = redis.ConnectionPool(host='localhost',
                            port=6379,
                            db=0,
                            max_connections=100)
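A request handler built on these globals would take `SEM` around any shared mutable state and draw Redis connections from `POOL`. A minimal sketch, with purely illustrative application logic:

# Minimal handler sketch on top of the globals above; the hit counter is
# illustrative application logic, not from the original template.
def application(environ, start_response):
    r = redis.Redis(connection_pool=POOL)
    with SEM:  # serialize access to shared state across greenlets
        hits = r.incr('hits')
    start_response('200 OK', [('Content-Type', 'application/json')])
    return [json.dumps({'hits': hits})]

if __name__ == '__main__':
    WSGIServer(('0.0.0.0', 8000), application).serve_forever()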
Example #47
0
class ConnectionPool(object):
    """
    Generic TCP connection pool, with the following features:
        * Configurable pool size
        * Auto-reconnection when a broken socket is detected
        * Optional periodic keepalive
    """

    # Frequency at which the pool is populated at startup
    SPAWN_FREQUENCY = 0.1

    def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None):
        self.size = size
        self.conn = deque()
        self.lock = BoundedSemaphore(size)
        self.keepalive = keepalive
        # Exceptions list must be in tuple form to be caught properly
        self.exc_classes = tuple(exc_classes)
        for i in range(size):
            self.lock.acquire()
        for i in range(size):
            gevent.spawn_later(self.SPAWN_FREQUENCY*i, self._addOne)
        if self.keepalive:
            gevent.spawn(self._keepalive_periodic)

    def _new_connection(self):
        """
        Establish a new connection (to be implemented in subclasses).
        """
        raise NotImplementedError

    def _keepalive(self, c):
        """
        Implement actual application-level keepalive (to be
        reimplemented in subclasses).

        :raise: socket.error if the connection has been closed or is broken.
        """
        raise NotImplementedError()

    def _keepalive_periodic(self):
        delay = float(self.keepalive) / self.size
        while 1:
            try:
                with self.get() as c:
                    self._keepalive(c)
            except self.exc_classes:
                # Nothing to do, the pool will generate a new connection later
                pass
            gevent.sleep(delay)

    def _addOne(self):
        stime = 0.1
        while 1:
            c = self._new_connection()
            if c:
                break
            gevent.sleep(stime)
            if stime < 400:
                stime *= 2

        self.conn.append(c)
        self.lock.release()

    @contextmanager
    def get(self):
        """
        Get a connection from the pool, to make and receive traffic.

        If the connection fails for any reason (socket.error), it is dropped
        and a new one is scheduled. Please use @retry as a way to automatically
        retry whatever operation you were performing.
        """
        self.lock.acquire()
        try:
            c = self.conn.popleft()
            yield c
        except self.exc_classes:
            # The current connection has failed, drop it and create a new one
            gevent.spawn_later(1, self._addOne)
            raise
        except:
            self.conn.append(c)
            self.lock.release()
            raise
        else:
            # NOTE: cannot use finally because MUST NOT reuse the connection
            # if it failed (socket.error)
            self.conn.append(c)
            self.lock.release()
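Concrete pools only need to implement `_new_connection` (and `_keepalive` when a keepalive interval is passed). A hedged sketch of a TCP subclass; host, port, and the ping payload are illustrative:

# Hedged sketch of a concrete subclass; host, port, and the ping payload
# are illustrative.
import socket

class TCPConnectionPool(ConnectionPool):
    def __init__(self, host, port, size, keepalive=None):
        self.host = host
        self.port = port
        super(TCPConnectionPool, self).__init__(size, keepalive=keepalive)

    def _new_connection(self):
        try:
            return socket.create_connection((self.host, self.port), timeout=5)
        except socket.error:
            return None  # _addOne() keeps retrying with backoff

    def _keepalive(self, c):
        # raises socket.error if the peer has gone away
        c.sendall(b'ping\n')

# with TCPConnectionPool('127.0.0.1', 9000, size=5).get() as c:
#     c.sendall(b'hello\n')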
Example #48
0
class DiscoveryZkClient(object):
    def __init__(self,
                 discServer,
                 zk_srv_ip='127.0.0.1',
                 zk_srv_port='2181',
                 reset_config=False):
        self._reset_config = reset_config
        self._service_id_to_type = {}
        self._ds = discServer
        self._zk_sem = BoundedSemaphore(1)
        self._election = None
        self._restarting = False

        zk_endpts = []
        for ip in zk_srv_ip.split(','):
            zk_endpts.append('%s:%s' % (ip, zk_srv_port))

        # logging
        logger = logging.getLogger('discovery-service')
        logger.setLevel(logging.WARNING)
        handler = logging.handlers.RotatingFileHandler(
            '/var/log/contrail/discovery_zk.log',
            maxBytes=1024 * 1024,
            backupCount=10)
        log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s',
                                       datefmt='%m/%d/%Y %I:%M:%S %p')
        handler.setFormatter(log_format)
        logger.addHandler(handler)

        self._zk = kazoo.client.KazooClient(
            hosts=','.join(zk_endpts),
            handler=kazoo.handlers.gevent.SequentialGeventHandler(),
            logger=logger)
        self._logger = logger

        # connect
        self.connect()

        if reset_config:
            self.delete_node("/services", recursive=True)
            self.delete_node("/clients", recursive=True)
            self.delete_node("/election", recursive=True)

        # create default paths
        self.create_node("/services")
        self.create_node("/clients")
        self.create_node("/election")

        self._debug = {
            'subscription_expires': 0,
            'oos_delete': 0,
            'db_excepts': 0,
        }

    # end __init__

    # Discovery server used for syslog, cleanup etc
    def set_ds(self, discServer):
        self._ds = discServer

    # end set_ds

    def is_restarting(self):
        return self._restarting

    # end is_restarting

    # restart
    def restart(self):
        self._zk_sem.acquire()
        self._restarting = True
        self.syslog("restart: acquired lock; state %s " % self._zk.state)
        # initiate restart if our state is suspended or lost
        if self._zk.state != "CONNECTED":
            self.syslog("restart: starting ...")
            try:
                self._zk.stop()
                self._zk.close()
                self._zk.start()
                self.syslog("restart: done")
            except:
                e = sys.exc_info()[0]
                self.syslog('restart: exception %s' % str(e))
        self._restarting = False
        self._zk_sem.release()

    # start
    def connect(self):
        while True:
            try:
                self._zk.start()
                break
            except gevent.Timeout:
                self.syslog(
                    'Failed to connect with ZooKeeper - will retry in a second')
                gevent.sleep(1)
            # ZooKeeper can also raise exceptions due to delay in master election
            except Exception as e:
                self.syslog('%s - will retry in a second' % (str(e)))
                gevent.sleep(1)
        self.syslog('Connected to ZooKeeper!')

    # end

    def start_background_tasks(self):
        # spawn loop to expire subscriptions
        gevent.Greenlet.spawn(self.inuse_loop)

        # spawn loop to expire services
        gevent.Greenlet.spawn(self.service_oos_loop)

    # end

    def syslog(self, log_msg):
        if self._logger is None:
            return
        self._logger.info(log_msg)

    # end

    def get_debug_stats(self):
        return self._debug

    # end

    def _zk_listener(self, state):
        if state == "CONNECTED":
            self._election.cancel()

    # end

    def _zk_election_callback(self, func, *args, **kwargs):
        self._zk.remove_listener(self._zk_listener)
        func(*args, **kwargs)

    # end

    def master_election(self, path, identifier, func, *args, **kwargs):
        self._zk.add_listener(self._zk_listener)
        while True:
            self._election = self._zk.Election(path, identifier)
            self._election.run(self._zk_election_callback, func, *args,
                               **kwargs)

    # end master_election

    def create_node(self, path, value='', makepath=True, sequence=False):
        value = str(value)
        while True:
            try:
                return self._zk.set(path, value)
            except kazoo.exceptions.NoNodeException:
                self.syslog('create %s' % (path))
                return self._zk.create(path,
                                       value,
                                       makepath=makepath,
                                       sequence=sequence)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()

    # end create_node

    def get_children(self, path):
        while True:
            try:
                return self._zk.get_children(path)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
            except Exception:
                return []

    # end get_children

    def read_node(self, path):
        while True:
            try:
                data, stat = self._zk.get(path)
                return data, stat
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
            except kazoo.exceptions.NoNodeException:
                self.syslog('exc read: node %s does not exist' % path)
                return (None, None)

    # end read_node

    def delete_node(self, path, recursive=False):
        while True:
            try:
                return self._zk.delete(path, recursive=recursive)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
            except kazoo.exceptions.NoNodeException:
                self.syslog('exc delete: node %s does not exist' % path)
                return None

    # end delete_node

    def exists_node(self, path):
        while True:
            try:
                return self._zk.exists(path)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()

    # end exists_node

    def service_entries(self):
        service_types = self.get_children('/services')
        for service_type in service_types:
            services = self.get_children('/services/%s' % (service_type))
            for service_id in services:
                data, stat = self.read_node('/services/%s/%s' %
                                            (service_type, service_id))
                entry = json.loads(data)
                yield (entry)

    def subscriber_entries(self):
        service_types = self.get_children('/clients')
        for service_type in service_types:
            subscribers = self.get_children('/clients/%s' % (service_type))
            for client_id in subscribers:
                cl_entry = self.lookup_client(service_type, client_id)
                if cl_entry:
                    yield ((client_id, service_type))

    # end

    def update_service(self, service_type, service_id, data):
        path = '/services/%s/%s' % (service_type, service_id)
        self.create_node(path, value=json.dumps(data), makepath=True)

    # end

    def insert_service(self, service_type, service_id, data):

        # ensure election path for service type exists
        path = '/election/%s' % (service_type)
        self.create_node(path)

        # preclude duplicate service entry
        sid_set = set()

        # prevent background task from deleting node under our nose
        seq_list = self.get_children(path)
        # data for election node is service ID
        for sequence in seq_list:
            sid, stat = self.read_node('/election/%s/%s' %
                                       (service_type, sequence))
            if sid is not None:
                sid_set.add(sid)
        if service_id not in sid_set:
            path = '/election/%s/node-' % (service_type)
            pp = self.create_node(path,
                                  service_id,
                                  makepath=True,
                                  sequence=True)
            pat = path + "(?P<id>.*$)"
            mch = re.match(pat, pp)
            seq = mch.group('id')
            data['sequence'] = seq
            self.syslog('ST %s, SID %s not found! Added with sequence %s' %
                        (service_type, service_id, seq))

    # end insert_service

    # forget service and subscribers
    def delete_service(self, service_type, service_id, recursive=False):
        #if self.lookup_subscribers(service_type, service_id):
        #    return

        path = '/services/%s/%s' % (service_type, service_id)
        self.delete_node(path, recursive=recursive)

        # delete service node if all services gone
        path = '/services/%s' % (service_type)
        if self.get_children(path):
            return
        self.delete_node(path)

    #end delete_service

    def lookup_service(self, service_type, service_id=None):
        if not self.exists_node('/services/%s' % (service_type)):
            return None
        if service_id:
            data = None
            path = '/services/%s/%s' % (service_type, service_id)
            datastr, stat = self.read_node(path)
            if datastr:
                data = json.loads(datastr)
                clients = self.get_children(path)
                data['in_use'] = len(clients)
            return data
        else:
            r = []
            services = self.get_children('/services/%s' % (service_type))
            for service_id in services:
                entry = self.lookup_service(service_type, service_id)
                r.append(entry)
            return r

    # end lookup_service

    def query_service(self, service_type):
        path = '/election/%s' % (service_type)
        if not self.exists_node(path):
            return None
        seq_list = self.get_children(path)
        seq_list = sorted(seq_list)

        r = []
        for sequence in seq_list:
            service_id, stat = self.read_node('/election/%s/%s' %
                                              (service_type, sequence))
            entry = self.lookup_service(service_type, service_id)
            r.append(entry)
        return r

    # end

    # TODO use include_data available in new versions of kazoo
    # tree structure /services/<service-type>/<service-id>
    def get_all_services(self):
        r = []
        service_types = self.get_children('/services')
        for service_type in service_types:
            services = self.lookup_service(service_type)
            r.extend(services)
        return r

    # end

    def insert_client(self, service_type, service_id, client_id, blob, ttl):
        data = {'ttl': ttl, 'blob': blob}

        path = '/services/%s/%s/%s' % (service_type, service_id, client_id)
        self.create_node(path, value=json.dumps(data))

        path = '/clients/%s/%s/%s' % (service_type, client_id, service_id)
        self.create_node(path, value=json.dumps(data), makepath=True)

    # end insert_client

    def lookup_subscribers(self, service_type, service_id):
        path = '/services/%s/%s' % (service_type, service_id)
        if not self.exists_node(path):
            return None
        clients = self.get_children(path)
        return clients

    # end lookup_subscribers

    def lookup_client(self, service_type, client_id):
        try:
            datastr, stat = self.read_node('/clients/%s/%s' %
                                           (service_type, client_id))
            data = json.loads(datastr) if datastr else None
        except ValueError:
            self.syslog('raise ValueError st=%s, cid=%s' %
                        (service_type, client_id))
            data = None
        return data

    # end lookup_client

    def insert_client_data(self, service_type, client_id, cldata):
        path = '/clients/%s/%s' % (service_type, client_id)
        self.create_node(path, value=json.dumps(cldata), makepath=True)

    # end insert_client_data

    def lookup_subscription(self,
                            service_type,
                            client_id=None,
                            service_id=None,
                            include_meta=False):
        if not self.exists_node('/clients/%s' % (service_type)):
            return None
        if client_id and service_id:
            try:
                datastr, stat = self.read_node(
                    '/clients/%s/%s/%s' %
                    (service_type, client_id, service_id))
                data = json.loads(datastr)
                blob = data['blob']
                if include_meta:
                    return (blob, stat, data['ttl'])
                else:
                    return blob
            except kazoo.exceptions.NoNodeException:
                return None
        elif client_id:
            # our version of Kazoo doesn't support include_data :-(
            try:
                services = self.get_children('/clients/%s/%s' %
                                             (service_type, client_id))
                r = []
                for service_id in services:
                    datastr, stat = self.read_node(
                        '/clients/%s/%s/%s' %
                        (service_type, client_id, service_id))
                    if datastr:
                        data = json.loads(datastr)
                        blob = data['blob']
                        r.append((service_id, blob, stat))
                # sort services in the order of assignment to this client
                # (based on modification time)
                rr = sorted(r, key=lambda entry: entry[2].last_modified)
                return [(service_id, blob) for service_id, blob, stat in rr]
            except kazoo.exceptions.NoNodeException:
                return None
        else:
            clients = self.get_children('/clients/%s' % (service_type))
            return clients

    # end lookup_subscription

    # delete client subscription. Cleanup path if possible
    def delete_subscription(self, service_type, client_id, service_id):
        path = '/clients/%s/%s/%s' % (service_type, client_id, service_id)
        self.delete_node(path)

        path = '/services/%s/%s/%s' % (service_type, service_id, client_id)
        self.delete_node(path)

        # delete client node if all subscriptions gone
        path = '/clients/%s/%s' % (service_type, client_id)
        if self.get_children(path):
            return
        self.delete_node(path)

        # purge in-memory cache - ideally we are not supposed to know about
        # this
        self._ds.delete_sub_data(client_id, service_type)

        # delete service node if all clients gone
        path = '/clients/%s' % (service_type)
        if self.get_children(path):
            return
        self.delete_node(path)

    # end

    # TODO use include_data available in new versions of kazoo
    # tree structure /clients/<service-type>/<client-id>/<service-id>
    # return tuple (service_type, client_id, service_id)
    def get_all_clients(self):
        r = []
        service_types = self.get_children('/clients')
        for service_type in service_types:
            clients = self.get_children('/clients/%s' % (service_type))
            for client_id in clients:
                services = self.get_children('/clients/%s/%s' %
                                             (service_type, client_id))
                rr = []
                for service_id in services:
                    (datastr, stat,
                     ttl) = self.lookup_subscription(service_type,
                                                     client_id,
                                                     service_id,
                                                     include_meta=True)
                    rr.append((service_type, client_id, service_id,
                               stat.last_modified, ttl))
                rr = sorted(rr, key=lambda entry: entry[3])
                r.extend(rr)
        return r

    # end get_all_clients

    # reset in-use count of clients for each service
    def inuse_loop(self):
        while True:
            service_types = self.get_children('/clients')
            for service_type in service_types:
                clients = self.get_children('/clients/%s' % (service_type))
                for client_id in clients:
                    services = self.get_children('/clients/%s/%s' %
                                                 (service_type, client_id))
                    for service_id in services:
                        path = '/clients/%s/%s/%s' % (service_type, client_id,
                                                      service_id)
                        datastr, stat = self.read_node(path)
                        data = json.loads(datastr)
                        now = time.time()
                        exp_t = stat.last_modified + data['ttl'] +\
                            disc_consts.TTL_EXPIRY_DELTA
                        if now > exp_t:
                            self.delete_subscription(service_type, client_id,
                                                     service_id)
                            self.syslog('Expiring st:%s sid:%s cid:%s' %
                                        (service_type, service_id, client_id))
                            self._debug['subscription_expires'] += 1
            gevent.sleep(10)

    def service_oos_loop(self):
        if self._ds._args.hc_interval <= 0:
            return

        while True:
            for entry in self.service_entries():
                if not self._ds.service_expired(entry, include_down=False):
                    continue
                service_type = entry['service_type']
                service_id = entry['service_id']
                path = '/election/%s/node-%s' % (service_type,
                                                 entry['sequence'])
                if not self.exists_node(path):
                    continue
                self.syslog('Deleting sequence node %s for service %s:%s' %
                            (path, service_type, service_id))
                self.delete_node(path)
                entry['sequence'] = -1
                self.update_service(service_type, service_id, entry)
                self._debug['oos_delete'] += 1
            gevent.sleep(self._ds._args.hc_interval)
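Every accessor above follows the same recovery pattern: loop, attempt the kazoo call, and on `SessionExpiredError` or `ConnectionLoss` call `restart()` and try again. A hedged usage sketch; it requires a reachable ZooKeeper, and the payload is made up:

# Hedged usage sketch; needs a running ZooKeeper and the payload is
# illustrative.
zk = DiscoveryZkClient(discServer=None, zk_srv_ip='127.0.0.1')
entry = {'service_type': 'dns-server', 'service_id': 'svc-0001',
         'ip-address': '10.0.0.1', 'port': 53}
zk.insert_service('dns-server', 'svc-0001', entry)
zk.update_service('dns-server', 'svc-0001', entry)
print zk.lookup_service('dns-server', 'svc-0001')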
Example #49
0
 def __init__(self, value = 1, maxSeconds = 10):
     self._semaphore = BoundedSemaphore(value)
     self._maxSeconds = maxSeconds
     self._timer = None
     self._leaked = 0
     self._stopped = False
Example #50
0
    default=1024,
    help=
    'Beam width used in the CTC decoder when building candidate transcriptions. Default: 1024'
)
parser.add_argument('-p',
                    '--port',
                    default=8080,
                    help='Port to run server on. Default: 8080')
parser.add_argument('--debuglevel',
                    default=20,
                    help='Debug logging level. Default: 20')
ARGS = parser.parse_args()

logging.getLogger().setLevel(int(ARGS.debuglevel))

gSem = BoundedSemaphore(1)  # Only one Deepspeech instance available at a time

if os.path.isdir(ARGS.model):
    model_dir = ARGS.model
    ARGS.model = os.path.join(model_dir, 'model.pbmm')

LM_WEIGHT = ARGS.lw
VALID_WORD_COUNT_WEIGHT = ARGS.vwcw
BEAM_WIDTH = ARGS.bw

print('Initializing model...')
logger.info("ARGS.model: %s", ARGS.model)

# code for DeepSpeech version 0.7 and above
model = deepspeech.Model(ARGS.model)
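Since a single DeepSpeech `Model` instance is shared by all request greenlets, `gSem` serializes inference. A minimal sketch of a guarded transcription helper; the helper itself is illustrative:

# Illustrative helper: only one greenlet may run inference at a time.
def transcribe(audio):
    """audio: numpy int16 array at the model's expected sample rate."""
    with gSem:
        return model.stt(audio)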
Example #51
0
class EchoNode:

    def __init__(self, api, token_address):
        assert isinstance(api, RaidenAPI)
        self.ready = Event()

        self.api = api
        self.token_address = token_address

        existing_channels = self.api.get_channel_list(
            api.raiden.default_registry.address,
            self.token_address,
        )

        open_channels = [
            channel_state
            for channel_state in existing_channels
            if channel.get_status(channel_state) == CHANNEL_STATE_OPENED
        ]

        if len(open_channels) == 0:
            token = self.api.raiden.chain.token(self.token_address)
            if not token.balance_of(self.api.raiden.address) > 0:
                raise ValueError('not enough funds for echo node %s for token %s' % (
                    pex(self.api.raiden.address),
                    pex(self.token_address),
                ))
            self.api.token_network_connect(
                self.api.raiden.default_registry.address,
                self.token_address,
                token.balance_of(self.api.raiden.address),
                initial_channel_target=10,
                joinable_funds_target=.5,
            )

        self.last_poll_offset = 0
        self.received_transfers = Queue()
        self.stop_signal = None  # used to signal REMOVE_CALLBACK and stop echo_workers
        self.greenlets = list()
        self.lock = BoundedSemaphore()
        self.seen_transfers = deque(list(), TRANSFER_MEMORY)
        self.num_handled_transfers = 0
        self.lottery_pool = Queue()
        # register ourselves with the raiden alarm task
        self.api.raiden.alarm.register_callback(self.echo_node_alarm_callback)
        self.echo_worker_greenlet = gevent.spawn(self.echo_worker)
        log.info('Echo node started')

    def echo_node_alarm_callback(self, block_number):
        """ This can be registered with the raiden AlarmTask.
        If `EchoNode.stop()` is called, it will give the return signal to be removed from
        the AlarmTask callbacks.
        """
        if not self.ready.is_set():
            self.ready.set()
        log.debug('echo_node callback', block_number=block_number)
        if self.stop_signal is not None:
            return REMOVE_CALLBACK
        else:
            self.greenlets.append(gevent.spawn(self.poll_all_received_events))
            return True

    def poll_all_received_events(self):
        """ This will be triggered once for each `echo_node_alarm_callback`.
        It polls all channels for `EventPaymentReceivedSuccess` events,
        adds all new events to the `self.received_transfers` queue and
        respawns `self.echo_node_worker`, if it died. """

        locked = False
        try:
            with Timeout(10):
                locked = self.lock.acquire(blocking=False)
                if not locked:
                    return
                else:
                    received_transfers = self.api.get_raiden_events_payment_history(
                        token_address=self.token_address,
                        offset=self.last_poll_offset,
                    )

                    # received transfer is a tuple of (block_number, event)
                    received_transfers = [
                        event
                        for event in received_transfers
                        if type(event) == EventPaymentReceivedSuccess
                    ]

                    for event in received_transfers:
                        transfer = copy.deepcopy(event)
                        self.received_transfers.put(transfer)

                    # advance last_poll_offset after events are enqueued
                    # (timeout safe)
                    if received_transfers:
                        self.last_poll_offset += len(received_transfers)

                    if not self.echo_worker_greenlet.started:
                        log.debug(
                            'restarting echo_worker_greenlet',
                            dead=self.echo_worker_greenlet.dead,
                            successful=self.echo_worker_greenlet.successful(),
                            exception=self.echo_worker_greenlet.exception,
                        )
                        self.echo_worker_greenlet = gevent.spawn(self.echo_worker)
        except Timeout:
            log.info('timeout while polling for events')
        finally:
            if locked:
                self.lock.release()

    def echo_worker(self):
        """ The `echo_worker` works through the `self.received_transfers` queue and spawns
        `self.on_transfer` greenlets for all not-yet-seen transfers. """
        log.debug('echo worker', qsize=self.received_transfers.qsize())
        while self.stop_signal is None:
            if self.received_transfers.qsize() > 0:
                transfer = self.received_transfers.get()
                if transfer in self.seen_transfers:
                    log.debug(
                        'duplicate transfer ignored',
                        initiator=pex(transfer.initiator),
                        amount=transfer.amount,
                        identifier=transfer.identifier,
                    )
                else:
                    self.seen_transfers.append(transfer)
                    self.greenlets.append(gevent.spawn(self.on_transfer, transfer))
            else:
                gevent.sleep(.5)

    def on_transfer(self, transfer):
        """ This handles the echo logic, as described in
        https://github.com/raiden-network/raiden/issues/651:

            - for transfers with an amount that satisfies `amount % 3 == 0`, it sends a transfer
            with an amount of `amount - 1` back to the initiator
            - for transfers with a "lucky number" amount `amount == 7` it does not send anything
            back immediately -- after having received "lucky number transfers" from 7 different
            addresses it sends a transfer with `amount = 49` to one randomly chosen one
            (from the 7 lucky addresses)
            - consecutive entries to the lucky lottery will receive the current pool size as the
            `echo_amount`
            - for all other transfers it sends a transfer with the same `amount` back to the
            initiator """
        echo_amount = 0
        if transfer.amount % 3 == 0:
            log.info(
                'ECHO amount - 1',
                initiator=pex(transfer.initiator),
                amount=transfer.amount,
                identifier=transfer.identifier,
            )
            echo_amount = transfer.amount - 1

        elif transfer.amount == 7:
            log.info(
                'ECHO lucky number draw',
                initiator=pex(transfer.initiator),
                amount=transfer.amount,
                identifier=transfer.identifier,
                poolsize=self.lottery_pool.qsize(),
            )

            # obtain a local copy of the pool
            pool = self.lottery_pool.copy()
            tickets = [pool.get() for _ in range(pool.qsize())]
            assert pool.empty()
            del pool

            if any(ticket.initiator == transfer.initiator for ticket in tickets):
                assert transfer not in tickets
                log.debug(
                    'duplicate lottery entry',
                    initiator=pex(transfer.initiator),
                    identifier=transfer.identifier,
                    poolsize=len(tickets),
                )
                # signal the poolsize to the participant
                echo_amount = len(tickets)

            # payout
            elif len(tickets) == 6:
                log.info('payout!')
                # reset the pool
                assert self.lottery_pool.qsize() == 6
                self.lottery_pool = Queue()
                # add new participant
                tickets.append(transfer)
                # choose the winner
                transfer = random.choice(tickets)
                echo_amount = 49
            else:
                self.lottery_pool.put(transfer)

        else:
            log.debug(
                'echo transfer received',
                initiator=pex(transfer.initiator),
                amount=transfer.amount,
                identifier=transfer.identifier,
            )
            echo_amount = transfer.amount

        if echo_amount:
            log.debug(
                'sending echo transfer',
                target=pex(transfer.initiator),
                amount=echo_amount,
                orig_identifier=transfer.identifier,
                echo_identifier=transfer.identifier + echo_amount,
                token_address=pex(self.token_address),
                num_handled_transfers=self.num_handled_transfers + 1,
            )

            self.api.transfer(
                self.api.raiden.default_registry.address,
                self.token_address,
                echo_amount,
                transfer.initiator,
                identifier=transfer.identifier + echo_amount,
            )
        self.num_handled_transfers += 1

    def stop(self):
        self.stop_signal = True
        self.greenlets.append(self.echo_worker_greenlet)
        gevent.joinall(self.greenlets, raise_error=True)
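To make the echo rules concrete: a transfer of 9 is answered with 8 (`9 % 3 == 0`), a transfer of 5 is echoed back as 5, and a transfer of 7 only buys a lottery ticket; once 7 distinct lucky senders have entered, one of them is drawn at random and paid 49, while repeat entrants are answered with the current pool size.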
Example #52
0
from gevent import monkey
monkey.patch_all()
import gevent
from gevent import pool
import json
import logging
import fit
import socket
import numpy as np
import simtk.unit as unit
from jinja2 import Template
import matplotlib.pyplot as plt
import evb
from gevent.lock import BoundedSemaphore
sem = BoundedSemaphore(200)


class ParamFail(BaseException):
    pass


class InitFail(BaseException):
    pass


class EVBServer(object):

    def __init__(self, port):
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._port = port
        self.s.bind(("127.0.0.1", port))
Example #53
0
class LibMLK(object):
    def __init__(self, serverIP, Plist):
        self.serverIP = serverIP
        self.baseUrl = "http://%s/" % serverIP
        self.crypt = Account(Plist.userID, Plist.devideToken)
        self.mlkLock = BoundedSemaphore(200)
        self.IID = Plist.iid
        self.VID = Plist.vid

    @property
    def headers(self):
        return {
            "VID": self.VID,
            "PID": "-",
            "IID": self.IID,
            "DEVICE_INFO": "iPad2,1:::iPhone OS 8.1.2",
            "Device": "ios",
            "AppVersion": 28,
            "APP_ID_3": self.crypt.deviceToken,
            "APP_ID_2": self.crypt.hashedUserID,
            "APP_ID_1": self.crypt.cryptedUserID,
            "Encrypted": True,
            "User-Agent": "toto/1.1.25.2 CFNetwork/711.1.16 Darwin/14.0.0",
            "Accept-Language": "zh-cn",
            "Accept": "application/json"
        }

    def _post(self, url, params={}, data={}):
        data["_method"] = "GET"
        data = urllib.urlencode(data)
        data = self.crypt.encrypt(data)
        url = urlparse.urljoin(self.baseUrl, url)
        if len(params) > 0:
            e = self.crypt.encrypt(urllib.urlencode(params)).encode("base64").replace("\n", "")
            url = "%s?e=%s" % (url, e)
        ret = None
        try:
            self.mlkLock.acquire()
            ret = requests.post(url, data=data, headers=self.headers, proxies=proxies)
        except:
            traceback.print_exc()
        finally:
            self.mlkLock.release()
        if ret is None:
            raise BaseException()
        if "encrypted" in ret.headers and ret.headers["encrypted"] == "true":
            rtn = self.crypt.decrypt(ret.content)
        else:
            rtn = ret.content
        return rtn

    def get(self, url, params={}, data={}):
        url = urlparse.urlparse(url)
        path = url.path
        query = dict(urlparse.parse_qsl(url.query))
        query.update(params)
        return self._post(path, params=query, data=data)

    def setUsername(self, name):
        ret = self._post("users/update", data={"user_name": name})
        self.user_name = name
        return json.loads(ret)

    def finishTutorial(self):
        ret = self._post("users/update",
                         data={"user_name": self.user_name, "tutorial_finish": True})
        return json.loads(ret)

    def getMessages(self, page_type="Home"):
        params = {
            "last_read_at": int(time.time()),
            "page_type": page_type
        }
        ret = self._post("users/messages", params=params)
        return json.loads(ret)

    def getStages(self):
        ret = self._post("stages")
        return json.loads(ret)

    def getAreas(self, stage_id):
        ret = self._post("areas", params={"stage_id": stage_id})
        return json.loads(ret)

    def getMonsters(self):
        ret = self._post("user_monsters")
        return json.loads(ret)

    def getDecks(self):
        ret = self._post("user_decks")
        return json.loads(ret)

    def getUnits(self):
        ret = self._post("user_units")
        return json.loads(ret)

    def receiveLoginBonus(self):
        ret = self._post("users/receive_login_bonus")
        return json.loads(ret)

    def getLoginRewardList(self):
        ret = self._post("accu_login_activity")
        return json.loads(ret)

    def receiveLoginReward(self, day):
        params = {"day": day}
        ret = self._post("accu_login_activity/fetch_rewards", params=params)
        return json.loads(ret)

    def getRewardList(self):
        ret = self._post("user_presents")
        return json.loads(ret)

    def reward(self, uuid):
        params = {"uuid": uuid}
        ret = self._post("user_presents/receive", params)
        return json.loads(ret)

    def rewardAll(self):
        ret = self._post("user_presents/receive")
        return json.loads(ret)

    def getUserData(self):
        ret = self._post("users/preset_data.json?tutorial_session=true")
        return json.loads(ret)

    def gacha(self, gacha_id, num):
        params = {"id": gacha_id, "count": num}
        ret = self._post("gachas/execute", params=params)
        return json.loads(ret)

    def getUnitList(self):
        ret = self._post("user_units")
        return json.loads(ret)

    def quest(self, quest_id, party_id="001", difficulty_id="normal"):
        params = {
            "base": "Quest/Quest",
            "difficulty_id": difficulty_id,
            "id": quest_id,
            "mode": "quest",
            "name": "Quest",
            "party_id": party_id,
            "tipsLoading": "true",
        }
        ret = self._post("quests/execute/%s.json" % quest_id, params=params)
        ret = json.loads(ret)
        result_url = ret["result_url"]
        if "ap_use_url" in ret:
            ap_use_url = ret["ap_use_url"]
            self.get(ap_use_url)
        time.sleep(30)
        ret = self.get(result_url, params={"time": "27.1234"})
        return ret
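Every helper funnels through `_post`, which encrypts the payload and throttles concurrent requests via `mlkLock`. A hedged usage sketch; `FakePlist` below is a hypothetical stand-in for the real plist object:

# Hedged usage sketch; FakePlist is a hypothetical stand-in for the real
# plist object carrying the account identifiers.
class FakePlist(object):
    userID = 'user-id'
    devideToken = 'device-token'  # spelling matches the original attribute
    iid = 'example-iid'
    vid = 'example-vid'

mlk = LibMLK('example.com', FakePlist())
print mlk.getStages()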
Example #54
0
class Auction(ESCODBServiceMixin, RequestIDServiceMixin, EscoAuditServiceMixin,
              ESCOBiddersServiceMixin, DateTimeServiceMixin, EscoStagesMixin,
              EscoPostAuctionMixin):
    """ESCO Auction Worker Class"""
    def __init__(self,
                 tender_id,
                 worker_defaults,
                 auction_data={},
                 lot_id=None):
        self.generate_request_id()
        self.tender_id = tender_id
        self.lot_id = lot_id
        if lot_id:
            self.auction_doc_id = tender_id + "_" + lot_id
        else:
            self.auction_doc_id = tender_id
        self.tender_url = urljoin(
            worker_defaults["resource_api_server"],
            '/api/{}/{}/{}'.format(worker_defaults["resource_api_version"],
                                   worker_defaults["resource_name"],
                                   self.tender_id))
        if auction_data:
            self.debug = True
            LOGGER.setLevel(logging.DEBUG)
            self._auction_data = auction_data
        else:
            self.debug = False
        self._end_auction_event = Event()
        self.bids_actions = BoundedSemaphore()
        self.session = RequestsSession()
        self.worker_defaults = worker_defaults
        if self.worker_defaults.get('with_document_service', False):
            self.session_ds = RequestsSession()
        self._bids_data = {}
        self.db = Database(str(self.worker_defaults["COUCH_DATABASE"]),
                           session=Session(retry_delays=range(10)))
        self.audit = {}
        self.retries = 10
        self.bidders_count = 0
        self.bidders_data = []
        self.bidders_features = {}
        self.bidders_coeficient = {}
        self.features = None
        self.mapping = {}
        self.rounds_stages = []

    def schedule_auction(self):
        self.generate_request_id()
        self.get_auction_document()
        if self.debug:
            LOGGER.info("Get _auction_data from auction_document")
            self._auction_data = self.auction_document.get(
                'test_auction_data', {})
        self.get_auction_info()
        self.prepare_audit()
        self.prepare_auction_stages()
        self.save_auction_document()
        round_number = 0
        SCHEDULER.add_job(self.start_auction,
                          'date',
                          kwargs={"switch_to_round": round_number},
                          run_date=self.convert_datetime(
                              self.auction_document['stages'][0]['start']),
                          name="Start of Auction",
                          id="Start of Auction")
        round_number += 1

        SCHEDULER.add_job(self.end_first_pause,
                          'date',
                          kwargs={"switch_to_round": round_number},
                          run_date=self.convert_datetime(
                              self.auction_document['stages'][1]['start']),
                          name="End of Pause Stage: [0 -> 1]",
                          id="End of Pause Stage: [0 -> 1]")
        round_number += 1
        for index in xrange(2, len(self.auction_document['stages'])):
            if self.auction_document['stages'][index - 1]['type'] == 'bids':
                SCHEDULER.add_job(
                    self.end_bids_stage,
                    'date',
                    kwargs={"switch_to_round": round_number},
                    run_date=self.convert_datetime(
                        self.auction_document['stages'][index]['start']),
                    name="End of Bids Stage: [{} -> {}]".format(
                        index - 1, index),
                    id="End of Bids Stage: [{} -> {}]".format(
                        index - 1, index))
            elif self.auction_document['stages'][index - 1]['type'] == 'pause':
                SCHEDULER.add_job(
                    self.next_stage,
                    'date',
                    kwargs={"switch_to_round": round_number},
                    run_date=self.convert_datetime(
                        self.auction_document['stages'][index]['start']),
                    name="End of Pause Stage: [{} -> {}]".format(
                        index - 1, index),
                    id="End of Pause Stage: [{} -> {}]".format(
                        index - 1, index))
            round_number += 1
        LOGGER.info("Prepare server ...",
                    extra={
                        "JOURNAL_REQUEST_ID": self.request_id,
                        "MESSAGE_ID": AUCTION_WORKER_SERVICE_PREPARE_SERVER
                    })
        self.server = run_server(
            self,
            self.convert_datetime(
                self.auction_document['stages'][-2]['start']),
            LOGGER,
            form_handler=form_handler,
            bids_form=BidsForm,
            cookie_path="esco-tenders")

    def wait_to_end(self):
        self._end_auction_event.wait()
        LOGGER.info("Stop auction worker",
                    extra={
                        "JOURNAL_REQUEST_ID": self.request_id,
                        "MESSAGE_ID":
                        AUCTION_WORKER_SERVICE_STOP_AUCTION_WORKER
                    })

    def start_auction(self, switch_to_round=None):
        self.generate_request_id()
        self.audit['timeline']['auction_start']['time'] = datetime.now(
            tzlocal()).isoformat()
        LOGGER.info('---------------- Start auction ----------------',
                    extra={
                        "JOURNAL_REQUEST_ID": self.request_id,
                        "MESSAGE_ID": AUCTION_WORKER_SERVICE_START_AUCTION
                    })
        self.get_auction_info()
        self.get_auction_document()
        # Initial bids
        bids = deepcopy(self.bidders_data)
        self.auction_document["initial_bids"] = []
        bids_info = sorting_start_bids_by_amount(bids, features=self.features)
        for index, bid in enumerate(bids_info):
            amount = str(Fraction(bid["value"]["amountPerformance"]))
            audit_info = {
                "bidder":
                bid["id"],
                "date":
                bid["date"],
                "amount":
                amount,
                "contractDuration": {
                    "years": bid["value"]["contractDuration"]["years"],
                    "days": bid["value"]["contractDuration"]["days"],
                },
                "yearlyPaymentsPercentage":
                bid["value"]["yearlyPaymentsPercentage"]
            }
            if self.features:
                amount_features = cooking(amount,
                                          self.features,
                                          self.bidders_features[bid["id"]],
                                          reverse=True)
                coeficient = self.bidders_coeficient[bid["id"]]
                audit_info["amount_features"] = str(amount_features)
                audit_info["coeficient"] = str(coeficient)
            else:
                coeficient = None
                amount_features = None

            self.audit['timeline']['auction_start']['initial_bids'].append(
                audit_info)
            self.auction_document["initial_bids"].append(
                prepare_initial_bid_stage(
                    time=bid["date"] if "date" in bid else self.startDate,
                    bidder_id=bid["id"],
                    bidder_name=self.mapping[bid["id"]],
                    amount=amount,
                    coeficient=coeficient,
                    amount_features=amount_features,
                    contractDurationDays=bid["value"]["contractDuration"]
                    ["days"],
                    contractDurationYears=bid["value"]["contractDuration"]
                    ["years"],
                    yearlyPaymentsPercentage=bid["value"]
                    ["yearlyPaymentsPercentage"],
                    annualCostsReduction=bid["value"]["annualCostsReduction"]))
        if isinstance(switch_to_round, int):
            self.auction_document["current_stage"] = switch_to_round
        else:
            self.auction_document["current_stage"] = 0

        all_bids = deepcopy(self.auction_document["initial_bids"])
        minimal_bids = []
        for bid_info in self.bidders_data:
            minimal_bids.append(
                get_latest_bid_for_bidder(all_bids, str(bid_info['id'])))

        minimal_bids = self.filter_bids_keys(
            sorting_by_amount(minimal_bids, reverse=False))
        self.update_future_bidding_orders(minimal_bids)
        self.save_auction_document()

    def end_first_pause(self, switch_to_round=None):
        self.generate_request_id()
        LOGGER.info('---------------- End First Pause ----------------',
                    extra={
                        "JOURNAL_REQUEST_ID": self.request_id,
                        "MESSAGE_ID": AUCTION_WORKER_SERVICE_END_FIRST_PAUSE
                    })
        self.bids_actions.acquire()
        self.get_auction_document()

        if isinstance(switch_to_round, int):
            self.auction_document["current_stage"] = switch_to_round
        else:
            self.auction_document["current_stage"] += 1

        self.save_auction_document()
        self.bids_actions.release()

    def end_auction(self):
        LOGGER.info('---------------- End auction ----------------',
                    extra={
                        "JOURNAL_REQUEST_ID": self.request_id,
                        "MESSAGE_ID": AUCTION_WORKER_SERVICE_END_AUCTION
                    })
        LOGGER.debug("Stop server",
                     extra={"JOURNAL_REQUEST_ID": self.request_id})
        if self.server:
            self.server.stop()
        LOGGER.debug("Clear mapping",
                     extra={"JOURNAL_REQUEST_ID": self.request_id})
        delete_mapping(self.worker_defaults, self.auction_doc_id)

        start_stage, end_stage = self.get_round_stages(ROUNDS)
        minimal_bids = deepcopy(
            self.auction_document["stages"][start_stage:end_stage])
        minimal_bids = self.filter_bids_keys(
            sorting_by_amount(minimal_bids, reverse=False))
        self.auction_document["results"] = []
        for item in minimal_bids:
            self.auction_document["results"].append(
                prepare_results_stage(**item))
        self.auction_document["current_stage"] = (
            len(self.auction_document["stages"]) - 1)
        LOGGER.debug(' '.join(
            ('Document in end_stage: \n',
             yaml_dump(json.loads(dumps(self.auction_document))))),
                     extra={"JOURNAL_REQUEST_ID": self.request_id})
        self.approve_audit_info_on_announcement()
        LOGGER.info('Audit data: \n {}'.format(
            yaml_dump(json.loads(dumps(self.audit)))),
                    extra={"JOURNAL_REQUEST_ID": self.request_id})
        if self.debug:
            LOGGER.debug('Debug: put_auction_data disabled !!!',
                         extra={"JOURNAL_REQUEST_ID": self.request_id})
            sleep(10)
            self.save_auction_document()
        else:
            if self.put_auction_data():
                self.save_auction_document()
        LOGGER.debug("Fire 'stop auction worker' event",
                     extra={"JOURNAL_REQUEST_ID": self.request_id})

    def cancel_auction(self):
        self.generate_request_id()
        if self.get_auction_document():
            LOGGER.info(
                "Auction {} canceled".format(self.auction_doc_id),
                extra={'MESSAGE_ID': AUCTION_WORKER_SERVICE_AUCTION_CANCELED})
            self.auction_document["current_stage"] = -100
            self.auction_document["endDate"] = datetime.now(
                tzlocal()).isoformat()
            LOGGER.info("Change auction {} status to 'canceled'".format(
                self.auction_doc_id),
                        extra={
                            'MESSAGE_ID':
                            AUCTION_WORKER_SERVICE_AUCTION_STATUS_CANCELED
                        })
            self.save_auction_document()
        else:
            LOGGER.info(
                "Auction {} not found".format(self.auction_doc_id),
                extra={'MESSAGE_ID': AUCTION_WORKER_SERVICE_AUCTION_NOT_FOUND})

    def reschedule_auction(self):
        self.generate_request_id()
        if self.get_auction_document():
            LOGGER.info(
                "Auction {} has not started and will be rescheduled".format(
                    self.auction_doc_id),
                extra={
                    'MESSAGE_ID': AUCTION_WORKER_SERVICE_AUCTION_RESCHEDULE
                })
            self.auction_document["current_stage"] = -101
            self.save_auction_document()
        else:
            LOGGER.info(
                "Auction {} not found".format(self.auction_doc_id),
                extra={'MESSAGE_ID': AUCTION_WORKER_SERVICE_AUCTION_NOT_FOUND})
Example #55
class Master(object):
    """ workers: a dictionary to store the information about workers
        format: {ip_port: [status, remote_call, ping_spawned]}

        jobs_tracker: a dictionary to store the information about jobs
        format:
        { task_name:
              { "mappers":
                      { mapper_id: [remote_call, mapper_ip_port, split_info, finished]},
                "reducers":
                      { reducer_id: [remote_call, reducer_ip_port, finished]},
                "num_mapper":
                      num_mapper,
                "num_reducer":
                      num_reducer,
                "task_file":
                      [filename, codes],
                "split_infos":
                      split_infos,
                "file_info":
                      file_info,
                "output_file:
                      output_file
                "done":
                      True/False
                 }
        }

        mapper_queue: free mapper queue
        format: [(ip_port, remote_call)]

        reducer_queue: free reducer queue
        format: [(ip_port, remote_call)]
    """

    def __init__(self, port, data_dir):
        gevent.spawn(self.controller)
        self.state = STATE_READY
        self.workers = {}
        self.jobs_tracker = {}
        self.port = port
        self.data_dir = data_dir
        self.mapper_queue = Queue()
        self.reducer_queue = Queue()
        self.jobs_tracker_lock = BoundedSemaphore(1)
        self.workers_lock = BoundedSemaphore(1)

    def controller(self):
        while True:
            print '[Master:%s] ' % self.state
            # down_workers = []
            # self.workers_lock.acquire()
            local_workers = dict(self.workers)
            for w in local_workers:
                print '(%s, %s)' % (w, local_workers[w][0])
                # if a ping coroutine has not been spawned for this worker yet, spawn one
                if not local_workers[w][2]:
                    local_workers[w][2] = True
                    gevent.spawn(self.each_ping, w, local_workers[w][1])
            gevent.sleep(1)

    def each_ping(self, ip_port, c):
        alive = True
        while alive:
            try:
                c.ping()
            except:
                print "**** Error: Worker %s is down" % ip_port
                self.workers.pop(ip_port)
                print "********** Reassign jobs in %s" % ip_port
                gevent.spawn(self.reassign_job, [ip_port])
                alive = False
            gevent.sleep(1)

    # Failure tolerance
    # reassign the failure worker's job to another worker
    # remote = (ip_port, c)
    # def assign_mapper(self, split_info, task_file,
    #                  mapper_id, num_mapper, num_reducer, task_name, file_info):
    # def assign_reducer(self, task_file, num_mapper, reducer_id, output_file, task_name):
    def reassign_job(self, down_workers):
        self.jobs_tracker_lock.acquire()
        reassign_list = []
        for down_worker in down_workers:
            for task_name in self.jobs_tracker:
                # NOTE: the done-check below is left disabled, so tasks are
                # reassigned even after the job has finished
                # if self.jobs_tracker[task_name]["done"] == False:
                job_dict = self.jobs_tracker[task_name]
                for mapper_id in job_dict["mappers"]:
                    if job_dict["mappers"][mapper_id][1] == down_worker:
                        print "********** down %s did %s mapper %d" % (down_worker, task_name, mapper_id)
                        job_dict["mappers"][mapper_id][3] = False
                        reassign_list.append([task_name, mapper_id, 0])
                for reducer_id in job_dict["reducers"]:
                    if job_dict["reducers"][reducer_id][1] == down_worker:
                        print "********** down %s did %s reducer %d" % (down_worker, task_name, reducer_id)
                        job_dict["reducers"][reducer_id][2] = False
                        reassign_list.append([task_name, reducer_id, 1])
        self.jobs_tracker_lock.release()
        for reassign in reassign_list:
            task_name = reassign[0]
            # Reassign mapper
            if reassign[2] == 0:
                mapper_id = reassign[1]
                print "********** Reassign %s mapper %d" % (task_name, mapper_id)
                gevent.spawn(self.reassign_mapper, mapper_id, task_name)
            # Reassign reducer
            elif reassign[2] == 1:
                reducer_id = reassign[1]
                self.jobs_tracker[task_name]["reducers"].pop(reducer_id)
                print "********** Reassign %s reducer %d" % (task_name, reducer_id)
                gevent.spawn(self.reassign_reducer, reducer_id, task_name)

    def register(self, ip_port):
        gevent.spawn(self.register_async, ip_port)
        return self.data_dir

    def register_async(self, ip_port):
        print '[Master:%s] ' % self.state,
        print 'Registered worker (%s)' % ip_port
        c = zerorpc.Client()
        c.connect("tcp://" + ip_port)
        # self.workers_lock.acquire()
        self.workers[ip_port] = [STATE_READY, c, False]
        # self.workers_lock.release()
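        # a newly registered worker can serve as either a mapper or a reducer,
        # so it joins both free queues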
        self.mapper_queue.put_nowait((ip_port, c))
        self.reducer_queue.put_nowait((ip_port, c))
        c.ping()

    # Master gets job from client, split input data according to the split size.
    # Then it assigns jobs to mappers and reducers.
    def do_job(self, task_file, split_size, num_reducer, input_file, output_file):
        # unique name for each task;
        # used to generate intermediate filenames
        task_name = time.strftime("%Y%m%d%H%M%S", time.localtime())
        gevent.spawn(self.do_job_async, task_file, split_size, num_reducer, input_file, output_file, task_name)
        return task_name

    # create a new coroutine to handle client's job
    def do_job_async(self, task_file, split_size, num_reducer, input_file, output_file, task_name):
        print "Task %s get" % task_file[0]
        split_size = int(split_size)
        num_reducer = int(num_reducer)
        split_infos, file_info = self.split_file(split_size, input_file)

        num_mapper = len(split_infos)

        # initialize jobs_tracker for task_name
        self.jobs_tracker[task_name] = {"mappers": {}, "reducers": {},
                                        "num_mapper": num_mapper, "num_reducer": num_reducer,
                                        "task_file": task_file, "split_infos": split_infos,
                                        "file_info": file_info, "output_file": output_file,
                                        "split_size": split_size, "done": False}

        print "Task " + task_name + " : assigning %d mappers, %d reducers" % (num_mapper, num_reducer)
        # Map task
        gevent.spawn(self.assign_mappers, task_name)
        # Reduce task
        gevent.spawn(self.assign_reducers, task_name)

    # Client gets report from master
    def client_query(self, task_name):
        # print "client_query for %s" % task_name
        job_tracker = self.jobs_tracker[task_name]
        mappers = job_tracker["mappers"]
        reducers = job_tracker["reducers"]
        needed_mapper = job_tracker["num_mapper"]
        needed_reducer = job_tracker["num_reducer"]

        finished_mapper_num = 0
        finished_reducer_num = 0
        for mapper in mappers:
            if mappers[mapper][3]:
                finished_mapper_num += 1

        for reducer in reducers:
            if reducers[reducer][2]:
                finished_reducer_num += 1

        result_dict = {"finished_mapper": finished_mapper_num, "assigned_mapper": len(mappers),
                       "needed_mapper": needed_mapper, "finished_reducer": finished_reducer_num,
                       "assigned_reducer": len(reducers), "needed_reducer": needed_reducer}
        if finished_reducer_num == needed_reducer:
            print 'Task %s finished' % task_name
            self.jobs_tracker[task_name]["done"] = True
        return result_dict

    # Split the input file and record the layout in a dict keyed by split id;
    # each entry describes one mapper's input.
    def split_file(self, split_size, input_file):
        """ One split only has one file.
            split_info = {0:[(file_name0, start, end)], 1:[(file_name1, start, end)]}
            One split may have more than one file.
            split_info =  {0:[(file_name0, start, end), (file_name1, start, end)],
                           1:[(file_name1, start, end)]}

            file_info = [(file0_path, file0_size), (file1_path, file1_size)]
        """
        split_info = {}
        file_info = []
        # Single file
        if not input_file.endswith('_'):
            file_path = self.data_dir + '/' + input_file
            file_size = os.path.getsize(file_path)
            split_num = int(math.ceil(float(file_size) / split_size))
            # Split file
            for i in range(split_num):
                split_info[i] = []
                start = i * split_size
                if (start + split_size) > file_size:
                    end = file_size
                else:
                    end = start + split_size
                split_info[i].append((file_path, start, end))
            file_info = [(file_path, file_size)]
        # Multiple files
        else:
            # Collect all files matching the base name
            # and compute the total size.
            # file_info = [(file_path1, file_size1), (file_path2, file_size2), ...]
            total_size = 0
            for root, dir_names, file_names in os.walk(self.data_dir):
                for file_name in fnmatch.filter(file_names, input_file + '*'):
                    dir_file = root + '/' + file_name
                    one_file_size = os.path.getsize(dir_file)
                    total_size += one_file_size
                    file_info.append((dir_file, one_file_size))

            # Get worker num(split num)
            split_num = int(math.ceil(float(total_size) / split_size))

            # Split file
            start = 0
            used_file = 0
            for i in range(split_num):
                remaining_size = split_size
                split_info[i] = []
                while remaining_size > 0:
                    current_file_name = file_info[used_file][0]
                    current_file_size = file_info[used_file][1]
                    # remaining split capacity fits within what's left of the current file
                    if remaining_size <= (current_file_size - start):
                        split_info[i].append((current_file_name, start, start + remaining_size))
                        if remaining_size == current_file_size - start:
                            start = 0
                            used_file += 1
                        else:
                            start = start + remaining_size
                        remaining_size = 0
                    # remaining split capacity exceeds what's left of the current file
                    else:
                        if used_file < len(file_info) - 1:
                            split_info[i].append((current_file_name, start, current_file_size))
                            remaining_size -= current_file_size - start
                            start = 0
                            used_file += 1

                        # This is the last file, then finish split
                        else:
                            split_info[i].append((current_file_name, start, current_file_size))
                            remaining_size = 0
        return split_info, file_info

    # Assign map jobs to free mappers
    def assign_mappers(self, task_name):
        num_mapper = self.jobs_tracker[task_name]["num_mapper"]
        for mapper_id in range(num_mapper):
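            # blocks here until a free mapper is available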
            ip_port, c = self.mapper_queue.get()
            # Skip workers that went down before the task could be assigned.
            while ip_port not in self.workers:
                ip_port, c = self.mapper_queue.get()
            print "Task " + task_name + " : mappers id %d assigned to %s" % (mapper_id, ip_port)
            gevent.spawn(self.assign_mapper, ip_port, c, mapper_id, task_name)

    # Assign a map job to a single free mapper.
    # After the mapper finishes its map job, it returns to the free mapper queue
    # and all reducers are notified to fetch the intermediate data.
    def assign_mapper(self, ip_port, c, mapper_id, task_name):
        job_dict = self.jobs_tracker[task_name]
        split_info = job_dict["split_infos"][mapper_id]
        task_file = job_dict["task_file"]
        num_mapper = job_dict["num_mapper"]
        num_reducer = job_dict["num_reducer"]
        file_info = job_dict["file_info"]
        split_size = job_dict["split_size"]

        self.jobs_tracker_lock.acquire()
        self.jobs_tracker[task_name]["mappers"][mapper_id] = [c, ip_port, split_info, False]
        self.jobs_tracker_lock.release()
        try:
            c.do_map(split_info, task_file,
                     mapper_id, num_mapper, num_reducer, task_name, file_info, split_size)
        except:
            print "**** Error: Can't assign task %s map task to mapper %d %s" \
                  % (task_name, mapper_id, ip_port)

    def mapper_finish(self, success, task_name, mapper_id, ip_port):
        if success:
            """ jobs_tracker =
             { task_name:
              { "mappers":
                      { mapper_id: [remote_call, mapper_ip_port, split_info, finished]}
            """
            self.jobs_tracker[task_name]["mappers"][mapper_id][3] = True
            print "Task %s : mapper %d finished" % (task_name, mapper_id)

            self.jobs_tracker_lock.acquire()
            reducers_dict = self.jobs_tracker[task_name]["reducers"]
            for reducer_id in reducers_dict:
                reducer_c = reducers_dict[reducer_id][0]
                print "mapper %d is notifying reducer %d" % (mapper_id, reducer_id)
                try:
                    reducer_c.notify_mapper_finish(mapper_id, ip_port)
                except:
                    print "**** Error: Task %s mapper %d can't notify reducer %d %s" \
                          % (task_name, mapper_id, reducer_id, reducers_dict[reducer_id][1])
                print "Mapper %d is notifying reducer %d done" % (mapper_id, reducer_id)
            self.jobs_tracker_lock.release()
        else:
            print "Task %s : mapper %d failed" % (task_name, mapper_id)
        if ip_port in self.workers:
            print "%s returns to free mapper queue." % ip_port
            self.mapper_queue.put_nowait((ip_port, self.workers[ip_port][1]))

    # Assign reduce jobs to free reducers
    def assign_reducers(self, task_name):
        num_reducer = self.jobs_tracker[task_name]["num_reducer"]
        for i in range(num_reducer):
            ip_port, c = self.reducer_queue.get()
            while ip_port not in self.workers:
                ip_port, c = self.reducer_queue.get()
            print "Task " + task_name + " : reducer id %d assigned to %s" % (i, ip_port)
            gevent.spawn(self.assign_reducer, ip_port, c, i, task_name)

    # Assign one reduce job to one reducer
    def assign_reducer(self, ip_port, c, reducer_id, task_name):
        task_file = self.jobs_tracker[task_name]["task_file"]
        num_mapper = self.jobs_tracker[task_name]["num_mapper"]
        output_file = self.jobs_tracker[task_name]["output_file"]

        self.jobs_tracker_lock.acquire()
        self.jobs_tracker[task_name]["reducers"][reducer_id] = [c, ip_port, False]
        for mapper_id in self.jobs_tracker[task_name]["mappers"]:
            if self.jobs_tracker[task_name]["mappers"][mapper_id][3]:
                c.notify_mapper_finish(mapper_id, self.jobs_tracker[task_name]["mappers"][mapper_id][1])
        self.jobs_tracker_lock.release()
        try:
            c.do_reduce(task_file, num_mapper, reducer_id, output_file, task_name)
        except:
            print "**** Error: Can't assign task %s reduce task to reducer %d %s" \
                  % (task_name, reducer_id, ip_port)

    def reducer_finish(self, success, task_name, reducer_id, ip_port):
        if success:
            """ jobs_tracker =
                { task_name:
                  { "reducers":
                          { reducer_id: [remote_call, reducer_ip_port, finished]}
            """
            self.jobs_tracker[task_name]["reducers"][reducer_id][2] = True
            print "Task %s : reducer %d finished" % (task_name, reducer_id)
        else:
            print "Task %s : reducer %d failed" % (task_name, reducer_id)
        if ip_port in self.workers:
            self.reducer_queue.put_nowait((ip_port, self.workers[ip_port][1]))
            print "%s returns to free reducer queue." % ip_port

    # Reassign one map job to one mapper
    def reassign_mapper(self, mapper_id, task_name):
        ip_port, c = self.mapper_queue.get()
        while ip_port not in self.workers:
            ip_port, c = self.mapper_queue.get()
        print "Reassign Task %s : mappers id %d to %s" % (task_name, mapper_id, ip_port)

        self.assign_mapper(ip_port, c, mapper_id, task_name)

    # Reassign one reduce job to one reducer
    def reassign_reducer(self, reducer_id, task_name):
        ip_port, c = self.reducer_queue.get()
        while ip_port not in self.workers:
            ip_port, c = self.reducer_queue.get()
        print "Reassign Task %s : reducer id %d to %s" % (task_name, reducer_id, ip_port)
        self.assign_reducer(ip_port, c, reducer_id, task_name)

    # Collector gets the result from the master
    def get_result(self, filename_base):
        print "Receive collect command: collect " + filename_base
        keys = self.jobs_tracker.keys()
        keys.sort(reverse=True)
        for task_name in keys:
            if self.jobs_tracker[task_name]["output_file"] == filename_base:

                job_dict = self.jobs_tracker[task_name]
                for mapper_id in job_dict["mappers"]:
                    try:
                        job_dict["mappers"][mapper_id][0]\
                            .remove_intermediate_file(task_name, mapper_id, job_dict["num_reducer"])
                    except:
                        print "**** Error: task %s: mapper %d lost connection" % (task_name, mapper_id)
                print "collect " + filename_base + " from " + task_name
                result = ""
                for reducer_id in job_dict["reducers"]:
                    result += job_dict["reducers"][reducer_id][0]\
                        .fetch_result_file(filename_base, reducer_id)
                self.jobs_tracker.pop(task_name, None)
                return True, result
        print "Error: Can't find a job with output: " + filename_base
        return False, ''
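
Below is a minimal, hypothetical wiring sketch for Example #55, not part of the original code: it assumes the module-level names used above (gevent, zerorpc, STATE_READY, Queue, BoundedSemaphore) are already imported, and the ports, paths, and file names are made up. The remote calls mirror the methods Master exposes.

# Hypothetical wiring sketch: serve Master over zerorpc and submit one job.
import gevent
import zerorpc

def run_master(port='4242', data_dir='/tmp/mr-data'):
    # Expose Master's methods (register, do_job, client_query, ...) over RPC.
    master = Master(port, data_dir)
    server = zerorpc.Server(master)
    server.bind('tcp://0.0.0.0:' + port)
    server.run()

def submit_job(master_addr='tcp://127.0.0.1:4242'):
    client = zerorpc.Client()
    client.connect(master_addr)
    # task_file is [filename, source]; 64-byte splits, 2 reducers (all made up).
    task_name = client.do_job(['wordcount.py', '<source>'], '64', '2',
                              'input.txt', 'out')
    # Poll progress until every reducer reports finished.
    while True:
        status = client.client_query(task_name)
        if status['finished_reducer'] == status['needed_reducer']:
            break
        gevent.sleep(1)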