Code Example #1
File: server.py Project: 831jsh/xcat-core (also xcat2/xcat-core)
class XCATMessager(utils.Messager):
    def __init__(self, sock):
        self.sock = sock
        self.sem = BoundedSemaphore(1)

    def _send(self, d):
        # Encode first so the length prefix counts bytes; len() of the str
        # would undercount any non-ASCII payload.
        buf = json.dumps(d).encode('utf-8')
        self.sem.acquire()
        try:
            self.sock.sendall(utils.int2bytes(len(buf)) + buf)
        finally:
            self.sem.release()

    def info(self, msg):
        d = {'type': MSG_TYPE, 'msg': {'type': 'info', 'data': msg}}
        self._send(d)

    def warn(self, msg):
        d = {'type': MSG_TYPE, 'msg': {'type': 'warning', 'data': msg}}
        self._send(d)

    def error(self, msg, node=''):
        d = {'type': MSG_TYPE, 'msg': {'type': 'error', 'node': node, 'data': msg}}
        self._send(d)

    def syslog(self, msg):
        d = {'type': MSG_TYPE, 'msg': {'type': 'syslog', 'data': msg}}
        self._send(d)

    def info_with_host(self, msg):
        d = {'type': MSG_TYPE, 'msg': {'type': 'info_with_host', 'data': msg}}
        self._send(d)

    def update_node_attributes(self, attribute, node, data):
        d = {'type': DB_TYPE, 'attribute': {'name': attribute, 'method': 'set', 'type': 'node', 'node': node, 'value': data}}
        self._send(d)
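A note on the framing used by _send above: each message is a length prefix followed by the UTF-8 JSON payload. A minimal sketch of the matching receive side, assuming utils.int2bytes produces a 4-byte big-endian prefix (an assumption; xcat-core's actual encoding may differ):

import json
import struct

def read_message(sock):
    # Read the assumed 4-byte big-endian length header, then the JSON body.
    header = b''
    while len(header) < 4:
        chunk = sock.recv(4 - len(header))
        if not chunk:
            raise ConnectionError('socket closed while reading header')
        header += chunk
    length = struct.unpack('>I', header)[0]
    payload = b''
    while len(payload) < length:
        chunk = sock.recv(length - len(payload))
        if not chunk:
            raise ConnectionError('socket closed while reading payload')
        payload += chunk
    return json.loads(payload.decode('utf-8'))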
Code Example #2
File: mw.py Project: SandStormHoldings/ScratchDocs
class Logger(BaseMiddleware):
    def __init__(self, link):
        self._sem = Semaphore(1)
        self.link = link

    def run(self, producer, request):

        import gevent.monkey, gevent.socket
        #gevent.monkey.patch_all()
        import socket

        assert socket.socket is gevent.socket.socket, "gevent monkey patch has not occurred"

        with self._sem:
            print(
                json.dumps([
                    datetime.datetime.now().isoformat(), request.remote_addr,
                    request.method, request.path,
                    request.headers.get('Authorization')
                ]))
        return self.link(producer, request)
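Here self.link is the next handler in the chain. A hypothetical sketch of how such link-style middleware could be composed (chain and the endpoint lambda are illustrative names, not ScratchDocs API):

def chain(middleware_classes, endpoint):
    # Wrap the endpoint right to left so the first class listed runs first.
    link = endpoint
    for cls in reversed(middleware_classes):
        link = cls(link).run
    return link

# handler = chain([Logger], lambda producer, request: producer(request))
# handler(producer, request) logs the request, then delegates down the chain.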
Code Example #3
class Cluster(object):
    """
        client-side cluster
        manage all client
    """
    def __init__(self):
        """Cluster 抽象"""
        self.sem = BoundedSemaphore(1)
        self._clients = {}
        self.init_config()

    def get_client(self, service) -> Client:
        if service not in self._clients:
            with self.sem:
                if service not in self._clients:
                    self._clients[service] = Client(self.context, service)
        return self._clients[service]

    def init_config(self):
        bee_rpc_registry_conf = config.get("bee.rpc.registry")
        server = Server()
        server.__dict__ = bee_rpc_registry_conf
        self.registry = Builder.build(server)

        bee_rpc_client_conf = config.get("bee.rpc.client")
        if bee_rpc_client_conf is not None:
            for k, v in dict(bee_rpc_client_conf).items():
                opts = ClientOptions()
                opts.name = k
                opts.cover(Map.from_dict(v))
                self._clients[k] = Client(opts=opts, registry=self.registry)
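get_client above is double-checked locking: the cache is tested once without the semaphore and re-tested under it, so only one greenlet constructs each client while the fast path stays lock-free. The same pattern, reduced to a generic sketch:

from gevent.lock import BoundedSemaphore

class LazyCache:
    def __init__(self, factory):
        self._factory = factory
        self._items = {}
        self._sem = BoundedSemaphore(1)

    def get(self, key):
        if key not in self._items:            # fast path, no lock taken
            with self._sem:
                if key not in self._items:    # re-check under the lock
                    self._items[key] = self._factory(key)
        return self._items[key]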
Code Example #4
class gevent_sem_sample:
    '''
    Demonstrates how a semaphore coordinates different greenlets.
    A single semaphore guards the code block: once one greenlet has
    acquired it, the others block in acquire() until it is released.
    '''
    def __init__(self):
        self.sem = BoundedSemaphore(1)  # only one code block may run at a time
        self.s = 0

    def worker1(self, n):
        print('Worker1 %i start' % n)
        self.sem.acquire()
        #gevent.sleep(0)
        print('Worker1 %i acquired semaphore' % n)
        self.s = 1
        print('Worker1 %i s=%d' % (n, self.s))
        sleep(10)
        self.sem.release()
        print('Worker1 %i released semaphore' % n)

    def worker2(self, n):
        print('Worker2 %i start' % n)

        with self.sem:
            print('Worker2 %i acquired semaphore' % n)
            self.s = 2
            print('Worker2 %i s=%d' % (n, self.s))
            #sleep(0)

        print('Worker2 %i released semaphore' % n)
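A possible driver for the sample above, assuming the usual gevent imports (BoundedSemaphore, sleep, spawn/joinall): worker1 holds the semaphore for ten seconds, so worker2 only prints its 'acquired' line after worker1 releases.

import gevent

sample = gevent_sem_sample()
jobs = [
    gevent.spawn(sample.worker1, 1),
    gevent.spawn(sample.worker2, 2),
]
gevent.joinall(jobs, timeout=15)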
Code Example #5
class ConnectionPool(object):
    def __init__(self,
                 dsn,
                 max_con=12,
                 max_idle=3,
                 connection_factory=psycopg2.extras.RealDictConnection):
        self.dsn = dsn
        self.max_con = max_con
        self.max_idle = max_idle
        self.connection_factory = connection_factory
        self._sem = Semaphore(max_con)
        self._free = []
        self._local = gevent_local()

    def __enter__(self):
        self._sem.acquire()
        try:
            if getattr(self._local, 'con', None) is not None:
                # Re-entered from the same greenlet: hand back the
                # connection that is already checked out.
                con = self._local.con
                print(
                    'WARNING: returning existing connection (re-entered connection pool)!'
                )
            elif self._free:
                con = self._free.pop()
            else:
                con = psycopg2.connect(
                    dsn=self.dsn, connection_factory=self.connection_factory)
            self._local.con = con
            return con
        except Exception:
            self._sem.release()
            raise

    def __exit__(self, exc_type, exc_value, traceback):
        try:
            if self._local.con is None:
                raise RuntimeError("Exit connection pool with no connection?")
            if exc_type is not None:
                self.rollback()
            else:
                self.commit()
            if len(self._free) < self.max_idle:
                self._free.append(self._local.con)
            self._local.con = None
        finally:
            self._sem.release()
            #print('released')

    def commit(self):
        self._local.con.commit()

    def rollback(self):
        self._local.con.rollback()
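Typical use of this pool is as a context manager; the DSN below is a placeholder:

pool = ConnectionPool('dbname=test user=postgres', max_con=4)
with pool as con:
    with con.cursor() as cur:
        cur.execute('SELECT 1 AS one')
        print(cur.fetchone())  # {'one': 1} with RealDictConnection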
Code Example #6
class Device:
    class EventError(Exception):
        pass

    def __init__(self, path: str, name: str, host: str, port: int):
        """ Constructor """
        self._name = name
        self.host = host
        self.port = port
        self.datadir = os.path.join(path, 'data')
        if not os.path.exists(self.datadir):
            os.mkdir(self.datadir)
        db_path = os.path.join(self.datadir, f'device_{self._name}.sqlite3')
        self.lock_db = BoundedSemaphore(1)
        self.db = sqlite3.connect(db_path)

    @property
    def name(self):
        return f'[DEV {self._name}]'

    @contextmanager
    def cursor(self):
        """ Cursor context manager that holds the database lock """
        cursor = self.db.cursor()
        self.lock_db.acquire()
        try:
            yield cursor
            self.db.commit()
        finally:
            self.lock_db.release()
            # noinspection PyBroadException
            try:
                cursor.close()
            except Exception:
                pass

    def run(self):
        """ Run """
        raise NotImplementedError()

    def event(self, ts: datetime, value: TValue, retries: int = 0) -> int:
        """ Notify event """
        raise NotImplementedError()

    def _value(self, value: str) -> TValue:
        """ Convert value """
        raise NotImplementedError()
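Device leaves run/event/_value abstract, but the locked cursor() above can be exercised directly. A sketch, assuming the module's own imports (sqlite3, TValue, and so on) are in place; the path, name, and SQL are illustrative:

dev = Device(path='/tmp', name='demo', host='localhost', port=8000)
with dev.cursor() as cur:
    cur.execute('CREATE TABLE IF NOT EXISTS events (ts TEXT, value TEXT)')
    cur.execute('INSERT INTO events VALUES (?, ?)', ('2024-01-01T00:00:00', '42'))
# the context manager commits and releases the lock on exit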
Code Example #7
class Factory():

    def __init__(self):
        self.sem = BoundedSemaphore(1)
        self.clients = Map()

    def open(self, name: str) -> Client:
        with self.sem:
            client = self.clients.get(name)
            if client is None:
                client = self.create(name)
        return client

    def create(self, name: str) -> Client:
        client = self.clients.get(name)
        if client is not None:
            return client
        else:
            opts = self.load_options(name)
            if opts is None:
                return None
            mode = Primary()
            if opts.read_preference == READ_PREFERENCE_PRIMARY_PREFERRED:
                mode = PrimaryPreferred()
            elif opts.read_preference == READ_PREFERENCE_SECONDARY:
                mode = Secondary()
            elif opts.read_preference == READ_PREFERENCE_SECONDARY_PREFERRED:
                mode = SecondaryPreferred()
            elif opts.read_preference == READ_PREFERENCE_NEAREST:
                mode = Nearest()
            kwargs = {
                "read_preference" : mode,
                "maxPoolSize" : opts.max_pool_size,
                "minPoolSize" : opts.min_pool_size,
                "socketTimeoutMS" : opts.socket_time_out,
                "connectTimeoutMS" : opts.connect_time_out
            }
            _client = MongoClient(host=opts.uri, **kwargs)
            client = Client(db=name, c=_client, opts=opts)
            self.clients.set(name, client)
            return client

    def load_options(self, name) -> Options:
        key = "bee.data.mongo." + name
        if not config.exist(key):
            return None

        opts = Options()
        bee_data_mongo_conf = config.get(key)
        opts.cover(bee_data_mongo_conf)
        return opts
Code Example #8
File: monkey.py Project: anno1337/simple-requests
class _LeakySemaphore(object):
    def __init__(self, value=1, maxSeconds=10):
        self._semaphore = BoundedSemaphore(value)
        self._maxSeconds = maxSeconds
        self._timer = None
        self._leaked = 0
        self._stopped = False

    def _leak(self):
        sleep(self._maxSeconds)
        self._leaked += 1
        self._semaphore.release()

    @property
    def inUse(self):
        return self._semaphore._initial_value - self._semaphore.counter

    @property
    def waiting(self):
        return len(self._semaphore._links)

    def release(self):
        if self._stopped:
            return
        if self._leaked > 0:
            self._leaked -= 1
        else:
            self._semaphore.release()

    def stop(self):
        self._stopped = True

        if self._timer is not None:
            self._timer.kill(block=False)
            self._timer = None

        while self.waiting > 0:
            self._semaphore.release()
            sleep(0.1)

    def acquire(self):
        if self._stopped:
            return
        if self._semaphore.locked() and not self._timer:
            self._timer = spawn(self._leak)
        self._semaphore.acquire(blocking=True, timeout=None)
        if self._timer is not None:
            self._timer.kill(block=False)
            self._timer = None
            if self.waiting > 0:
                self._timer = spawn(self._leak)
Code Example #9
    def acquire(self, blocking=True, timeout=-1):
        # This is the Python 3 signature.
        # On Python 2, Lock.acquire has the signature `Lock.acquire([wait])`
        # where `wait` is a boolean that cannot be passed by name, only position.
        # so we're fine to use the Python 3 signature.

        # Transform the default -1 argument into the None that our
        # semaphore implementation expects, and raise the same error
        # the stdlib implementation does.
        if timeout == -1:
            timeout = None
        if not blocking and timeout is not None:
            raise ValueError("can't specify a timeout for a non-blocking call")
        if timeout is not None:
            if timeout < 0:
                # in C: if(timeout < 0 && timeout != -1)
                raise ValueError("timeout value must be strictly positive")
            if timeout > self._TIMEOUT_MAX:
                raise OverflowError('timeout value is too large')

        acquired = BoundedSemaphore.acquire(self, blocking, timeout)
        if not acquired and not blocking and getcurrent() is not get_hub_if_exists():
            # Run other callbacks. This makes spin locks works.
            # We can't do this if we're in the hub, which we could easily be:
            # printing the repr of a thread checks its tstate_lock, and sometimes we
            # print reprs in the hub.
            # See https://github.com/gevent/gevent/issues/1464

            # By using sleep() instead of self.wait(0), we don't force a trip
            # around the event loop *unless* we've been running callbacks for
            # longer than our switch interval.
            sleep()
        return acquired
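The argument validation above mirrors the stdlib threading.Lock.acquire, which raises the same ValueError; that is easy to confirm against the standard library:

import threading

lock = threading.Lock()
try:
    lock.acquire(False, 1.0)  # non-blocking call combined with a timeout
except ValueError as e:
    print(e)  # can't specify a timeout for a non-blocking call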
Code Example #10
class Cluster(object):
    def __init__(self, config_file):
        u"""Cluster 抽象"""
        self.config_file = config_file
        self.config = Config(config_file)
        self.context = Context(self.config.parse_refer(),
                               self.config.parse_registry())

        self.clients = {}
        self.sem = BoundedSemaphore(1)

    def get_client(self, service):
        if service not in self.clients:
            with self.sem:
                if service not in self.clients:
                    self.clients[service] = Client(self.context, service)
        return self.clients[service]
Code Example #11
class DianpingHelper(object):
    def __init__(self):
        # cached city information
        from gevent.lock import BoundedSemaphore

        self._city_cache = {}
        self._city_cache_lock = BoundedSemaphore(1)

    def get_city(self, city_name, coords):
        """
        通过城市名称和坐标,获得城市详情
        """
        if city_name not in self._city_cache:
            # Acquire before the try block so a failed acquire cannot
            # trigger a release of a lock we never took.
            self._city_cache_lock.acquire()
            try:
                if city_name not in self._city_cache:
                    col = get_mongodb('geo', 'Locality', 'mongo')
                    lat = coords['lat']
                    lng = coords['lng']
                    geo_json = {
                        'type': 'Point',
                        'coordinates': [coords['lng'], coords['lat']]
                    }
                    max_distance = 200000
                    city_list = list(
                        col.find({
                            'alias': city_name,
                            'location': {
                                '$near': {
                                    '$geometry': geo_json,
                                    '$maxDistance': max_distance
                                }
                            }
                        }))
                    if city_list:
                        city = city_list[0]
                        self._city_cache[city_name] = city
                    else:
                        self._city_cache[city_name] = None
                        raise ValueError(
                            'Failed to find city: %s, lat=%f, lng=%f' %
                            (city_name, lat, lng))
            finally:
                self._city_cache_lock.release()
Code Example #12
File: utils.py Project: macdaliot/py-beach (also rebaker501/py-beach)
class RWLock( object ):
    def __init__( self ):
        self._canRead = Event()
        self._canWrite = Event()
        self._mutex = BoundedSemaphore( value = 1 )
        self._readers = 0
        self._isWriting = False
        self._canRead.set()
        self._canWrite.set()

    def rLock( self ):
        isReady = False
        while not isReady:
            self._canRead.wait()
            self._mutex.acquire( blocking = True, timeout = None )
            if not self._isWriting:
                self._canWrite.clear()
                self._readers += 1
                isReady = True
            self._mutex.release()

    def rUnlock( self ):
        self._mutex.acquire( blocking = True, timeout = None )
        self._readers -= 1
        if 0 == self._readers:
            self._canWrite.set()
        self._mutex.release()

    def wLock( self ):
        isReady = False
        while not isReady:
            self._canRead.clear()
            self._canWrite.wait()
            self._mutex.acquire( blocking = True, timeout = None )
            if not self._isWriting and 0 == self._readers:
                isReady = True
                self._isWriting = True
                self._canWrite.clear()
            self._mutex.release()

    def wUnlock( self ):
        self._mutex.acquire( blocking = True, timeout = None )
        self._isWriting = False
        self._canWrite.set()
        self._canRead.set()
        self._mutex.release()

    def writer( self ):
        return _rwlock_w( self )

    def reader( self ):
        return _rwlock_r( self )
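_rwlock_w and _rwlock_r are referenced but not shown; a plausible sketch of those context-manager wrappers plus usage (an assumption about py-beach's actual helpers, written in the same spacing style):

class _rwlock_r( object ):
    def __init__( self, lock ):
        self._lock = lock
    def __enter__( self ):
        self._lock.rLock()
    def __exit__( self, exc_type, exc_value, traceback ):
        self._lock.rUnlock()

class _rwlock_w( object ):
    def __init__( self, lock ):
        self._lock = lock
    def __enter__( self ):
        self._lock.wLock()
    def __exit__( self, exc_type, exc_value, traceback ):
        self._lock.wUnlock()

lock = RWLock()
with lock.reader():
    pass  # any number of readers may hold this concurrently
with lock.writer():
    pass  # writers get exclusive access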
Code Example #13
File: batch_tests.py Project: flxf/gbatchy
    def test_concurrent_batching(self):
        lock = BoundedSemaphore(1)
        lock.acquire()  # now 0

        N_CALLS = [0]

        @batched()
        def fn(arg_list):
            N_CALLS[0] += 1
            lock.acquire()

        @batched()
        def fn2(arg_list):
            N_CALLS[0] += 1
            lock.release()

        @batch_context
        def test():
            a, b = spawn(fn), spawn(fn2)
            self.assertEqual(0, N_CALLS[0])
            a.get(), b.get()

        test()  # shouldn't hang.
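The test only completes if fn and fn2 are dispatched concurrently, because fn blocks on the semaphore that fn2 alone releases. The same shape in plain gevent, without the gbatchy decorators:

import gevent
from gevent.lock import BoundedSemaphore

lock = BoundedSemaphore(1)
lock.acquire()  # now 0

def fn():
    lock.acquire()  # blocks until fn2 runs

def fn2():
    lock.release()

# Completes only because both greenlets are scheduled concurrently.
gevent.joinall([gevent.spawn(fn), gevent.spawn(fn2)], timeout=5)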
Code Example #14
File: thread.py Project: MrKiven/gevent (also erics8/wwqLyParse)
        def acquire(self, blocking=True, timeout=-1):
            # Transform the default -1 argument into the None that our
            # semaphore implementation expects, and raise the same error
            # the stdlib implementation does.
            if timeout == -1:
                timeout = None
            if not blocking and timeout is not None:
                raise ValueError("can't specify a timeout for a non-blocking call")
            if timeout is not None:
                if timeout < 0:
                    # in C: if(timeout < 0 && timeout != -1)
                    raise ValueError("timeout value must be strictly positive")
                if timeout > self._TIMEOUT_MAX:
                    raise OverflowError('timeout value is too large')

            return BoundedSemaphore.acquire(self, blocking, timeout)
Code Example #15
File: connection_pool.py Project: lypc/archimetes
class ConnectionPool(object):
    """dynamic service connection pool"""

    def __init__(self, server_node, iface_cls, config):

        self._section_name = utils.get_module(__name__)
        self._logger = logging.getLogger(__name__)
        self._host = server_node.split(":")[0]
        self._port = int(server_node.split(":")[1])
        self._iface_cls = iface_cls

        self._get_conn_timeout = config.getint(self._section_name, "pool_timeout",
                                               default=settings.DEFAULT_POOL_TIMEOUT)
        self._socket_timeout = config.getint(self._section_name, "request_timeout",
                                             default=settings.DEFAULT_REQUEST_TIMEOUT) * 1000
        self._size = config.getint(self._section_name, "pool_size", default=settings.DEFAULT_POOL_SIZE)

        self._c_module_serialize = config.getboolean(self._section_name, "c_module_serialize",
                                                     default=settings.USE_C_MODULE_SERIALIZE)

        self._closed = False
        if ASYNC_TAG:
            from gevent.lock import BoundedSemaphore
            from gevent import queue as Queue
            self._semaphore = BoundedSemaphore(self._size)
            self._connection_queue = Queue.LifoQueue(self._size)
            self._QueueEmpty = Queue.Empty
        else:
            from threading import BoundedSemaphore
            import Queue
            self._semaphore = BoundedSemaphore(self._size)
            self._connection_queue = Queue.LifoQueue(self._size)
            self._QueueEmpty = Queue.Empty

    def close(self):
        self._closed = True
        while not self._connection_queue.empty():
            try:
                conn = self._connection_queue.get(block=False)
                try:
                    self._close_connection(conn)
                except Exception:
                    pass
            except self._QueueEmpty:
                pass

    def _create_connection(self):
        self._logger.debug("create a new connection ip:%s port:%s" % (self._host, self._port))
        socket_ = TSocket.TSocket(self._host, self._port)
        if self._socket_timeout > 0:
            socket_.setTimeout(self._socket_timeout)
        transport = TTransport.TBufferedTransport(socket_)
        if self._c_module_serialize:
            protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
        else:
            protocol = TBinaryProtocol.TBinaryProtocol(transport)
        connection = self._iface_cls(protocol)
        transport.open()
        return connection

    def _close_connection(self, conn):
        try:
            conn._iprot.trans.close()
        except Exception:
            pass
        try:
            conn._oprot.trans.close()
        except Exception:
            pass

    def get_connection(self):
        """ get a connection from the pool. This blocks until one is available."""
        self._semaphore.acquire()
        if self._closed:
            raise RuntimeError('connection pool closed')
        try:
            return self._connection_queue.get(block=False)
        except self._QueueEmpty:
            try:
                return self._create_connection()
            except Exception:
                self._semaphore.release()
                raise

    def return_connection(self, conn):
        """ return a connection to the pool."""
        if self._closed:
            self._close_connection(conn)
            return
        self._connection_queue.put(conn)
        self._semaphore.release()

    def release_connection(self, conn):
        """ Call when the connection is no longer usable."""
        try:
            self._close_connection(conn)
        except Exception:
            pass
        if not self._closed:
            self._semaphore.release()

    def release_all_connection(self):
        """ Call when all connections in the pool are no longer usable."""
        while not self._connection_queue.empty():
            try:
                conn = self._connection_queue.get(block=False)
                try:
                    self._close_connection(conn)
                except Exception:
                    pass
            except self._QueueEmpty:
                pass
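A sketch of the intended call pattern for this pool; EchoService and the config object are placeholders:

pool = ConnectionPool('127.0.0.1:9090', EchoService.Client, config)
conn = pool.get_connection()
try:
    result = conn.echo('ping')
    pool.return_connection(conn)  # healthy: back into the queue
except Exception:
    pool.release_connection(conn)  # broken: close it and free the slot
    raise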
Code Example #16
File: bot.py Project: lordzouga/gearsheet_bot
class GearSheetPlugin(Plugin):
    session = ""
    conn = None
    WEAPON_TALENTS = 'weapontalents'
    PLAYER_TALENTS = 'playertalents'
    GEAR_TALENTS = 'geartalents'
    GEAR_SETS = 'gearsets'
    WEAPONS = 'weapons'
    WEAPON_MODS = 'weaponmods'
    EXOTIC_GEARS = 'exoticgears'
    GEAR_ATTRIBUTES = 'gearattributes'

    names = {}
    vendor_names = {}
    logger = None
    lock = None

    def __init__(self, bot, config):
        super().__init__(bot, config)

        print('Logging in to backend api...')
        login_params = json.dumps({'username': '******', 'password': '******', 'appcode': 'gearsheet'})
        conn = http.client.HTTPConnection("localhost:9000")

        conn.request('POST', '/login', login_params, {'Content-Type': 'application/json'})
        login_response = conn.getresponse()
        login_response = json.loads(login_response.read().decode('utf-8'))

        if login_response['result'] != 'ok':
            print("Login to baasbox failed")
            return

        print('Login successful.')
        self.session = login_response['data']["X-BB-SESSION"]

        # get a list of all indexed names
        params = urllib.parse.urlencode({'fields': 'name'})
        conn.request('GET', '/document/indexes?%s' % params, headers={'X-BB-SESSION': self.session})
        res = json.loads(conn.getresponse().read().decode('utf-8'))
        self.names = {i['name'] for i in res['data']}

        conn.close()

        vendors_param = { "fields": "name" }
        response = requests.get(BACKEND_HOST + '/document/vendors-indexes', params=vendors_param, headers={SESSION_HEADER: self.session})
        self.vendor_names = {i['name'] for i in response.json()['data']}

        # init logging
        self.logger = logging.getLogger('gearsheet_bot')
        self.logger.setLevel(logging.INFO)

        fh = logging.FileHandler('access.log')
        fh.setLevel(logging.INFO)

        formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')
        fh.setFormatter(formatter)

        self.logger.addHandler(fh)
        
        self.lock = BoundedSemaphore(1)

        #print(self.state.guilds)

    #in a plugin class
    @Plugin.listen('Ready')
    def on_ready(self, event):
        self.client.update_presence(Status.online, Game(type=GameType.default, name='!help for guide'))

    @Plugin.command('help')
    def command_help(self, event):
        skip = {264445053596991498: "discord bot list"}

        if event.guild.id in skip:
            return
        
        self.log_it(event, "", "help")
        event.msg.reply('Usage Guide <https://docs.google.com/document/d/1G1d1oj0qDbv6yf7EOEkGAHN40OqsdkXRrxCHo_3QC2w/view>')

    @Plugin.command('ping')
    def command_ping(self, event):
        event.msg.reply("Pong!")

    @Plugin.command('servers')
    def command_servers(self, event):
        if event.author.id in [195168390476726272]:
            event.msg.reply("I am currently installed on %s servers" % len(self.state.guilds))
    
    @Plugin.command('nicknames')
    def command_nicknames(self, event):
        self.log_it(event, "", "nicknames")
        event.msg.reply("<https://goo.gl/Brh1TF>")

    @Plugin.command('add')
    def command_add(self, event):
        pass

    def log_it(self, event, param, command):
        if event.author.id not in [195168390476726272]:
            self.logger.info("%s - %s - %s - %s - %s" % (command, str(event.author).replace(" ", "_"), event.guild.name.replace(" ", "_"), event.guild.id, param))

    @Plugin.command('g')
    @Plugin.command('s')
    @Plugin.command('sheet')
    @Plugin.command('gearsheet')
    def command_talents(self, event):
        if len(event.args) > 0:
            param = ' '.join(event.args).lower()

            if param == 'help':
                help_text = '''I can only perform simple searches for **The Division** related items\n
Example: to find out what *Responsive* talent does, use `!gearsheet responsive`\n
Popular community nicknames for items are also supported.\n
**PRO TIP**: `!sheet responsive` will also work.

My reddit thread: <https://goo.gl/638vpi>.

**Credit** to @Pfftman#6620 | /u/pfftman | PSN: pfftman'''

                self.log_it(event, param, "gearsheet")
                event.msg.reply(help_text)
                return

            if param in util.aliases:
                param = util.aliases[param].lower()
            
            if param in scopes and param != 'weapons':
                self.log_it(event, param, "gearsheet")
                query = {
                    'fields': 'name',
                    'orderBy': 'name'
                }
                names = requests.get(BACKEND_HOST + "/document/%s" % param, query, headers={SESSION_HEADER: self.session}).json()
                # print(names)
                name_list = ['`' + i["name"] + '`' for i in names["data"]]

                event.msg.reply('there are **%s items**' % len(name_list))
                event.msg.reply(",  ".join(name_list))

                return
            
            # start_time = time.time()
            conn = http.client.HTTPConnection("localhost:9000")
            conn.request('GET', '/plugin/bot.index?%s' % (urllib.parse.urlencode({"param": param})),
                            headers={'X-BB-SESSION': self.session})

            response = conn.getresponse().read().decode('utf-8')
            conn.close()
            # time_diff = time.time() - start_time

            
            self.log_it(event, param, "gearsheet")

            response = json.loads(response)
            if response['result'] != 'ok':
                matches = [("**%s**" % i).title() for i in self.names if fuzz.partial_ratio(param, i) > 80]

                if len(matches) > 0:
                    match_str = "this %s" % ', '.join(matches) if len(matches) == 1 else \
                        "any of these %s" % ', '.join(matches)
                    text = "Did you mean to search for %s?" % \
                           match_str
                    event.msg.reply('%s' % text)
                else:
                    event.msg.reply('```item not found```')

                return

            for item in response['data']:
                collection_name = item['@class']

                embed = None
                if collection_name == self.WEAPON_TALENTS:
                    embed = self.render_weapon_talent(item)
                elif collection_name == self.PLAYER_TALENTS:
                    embed = self.render_player_talent(item)
                elif collection_name == self.GEAR_TALENTS:
                    embed = self.render_gear_talent(item)
                elif collection_name == self.GEAR_SETS:
                    embed = self.render_gearset(item)
                elif collection_name == self.WEAPONS:
                    embed = self.render_weapon(item)
                elif collection_name == self.WEAPON_MODS:
                    embed = self.render_weapon_mods(item)
                elif collection_name == self.EXOTIC_GEARS:
                    embed = self.render_exotic_gear(item)
                elif collection_name == self.GEAR_ATTRIBUTES:
                    embed = self.render_gear_attribute(item)

                event.msg.reply(embed=embed)

    @Plugin.command('v')
    @Plugin.command('vendors')
    @Plugin.command('vendor')
    def command_vendors(self, event):
        if len(event.args) > 0:
            param = ' '.join(event.args).lower()
            splitted = param.strip().split(" with ")

            # handle requests from weapons in plural form
            if splitted[0] in weapon_types:
                temp = splitted[0]
                splitted[0] = temp[:-1]
            
            if event.author.id not in [195168390476726272]:
                self.log_it(event, param, "vendors")

            # routines related to update
            if param.strip(' ') == 'update': #pfft              #ruben              #noots              #ahmad
                if event.author.id in { 195168390476726272, 177627571700105217, 196408555132289024, 99511296604520448 }:
                    self.handle_update(event)
                else:
                    event.msg.reply("Haha! no")
                return
            
            if param.strip(' ') == 'status':
                reply = self.render_status_command()
                event.msg.reply(embed=reply)
                return
            
            arg = None
            param_obj = None
            
            for i, item in enumerate(splitted): # check if there is already a nickname
                # start with the vendor aliases and fallback to the gearsheet aliases
                if item in util.vendor_aliases.keys():
                    splitted[i] = util.vendor_aliases[item].lower()
            
            # determine the kind of request to send to the server
            query = splitted[0]    
            if len(splitted) == 1: # this block takes care of args without 'with'
                param_obj = {
                    "param": splitted[0],
                    "has_arg": False
                }
            elif len(splitted) >= 2:
                arg = splitted[1]
                param_obj = {
                    "param": splitted[0],
                    "has_arg": True,
                    "arg": splitted[1] # take only one argument
                }
            else:
                event.msg.reply('```You shouldn\'t be able to get here. Yet.. ```')
                return

            header = {SESSION_HEADER: self.session}
            response = requests.get(BACKEND_HOST + '/plugin/vendors.index', params=param_obj, headers=header)

            if response.json()['result'] != 'ok': # item not found in vendors list
                # try to determine if it was a bad input from user or an item that doesn't exist
                self.reply_item_not_found(query, event)
                return
            
            data = remove_duplicates(response.json()["data"])
            data = sorted(data, key=lambda item: item['name'])
            embed = None

            if len(data) > 1:
                embed = self.render_multiple_items(data)
            else:
                for item in data:
                    collection = item['@class']

                    if collection == "vendors-%s" % VENDOR_WEAPONS:
                        embed = self.render_vendor_weapon(item)
                    elif collection == get_collection_name(VENDOR_GEAR):
                        embed = self.render_vendor_gear(item)
                    elif collection == get_collection_name(VENDOR_GEAR_MODS):
                        embed = self.render_vendor_gear_mod(item)
                    elif collection == get_collection_name(VENDOR_WEAPON_MODS):
                        embed = self.render_vendor_weapon_mod(item)
            
            if embed is not None:
                event.msg.reply(embed=embed)
        else:
            event.msg.reply('<http://rubenalamina.mx/the-division-weekly-vendor-reset/>')
    
    def render_status_command(self):
        param = {
            "orderBy": "time desc",
            "recordsPerPage": 1,
            "page": 0
        }

        res = requests.get(BACKEND_HOST + '/document/vendors-update', params=param, headers={SESSION_HEADER: self.session}).json()
        info = res['data'][0]
        
        today = arrow.utcnow()
        reset_text = ""

        if today.weekday() == 5:
            reset_text = "in 6 days"
        else:
            temp = today.shift(weekday=5)
            reset_text = temp.humanize()
        
        last_updated = arrow.Arrow.fromtimestamp(info['time'])

        embed = MessageEmbed()
        embed.title = "Last Updated %s" % last_updated.humanize()
        embed.description = "by %s" % info['updater']
        
        embed.add_field(name='Next Vendor Reset (in game)', value=reset_text, inline=True)

        return embed

    def reply_item_not_found(self, query, event):
        pieces = ["vest", "backpack", "mask", "gloves", "knee pads", "holster"]
        temp = [i for i in pieces if (" " + i) in query]

        if len(temp) > 0:
            # str.strip() removes characters, not a suffix; cut the piece name off explicitly
            gear_piece = query.replace(" " + temp[0], "").strip()
            if gear_piece in self.names:
                event.msg.reply('Sorry, no gearset or highend item like that this week')
            else:
                event.msg.reply('Are you sure %s exists?' % query)
        elif query in ["performance mod", "stamina mod", "electronics mod", "weapon mod"]:
            event.msg.reply('Sorry, no mod like that this week')

        elif query in util.aliases or query in self.names or query in self.vendor_names:
            event.msg.reply("Sorry, no item like that this week")

        else:
            event.msg.reply("Are you sure this item exists?")


    def handle_update(self, event):
        if not self.lock.locked():
            start_time = time.time()
            self.lock.acquire()

            event.msg.reply("Vendors update initiated by Master @%s" % (str(event.author)))
            vendors.update()

            # log the update in the db
            info = {
                "updater": str(event.author),
                "time": int(time.time()),
                "server": event.guild.name,
                "server_id": event.guild.id
            }

            requests.post(BACKEND_HOST + "/document/vendors-update", json=info, headers={SESSION_HEADER: self.session})

            # release lock
            self.lock.release()

            duration = time.time() - start_time
            event.msg.reply("Update done. Duration: `{0:.2f}s`".format(duration))
        else:
            event.msg.reply("update is already running")


    def render_multiple_items(self, items):
        embed = MessageEmbed()
        embed.description = "found in %s items" % len(items)
        embed.color = 0xDA9513

        for item in items:
            collection = item["@class"]

            if collection == get_collection_name(VENDOR_WEAPONS):
                talents = " **-** ".join([ i for i in [item['talent1'], item['talent2'], item['talent3']] if i.strip() != "-"])
                body = '''`%s`  |  **%s** DMG  |  %s''' % (item["vendor"], item['dmg'], talents.strip())

                embed.add_field(name=item["name"], value=body)
            elif collection == get_collection_name(VENDOR_GEAR):
                major_attrs = item["major"].strip().strip("-").split("<br/>")
                minor_attrs = item["minor"].strip().strip("-").split("<br/>")

                main_stats = []
                if (item['fire'].strip().strip('-')):
                    main_stats.append("**Firearms:** %s" % item['fire'])
                if (item['stam'].strip().strip('-')):
                    main_stats.append("**Stamina:** %s" % item['stam'])
                if (item['elec'].strip().strip('-')):
                    main_stats.append("**Electronics:** %s" % item['elec'])

                all_attrs = "  **|**  ".join(main_stats + [i for i in major_attrs + minor_attrs if i != ""])
                
                body = "`%s`  |  %s" % (item["vendor"], all_attrs)

                embed.add_field(name=item["name"], value=body)
            elif collection == get_collection_name(VENDOR_GEAR_MODS):
                attr = item["attribute"]

                body = "`%s` | %s | %s" % (item["vendor"], item['stat'], attr)
                embed.add_field(name=item['name'], value=body)
            elif collection == get_collection_name(VENDOR_WEAPON_MODS):
                attrs = item['attributes'].split("<br/>")

                attrs_str = " **|** ".join([i for i in attrs[:3]])

                body = "`%s` | %s " % (item["vendor"], attrs_str)
                embed.add_field(name=item["name"], value=body)
            else: return None
        
        return embed

    def render_vendor_weapon(self, weapon):
        embed = MessageEmbed()

        embed.title = weapon['name']
        embed.description = weapon['vendor']
        # embed.add_field(name='Vendor', value=weapon['vendor'], inline=True)
        embed.add_field(name='Price', value=weapon['price'], inline=True)
        embed.add_field(name="Damage", value=weapon['dmg'], inline=True)
        embed.add_field(name='Bonus', value=weapon['bonus'], inline=True)
        
        talents = " **-** ".join([ i for i in [weapon['talent1'], weapon['talent2'], weapon['talent3']] if i.strip() != "-"])
        embed.add_field(name='Talents', value=talents)
        
        if weapon['type'] == 'exotic':
            embed.color = 0xCF5A2E
        else:
            embed.color = 0xDA9513

        return embed
    
    def render_vendor_gear_mod(self, gearmod):
        embed = MessageEmbed()

        embed.title = gearmod['name']
        embed.description = gearmod['vendor']

        embed.add_field(name='Price', value=gearmod['price'], inline=True)
        embed.add_field(name='Stat', value=gearmod['stat'], inline=True)
        embed.add_field(name='Attribute', value=gearmod['attribute'])

        if gearmod['type'] == 'purple-mod':
            embed.color = 0x993D78
        else:
            embed.color = 0xDA9513
        
        return embed
    
    def render_vendor_weapon_mod(self, weaponmod):
        embed = MessageEmbed()

        embed.title = weaponmod['name']
        embed.description = weaponmod['vendor']

        embed.add_field(name='Price', value=weaponmod['price'], inline=True)
        # embed.add_field(name='Stat', value=weaponmod[''], inline=True)
        attr = " **-** ".join(weaponmod["attributes"].split('<br/>'))
        embed.add_field(name='Attribute', value=attr)
        embed.color = 0xDA9513

        return embed

    def render_vendor_gear(self, gear):
        embed = MessageEmbed()

        embed.title = gear['name']
        embed.description = gear['vendor']

        embed.add_field(name='Price', value=gear['price'], inline=True)
        embed.add_field(name='Armor', value=gear['armor'], inline=True)
        embed.add_field(name="Gearscore", value=gear['score'], inline=True)

        if (gear['fire'].strip().strip('-')):
            embed.add_field(name='Firearms', value=gear['fire'], inline=True)
        if (gear['stam'].strip().strip('-')):
            embed.add_field(name='Stamina', value=gear['stam'], inline=True)
        if (gear['elec'].strip().strip('-')):
            embed.add_field(name='Electronics', value=gear['elec'], inline=True)
        
        major_attr = "  **|**  ".join(gear["major"].strip().strip("-").split("<br/>"))
        minor_attr = "  **|**  ".join(gear["minor"].strip().strip("-").split("<br/>"))

        if major_attr:
            embed.add_field(name='Major Attribute(s)', value=major_attr, inline=True)
        
        if minor_attr:
            embed.add_field(name='Minor Attribute(s)', value=minor_attr, inline=True)
        
        if gear['rarity'] == 'header-he':
            embed.color = 0xDA9513
        else:
            embed.color = 0x07C973
        
        return embed
    
    def render_weapon_talent(self, talent):
        embed = MessageEmbed()
        # embed.set_author(name='GearSheet')

        embed.title = talent['name']
        embed.description = talent['description']

        req = talent['requirements']['34']
        req_str = '**electronics**: %s, **firearms**: %s, **stamina**: %s' % \
                  ('none' if req['electronics'] == 0 else req['electronics'],
                   'none' if req['firearms'] == 0 else req['firearms'],
                   'none' if req['stamina'] == 0 else req['stamina'])

        embed.add_field(name='Rolls On', value=', '.join(talent['rollsOn']), inline=True)
        embed.add_field(name='Requirements', value=req_str, inline=True)

        if 'note' in talent.keys():
            embed.set_footer(text=talent['note'])

        embed.color = 0xDA9513

        return embed

    def render_player_talent(self, talent):
        embed = MessageEmbed()

        embed.title = talent['name']
        embed.description = talent['description']

        embed.add_field(name='Type', value=talent['type'], inline=True)
        embed.add_field(name='Benefits', value=talent['benefit'], inline=True)

        embed.color = 0xDA9513

        return embed

    def render_gear_talent(self, talent):
        embed = MessageEmbed()

        embed.title = talent['name']
        embed.description = talent['description']

        embed.set_footer(text='Rolls on %s' % talent['slot'])

        embed.color = 0xDA9513

        return embed

    def render_gearset(self, gearset):
        embed = MessageEmbed()

        embed.title = gearset['name']

        embed.add_field(name='2 piece bonus', value=gearset['2'], inline=True)
        embed.add_field(name='3 piece bonus', value=gearset['3'], inline=True)
        embed.add_field(name='4 piece bonus', value=gearset['4'])
        embed.add_field(name='5 piece bonus', value=gearset['5'])
        embed.add_field(name='6 piece bonus', value=gearset['6'])

        embed.set_footer(text="added in patch %s" % gearset['patch'])
        embed.color = 52377

        return embed

    def render_weapon(self, weapon):
        self.normalize(weapon)
        embed = MessageEmbed()

        embed.title = weapon['name']

        embed.add_field(name='Type', value=weapon['type'], inline=True)
        embed.add_field(name='Variant', value=weapon['variant'], inline=True)
        embed.add_field(name='RPM', value=weapon['rpm'], inline=True)

        embed.add_field(name='Scaling', value=weapon['scaling'], inline=True)
        embed.add_field(name='Magazine Size', value=weapon['MagSize'], inline=True)
        embed.add_field(name='Optimal Range(m)', value=weapon['OptimalRange'], inline=True)

        embed.add_field(name='Reload Speed(ms)', value=weapon['ReloadSpeed'], inline=True)
        embed.add_field(name='Headshot Multiplier', value=weapon['HeadshotMultiplier'], inline=True)
        embed.add_field(name='Native Bonus', value=weapon['WeaponBonus'], inline=True)

        embed.add_field(name='Bonus', value=weapon['Bonus'], inline=True)

        damageStr = "%s - %s" % (weapon['256']['min'], weapon['256']['max'])

        embed.add_field(name='Base Damage', value=damageStr, inline=True)

        if 'modCompat' in weapon.keys():
            compat = ', '.join(weapon['modCompat']['compat'])
            embed.add_field(name='Compatible Mods', value=compat)

            if 'note' in weapon['modCompat'].keys():
                embed.set_footer(text="%s" % weapon['modCompat']['note'])

        if 'talent' in weapon.keys():
            description = weapon['talent']['description']
            embed.description = description

        embed.color = 0xDA9513


        return embed

    def normalize(self, item):  # don't give empty params to bot embed
        for i in item.keys():
            if type(item[i]) is str and len(item[i]) == 0:
                item[i] = '-'

    def render_weapon_mods(self, mod):
        embed = MessageEmbed()
        key_names = {"Mod_Category", "name", "Primary_Attribute", "Mod_Type", "Crit_Chance",
                     "Crit_Damage", "Headshot_Damage", "Accuracy", "Stability", "Reload_Speed",
                    "Rate_Of_Fire", "Optimal_Range", "Magazine_Size", "Decreased_Threat", "Increased_Threat"}

        for key in mod.keys():
            if key == 'name':
                embed.title = mod['name']
            elif key in key_names:
                val_str = ", ".join(mod[key]) if type(mod[key]) is list else mod[key]
                embed.add_field(name=key.replace("_", " "), value=val_str, inline=True)

        embed.set_footer(text="All mods will roll their Primary Attributes, "
                              "as well as an additional 2 attributes")

        embed.color = 0xDA9513

        return embed

    def render_exotic_gear(self, exotic_gear):
        embed = MessageEmbed()

        embed.title = exotic_gear['name']
        embed.description = exotic_gear['description']

        embed.color = 0xCF5A2E
        return embed

    def render_gear_attribute(self, gear_attribute):
        embed = MessageEmbed()

        key_names = ["type", "name", "Minimum_Total", "Mask", "Body_Armor", "Backpack", "Gloves", "Knee_Pads", "Holster",
                     "Maximum_Total", "Max_With_Gear_Mods"]

        for key in gear_attribute.keys():
            if key == 'name':
                embed.title = gear_attribute[key]
            elif key in key_names:
                val = gear_attribute[key]
                embed.add_field(name=key.replace("_", " "), value=val, inline=True)
        
        embed.color = 0xDA9513

        return embed
Code Example #17
File: zeromqimpl.py Project: oxhead/RoxieBench
    class WorkloadRecord:
        def __init__(self, workload_id, workload):
            self.time_start = None
            self.time_last_report = None
            self.workload_id = workload_id
            self.workload = workload
            self.query_count = 0
            self.num_finished_jobs = 0
            self.ready_drivers = 0
            self.dispatch_completed = False
            self.counter_success = 0
            self.counter_failure = 0
            self.statistics = {}
            self.timeline_completion = {}
            self.timeline_failure = {}
            self.logger = logging.getLogger(__name__)
            self.statistics_lock = BoundedSemaphore(1)

        def start(self):
            self.logger.info("* jobs started")
            self.time_start = time.time()

        def get_workload(self):
            return self.workload

        def add_workload(self, num_queries):
            self.query_count += num_queries

        def completed_dispatch(self):
            self.logger.info("@ job dispatch completed")
            self.dispatch_completed = True

        def report_completion(self, report):
            self.time_last_report = time.time()
            self.num_finished_jobs += 1
            item_id = report['item']
            report.pop('item', None)
            with self.statistics_lock:
                self.statistics[item_id] = report
            timeslot = int(self.time_last_report - self.time_start) + 1
            if report['success']:
                self.counter_success += 1
                if timeslot not in self.timeline_completion:
                    self.timeline_completion[timeslot] = 0
                self.timeline_completion[timeslot] += 1
            else:
                self.counter_failure += 1
                if timeslot not in self.timeline_failure:
                    self.timeline_failure[timeslot] = 0
                self.timeline_failure[timeslot] += 1

        def is_completed(self):
            """jobs may all complete before dispatch finish"""
            self.logger.info("# dispatch completed: %s",
                             self.dispatch_completed)
            self.logger.info("@ num_queries={}, num_finished_jobs={}".format(
                self.query_count, self.num_finished_jobs))
            return self.dispatch_completed and (self.query_count
                                                == self.num_finished_jobs)

        def is_started(self):
            return self.time_start is not None

        def get_report(self):
            elapsed = (self.time_last_report - self.time_start
                       if self.is_completed() else time.time() - self.time_start)
            return {
                "num_finished_jobs": self.num_finished_jobs,
                "num_successful_jobs": self.counter_success,
                "num_failed_jobs": self.counter_failure,
                "elapsed_time": elapsed,
            }

        def get_statistics(self):
            self.logger.info("## total reported jobs: {}".format(
                len(self.statistics)))
            return self.statistics

        def get_timeline_completion(self):
            return self.timeline_completion

        def get_timeline_failure(self):
            return self.timeline_failure
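report_completion buckets each report into a 1-indexed per-second timeslot. A tiny standalone illustration of that bucketing:

import time

time_start = time.time()
timeline = {}

def bucket(now):
    # 1-indexed second since start; reports in the same second share a slot
    slot = int(now - time_start) + 1
    timeline[slot] = timeline.get(slot, 0) + 1

bucket(time_start + 0.2)  # slot 1
bucket(time_start + 0.9)  # slot 1
bucket(time_start + 1.1)  # slot 2
print(timeline)           # {1: 2, 2: 1}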
Code Example #18
File: echo_node.py Project: hackaugusto/raiden
class EchoNode:

    def __init__(self, api, token_address):
        assert isinstance(api, RaidenAPI)
        self.ready = Event()

        self.api = api
        self.token_address = token_address

        existing_channels = self.api.get_channel_list(
            api.raiden.default_registry.address,
            self.token_address,
        )

        open_channels = [
            channel_state
            for channel_state in existing_channels
            if channel.get_status(channel_state) == CHANNEL_STATE_OPENED
        ]

        if len(open_channels) == 0:
            token = self.api.raiden.chain.token(self.token_address)
            if token.balance_of(self.api.raiden.address) <= 0:
                raise ValueError('not enough funds for echo node %s for token %s' % (
                    pex(self.api.raiden.address),
                    pex(self.token_address),
                ))
            self.api.token_network_connect(
                self.api.raiden.default_registry.address,
                self.token_address,
                token.balance_of(self.api.raiden.address),
                initial_channel_target=10,
                joinable_funds_target=.5,
            )

        self.last_poll_offset = 0
        self.received_transfers = Queue()
        self.stop_signal = None  # used to signal REMOVE_CALLBACK and stop echo_workers
        self.greenlets = list()
        self.lock = BoundedSemaphore()
        self.seen_transfers = deque(list(), TRANSFER_MEMORY)
        self.num_handled_transfers = 0
        self.lottery_pool = Queue()
        # register ourselves with the raiden alarm task
        self.api.raiden.alarm.register_callback(self.echo_node_alarm_callback)
        self.echo_worker_greenlet = gevent.spawn(self.echo_worker)
        log.info('Echo node started')

    def echo_node_alarm_callback(self, block_number):
        """ This can be registered with the raiden AlarmTask.
        If `EchoNode.stop()` is called, it will give the return signal to be removed from
        the AlarmTask callbacks.
        """
        if not self.ready.is_set():
            self.ready.set()
        log.debug('echo_node callback', block_number=block_number)
        if self.stop_signal is not None:
            return REMOVE_CALLBACK
        else:
            self.greenlets.append(gevent.spawn(self.poll_all_received_events))
            return True

    def poll_all_received_events(self):
        """ This will be triggered once for each `echo_node_alarm_callback`.
        It polls all channels for `EventPaymentReceivedSuccess` events,
        adds all new events to the `self.received_transfers` queue and
        respawns `self.echo_worker_greenlet`, if it died. """

        locked = False
        try:
            with Timeout(10):
                locked = self.lock.acquire(blocking=False)
                if not locked:
                    return
                else:
                    received_transfers = self.api.get_raiden_events_payment_history(
                        token_address=self.token_address,
                        offset=self.last_poll_offset,
                    )

                    # keep only the successful payment-received events
                    received_transfers = [
                        event
                        for event in received_transfers
                        if type(event) == EventPaymentReceivedSuccess
                    ]

                    for event in received_transfers:
                        transfer = copy.deepcopy(event)
                        self.received_transfers.put(transfer)

                    # advance last_poll_offset only after events are enqueued (timeout safe)
                    if received_transfers:
                        self.last_poll_offset += len(received_transfers)

                    if not self.echo_worker_greenlet.started:
                        log.debug(
                            'restarting echo_worker_greenlet',
                            dead=self.echo_worker_greenlet.dead,
                            successful=self.echo_worker_greenlet.successful(),
                            exception=self.echo_worker_greenlet.exception,
                        )
                        self.echo_worker_greenlet = gevent.spawn(self.echo_worker)
        except Timeout:
            log.info('timeout while polling for events')
        finally:
            if locked:
                self.lock.release()

    def echo_worker(self):
        """ The `echo_worker` works through the `self.received_transfers` queue and spawns
        `self.on_transfer` greenlets for all not-yet-seen transfers. """
        log.debug('echo worker', qsize=self.received_transfers.qsize())
        while self.stop_signal is None:
            if self.received_transfers.qsize() > 0:
                transfer = self.received_transfers.get()
                if transfer in self.seen_transfers:
                    log.debug(
                        'duplicate transfer ignored',
                        initiator=pex(transfer.initiator),
                        amount=transfer.amount,
                        identifier=transfer.identifier,
                    )
                else:
                    self.seen_transfers.append(transfer)
                    self.greenlets.append(gevent.spawn(self.on_transfer, transfer))
            else:
                gevent.sleep(.5)

    def on_transfer(self, transfer):
        """ This handles the echo logic, as described in
        https://github.com/raiden-network/raiden/issues/651:

            - for transfers with an amount that satisfies `amount % 3 == 0`, it sends a transfer
            with an amount of `amount - 1` back to the initiator
            - for transfers with a "lucky number" amount `amount == 7` it does not send anything
            back immediately -- after having received "lucky number transfers" from 7 different
            addresses it sends a transfer with `amount = 49` to one randomly chosen one
            (from the 7 lucky addresses)
            - consecutive entries to the lucky lottery will receive the current pool size as the
            `echo_amount`
            - for all other transfers it sends a transfer with the same `amount` back to the
            initiator """
        echo_amount = 0
        if transfer.amount % 3 == 0:
            log.info(
                'ECHO amount - 1',
                initiator=pex(transfer.initiator),
                amount=transfer.amount,
                identifier=transfer.identifier,
            )
            echo_amount = transfer.amount - 1

        elif transfer.amount == 7:
            log.info(
                'ECHO lucky number draw',
                initiator=pex(transfer.initiator),
                amount=transfer.amount,
                identifier=transfer.identifier,
                poolsize=self.lottery_pool.qsize(),
            )

            # obtain a local copy of the pool
            pool = self.lottery_pool.copy()
            tickets = [pool.get() for _ in range(pool.qsize())]
            assert pool.empty()
            del pool

            if any(ticket.initiator == transfer.initiator for ticket in tickets):
                assert transfer not in tickets
                log.debug(
                    'duplicate lottery entry',
                    initiator=pex(transfer.initiator),
                    identifier=transfer.identifier,
                    poolsize=len(tickets),
                )
                # signal the poolsize to the participant
                echo_amount = len(tickets)

            # payout
            elif len(tickets) == 6:
                log.info('payout!')
                # reset the pool
                assert self.lottery_pool.qsize() == 6
                self.lottery_pool = Queue()
                # add new participant
                tickets.append(transfer)
                # choose the winner
                transfer = random.choice(tickets)
                echo_amount = 49
            else:
                self.lottery_pool.put(transfer)

        else:
            log.debug(
                'echo transfer received',
                initiator=pex(transfer.initiator),
                amount=transfer.amount,
                identifier=transfer.identifier,
            )
            echo_amount = transfer.amount

        if echo_amount:
            log.debug(
                'sending echo transfer',
                target=pex(transfer.initiator),
                amount=echo_amount,
                orig_identifier=transfer.identifier,
                echo_identifier=transfer.identifier + echo_amount,
                token_address=pex(self.token_address),
                num_handled_transfers=self.num_handled_transfers + 1,
            )

            self.api.transfer(
                self.api.raiden.default_registry.address,
                self.token_address,
                echo_amount,
                transfer.initiator,
                identifier=transfer.identifier + echo_amount,
            )
        self.num_handled_transfers += 1

    def stop(self):
        self.stop_signal = True
        self.greenlets.append(self.echo_worker_greenlet)
        gevent.joinall(self.greenlets, raise_error=True)
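
A hedged start/stop sketch. Obtaining a live `RaidenAPI` instance and a funded `token_address` is outside this excerpt; both names below are placeholders.

# Hedged usage sketch: raiden_api and token_address must come from a
# running Raiden node.
echo = EchoNode(raiden_api, token_address)
echo.ready.wait()       # set on the first alarm-task callback
try:
    gevent.sleep(600)   # serve echoes for a while
finally:
    echo.stop()         # signals REMOVE_CALLBACK and joins all greenlets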
Code Example #23
File: connection_pool.py  Project: 2221758805/harpc
class ConnectionPool(object):
    """dynamic service connection pool"""

    def __init__(self, server_node, iface_cls, config):

        self._section_name = utils.get_module(__name__)
        self._logger = logging.getLogger(__name__)
        self._host = server_node.split(":")[0]
        self._port = int(server_node.split(":")[1])
        self._iface_cls = iface_cls

        self._get_conn_timeout = config.getint(self._section_name, "pool_timeout",
                                               default=settings.DEFAULT_POOL_TIMEOUT)
        self._socket_timeout = config.getint(self._section_name, "request_timeout",
                                             default=settings.DEFAULT_REQUEST_TIMEOUT) * 1000
        self._size = config.getint(self._section_name, "pool_size", default=settings.DEFAULT_POOL_SIZE)

        self._c_module_serialize = config.getboolean(self._section_name, "c_module_serialize",
                                                     default=settings.USE_C_MODULE_SERIALIZE)

        self._closed = False
        if ASYNC_TAG:
            from gevent.lock import BoundedSemaphore
            from gevent import queue as Queue
            self._semaphore = BoundedSemaphore(self._size)
            self._connection_queue = Queue.LifoQueue(self._size)
            self._QueueEmpty = Queue.Empty
        else:
            from threading import BoundedSemaphore
            import Queue
            self._semaphore = BoundedSemaphore(self._size)
            self._connection_queue = Queue.LifoQueue(self._size)
            self._QueueEmpty = Queue.Empty

    def close(self):
        self._closed = True
        # drain the queue, closing every pooled connection
        while not self._connection_queue.empty():
            try:
                conn = self._connection_queue.get(block=False)
                self._close_connection(conn)
            except self._QueueEmpty:
                pass

    def _create_connection(self):
        self._logger.debug("create a new connection ip:%s port:%s" %(self._host, self._port))
        socket_ = TSocket.TSocket(self._host, self._port)
        if self._socket_timeout > 0:
            socket_.setTimeout(self._socket_timeout)
        transport = TTransport.TBufferedTransport(socket_)
        if self._c_module_serialize:
            protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
        else:
            protocol = TBinaryProtocol.TBinaryProtocol(transport)
        connection = self._iface_cls(protocol)
        transport.open()
        return connection

    def _close_connection(self, conn):
        # close both transports; ignore errors from already-dead sockets
        try:
            conn._iprot.trans.close()
        except Exception:
            pass
        try:
            conn._oprot.trans.close()
        except Exception:
            pass

    def get_connection(self):
        """ get a connection from the pool. This blocks until one is available."""
        self._semaphore.acquire()
        if self._closed:
            # don't leak the semaphore slot when the pool is already closed
            self._semaphore.release()
            raise RuntimeError('connection pool closed')
        try:
            return self._connection_queue.get(block=False)
        except self._QueueEmpty:
            try:
                return self._create_connection()
            except Exception:
                self._semaphore.release()
                raise

    def return_connection(self, conn):
        """ return a connection to the pool."""
        if self._closed:
            self._close_connection(conn)
            return
        self._connection_queue.put(conn)
        self._semaphore.release()

    def release_connection(self, conn):
        """ call when the connection is no longer usable"""
        try:
            self._close_connection(conn)
        except Exception:
            pass
        if not self._closed:
            self._semaphore.release()

    def release_all_connection(self):
        """ call when all connections in the pool are no longer usable"""
        while not self._connection_queue.empty():
            try:
                conn = self._connection_queue.get(block=False)
                self._close_connection(conn)
            except self._QueueEmpty:
                pass
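
A hedged checkout/return sketch. The Thrift-generated client class (`MyService.Client`) and the `config` object are assumptions taken from the surrounding project, not shown in this excerpt.

# Hedged usage sketch of the pool's three-method protocol.
pool = ConnectionPool("127.0.0.1:9090", MyService.Client, config)
conn = pool.get_connection()        # blocks until a slot is free
try:
    conn.ping()
    pool.return_connection(conn)    # healthy: hand it back to the pool
except Exception:
    pool.release_connection(conn)   # broken: close it and free the slot
    raise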
Code Example #24
class LoggerMixin(object):
    def __init__(self):
        from gevent.lock import BoundedSemaphore

        self.__logger_sem = BoundedSemaphore(1)
        self.__logger = None

    def _get_logger(self):
        # double-checked locking: build the logger at most once
        if not self.__logger:
            self.__logger_sem.acquire()
            try:
                if not self.__logger:
                    self.__logger = self.__init_logger()
            finally:
                self.__logger_sem.release()

        return self.__logger

    logger = property(_get_logger, doc="Get the logger of the engine")

    def log(self, msg, level=logging.INFO):
        self.logger.log(level, msg)

    def __init_logger(self):
        import argparse

        parser = argparse.ArgumentParser()
        parser.add_argument('--quiet', action='store_true')
        parser.add_argument('--log2file', action='store_true')
        parser.add_argument('--debug', action='store_true')
        parser.add_argument('--logpath', type=str)
        args, leftovers = parser.parse_known_args()

        if 'logging' not in dhaulagiri_settings:
            dhaulagiri_settings['logging'] = {}

        if args.log2file:
            dhaulagiri_settings['logging']['write_to_file'] = True
        if args.quiet:
            dhaulagiri_settings['logging']['write_to_stream'] = False
        if args.debug:
            dhaulagiri_settings['logging']['log_level'] = logging.DEBUG
        if args.logpath:
            dhaulagiri_settings['logging']['log_path'] = args.logpath

        import os
        from logging.handlers import TimedRotatingFileHandler
        from logging import StreamHandler, Formatter

        name = getattr(self, 'name', 'general_logger')

        # Set up a specific logger with our desired output level
        from hashlib import md5
        from random import randint
        import sys

        sig = md5(('%d' % randint(0, sys.maxsize)).encode('utf-8')).hexdigest()[:8]
        logger = logging.getLogger('%s-%s' % (name, sig))

        handler_list = []
        if dhaulagiri_settings['logging']['write_to_stream']:
            handler_list.append(StreamHandler())
        if dhaulagiri_settings['logging']['write_to_file']:
            log_path = os.path.abspath(dhaulagiri_settings['logging']['log_path'])

            try:
                os.mkdir(log_path)
            except OSError:
                pass

            log_file = os.path.normpath(os.path.join(log_path, '%s.log' % name))
            handler = TimedRotatingFileHandler(log_file, when='D', interval=1, encoding='utf-8')
            handler_list.append(handler)

        log_level = dhaulagiri_settings['logging']['log_level']
        formatter = Formatter(fmt='%(asctime)s [%(name)s] [%(threadName)s] %(levelname)s: %(message)s',
                              datefmt='%Y-%m-%d %H:%M:%S%z')

        if not handler_list:
            handler_list.append(logging.NullHandler())
        for handler in handler_list:
            handler.setLevel(log_level)
            handler.setFormatter(formatter)
            logger.addHandler(handler)

        logger.setLevel(log_level)

        return logger
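
A hedged mixin sketch. It assumes `dhaulagiri_settings` provides the logging defaults read in `__init_logger`; the class and message names are placeholders.

# Hedged usage sketch: the mixin builds its logger lazily on first access.
import logging

class CrawlerEngine(LoggerMixin):
    name = 'crawler'    # used as the logger name prefix

    def __init__(self):
        LoggerMixin.__init__(self)

engine = CrawlerEngine()
engine.log('engine started')             # INFO by default
engine.log('going down', logging.ERROR)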
Code Example #25
class MyService:
    def __init__(self, num_workers=1):

        # SWITCH OUT THIS TASK QUEUE FOR WHATEVER SPECIALIZED CUSTOM IMPLEMENTATIONS YOU LIKE
        self.taskq = Queue()

        # start the task daemon automatically
        gevent.spawn(self._taskd)

        # pid -> requests for all jobs
        self.requests = {}
        # id -> workers
        self.workers = {}
        self.lock = Semaphore()
        self.bound = BoundedSemaphore(num_workers)

        for _ in range(num_workers):
            self._start_and_register_worker()

    def _taskd(self):
        for request in self.taskq:  # blocks until next request
            # wait until a worker is free
            self.bound.acquire()
            # execute the job if not completed
            if not request.future.ready():
                with self.lock:
                    request.assign_worker(self._find_free_worker())
                    greenlet = gevent.spawn(request.process)
                    request.handler = greenlet
            else:
                print(
                    "request {} already completed, skipping assignment".format(
                        request.pid))
                self.bound.release()

    def _find_free_worker(self):
        # return the first worker without an assigned pid
        for worker_id in self.workers:
            if self.workers[worker_id].pid is None:
                return self.workers[worker_id]
        print("no workers found")

    def _get_request_with_pid(self, pid):
        if pid in self.requests:
            return self.requests[pid]
        return None

    def cancel(self, pid):
        with self.lock:
            req = self._get_request_with_pid(pid)

            if req is not None:
                print("cancelling request {}".format(pid))
                req.cancel()
                if req.worker is not None:
                    print("unregistering worker {}".format(req.worker.id))
                    del self.workers[req.worker.id]
                    self._start_and_register_worker()
            else:
                print("pid {} not found".format(pid))

    def launch(self, function, args):
        # fail fast on a missing pid; everything below assumes it exists
        pid = args["pid"]
        request = None
        try:
            # check for valid pid
            with self.lock:
                print("received request {}".format(pid))
                if self._get_request_with_pid(pid) is not None:
                    raise ValueError("pid already exists")
                request = Request(function, args)
                self.taskq.put(request)
                self.requests[pid] = request
                print("queued request {}".format(pid))

            # block until request is completed (can also raise an exception)
            result = request.future.get()
            print("finished request {}".format(pid))
        except Exception as e:
            result = e
        finally:
            with self.lock:
                if request is not None:
                    # remove the job from the jobs map if it is present (may not be if cancelled)
                    if request.pid in self.requests:
                        del self.requests[request.pid]
                    # free worker if necessary
                    if request.worker is not None:
                        request.worker.pid = None
                        self.bound.release()
                print("released resources for request {}".format(pid))
            return result

    def _start_and_register_worker(self):
        port = random.randint(5000, 6000)
        endpoint = "tcp://127.0.0.1:{}".format(port)
        cmd = "python3 worker.py {}".format(endpoint)
        proc = subprocess.Popen(cmd, shell=True)
        client = zerorpc.Client(endpoint, timeout=100000, heartbeat=None)
        worker = Worker(port, proc, client)
        self.workers[worker.id] = worker
        print("started worker {}".format(port))
Code Example #26
File: pool.py  Project: vpol/geventconnpool
class ConnectionPool(object):
    """
    Generic TCP connection pool, with the following features:
        * Configurable pool size
        * Auto-reconnection when a broken socket is detected
        * Optional periodic keepalive
    """

    # Interval, in seconds, between connection spawns while the pool is populated at startup
    SPAWN_FREQUENCY = 0.1

    def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None):
        self.size = size
        self.conn = deque()
        self.lock = BoundedSemaphore(size)
        self.keepalive = keepalive
        # Exceptions list must be in tuple form to be caught properly
        self.exc_classes = tuple(exc_classes)
        for i in range(size):
            self.lock.acquire()
        for i in range(size):
            gevent.spawn_later(self.SPAWN_FREQUENCY * i, self._addOne)
        if self.keepalive:
            gevent.spawn(self._keepalive_periodic)

    def _new_connection(self):
        """
        Establish a new connection (to be implemented in subclasses).
        """
        raise NotImplementedError

    def _keepalive(self, c):
        """
        Implement actual application-level keepalive (to be
        reimplemented in subclasses).

        :raise: socket.error if the connection has been closed or is broken.
        """
        raise NotImplementedError()

    def _keepalive_periodic(self):
        delay = float(self.keepalive) / self.size
        while 1:
            try:
                with self.get() as c:
                    self._keepalive(c)
            except self.exc_classes:
                # Nothing to do, the pool will generate a new connection later
                pass
            gevent.sleep(delay)

    def _addOne(self):
        stime = 0.1
        while 1:
            c = self._new_connection()
            if c:
                break
            gevent.sleep(stime)
            if stime < 400:
                stime *= 2

        self.conn.append(c)
        self.lock.release()

    @contextmanager
    def get(self):
        """
        Get a connection from the pool, to make and receive traffic.

        If the connection fails for any reason (socket.error), it is dropped
        and a new one is scheduled. Please use @retry as a way to automatically
        retry whatever operation you were performing.
        """
        self.lock.acquire()
        try:
            c = self.conn.popleft()
            yield c
        except self.exc_classes:
            # The current connection has failed, drop it and create a new one
            gevent.spawn_later(1, self._addOne)
            raise
        except:
            self.conn.append(c)
            self.lock.release()
            raise
        else:
            # NOTE: cannot use finally because MUST NOT reuse the connection
            # if it failed (socket.error)
            self.conn.append(c)
            self.lock.release()
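
A hedged subclass sketch; the host, port and wire protocol are placeholders. `_new_connection` returns a falsy value on failure so `_addOne` retries with backoff, and `_keepalive` raises one of `exc_classes` when the peer is gone.

# Hedged subclass sketch under the contract described above.
import socket

class EchoPool(ConnectionPool):
    def _new_connection(self):
        try:
            return socket.create_connection(("127.0.0.1", 4000))
        except socket.error:
            return None           # _addOne will retry with backoff

    def _keepalive(self, c):
        c.sendall(b"PING\n")      # raises socket.error if the peer is gone

pool = EchoPool(size=5, keepalive=30)
with pool.get() as c:
    c.sendall(b"hello\n")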
Code Example #27
class DiscoveryZkClient(object):
    def __init__(self,
                 discServer,
                 zk_srv_ip='127.0.0.1',
                 zk_srv_port='2181',
                 reset_config=False):
        self._reset_config = reset_config
        self._service_id_to_type = {}
        self._ds = discServer
        self._zk_sem = BoundedSemaphore(1)
        self._election = None
        self._restarting = False

        zk_endpts = []
        for ip in zk_srv_ip.split(','):
            zk_endpts.append('%s:%s' % (ip, zk_srv_port))

        # logging
        logger = logging.getLogger('discovery-service')
        logger.setLevel(logging.WARNING)
        handler = logging.handlers.RotatingFileHandler(
            '/var/log/contrail/discovery_zk.log',
            maxBytes=1024 * 1024,
            backupCount=10)
        log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s',
                                       datefmt='%m/%d/%Y %I:%M:%S %p')
        handler.setFormatter(log_format)
        logger.addHandler(handler)

        self._zk = kazoo.client.KazooClient(
            hosts=','.join(zk_endpts),
            handler=kazoo.handlers.gevent.SequentialGeventHandler(),
            logger=logger)
        self._logger = logger

        # connect
        self.connect()

        if reset_config:
            self.delete_node("/services", recursive=True)
            self.delete_node("/clients", recursive=True)
            self.delete_node("/election", recursive=True)

        # create default paths
        self.create_node("/services")
        self.create_node("/clients")
        self.create_node("/election")

        self._debug = {
            'subscription_expires': 0,
            'oos_delete': 0,
            'db_excepts': 0,
        }

    # end __init__

    # Discovery server used for syslog, cleanup etc
    def set_ds(self, discServer):
        self._ds = discServer

    # end set_ds

    def is_restarting(self):
        return self._restarting

    # end is_restarting

    # restart
    def restart(self):
        self._zk_sem.acquire()
        self._restarting = True
        self.syslog("restart: acquired lock; state %s " % self._zk.state)
        # initiate restart if our state is suspended or lost
        if self._zk.state != "CONNECTED":
            self.syslog("restart: starting ...")
            try:
                self._zk.stop()
                self._zk.close()
                self._zk.start()
                self.syslog("restart: done")
            except:
                e = sys.exc_info()[0]
                self.syslog('restart: exception %s' % str(e))
        self._restarting = False
        self._zk_sem.release()

    # start
    def connect(self):
        while True:
            try:
                self._zk.start()
                break
            except gevent.event.Timeout as e:
                self.syslog(
                    'Failed to connect with Zookeeper - will retry in a second')
                gevent.sleep(1)
            # Zookeeper is also throwing exception due to delay in master election
            except Exception as e:
                self.syslog('%s - will retry in a second' % (str(e)))
                gevent.sleep(1)
        self.syslog('Connected to ZooKeeper!')

    # end

    def start_background_tasks(self):
        # spawn loop to expire subscriptions
        gevent.Greenlet.spawn(self.inuse_loop)

        # spawn loop to expire services
        gevent.Greenlet.spawn(self.service_oos_loop)

    # end

    def syslog(self, log_msg):
        if self._logger is None:
            return
        self._logger.info(log_msg)

    # end

    def get_debug_stats(self):
        return self._debug

    # end

    def _zk_listener(self, state):
        if state == "CONNECTED":
            self._election.cancel()

    # end

    def _zk_election_callback(self, func, *args, **kwargs):
        self._zk.remove_listener(self._zk_listener)
        func(*args, **kwargs)

    # end

    def master_election(self, path, identifier, func, *args, **kwargs):
        self._zk.add_listener(self._zk_listener)
        while True:
            self._election = self._zk.Election(path, identifier)
            self._election.run(self._zk_election_callback, func, *args,
                               **kwargs)

    # end master_election

    def create_node(self, path, value='', makepath=True, sequence=False):
        value = str(value)
        while True:
            try:
                return self._zk.set(path, value)
            except kazoo.exceptions.NoNodeException:
                self.syslog('create %s' % (path))
                return self._zk.create(path,
                                       value,
                                       makepath=makepath,
                                       sequence=sequence)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()

    # end create_node

    def get_children(self, path):
        while True:
            try:
                return self._zk.get_children(path)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
            except Exception:
                return []

    # end get_children

    def read_node(self, path):
        while True:
            try:
                data, stat = self._zk.get(path)
                return data, stat
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
            except kazoo.exceptions.NoNodeException:
                self.syslog('exc read: node %s does not exist' % path)
                return (None, None)

    # end read_node

    def delete_node(self, path, recursive=False):
        while True:
            try:
                return self._zk.delete(path, recursive=recursive)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
            except kazoo.exceptions.NoNodeException:
                self.syslog('exc delete: node %s does not exist' % path)
                return None

    # end delete_node

    def exists_node(self, path):
        while True:
            try:
                return self._zk.exists(path)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()

    # end exists_node

    def service_entries(self):
        service_types = self.get_children('/services')
        for service_type in service_types:
            services = self.get_children('/services/%s' % (service_type))
            for service_id in services:
                data, stat = self.read_node('/services/%s/%s' %
                                            (service_type, service_id))
                entry = json.loads(data)
                yield (entry)

    def subscriber_entries(self):
        service_types = self.get_children('/clients')
        for service_type in service_types:
            subscribers = self.get_children('/clients/%s' % (service_type))
            for client_id in subscribers:
                cl_entry = self.lookup_client(service_type, client_id)
                if cl_entry:
                    yield ((client_id, service_type))

    # end

    def update_service(self, service_type, service_id, data):
        path = '/services/%s/%s' % (service_type, service_id)
        self.create_node(path, value=json.dumps(data), makepath=True)

    # end

    def insert_service(self, service_type, service_id, data):

        # ensure election path for service type exists
        path = '/election/%s' % (service_type)
        self.create_node(path)

        # preclude duplicate service entry
        sid_set = set()

        # prevent background task from deleting node under our nose
        seq_list = self.get_children(path)
        # data for election node is service ID
        for sequence in seq_list:
            sid, stat = self.read_node('/election/%s/%s' %
                                       (service_type, sequence))
            if sid is not None:
                sid_set.add(sid)
        if service_id not in sid_set:
            path = '/election/%s/node-' % (service_type)
            pp = self.create_node(path,
                                  service_id,
                                  makepath=True,
                                  sequence=True)
            pat = path + "(?P<id>.*$)"
            mch = re.match(pat, pp)
            seq = mch.group('id')
            data['sequence'] = seq
            self.syslog('ST %s, SID %s not found! Added with sequence %s' %
                        (service_type, service_id, seq))

    # end insert_service

    # forget service and subscribers
    def delete_service(self, service_type, service_id, recursive=False):
        #if self.lookup_subscribers(service_type, service_id):
        #    return

        path = '/services/%s/%s' % (service_type, service_id)
        self.delete_node(path, recursive=recursive)

        # delete service node if all services gone
        path = '/services/%s' % (service_type)
        if self.get_children(path):
            return
        self.delete_node(path)

    #end delete_service

    def lookup_service(self, service_type, service_id=None):
        if not self.exists_node('/services/%s' % (service_type)):
            return None
        if service_id:
            data = None
            path = '/services/%s/%s' % (service_type, service_id)
            datastr, stat = self.read_node(path)
            if datastr:
                data = json.loads(datastr)
                clients = self.get_children(path)
                data['in_use'] = len(clients)
            return data
        else:
            r = []
            services = self.get_children('/services/%s' % (service_type))
            for service_id in services:
                entry = self.lookup_service(service_type, service_id)
                r.append(entry)
            return r

    # end lookup_service

    def query_service(self, service_type):
        path = '/election/%s' % (service_type)
        if not self.exists_node(path):
            return None
        seq_list = self.get_children(path)
        seq_list = sorted(seq_list)

        r = []
        for sequence in seq_list:
            service_id, stat = self.read_node('/election/%s/%s' %
                                              (service_type, sequence))
            entry = self.lookup_service(service_type, service_id)
            r.append(entry)
        return r

    # end

    # TODO use include_data available in new versions of kazoo
    # tree structure /services/<service-type>/<service-id>
    def get_all_services(self):
        r = []
        service_types = self.get_children('/services')
        for service_type in service_types:
            services = self.lookup_service(service_type)
            r.extend(services)
        return r

    # end

    def insert_client(self, service_type, service_id, client_id, blob, ttl):
        data = {'ttl': ttl, 'blob': blob}

        path = '/services/%s/%s/%s' % (service_type, service_id, client_id)
        self.create_node(path, value=json.dumps(data))

        path = '/clients/%s/%s/%s' % (service_type, client_id, service_id)
        self.create_node(path, value=json.dumps(data), makepath=True)

    # end insert_client

    def lookup_subscribers(self, service_type, service_id):
        path = '/services/%s/%s' % (service_type, service_id)
        if not self.exists_node(path):
            return None
        clients = self.get_children(path)
        return clients

    # end lookup_subscribers

    def lookup_client(self, service_type, client_id):
        try:
            datastr, stat = self.read_node('/clients/%s/%s' %
                                           (service_type, client_id))
            data = json.loads(datastr) if datastr else None
        except ValueError:
            self.syslog('raise ValueError st=%s, cid=%s' %
                        (service_type, client_id))
            data = None
        return data

    # end lookup_client

    def insert_client_data(self, service_type, client_id, cldata):
        path = '/clients/%s/%s' % (service_type, client_id)
        self.create_node(path, value=json.dumps(cldata), makepath=True)

    # end insert_client_data

    def lookup_subscription(self,
                            service_type,
                            client_id=None,
                            service_id=None,
                            include_meta=False):
        if not self.exists_node('/clients/%s' % (service_type)):
            return None
        if client_id and service_id:
            try:
                datastr, stat = self.read_node(
                    '/clients/%s/%s/%s' %
                    (service_type, client_id, service_id))
                data = json.loads(datastr)
                blob = data['blob']
                if include_meta:
                    return (blob, stat, data['ttl'])
                else:
                    return blob
            except kazoo.exceptions.NoNodeException:
                return None
        elif client_id:
            # our version of Kazoo doesn't support include_data :-(
            try:
                services = self.get_children('/clients/%s/%s' %
                                             (service_type, client_id))
                r = []
                for service_id in services:
                    datastr, stat = self.read_node(
                        '/clients/%s/%s/%s' %
                        (service_type, client_id, service_id))
                    if datastr:
                        data = json.loads(datastr)
                        blob = data['blob']
                        r.append((service_id, blob, stat))
                # sort services in the order of assignment to this client
                # (based on modification time)
                rr = sorted(r, key=lambda entry: entry[2].last_modified)
                return [(service_id, blob) for service_id, blob, stat in rr]
            except kazoo.exceptions.NoNodeException:
                return None
        else:
            clients = self.get_children('/clients/%s' % (service_type))
            return clients

    # end lookup_subscription

    # delete client subscription. Cleanup path if possible
    def delete_subscription(self, service_type, client_id, service_id):
        path = '/clients/%s/%s/%s' % (service_type, client_id, service_id)
        self.delete_node(path)

        path = '/services/%s/%s/%s' % (service_type, service_id, client_id)
        self.delete_node(path)

        # delete client node if all subscriptions gone
        path = '/clients/%s/%s' % (service_type, client_id)
        if self.get_children(path):
            return
        self.delete_node(path)

        # purge in-memory cache - ideally we are not supposed to know about
        # this
        self._ds.delete_sub_data(client_id, service_type)

        # delete service node if all clients gone
        path = '/clients/%s' % (service_type)
        if self.get_children(path):
            return
        self.delete_node(path)

    # end

    # TODO use include_data available in new versions of kazoo
    # tree structure /clients/<service-type>/<client-id>/<service-id>
    # return tuple (service_type, client_id, service_id)
    def get_all_clients(self):
        r = []
        service_types = self.get_children('/clients')
        for service_type in service_types:
            clients = self.get_children('/clients/%s' % (service_type))
            for client_id in clients:
                services = self.get_children('/clients/%s/%s' %
                                             (service_type, client_id))
                rr = []
                for service_id in services:
                    (datastr, stat,
                     ttl) = self.lookup_subscription(service_type,
                                                     client_id,
                                                     service_id,
                                                     include_meta=True)
                    rr.append((service_type, client_id, service_id,
                               stat.last_modified, ttl))
                rr = sorted(rr, key=lambda entry: entry[3])
                r.extend(rr)
        return r

    # end get_all_clients

    # reset in-use count of clients for each service
    def inuse_loop(self):
        while True:
            service_types = self.get_children('/clients')
            for service_type in service_types:
                clients = self.get_children('/clients/%s' % (service_type))
                for client_id in clients:
                    services = self.get_children('/clients/%s/%s' %
                                                 (service_type, client_id))
                    for service_id in services:
                        path = '/clients/%s/%s/%s' % (service_type, client_id,
                                                      service_id)
                        datastr, stat = self.read_node(path)
                        data = json.loads(datastr)
                        now = time.time()
                        exp_t = stat.last_modified + data['ttl'] +\
                            disc_consts.TTL_EXPIRY_DELTA
                        if now > exp_t:
                            self.delete_subscription(service_type, client_id,
                                                     service_id)
                            self.syslog('Expiring st:%s sid:%s cid:%s' %
                                        (service_type, service_id, client_id))
                            self._debug['subscription_expires'] += 1
            gevent.sleep(10)

    def service_oos_loop(self):
        if self._ds._args.hc_interval <= 0:
            return

        while True:
            for entry in self.service_entries():
                if not self._ds.service_expired(entry, include_down=False):
                    continue
                service_type = entry['service_type']
                service_id = entry['service_id']
                path = '/election/%s/node-%s' % (service_type,
                                                 entry['sequence'])
                if not self.exists_node(path):
                    continue
                self.syslog('Deleting sequence node %s for service %s:%s' %
                            (path, service_type, service_id))
                self.delete_node(path)
                entry['sequence'] = -1
                self.update_service(service_type, service_id, entry)
                self._debug['oos_delete'] += 1
            gevent.sleep(self._ds._args.hc_interval)
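
A hedged wiring sketch. `disc_server` is a placeholder for the discovery server object whose hooks (`service_expired`, `delete_sub_data`, `_args`) this class calls back into; the service names are illustrative.

# Hedged usage sketch: the constructor connects to ZooKeeper immediately.
zk = DiscoveryZkClient(disc_server, zk_srv_ip='10.0.0.1,10.0.0.2')
zk.start_background_tasks()    # expiry loops for subscriptions and services
zk.insert_service('dns-server', 'svc-001', {'service_type': 'dns-server',
                                            'service_id': 'svc-001'})
print(zk.lookup_service('dns-server', 'svc-001'))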
Code Example #29
File: mr_master.py  Project: tree3205/Projects
class Master(object):
    """ workers: a dictionary to store the information about workers
        format: {ip_port: ('Status', Remote_call)}

        jobs_tracker: a dictionary to store the information about jobs
        format:
        { task_name:
              { "mappers":
                      { mapper_id: [remote_call, mapper_ip_port, split_info, finished]},
                "reducers":
                      { reducer_id: [remote_call, reducer_ip_port, finished]},
                "num_mapper":
                      num_mapper,
                "num_reducer":
                      num_reducer,
                "task_file":
                      [filename, codes],
                "split_infos":
                      split_infos,
                "file_info":
                      file_info,
                "output_file:
                      output_file
                "done":
                      True/False
                 }
        }

        mapper_queue: free mapper queue
        format: [(ip_port, remote_call)]

        reducer_queue: free reducer queue
        format: [(ip_port, remote_call)]
    """

    def __init__(self, port, data_dir):
        gevent.spawn(self.controller)
        self.state = STATE_READY
        self.workers = {}
        self.jobs_tracker = {}
        self.port = port
        self.data_dir = data_dir
        self.mapper_queue = Queue()
        self.reducer_queue = Queue()
        self.jobs_tracker_lock = BoundedSemaphore(1)
        self.workers_lock = BoundedSemaphore(1)

    def controller(self):
        while True:
            print '[Master:%s] ' % self.state
            # down_workers = []
            # self.workers_lock.acquire()
            local_workers = dict(self.workers)
            for w in local_workers:
                print '(%s, %s)' % (w, local_workers[w][0])
                # spawn a pinger coroutine for this worker if one isn't running yet
                if not local_workers[w][2]:
                    local_workers[w][2] = True
                    gevent.spawn(self.each_ping, w, local_workers[w][1])
            gevent.sleep(1)

    def each_ping(self, ip_port, c):
        alive = True
        while alive:
            try:
                c.ping()
            except Exception:
                print "**** Error: Worker %s is down" % ip_port
                self.workers.pop(ip_port)
                print "********** Reassign jobs in %s" % ip_port
                gevent.spawn(self.reassign_job, [ip_port])
                alive = False
            gevent.sleep(1)

    # Failure tolerance
    # reassign the failure worker's job to another worker
    # remote = (ip_port, c)
    # def assign_mapper(self, split_info, task_file,
    #                  mapper_id, num_mapper, num_reducer, task_name, file_info):
    # def assign_reducer(self, task_file, num_mapper, reducer_id, output_file, task_name):
    def reassign_job(self, down_workers):
        self.jobs_tracker_lock.acquire()
        reassign_list = []
        for down_worker in down_workers:
            for task_name in self.jobs_tracker:
                # whether deal with failure after the job is done
                # if self.jobs_tracker[task_name]["done"] == False:
                job_dict = self.jobs_tracker[task_name]
                for mapper_id in job_dict["mappers"]:
                    if job_dict["mappers"][mapper_id][1] == down_worker:
                        print "********** down %s did %s mapper %d" % (down_worker, task_name, mapper_id)
                        job_dict["mappers"][mapper_id][3] = False
                        reassign_list.append([task_name, mapper_id, 0])
                for reducer_id in job_dict["reducers"]:
                    if job_dict["reducers"][reducer_id][1] == down_worker:
                        print "********** down %s did %s reducer %d" % (down_worker, task_name, reducer_id)
                        job_dict["reducers"][reducer_id][2] = False
                        reassign_list.append([task_name, reducer_id, 1])
        self.jobs_tracker_lock.release()
        for reassign in reassign_list:
            task_name = reassign[0]
            # Reassign mapper
            if reassign[2] == 0:
                mapper_id = reassign[1]
                print "********** Reassign %s mapper %d" % (task_name, mapper_id)
                gevent.spawn(self.reassign_mapper, mapper_id, task_name)
            # Reassign reducer
            elif reassign[2] == 1:
                reducer_id = reassign[1]
                self.jobs_tracker[task_name]["reducers"].pop(reducer_id)
                print "********** Reassign %s reducer %d" % (task_name, reducer_id)
                gevent.spawn(self.reassign_reducer, reducer_id, task_name)

    def register(self, ip_port):
        gevent.spawn(self.register_async, ip_port)
        return self.data_dir

    def register_async(self, ip_port):
        print '[Master:%s] ' % self.state,
        print 'Registered worker (%s)' % ip_port
        c = zerorpc.Client()
        c.connect("tcp://" + ip_port)
        # self.workers_lock.acquire()
        self.workers[ip_port] = [STATE_READY, c, False]
        # self.workers_lock.release()
        self.mapper_queue.put_nowait((ip_port, c))
        self.reducer_queue.put_nowait((ip_port, c))
        c.ping()

    # Master gets job from client, split input data according to the split size.
    # Then it assigns jobs to mappers and reducers.
    def do_job(self, task_file, split_size, num_reducer, input_file, output_file):
        # identical name for each task
        # use this name to generate intermediate filename
        task_name = time.strftime("%Y%m%d%H%M%S", time.localtime())
        gevent.spawn(self.do_job_async, task_file, split_size, num_reducer, input_file, output_file, task_name)
        return task_name

    # create a new coroutine to handle client's job
    def do_job_async(self, task_file, split_size, num_reducer, input_file, output_file, task_name):
        print "Task %s get" % task_file[0]
        split_size = int(split_size)
        num_reducer = int(num_reducer)
        split_infos, file_info = self.split_file(split_size, input_file)

        num_mapper = len(split_infos)

        # initialize jobs_tracker for task_name
        self.jobs_tracker[task_name] = {"mappers": {}, "reducers": {},
                                        "num_mapper": num_mapper, "num_reducer": num_reducer,
                                        "task_file": task_file, "split_infos": split_infos,
                                        "file_info": file_info, "output_file": output_file,
                                        "split_size": split_size, "done": False}

        print "Task " + task_name + " : assigning %d mappers, %d reducers" % (num_mapper, num_reducer)
        # Map task
        gevent.spawn(self.assign_mappers, task_name)
        # Reduce task
        gevent.spawn(self.assign_reducers, task_name)

    # Client gets report from master
    def client_query(self, task_name):
        # print "client_query for %s" % task_name
        job_tracker = self.jobs_tracker[task_name]
        mappers = job_tracker["mappers"]
        reducers = job_tracker["reducers"]
        needed_mapper = job_tracker["num_mapper"]
        needed_reducer = job_tracker["num_reducer"]

        finished_mapper_num = 0
        finished_reducer_num = 0
        for mapper in mappers:
            if mappers[mapper][3]:
                finished_mapper_num += 1

        for reducer in reducers:
            if reducers[reducer][2]:
                finished_reducer_num += 1

        result_dict = {"finished_mapper": finished_mapper_num, "assigned_mapper": len(mappers),
                       "needed_mapper": needed_mapper, "finished_reducer": finished_reducer_num,
                       "assigned_reducer": len(reducers), "needed_reducer": needed_reducer}
        if finished_reducer_num == needed_reducer:
            print 'Task %s finished' % task_name
            self.jobs_tracker[task_name]["done"] = True
        return result_dict

    # Split the input file and store the associated information
    # in a dict of splits; each entry is one mapper's input.
    def split_file(self, split_size, input_file):
        """ One split only has one file.
            split_info = {0:[(file_name0, start, end)], 1:[(file_name1, start, end)]}
            One split may have more than one file.
            split_info =  {0:[(file_name0, start, end), (file_name1, start, end)],
                           1:[(file_name1, start, end)]}

            file_info = [(file0_path, file0_size), (file1_path, file1_size)]
        """
        split_info = {}
        file_info = []
        # Single file
        if not input_file.endswith('_'):
            file_path = self.data_dir + '/' + input_file
            file_size = os.path.getsize(file_path)
            split_num = int(math.ceil(float(file_size) / split_size))
            # Split file
            for i in range(split_num):
                split_info[i] = []
                start = i * split_size
                if (start + split_size) > file_size:
                    end = file_size
                else:
                    end = start + split_size
                split_info[i].append((file_path, start, end))
            file_info = [(file_path, file_size)]
        # Multiple files
        else:
            # Get all file name by the base name
            # and calculate the total file size.
            # file_info = [[file_dir1, file_size], [file_dir2, file_size], ...]
            total_size = 0
            for root, dir_names, file_names in os.walk(self.data_dir):
                for file_name in fnmatch.filter(file_names, input_file + '*'):
                    dir_file = root + '/' + file_name
                    one_file_size = os.path.getsize(dir_file)
                    total_size += one_file_size
                    file_info.append((dir_file, one_file_size))

            # Get worker num(split num)
            split_num = int(math.ceil(float(total_size) / split_size))

            # Split file
            start = 0
            used_file = 0
            for i in range(split_num):
                remaining_size = split_size
                split_info[i] = []
                while remaining_size > 0:
                    current_file_name = file_info[used_file][0]
                    current_file_size = file_info[used_file][1]
                    # Required remaining_size <= file remaining_size
                    if remaining_size <= (current_file_size - start):
                        split_info[i].append((current_file_name, start, start + remaining_size))
                        if remaining_size == current_file_size - start:
                            start = 0
                            used_file += 1
                        else:
                            start = start + remaining_size
                        remaining_size = 0
                    # Required remaining_size > file remaining_size
                    else:
                        if used_file < len(file_info) - 1:
                            split_info[i].append((current_file_name, start, current_file_size))
                            remaining_size -= current_file_size - start
                            start = 0
                            used_file += 1

                        # This is the last file, then finish split
                        else:
                            split_info[i].append((current_file_name, start, current_file_size))
                            remaining_size = 0
        return split_info, file_info

    # Assign map jobs to free mappers
    def assign_mappers(self, task_name):
        num_mapper = self.jobs_tracker[task_name]["num_mapper"]
        for mapper_id in range(num_mapper):
            ip_port, c = self.mapper_queue.get()
            # Handle failure before assign task.
            while ip_port not in self.workers:
                ip_port, c = self.mapper_queue.get()
            print "Task " + task_name + " : mappers id %d assigned to %s" % (mapper_id, ip_port)
            gevent.spawn(self.assign_mapper, ip_port, c, mapper_id, task_name)

    # Assign map job to a single free mapper
    # After the mapper finished its map job, return back to free mapper queue
    # and notify all reducers to fetch intermediate data
    def assign_mapper(self, ip_port, c, mapper_id, task_name):
        job_dict = self.jobs_tracker[task_name]
        split_info = job_dict["split_infos"][mapper_id]
        task_file = job_dict["task_file"]
        num_mapper = job_dict["num_mapper"]
        num_reducer = job_dict["num_reducer"]
        file_info = job_dict["file_info"]
        split_size = job_dict["split_size"]

        self.jobs_tracker_lock.acquire()
        self.jobs_tracker[task_name]["mappers"][mapper_id] = [c, ip_port, split_info, False]
        self.jobs_tracker_lock.release()
        try:
            success = c.do_map(split_info, task_file,
                               mapper_id, num_mapper, num_reducer, task_name, file_info, split_size)
        except:
            print "**** Error: Can't assign task %s map task to mapper %d %s" \
                  % (task_name, mapper_id, ip_port)

    def mapper_finish(self, success, task_name, mapper_id, ip_port):
        if success:
            """ jobs_tracker =
             { task_name:
              { "mappers":
                      { mapper_id: [remote_call, mapper_ip_port, split_info, finished]}
            """
            self.jobs_tracker[task_name]["mappers"][mapper_id][3] = True
            print "Task %s : mapper %d finished" % (task_name, mapper_id)

            self.jobs_tracker_lock.acquire()
            reducers_dict = self.jobs_tracker[task_name]["reducers"]
            for reducer_id in reducers_dict:
                reducer_c = reducers_dict[reducer_id][0]
                print "mapper %d is notifying reducer %d" % (mapper_id, reducer_id)
                try:
                    reducer_c.notify_mapper_finish(mapper_id, ip_port)
                except:
                    print "**** Error: Task %s mapper %d can't notify reducer %d %s" \
                          % (task_name, mapper_id, reducer_id, reducers_dict[reducer_id][1])
                print "Mapper %d is notifying reducer %d done" % (mapper_id, reducer_id)
            self.jobs_tracker_lock.release()
        else:
            print "Task %s : mapper %d failed" % (task_name, mapper_id)
        if ip_port in self.workers:
            print "%s returns to free mapper queue." % ip_port
            self.mapper_queue.put_nowait((ip_port, self.workers[ip_port][1]))

    # Assign reduce jobs to free reducers
    def assign_reducers(self, task_name):
        num_reducer = self.jobs_tracker[task_name]["num_reducer"]
        procs = []
        for i in range(num_reducer):
            ip_port, c = self.reducer_queue.get()
            while ip_port not in self.workers:
                ip_port, c = self.reducer_queue.get()
            print "Task " + task_name + " : reducer id %d assigned to %s" % (i, ip_port)
            proc = gevent.spawn(self.assign_reducer, ip_port, c, i, task_name)
            procs.append(proc)

    # Assign one reduce job to one reducer
    def assign_reducer(self, ip_port, c, reducer_id, task_name):
        task_file = self.jobs_tracker[task_name]["task_file"]
        num_mapper = self.jobs_tracker[task_name]["num_mapper"]
        output_file = self.jobs_tracker[task_name]["output_file"]

        self.jobs_tracker_lock.acquire()
        self.jobs_tracker[task_name]["reducers"][reducer_id] = [c, ip_port, False]
        for mapper_id in self.jobs_tracker[task_name]["mappers"]:
            if self.jobs_tracker[task_name]["mappers"][mapper_id][3]:
                c.notify_mapper_finish(mapper_id, self.jobs_tracker[task_name]["mappers"][mapper_id][1])
        self.jobs_tracker_lock.release()
        try:
            c.do_reduce(task_file, num_mapper, reducer_id, output_file, task_name)
        except:
            print "**** Error: Can't assign task %s reduce task to reducer %d %s" \
                  % (task_name, reducer_id, ip_port)

    def reducer_finish(self, success, task_name, reducer_id, ip_port):
        if success:
            """ jobs_tracker =
                { task_name:
                  { "reducers":
                          { reducer_id: [remote_call, reducer_ip_port, finished]}
            """
            self.jobs_tracker[task_name]["reducers"][reducer_id][2] = True
            print "Task %s : reducer %d finished" % (task_name, reducer_id)
        else:
            print "Task %s : reducer %d failed" % (task_name, reducer_id)
        if ip_port in self.workers:
            self.reducer_queue.put_nowait((ip_port, self.workers[ip_port][1]))
            print "%s returns to free reducer queue." % ip_port

    # Reassign one map job to one mapper
    def reassign_mapper(self, mapper_id, task_name):
        ip_port, c = self.mapper_queue.get()
        while ip_port not in self.workers:
            ip_port, c = self.mapper_queue.get()
        print "Reassign Task %s : mappers id %d to %s" % (task_name, mapper_id, ip_port)

        self.assign_mapper(ip_port, c, mapper_id, task_name)

    # Reassign one reduce job to one reducer
    def reassign_reducer(self, reducer_id, task_name):
        job_dict = self.jobs_tracker[task_name]
        ip_port, c = self.reducer_queue.get()
        while ip_port not in self.workers:
            ip_port, c = self.reducer_queue.get()
        print "Reassign Task %s : reducer id %d to %s" % (task_name, reducer_id, ip_port)
        self.assign_reducer(ip_port, c, reducer_id, task_name)

    # Collector get result from master
    def get_result(self, filename_base):
        print "Receive collect command: collect " + filename_base
        keys = self.jobs_tracker.keys()
        keys.sort(reverse=True)
        for task_name in keys:
            if self.jobs_tracker[task_name]["output_file"] == filename_base:

                job_dict = self.jobs_tracker[task_name]
                for mapper_id in job_dict["mappers"]:
                    try:
                        job_dict["mappers"][mapper_id][0]\
                            .remove_intermediate_file(task_name, mapper_id, job_dict["num_reducer"])
                    except:
                        print "**** Error: task %s: mapper %d lost connection" % (task_name, mapper_id)
                print "collect " + filename_base + " from " + task_name
                job_dict = self.jobs_tracker[task_name]
                result = ""
                for reducer_id in job_dict["reducers"]:
                    result += job_dict["reducers"][reducer_id][0]\
                        .fetch_result_file(filename_base, reducer_id)
                self.jobs_tracker.pop(task_name, None)
                return True, result
        print "Error: Can't find a job with output: " + filename_base
        return False, ''
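
The master above exposes do_job, client_query, and get_result as RPC entry points. A minimal client-side driver could look like the sketch below; the zerorpc endpoint, the split size, and every argument value are illustrative assumptions, not part of the original code.

# Hedged sketch: driving the MapReduce master above over zerorpc.
import time
import zerorpc

master = zerorpc.Client()
master.connect("tcp://127.0.0.1:4242")

# do_job(task_file, split_size, num_reducer, input_file, output_file)
task_name = master.do_job(["wordcount.py"], 1024 * 1024, 4,
                          "input.txt", "wc_out")

# Poll until every reducer has reported completion.
while True:
    report = master.client_query(task_name)
    if report["finished_reducer"] == report["needed_reducer"]:
        break
    time.sleep(1)

ok, result = master.get_result("wc_out")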
コード例 #30
0
ファイル: crispin.py プロジェクト: bdimcheff/sync-engine
class CrispinConnectionPool(object):
    """
    Connection pool for Crispin clients.

    Connections in a pool are specific to an IMAPAccount.

    Parameters
    ----------
    account_id : int
        Which IMAPAccount to open up a connection to.
    num_connections : int
        How many connections in the pool.
    readonly : bool
        Is the connection to the IMAP server read-only?
    """

    def __init__(self, account_id, num_connections, readonly):
        log.info('Creating Crispin connection pool for account {} with {} '
                 'connections'.format(account_id, num_connections))
        self.account_id = account_id
        self.readonly = readonly
        self._queue = Queue(num_connections, items=num_connections * [None])
        self._sem = BoundedSemaphore(num_connections)
        self._set_account_info()

    @contextlib.contextmanager
    def get(self):
        """ Get a connection from the pool, or instantiate a new one if needed.
        If `num_connections` connections are already in use, block until one is
        available.
        """
        # A gevent semaphore is granted in the order that greenlets tried to
        # acquire it, so we use a semaphore here to prevent potential
        # starvation of greenlets if there is high contention for the pool.
        # The queue implementation does not have that property; having
        # greenlets simply block on self._queue.get(block=True) could cause
        # individual greenlets to block for arbitrarily long.
        self._sem.acquire()
        client = self._queue.get()
        try:
            if client is None:
                client = self._new_connection()
            yield client
        except CONN_DISCARD_EXC_CLASSES as exc:
            # Discard the connection on socket or IMAP errors. Technically this
            # isn't always necessary, since if you got e.g. a FETCH failure you
            # could reuse the same connection. But for now it's the simplest
            # thing to do.
            log.info('IMAP connection error; discarding connection',
                     exc_info=True)
            if client is not None and \
               not isinstance(exc, CONN_UNUSABLE_EXC_CLASSES):
                try:
                    client.logout()
                except Exception:
                    log.info('Error on IMAP logout', exc_info=True)
            client = None
            raise exc
        except:
            raise
        finally:
            self._queue.put(client)
            self._sem.release()

    def _set_account_info(self):
        with session_scope(self.account_id) as db_session:
            account = db_session.query(ImapAccount).get(self.account_id)
            self.sync_state = account.sync_state
            self.provider = account.provider
            self.provider_info = account.provider_info
            self.email_address = account.email_address
            self.auth_handler = account.auth_handler
            if account.provider == 'gmail':
                self.client_cls = GmailCrispinClient
            else:
                self.client_cls = CrispinClient

    def _new_raw_connection(self):
        """Returns a new, authenticated IMAPClient instance for the account."""
        with session_scope(self.account_id) as db_session:
            if self.provider == 'gmail':
                account = db_session.query(GmailAccount).options(
                    joinedload(GmailAccount.auth_credentials)).get(
                    self.account_id)
            else:
                account = db_session.query(GenericAccount).options(
                    joinedload(GenericAccount.imap_secret)).get(self.account_id)
            db_session.expunge(account)

        return self.auth_handler.connect_account(account)

    def _new_connection(self):
        conn = self._new_raw_connection()
        return self.client_cls(self.account_id, self.provider_info,
                               self.email_address, conn,
                               readonly=self.readonly)
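
The get() docstring above explains the key design choice: a FIFO-fair BoundedSemaphore gates entry while a Queue pre-seeded with None slots holds lazily created connections. The same pattern in isolation, as a minimal sketch (all names here are illustrative):

import contextlib
from gevent.lock import BoundedSemaphore
from gevent.queue import Queue

class LazyPool(object):
    def __init__(self, size, factory):
        self._factory = factory
        # One slot per connection; None means "not created yet".
        self._queue = Queue(size, items=[None] * size)
        # Granted in FIFO order, so no greenlet starves under contention.
        self._sem = BoundedSemaphore(size)

    @contextlib.contextmanager
    def get(self):
        self._sem.acquire()
        item = self._queue.get()
        try:
            if item is None:
                item = self._factory()
            yield item
        except Exception:
            item = None  # drop the broken resource; a later get() rebuilds it
            raise
        finally:
            self._queue.put(item)
            self._sem.release()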
コード例 #31
0
ファイル: connpool.py プロジェクト: rtokarev/test-run
class ConnectionPool(object):
    """
    Generic TCP connection pool, with the following features:
        * Configurable pool size
        * Auto-reconnection when a broken socket is detected
        * Optional periodic keepalive
    """

    # Frequency at which the pool is populated at startup
    SPAWN_FREQUENCY = 0.1

    def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None):
        self.size = size
        self.conn = deque()
        self.lock = BoundedSemaphore(size)
        self.keepalive = keepalive
        # Exceptions list must be in tuple form to be caught properly
        self.exc_classes = tuple(exc_classes)
        for i in range(size):
            self.lock.acquire()
        for i in range(size):
            greenlet = TestRunGreenlet(self._addOne)
            greenlet.start_later(self.SPAWN_FREQUENCY * i)
        if self.keepalive:
            greenlet = TestRunGreenlet(self._keepalive_periodic)
            greenlet.start_later()

    def _new_connection(self):
        """
        Establish a new connection (to be implemented in subclasses).
        """
        raise NotImplementedError

    def _keepalive(self, c):
        """
        Implement actual application-level keepalive (to be
        reimplemented in subclasses).

        :raise: socket.error if the connection has been closed or is broken.
        """
        raise NotImplementedError()

    def _keepalive_periodic(self):
        delay = float(self.keepalive) / self.size
        while 1:
            try:
                with self.get() as c:
                    self._keepalive(c)
            except self.exc_classes:
                # Nothing to do, the pool will generate a new connection later
                pass
            gevent.sleep(delay)

    def _addOne(self):
        stime = 0.1
        while 1:
            c = self._new_connection()
            if c:
                break
            gevent.sleep(stime)
            if stime < 400:
                stime *= 2

        self.conn.append(c)
        self.lock.release()

    @contextmanager
    def get(self):
        """
        Get a connection from the pool, to make and receive traffic.

        If the connection fails for any reason (socket.error), it is dropped
        and a new one is scheduled. Please use @retry as a way to automatically
        retry whatever operation you were performing.
        """
        self.lock.acquire()
        try:
            c = self.conn.popleft()
            yield c
        except self.exc_classes:
            # The current connection has failed, drop it and create a new one
            greenlet = TestRunGreenlet(self._addOne)
            greenlet.start_later(1)
            raise
        except:  # noqa: E722
            self.conn.append(c)
            self.lock.release()
            raise
        else:
            # NOTE: cannot use finally because MUST NOT reuse the connection
            # if it failed (socket.error)
            self.conn.append(c)
            self.lock.release()
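
_new_connection and _keepalive are left to subclasses. A plausible TCP subclass might look like this sketch (the host, port, and ping payload are assumptions; DEFAULT_EXC_CLASSES is presumed to cover socket.error):

import socket

class TCPConnectionPool(ConnectionPool):
    def __init__(self, host, port, size=4, keepalive=30):
        self.host = host
        self.port = port
        super(TCPConnectionPool, self).__init__(size, keepalive=keepalive)

    def _new_connection(self):
        try:
            return socket.create_connection((self.host, self.port), timeout=5)
        except socket.error:
            return None  # _addOne() sleeps and retries with backoff

    def _keepalive(self, c):
        # Raises socket.error if the peer has gone away.
        c.sendall(b'ping\n')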
コード例 #32
0
class BufferedSocket():

    is_eof = False
    sock = None
    store = None
    lock = None

    def __init__(self, sock):
        self.sock = sock
        self.store = bytearray()

    def close(self):
        self.sock.close()
        self.is_eof = True

    def send(self, *_data):
        total_sent = 0
        for data in _data:
            if (isinstance(data, str)):
                data = data.encode()
            n = 0
            data_len = len(data)
            data_mview = memoryview(data)
            while (n < data_len):
                sent = self.sock.send(data_mview[n:])
                if (sent < 0):
                    return sent  # return failed
                n += sent
            total_sent += n
        return total_sent

    def sendl(self, *_data):
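        # Lazily create the send lock so that plain send() stays lock-free.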
        if (not self.lock):
            self.lock = BoundedSemaphore()
        self.lock.acquire()
        ret = self.send(*_data)
        self.lock.release()
        return ret

    def recv(self, n):
        if (self.is_eof):
            return None
        if (self.store):
            ret = self.store
            self.store = bytearray()
            return ret
        return self.sock.recv(n)

    def recvn(self, n):
        if (self.is_eof):
            return None
        while (len(self.store) < n):
            data = self.sock.recv(4096)
            if (not data):
                self.is_eof = True
                break
            self.store.extend(data)

        # return n bytes for now
        ret = self.store[:n]
        # set remaining to new store
        self.store = self.store[n:]
        return ret

    # fails if it couldn't find the delimiter until max_size
    def readuntil(self, delimiter, max_size, discard_delimiter):
        if (self.is_eof):
            return None
        # check in store
        if (isinstance(delimiter, str)):
            delimiter = delimiter.encode()

        delimiter_len = len(delimiter)
        # scan the store until end, if not found extend
        # and continue until store > max_size
        to_scan_len = len(self.store)
        i = 0  # how much we scanned already

        _store = self.store  # get a reference
        while (True):
            if (i > max_size):
                self.is_eof = True
                return None
            if (i >= delimiter_len):
                j = 0
                lookup_from = i - delimiter_len
                while (j < delimiter_len
                       and _store[lookup_from + j] == delimiter[j]):
                    j += 1

                if (j == delimiter_len):
                    # found
                    ret = None
                    if (discard_delimiter):
                        ret = _store[:i - delimiter_len]
                    else:
                        ret = _store[:i]
                    self.store = _store[i:]  # set store to unscanned/pending
                    return ret
            if (i >= to_scan_len):
                # scanned all buffer
                data = self.sock.recv(4096)
                if (not data):
                    self.is_eof = True
                    return None

                _store.extend(data)  # fetch more data
                to_scan_len = len(_store)
            i += 1

    def __getattr__(self, key):
        ret = getattr(self.sock, key, _OBJ_END_)
        if (ret == _OBJ_END_):
            raise AttributeError()
        return ret
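
A short usage sketch for BufferedSocket, assuming the surrounding module defines the _OBJ_END_ sentinel (e.g. _OBJ_END_ = object()) and imports BoundedSemaphore; the address and the newline-delimited protocol are illustrative:

from gevent import socket

conn = socket.create_connection(('127.0.0.1', 9000))
buf = BufferedSocket(conn)
buf.sendl('HELLO\n')                    # serialized send, safe for many writers
line = buf.readuntil('\n', 4096, True)  # one line, delimiter discarded
header = buf.recvn(8)                   # exactly 8 bytes (short read on EOF)
buf.close()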
コード例 #33
0
ファイル: file.py プロジェクト: ivicac/sync-engine
class Lock(object):
    """ UNIX-specific exclusive file locks (released when the process ends).

    Based on
    http://blog.vmfarms.com/2011/03/cross-process-locking-and.html,
    adapted for context managers (the 'with' statement).

    Modified to be gevent-safe! Locks held by a given Greenlet may not be
    taken by other Greenlets until released, _as long as you only create one
    Lock object per lockfile_. THIS IS VERY IMPORTANT. *Make sure* that you're
    not creating multiple locks on the same file from the same process,
    otherwise you'll bypass the gevent lock!

    Parameters
    ----------
    f : file or str
        File handle or filename to use as the lock.
    block : bool
        Whether to block or throw IOError if the lock is grabbed multiple
        times.
    """

    TIMEOUT = 60

    def __init__(self, f, block=True):
        if isinstance(f, io.IOBase):
            self.filename = f.name
            self.handle = f if not f.closed else open(f.name, "w")  # noqa: SIM115
        else:
            self.filename = f
            mkdirp(os.path.dirname(f))
            self.handle = open(f, "w")  # noqa: SIM115
        if block:
            self.lock_op = fcntl.LOCK_EX
        else:
            self.lock_op = fcntl.LOCK_EX | fcntl.LOCK_NB
        self.block = block
        self.gevent_lock = BoundedSemaphore(1)

    def acquire(self):
        got_gevent_lock = self.gevent_lock.acquire(blocking=self.block)
        if not got_gevent_lock:
            raise IOError(
                "cannot acquire gevent lock; associated file is {}".format(
                    self.filename))
        fcntl.flock(self.handle, self.lock_op)

    def release(self):
        fcntl.flock(self.handle, fcntl.LOCK_UN)
        self.gevent_lock.release()

    def locked(self):
        return self.gevent_lock.locked()

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, type, value, traceback):
        self.release()

    def __del__(self):
        self.handle.close()
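
Because Lock implements __enter__/__exit__, it is normally used as a context manager. A minimal sketch (the lock path and the guarded work are hypothetical):

lock = Lock('/var/lock/myapp/cache.lock', block=True)
with lock:
    # Exclusive across processes (flock) and greenlets (gevent semaphore).
    rebuild_cache()  # hypothetical critical-section work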
コード例 #34
0
    class WorkloadRecord:
        def __init__(self, workload_id, workload, num_drivers):
            self.time_start = None
            self.time_last_report = None
            self.workload_id = workload_id
            self.workload = workload
            self.query_count = 0
            self.num_finished_jobs = 0
            self.dispatch_completed = False
            self.counter_success = 0
            self.counter_failure = 0
            self.statistics = {}
            self.timeline_completion = {}
            self.timeline_failure = {}
            self.timeline_completion_detailed = {}
            self.timeline_failure_detailed = {}
            self.logger = logging.getLogger(__name__)
            self.statistics_lock = BoundedSemaphore(1)
            self.greenlet_record = None
            self.num_drivers = num_drivers
            self.ready_drivers = {}
            self.report_records = {}
            self.dispatch_records = {}
            for driver_id in range(1, self.num_drivers + 1):
                self.report_records[driver_id] = 0
                self.dispatch_records[driver_id] = 0

        def start(self):
            self.logger.info("Workload {} is started".format(self.workload_id))
            self.time_start = time.time()
            # TODO: implement
            # should start the record keeper
            # self.greenlet_record = gevent.spawn(self.workload)

        def stop(self):
            self.greenlet_record.kill()

        def worker_record(self):
            # TODO: add thread to process report every one second?
            pass

        def get_workload(self):
            return self.workload

        def add_workload(self, num_queries):
            self.query_count += num_queries

        def completed_dispatch(self):
            self.logger.info("Workload {} completed request dispatch".format(
                self.workload_id))
            self.dispatch_completed = True

        def report_completion(self, driver_id, report):
            # self.logger.info("Recording completion from driver {}".format(driver_id))
            self.time_last_report = time.time()
            self.num_finished_jobs += 1
            item_id = report['item']
            report.pop('item', None)
            # TODO: do we need lock??
            self.statistics_lock.acquire()
            #before_length = len(self.statistics)
            self.statistics[item_id] = report
            #self.logger.info("on report: {}, before={}, after={}".format(item_id, before_length, len(self.statistics)))
            self.statistics_lock.release()
            timeslot = int(self.time_last_report - self.time_start) + 1
            if report['success']:
                self.counter_success += 1
                if timeslot not in self.timeline_completion:
                    self.timeline_completion[timeslot] = 0
                self.timeline_completion[timeslot] += 1
            else:
                self.counter_failure += 1
                if timeslot not in self.timeline_failure:
                    self.timeline_failure[timeslot] = 0
                self.timeline_failure[timeslot] += 1
            self.report_records[driver_id] += 1

        def record_dispatch_result(self, driver_id, num):
            self.dispatch_records[driver_id] += num

        def is_completed(self):
            """jobs may all complete before dispatch finish"""
            self.logger.info("# dispatch completed: %s",
                             self.dispatch_completed)
            self.logger.info("@ num_queries={}, num_finished_jobs={}".format(
                self.query_count, self.num_finished_jobs))
            for driver_id in sorted(self.dispatch_records.keys()):
                self.logger.info(
                    "driver_id={}, dispatch={}, reports={}".format(
                        driver_id, self.dispatch_records[driver_id],
                        self.report_records[driver_id]))
            return self.dispatch_completed and (self.query_count
                                                == self.num_finished_jobs)

        def is_started(self):
            return self.time_start is not None

        def get_report(self):
            return {
                "num_finished_jobs":
                self.num_finished_jobs,
                "num_successful_jobs":
                self.counter_success,
                "num_failed_jobs":
                self.counter_failure,
                "elapsed_time":
                self.time_last_report -
                self.time_start if self.is_completed() else time.time() -
                self.time_start
            }

        def get_statistics(self):
            self.logger.info("## total reported jobs: {}".format(
                len(self.statistics)))
            return self.statistics

        def get_timeline_completion(self):
            return self.timeline_completion

        def get_timeline_failure(self):
            return self.timeline_failure
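
A small illustrative driver for WorkloadRecord, assuming the class is importable at module scope; the workload string and report payloads are made up, but the field names follow report_completion() above:

record = WorkloadRecord(workload_id=1, workload='q1;q2', num_drivers=1)
record.start()
record.add_workload(2)                  # two queries dispatched in total
record.record_dispatch_result(1, 2)     # driver 1 dispatched both
record.completed_dispatch()
record.report_completion(1, {'item': 'q1', 'success': True})
record.report_completion(1, {'item': 'q2', 'success': False})
assert record.is_completed()
print(record.get_report())              # elapsed_time, success/failure counts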
コード例 #35
0
class db_conf(object):
    """这是一个单例"""
    __metaclass__ = singleton.singleton

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        config = general_config.config_singleton()
        self.m_table_scene = config.get_value('mysql_table', 'table_scene', '')
        self.m_table_business = config.get_value('mysql_table',
                                                 'table_business', '')
        self.m_table_restrict_cond = config.get_value('mysql_table',
                                                      'table_restrict_cond',
                                                      '')
        self.m_table_unrestrict_cond = config.get_value(
            'mysql_table', 'table_unrestrict_cond', '')
        self.m_table_task = config.get_value('mysql_table', 'table_task', '')
        self.m_table_fixed_content_response = config.get_value(
            'mysql_table', 'table_fixed_content_response', '')
        self.m_table_common_content_response = config.get_value(
            'mysql_table', 'table_common_content_response', '')

        self.m_scene_conf = {}
        self.m_business_conf = {}
        self.m_restrict_cond_conf = {}
        self.m_unrestrict_cond_conf = {}
        self.m_task_conf = {}
        self.m_fixed_content_response_conf = {}
        self.m_common_content_response_conf = {}

        self.m_tips_lock = BoundedSemaphore(1)

    def update_confs(self):
        self.m_tips_lock.acquire()

        self.m_scene_conf.clear()
        self.m_business_conf.clear()
        self.m_restrict_cond_conf.clear()
        self.m_unrestrict_cond_conf.clear()
        self.m_task_conf.clear()
        self.m_fixed_content_response_conf.clear()
        self.m_common_content_response_conf.clear()

        self.update_restrict_cond_conf()
        self.update_unrestrict_cond_conf()
        self.update_task_conf()
        self.update_fixed_content_response_conf()
        self.update_common_content_response_conf()
        self.update_scene_conf()
        self.update_business_conf()

        self.m_tips_lock.release()

    def update_scene_conf(self):
        myres = xlresource.resource_singleton()
        mysql_conn = myres.mysql_pool.get()

        alldata = ()

        try:
            mysql_conn.ping(True)
        except Exception:
            myres.mysql_pool.put(mysql_conn)
            return None

        cursor = mysql_conn.cursor()
        sql = 'select unitag,restrict_cond_tag,unrestrict_cond_tag,task_conf_tag,fixed_rtn_tag,common_rtn_tag from ' + self.m_table_scene
        try:
            cursor.execute(sql)
            alldata = cursor.fetchall()
        except Exception:
            LOG_ERROR('service',
                      'cursor.execute sql= ' + sql + get_vrun_info())
コード例 #36
0
class DianpingProcessor(BaseProcessor):
    name = 'dianping-shop'

    def __init__(self, *args, **kwargs):
        BaseProcessor.__init__(self, *args, **kwargs)
        self.build_args()

        # Cached city information
        from gevent.lock import BoundedSemaphore

        self._city_cache = {}
        self._city_cache_lock = BoundedSemaphore(1)

    def build_args(self):
        """
        Parse command-line arguments.
        """
        import argparse

        parser = argparse.ArgumentParser()
        parser.add_argument('--limit', type=int)
        parser.add_argument('--skip', type=int, default=0)
        parser.add_argument('--query', type=str)
        self.args, leftover = parser.parse_known_args()

    def build_cursor(self):
        col = get_mongodb('raw_dianping', 'Dining', 'mongo-raw')

        query = {}
        if self.args.query:
            exec 'from bson import ObjectId'
            query = eval(self.args.query)

        cursor = col.find(query).skip(self.args.skip)
        if self.args.limit:
            cursor.limit(self.args.limit)
        return cursor

    def populate_tasks(self):
        for val in self.build_cursor():

            def task(entry=val):
                self.process_details(entry)

            self.add_task(task)

    def get_city(self, city_name, coords):
        """
        Look up city details by city name and coordinates.
        """
        if city_name not in self._city_cache:
            try:
                self._city_cache_lock.acquire()
                if city_name not in self._city_cache:
                    col = get_mongodb('geo', 'Locality', 'mongo')
                    lat = coords['lat']
                    lng = coords['lng']
                    if not isinstance(lat, float) or not isinstance(
                            lng, float):
                        return
                    geo_json = {
                        'type': 'Point',
                        'coordinates': [coords['lng'], coords['lat']]
                    }
                    max_distance = 200000
                    city_list = list(
                        col.find({
                            'alias': city_name,
                            'location': {
                                '$near': {
                                    '$geometry': geo_json,
                                    '$maxDistance': max_distance
                                }
                            }
                        }))
                    if city_list:
                        city = city_list[0]
                        self._city_cache[city_name] = city
                    else:
                        self.log(
                            'Failed to find city: %s, lat=%f, lng=%f' %
                            (city_name, lat, lng), logging.WARN)
                        self._city_cache[city_name] = None
            finally:
                self._city_cache_lock.release()

        return self._city_cache[city_name]

    @staticmethod
    def calc_rating(entry):
        """
        Calculate the shop's rating.
        """
        if 'review_stat' not in entry:
            return

        review = entry['review_stat']
        tmp = 0
        for idx in xrange(1, 6):
            key = 'reviewCountStar%d' % idx
            tmp += idx * review[key]
        total_cnt = review['reviewCountAllStar']
        if total_cnt == 0:
            return
        rating = float(tmp) / total_cnt

        return {'rating': rating, 'voteCnt': total_cnt}

    def process_details(self, entry):
        """
        Process shop details.
        """
        city_info = self.get_city(entry['city_name'], {
            'lat': entry['lat'],
            'lng': entry['lng']
        })
        if not city_info:
            return

        country = {}
        for key in ('_id', 'zhName', 'enName'):
            if key in city_info['country']:
                country[key] = city_info['country'][key]

        locality = {}
        for key in ('_id', 'zhName', 'enName', 'location'):
            if key in city_info:
                locality[key] = city_info[key]

        shop = {
            'source': {
                'dianping': {
                    'id': entry['shop_id']
                }
            },
            'zhName': entry['title'],
            'alias': [entry['title']],
            'address': entry['addr'],
            'location': {
                'type': 'Point',
                'coordinates': [entry['lng'], entry['lat']]
            },
            'country': country,
            'locality': locality,
            'targets': [country['_id'], locality['_id']],
            'taoziEna': True,
            'lxpEna': True
        }

        tags = []
        if 'tags' in entry and entry['tags']:
            for t in entry['tags']:
                tags.append(t)
        if 'cat_name' in entry and entry['cat_name']:
            cat_name = entry['cat_name']
            tags.append(cat_name)
            entry['style'] = cat_name
        tags = list(set(tags))
        if tags:
            shop['tags'] = tags

        fields_map = {
            'mean_price': 'price',
            'tel': 'tel',
            'open_time': 'openTime',
            'cover_image': 'cover_image'
        }
        for key1, key2 in fields_map.items():
            if key1 in entry and entry[key1]:
                shop[key2] = entry[key1]

        score = self.calc_rating(entry)
        if score:
            shop['voteCnt'] = score['voteCnt']
            shop['rating'] = score['rating']

        self.update_shop(shop)

    @staticmethod
    def add_image(image_url):
        from hashlib import md5

        url_hash = md5(image_url).hexdigest()
        image = {'url_hash': url_hash, 'key': url_hash, 'url': image_url}
        col_im = get_mongodb('imagestore', 'Images', 'mongo')
        if not col_im.find_one({'key': image['key']}, {'_id': 1}):
            col = get_mongodb('imagestore', 'ImageCandidates', 'mongo')
            col.update({'key': image['key']}, {'$set': image}, upsert=True)
        return image['key']

    @staticmethod
    def update_shop(shop):
        """
        Store the shop in the database.
        """
        if 'cover_image' in shop:
            cover = shop.pop('cover_image')
            image_key = DianpingProcessor.add_image(cover)
            shop['images'] = [{'key': image_key}]

        add_to_set = {}
        for key in ('tags', 'alias'):
            if key in shop:
                value_list = shop.pop(key)
                add_to_set[key] = {'$each': value_list}
        ops = {'$set': shop}
        if add_to_set:
            ops['$addToSet'] = add_to_set

        col = get_mongodb('raw_dianping', 'DiningProc', 'mongo-raw')
        col.update({'source.dianping.id': shop['source']['dianping']['id']},
                   ops,
                   upsert=True)
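
The get_city method above is double-checked, lock-guarded memoization: test the cache, and only on a miss acquire the semaphore and test again before running the expensive query. The same pattern in a minimal generic form (all names are illustrative):

from gevent.lock import BoundedSemaphore

_cache = {}
_cache_lock = BoundedSemaphore(1)

def cached_lookup(key, compute):
    if key not in _cache:
        with _cache_lock:            # gevent semaphores support "with"
            if key not in _cache:    # re-check: another greenlet may have won
                _cache[key] = compute(key)
    return _cache[key]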
コード例 #37
0
class EchoNode:  # pragma: no unittest
    def __init__(self, api: RaidenAPI, token_address: TokenAddress) -> None:
        assert isinstance(api, RaidenAPI)
        self.ready = Event()

        self.api = api
        self.token_address = token_address

        existing_channels = self.api.get_channel_list(
            api.raiden.default_registry.address, self.token_address
        )

        open_channels = [
            channel_state
            for channel_state in existing_channels
            if channel.get_status(channel_state) == ChannelState.STATE_OPENED
        ]

        if len(open_channels) == 0:
            token_proxy = self.api.raiden.proxy_manager.token(self.token_address)
            if not token_proxy.balance_of(self.api.raiden.address) > 0:
                raise ValueError(
                    f"Not enough funds for echo node "
                    f"{to_checksum_address(self.api.raiden.address)} for token "
                    f"{to_checksum_address(self.token_address)}"
                )

            # Using the balance of the node as funds
            funds = TokenAmount(token_proxy.balance_of(self.api.raiden.address))

            self.api.token_network_connect(
                registry_address=self.api.raiden.default_registry.address,
                token_address=self.token_address,
                funds=funds,
                initial_channel_target=10,
                joinable_funds_target=0.5,
            )

        self.num_seen_events = 0
        self.received_transfers: Queue[EventPaymentReceivedSuccess] = Queue()

        # This is used to signal REMOVE_CALLBACK and stop echo_workers
        self.stop_signal: Optional[bool] = None

        self.greenlets: Set[Greenlet] = set()
        self.lock = BoundedSemaphore()
        self.seen_transfers: Deque[EventPaymentReceivedSuccess] = deque(list(), TRANSFER_MEMORY)
        self.num_handled_transfers = 0
        self.lottery_pool = Queue()

        # register ourselves with the raiden alarm task
        self.api.raiden.alarm.register_callback(self.echo_node_alarm_callback)
        self.echo_worker_greenlet = gevent.spawn(self.echo_worker)
        log.info("Echo node started")

    def echo_node_alarm_callback(self, block: Dict[str, Any]) -> Union[object, bool]:
        """ This can be registered with the raiden AlarmTask.
        If `EchoNode.stop()` is called, it will give the return signal to be removed from
        the AlarmTask callbacks.
        """
        if not self.ready.is_set():
            self.ready.set()
        log.debug(
            "echo_node callback",
            node=to_checksum_address(self.api.address),
            block_number=block["number"],
        )
        if self.stop_signal is not None:
            return REMOVE_CALLBACK
        else:
            self.greenlets.add(gevent.spawn(self.poll_all_received_events))
            return True

    def poll_all_received_events(self) -> None:
        """ This will be triggered once for each `echo_node_alarm_callback`.
        It polls all channels for `EventPaymentReceivedSuccess` events,
        adds all new events to the `self.received_transfers` queue and
        respawns `self.echo_worker`, if it died. """

        locked = False
        try:
            with Timeout(10):
                locked = self.lock.acquire(blocking=False)
                if not locked:
                    return
                else:
                    received_transfers: List[Event] = self.api.get_raiden_events_payment_history(
                        token_address=self.token_address, offset=self.num_seen_events
                    )

                    received_transfers = [
                        event
                        for event in received_transfers
                        if type(event) == EventPaymentReceivedSuccess
                    ]

                    for event in received_transfers:
                        transfer = copy.deepcopy(event)
                        self.received_transfers.put(transfer)

                    # set last_poll_block after events are enqueued (timeout safe)
                    if received_transfers:
                        self.num_seen_events += len(received_transfers)

                    if not bool(self.echo_worker_greenlet):
                        log.debug(
                            "Restarting echo_worker_greenlet",
                            node=to_checksum_address(self.api.address),
                            dead=self.echo_worker_greenlet.dead,
                            successful=self.echo_worker_greenlet.successful(),
                            exception=self.echo_worker_greenlet.exception,
                        )
                        self.echo_worker_greenlet = gevent.spawn(self.echo_worker)
        except Timeout:
            log.info("Timeout while polling for events")
        finally:
            if locked:
                self.lock.release()

    def echo_worker(self) -> None:
        """ The `echo_worker` works through the `self.received_transfers` queue and spawns
        `self.on_transfer` greenlets for all not-yet-seen transfers. """
        log.debug("echo worker", qsize=self.received_transfers.qsize())
        while self.stop_signal is None:
            if self.received_transfers.qsize() > 0:
                transfer = self.received_transfers.get()
                if transfer in self.seen_transfers:
                    log.debug(
                        "Duplicate transfer ignored",
                        node=to_checksum_address(self.api.address),
                        initiator=to_checksum_address(transfer.initiator),
                        amount=transfer.amount,
                        identifier=transfer.identifier,
                    )
                else:
                    self.seen_transfers.append(transfer)
                    self.greenlets.add(gevent.spawn(self.on_transfer, transfer))
            else:
                gevent.sleep(0.5)

    def on_transfer(self, transfer: EventPaymentReceivedSuccess) -> None:
        """ This handles the echo logic, as described in
        https://github.com/raiden-network/raiden/issues/651:

            - for transfers with an amount that satisfies `amount % 3 == 0`, it sends a transfer
            with an amount of `amount - 1` back to the initiator
            - for transfers with a "lucky number" amount `amount == 7` it does not send anything
            back immediately -- after having received "lucky number transfers" from 7 different
            addresses it sends a transfer with `amount = 49` to one randomly chosen one
            (from the 7 lucky addresses)
            - consecutive entries to the lucky lottery will receive the current pool size as the
            `echo_amount`
            - for all other transfers it sends a transfer with the same `amount` back to the
            initiator
        """
        echo_amount = PaymentAmount(0)
        if transfer.amount % 3 == 0:
            log.info(
                "Received amount divisible by three",
                node=to_checksum_address(self.api.address),
                initiator=to_checksum_address(transfer.initiator),
                amount=transfer.amount,
                identifier=transfer.identifier,
            )
            echo_amount = PaymentAmount(transfer.amount - 1)

        elif transfer.amount == 7:
            log.info(
                "Received lottery entry",
                node=to_checksum_address(self.api.address),
                initiator=to_checksum_address(transfer.initiator),
                amount=transfer.amount,
                identifier=transfer.identifier,
                poolsize=self.lottery_pool.qsize(),
            )

            # obtain a local copy of the pool
            pool = self.lottery_pool.copy()
            tickets = [pool.get() for _ in range(pool.qsize())]
            assert pool.empty()
            del pool

            if any(ticket.initiator == transfer.initiator for ticket in tickets):
                assert transfer not in tickets
                log.debug(
                    "Duplicate lottery entry",
                    node=to_checksum_address(self.api.address),
                    initiator=to_checksum_address(transfer.initiator),
                    identifier=transfer.identifier,
                    poolsize=len(tickets),
                )
                # signal the poolsize to the participant
                echo_amount = PaymentAmount(len(tickets))

            # payout
            elif len(tickets) == 6:
                log.info("Payout!")
                # reset the pool
                assert self.lottery_pool.qsize() == 6
                self.lottery_pool = Queue()

                # add the new participant
                tickets.append(transfer)

                # choose the winner
                transfer = random.choice(tickets)
                echo_amount = PaymentAmount(49)
            else:
                self.lottery_pool.put(transfer)

        else:
            log.debug(
                "Received transfer",
                node=to_checksum_address(self.api.address),
                initiator=to_checksum_address(transfer.initiator),
                amount=transfer.amount,
                identifier=transfer.identifier,
            )
            echo_amount = PaymentAmount(transfer.amount)

        if echo_amount:
            echo_identifier = PaymentID(transfer.identifier + echo_amount)
            log.debug(
                "Sending echo transfer",
                node=to_checksum_address(self.api.address),
                target=to_checksum_address(transfer.initiator),
                amount=echo_amount,
                original_identifier=transfer.identifier,
                echo_identifier=echo_identifier,
                token_address=to_checksum_address(self.token_address),
                num_handled_transfers=self.num_handled_transfers + 1,
            )

            self.api.transfer(
                registry_address=self.api.raiden.default_registry.address,
                token_address=self.token_address,
                amount=echo_amount,
                target=TargetAddress(transfer.initiator),
                identifier=echo_identifier,
            )

        self.num_handled_transfers += 1

    def stop(self) -> None:
        self.stop_signal = True
        self.greenlets.add(self.echo_worker_greenlet)
        gevent.joinall(self.greenlets, raise_error=True)
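
The echo rule in on_transfer can be read as a small pure function over the transfer amount; the sketch below restates just the non-lottery branches from the docstring for clarity:

def echo_amount_for(amount):
    if amount % 3 == 0:
        return amount - 1  # divisible by three: echo one less
    if amount == 7:
        return 0           # lucky number: deferred to the lottery instead
    return amount          # everything else: echo the same amount

assert echo_amount_for(9) == 8
assert echo_amount_for(7) == 0
assert echo_amount_for(5) == 5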
コード例 #38
0
class CrispinConnectionPool(object):
    """
    Connection pool for Crispin clients.

    Connections in a pool are specific to an IMAPAccount.

    Parameters
    ----------
    account_id : int
        Which IMAPAccount to open up a connection to.
    num_connections : int
        How many connections in the pool.
    readonly : bool
        Is the connection to the IMAP server read-only?
    """
    def __init__(self, account_id, num_connections, readonly):
        log.info('Creating Crispin connection pool for account {} with {} '
                 'connections'.format(account_id, num_connections))
        self.account_id = account_id
        self.readonly = readonly
        self._queue = Queue(num_connections, items=num_connections * [None])
        self._sem = BoundedSemaphore(num_connections)
        self._set_account_info()

    @contextlib.contextmanager
    def get(self):
        """ Get a connection from the pool, or instantiate a new one if needed.
        If `num_connections` connections are already in use, block until one is
        available.
        """
        # A gevent semaphore is granted in the order that greenlets tried to
        # acquire it, so we use a semaphore here to prevent potential
        # starvation of greenlets if there is high contention for the pool.
        # The queue implementation does not have that property; having
        # greenlets simply block on self._queue.get(block=True) could cause
        # individual greenlets to block for arbitrarily long.
        self._sem.acquire()
        client = self._queue.get()
        try:
            if client is None:
                client = self._new_connection()
            yield client
        except CONN_DISCARD_EXC_CLASSES as exc:
            # Discard the connection on socket or IMAP errors. Technically this
            # isn't always necessary, since if you got e.g. a FETCH failure you
            # could reuse the same connection. But for now it's the simplest
            # thing to do.
            log.info('IMAP connection error; discarding connection',
                     exc_info=True)
            if client is not None:
                try:
                    client.logout()
                except Exception:
                    log.error('Error on IMAP logout', exc_info=True)
                client = None
            raise exc
        except:
            raise
        finally:
            self._queue.put(client)
            self._sem.release()

    def _set_account_info(self):
        with session_scope() as db_session:
            account = db_session.query(Account).get(self.account_id)
            self.sync_state = account.sync_state
            self.provider_info = account.provider_info
            self.email_address = account.email_address
            self.auth_handler = account.auth_handler
            if account.provider == 'gmail':
                self.client_cls = GmailCrispinClient
            elif (getattr(account, 'supports_condstore', None) or
                  account.provider_info.get('condstore')):
                self.client_cls = CondStoreCrispinClient
            else:
                self.client_cls = CrispinClient

    def _new_connection(self):
        try:
            with session_scope() as db_session:
                account = db_session.query(Account).get(self.account_id)
                conn = self.auth_handler.connect_account(account)
                # If we can connect the account, then we can set the state
                # to 'running' if it wasn't already
                if self.sync_state != 'running':
                    self.sync_state = account.sync_state = 'running'
            return self.client_cls(self.account_id, self.provider_info,
                                   self.email_address, conn,
                                   readonly=self.readonly)
        except ValidationError as e:
            log.error('Error validating',
                      account_id=self.account_id,
                      logstash_tag='mark_invalid')
            with session_scope() as db_session:
                account = db_session.query(Account).get(self.account_id)
                account.mark_invalid()
                account.update_sync_error(str(e))
            raise
コード例 #39
0
ファイル: LibMLK.py プロジェクト: JoeyWNK/MercJP
class LibMLK(object):
    def __init__(self, serverIP, Plist):
        self.serverIP = serverIP
        self.baseUrl = "http://%s/" % serverIP
        self.crypt = Account(Plist.userID, Plist.devideToken)
        self.mlkLock = BoundedSemaphore(200)
        self.IID = Plist.iid
        self.VID = Plist.vid

    @property
    def headers(self):
        return {
            "VID": self.VID,
            "PID": "-",
            "IID": self.IID,
            "DEVICE_INFO": "iPad2,1:::iPhone OS 8.1.2",
            "Device": "ios",
            "AppVersion": 28,
            "APP_ID_3": self.crypt.deviceToken,
            "APP_ID_2": self.crypt.hashedUserID,
            "APP_ID_1": self.crypt.cryptedUserID,
            "Encrypted": True,
            "User-Agent": "toto/1.1.25.2 CFNetwork/711.1.16 Darwin/14.0.0",
            "Accept-Language": "zh-cn",
            "Accept": "application/json"
        }

    def _post(self, url, params={}, data={}):
        data["_method"] = "GET"
        data = urllib.urlencode(data)
        data = self.crypt.encrypt(data)
        url = urlparse.urljoin(self.baseUrl, url)
        if len(params) > 0:
            e = self.crypt.encrypt(urllib.urlencode(params)).encode("base64").replace("\n", "")
            url = "%s?e=%s" % (url, e)
        ret = None
        try:
            self.mlkLock.acquire()
            ret = requests.post(url, data=data, headers=self.headers, proxies=proxies)
        except:
            traceback.print_exc()
        finally:
            self.mlkLock.release()
        if ret is None:
            raise BaseException()
        if "encrypted" in ret.headers and ret.headers["encrypted"] == "true":
            rtn = self.crypt.decrypt(ret.content)
        else:
            rtn = ret.content
        return rtn

    def get(self, url, params={}, data={}):
        url = urlparse.urlparse(url)
        path = url.path
        query = dict(urlparse.parse_qsl(url.query))
        query.update(params)
        return self._post(path, params=query, data=data)

    def setUsername(self, name):
        ret = self._post("users/update", data={"user_name": name})
        self.user_name = name
        return json.loads(ret)

    def finishTutorial(self):
        ret = self._post("users/update",
                         data={"user_name": self.user_name, "tutorial_finish": True})
        return json.loads(ret)

    def getMessages(self, page_type="Home"):
        params = {
            "last_read_at": int(time.time()),
            "page_type": page_type
        }
        ret = self._post("users/messages", params=params)
        return json.loads(ret)

    def getStages(self):
        ret = self._post("stages")
        return json.loads(ret)

    def getAreas(self, stage_id):
        ret = self._post("areas", params={"stage_id": stage_id})
        return json.loads(ret)

    def getMonsters(self):
        ret = self._post("user_monsters")
        return json.loads(ret)

    def getDecks(self):
        ret = self._post("user_decks")
        return json.loads(ret)

    def getUnits(self):
        ret = self._post("user_units")
        return json.loads(ret)

    def receiveLoginBonus(self):
        ret = self._post("users/receive_login_bonus")
        return json.loads(ret)

    def getLoginRewardList(self):
        ret = self._post("accu_login_activity")
        return json.loads(ret)

    def receiveLoginReward(self, day):
        params = {"day": day}
        ret = self._post("accu_login_activity/fetch_rewards", params=params)
        return json.loads(ret)

    def getRewardList(self):
        ret = self._post("user_presents")
        return json.loads(ret)

    def reward(self, uuid):
        params = {"uuid": uuid}
        ret = self._post("user_presents/receive", params)
        return json.loads(ret)

    def rewardAll(self):
        ret = self._post("user_presents/receive")
        return json.loads(ret)

    def getUserData(self):
        ret = self._post("users/preset_data.json?tutorial_session=true")
        return json.loads(ret)

    def gacha(self, gacha_id, num):
        params = {"id": gacha_id, "count": num}
        ret = self._post("gachas/execute", params=params)
        return json.loads(ret)

    def getUnitList(self):
        ret = self._post("user_units")
        return json.loads(ret)

    def quest(self, quest_id, party_id="001", difficulty_id="normal"):
        params = {
            "base": "Quest/Quest",
            "difficulty_id": difficulty_id,
            "id": quest_id,
            "mode": "quest",
            "name": "Quest",
            "party_id": party_id,
            "tipsLoading": "true",
        }
        ret = self._post("quests/execute/%s.json" % quest_id, params=params)
        ret = json.loads(ret)
        result_url = ret["result_url"]
        if "ap_use_url" in ret:
            ap_use_url = ret["ap_use_url"]
            self.get(ap_use_url)
        time.sleep(30)
        ret = self.get(result_url, params={"time": "27.1234"})
        return ret
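
LibMLK funnels every request through mlkLock, a BoundedSemaphore, so only a bounded number of HTTP calls are in flight at once, and _post() releases the slot in a finally block so an error cannot leak it. A minimal sketch of the same throttling pattern, assuming gevent with requests monkey-patched (the limit and URL are illustrative):

# Sketch: cap concurrent HTTP requests with a BoundedSemaphore.
# Assumes gevent and requests; monkey-patching makes requests cooperative.
from gevent import monkey; monkey.patch_all()
import gevent
from gevent.lock import BoundedSemaphore
import requests

MAX_IN_FLIGHT = 10          # illustrative, like BoundedSemaphore(200) above
_slots = BoundedSemaphore(MAX_IN_FLIGHT)

def fetch(url):
    _slots.acquire()
    try:
        return requests.get(url, timeout=5)
    finally:
        _slots.release()    # release even on error, as _post() does

jobs = [gevent.spawn(fetch, 'http://example.com/') for _ in range(50)]
gevent.joinall(jobs)
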
コード例 #40
0
ファイル: crispin.py プロジェクト: kevinr/sync-engine
class CrispinConnectionPool(object):
    """
    Connection pool for Crispin clients.

    Connections in a pool are specific to an IMAPAccount.

    Parameters
    ----------
    account_id : int
        Which IMAPAccount to open up a connection to.
    num_connections : int
        How many connections in the pool.
    readonly : bool
        Is the connection to the IMAP server read-only?
    """

    def __init__(self, account_id, num_connections, readonly):
        log.info('Creating Crispin connection pool for account {} with {} '
                 'connections'.format(account_id, num_connections))
        self.account_id = account_id
        self.readonly = readonly
        self._queue = Queue(num_connections, items=num_connections * [None])
        self._sem = BoundedSemaphore(num_connections)
        self._set_account_info()

    @contextlib.contextmanager
    def get(self):
        """ Get a connection from the pool, or instantiate a new one if needed.
        If `num_connections` connections are already in use, block until one is
        available.
        """
        # A gevent semaphore is granted in the order that greenlets tried to
        # acquire it, so we use a semaphore here to prevent potential
        # starvation of greenlets if there is high contention for the pool.
        # The queue implementation does not have that property; having
        # greenlets simply block on self._queue.get(block=True) could cause
        # individual greenlets to block for arbitrarily long.
        self._sem.acquire()
        client = self._queue.get()
        try:
            if client is None:
                client = self._new_connection()
            yield client
        except CONN_DISCARD_EXC_CLASSES as exc:
            # Discard the connection on socket or IMAP errors. Technically this
            # isn't always necessary, since if you got e.g. a FETCH failure you
            # could reuse the same connection. But for now it's the simplest
            # thing to do.
            log.info('IMAP connection error; discarding connection',
                     exc_info=True)
            if client is not None and \
               not isinstance(exc, CONN_UNUSABLE_EXC_CLASSES):
                try:
                    client.logout()
                except Exception:
                    log.info('Error on IMAP logout', exc_info=True)
            client = None
            raise exc
        finally:
            self._queue.put(client)
            self._sem.release()

    def _set_account_info(self):
        with session_scope(self.account_id) as db_session:
            account = db_session.query(ImapAccount).get(self.account_id)
            self.sync_state = account.sync_state
            self.provider = account.provider
            self.provider_info = account.provider_info
            self.email_address = account.email_address
            self.auth_handler = account.auth_handler
            if account.provider == 'gmail':
                self.client_cls = GmailCrispinClient
            else:
                self.client_cls = CrispinClient

    def _new_raw_connection(self):
        """Returns a new, authenticated IMAPClient instance for the account."""
        with session_scope(self.account_id) as db_session:
            if self.provider == 'gmail':
                account = db_session.query(GmailAccount).options(
                    joinedload(GmailAccount.auth_credentials)).get(
                    self.account_id)
            else:
                account = db_session.query(GenericAccount).options(
                    joinedload(GenericAccount.secret)).get(self.account_id)
            db_session.expunge(account)

        return self.auth_handler.connect_account(account)

    def _new_connection(self):
        conn = self._new_raw_connection()
        return self.client_cls(self.account_id, self.provider_info,
                               self.email_address, conn,
                               readonly=self.readonly)
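
The comment in get() carries the design insight: a gevent semaphore wakes waiters in the order they arrived, while blocking directly on Queue.get() gives no such fairness guarantee, so the pool gates entry with a semaphore and uses the queue only as storage for idle connections. A stripped-down sketch of that semaphore-plus-queue pattern (the make_conn factory is hypothetical):

# Sketch of the fair connection pool pattern used above. Assumes gevent.
import contextlib
from gevent.lock import BoundedSemaphore
from gevent.queue import Queue

class FairPool(object):
    def __init__(self, size, make_conn):
        self._make_conn = make_conn
        self._queue = Queue(size, items=[None] * size)
        self._sem = BoundedSemaphore(size)

    @contextlib.contextmanager
    def get(self):
        self._sem.acquire()          # waiters are woken in FIFO order
        conn = self._queue.get()
        try:
            if conn is None:         # connections are created lazily
                conn = self._make_conn()
            yield conn
        except Exception:
            conn = None              # drop a possibly broken connection
            raise
        finally:
            self._queue.put(conn)    # return the connection, or the free slot
            self._sem.release()

Returning None to the queue on error keeps the slot count constant while forcing the next borrower to open a fresh connection.
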
コード例 #41
0
class DiscoveryZkClient(object):

    def __init__(self, discServer, zk_srv_ip='127.0.0.1',
                 zk_srv_port='2181', reset_config=False):
        self._reset_config = reset_config
        self._service_id_to_type = {}
        self._ds = discServer
        self._zk_sem = BoundedSemaphore(1)
        self._election = None
        self._restarting = False

        zk_endpts = []
        for ip in zk_srv_ip.split(','):
            zk_endpts.append('%s:%s' %(ip, zk_srv_port))

        # logging
        logger = logging.getLogger('discovery-service')
        logger.setLevel(logging.WARNING)
        handler = logging.handlers.RotatingFileHandler(
            '/var/log/contrail/discovery_zk.log',
            maxBytes=1024 * 1024, backupCount=10)
        log_format = logging.Formatter(
            '%(asctime)s [%(name)s]: %(message)s',
            datefmt='%m/%d/%Y %I:%M:%S %p')
        handler.setFormatter(log_format)
        logger.addHandler(handler)

        self._zk = kazoo.client.KazooClient(
            hosts=','.join(zk_endpts),
            handler=kazoo.handlers.gevent.SequentialGeventHandler(),
            logger=logger)
        self._logger = logger

        # connect
        self.connect()

        if reset_config:
            self.delete_node("/services", recursive=True)
            self.delete_node("/clients", recursive=True)
            self.delete_node("/election", recursive=True)

        # create default paths
        self.create_node("/services")
        self.create_node("/clients")
        self.create_node("/election")

        self._debug = {
            'subscription_expires': 0,
            'oos_delete': 0,
            'db_excepts': 0,
        }
    # end __init__

    # Discovery server used for syslog, cleanup etc
    def set_ds(self, discServer):
        self._ds = discServer
    # end set_ds

    def is_restarting(self):
        return self._restarting
    # end is_restarting

    # restart
    def restart(self):
        self._zk_sem.acquire()
        self._restarting = True
        self.syslog("restart: acquired lock; state %s " % self._zk.state)
        # initiate restart if our state is suspended or lost
        if self._zk.state != "CONNECTED":
            self.syslog("restart: starting ...")
            try:
                self._zk.stop()
                self._zk.close()
                self._zk.start()
                self.syslog("restart: done")
            except Exception as e:
                self.syslog('restart: exception %s' % str(e))
        self._restarting = False
        self._zk_sem.release()

    # connect
    def connect(self):
        while True:
            try:
                self._zk.start()
                break
            except gevent.Timeout:
                self.syslog(
                    'Failed to connect to ZooKeeper - will retry in a second')
                gevent.sleep(1)
            # ZooKeeper can also raise other exceptions, e.g. due to a
            # delay in master election
            except Exception as e:
                self.syslog('%s - will retry in a second' % str(e))
                gevent.sleep(1)
        self.syslog('Connected to ZooKeeper!')
    # end

    def start_background_tasks(self):
        # spawn loop to expire subscriptions
        gevent.Greenlet.spawn(self.inuse_loop)

        # spawn loop to expire services
        gevent.Greenlet.spawn(self.service_oos_loop)
    # end

    def syslog(self, log_msg):
        if self._logger is None:
            return
        self._logger.info(log_msg)
    # end

    def get_debug_stats(self):
        return self._debug
    # end

    def _zk_listener(self, state):
        if state == "CONNECTED":
            self._election.cancel()
    # end

    def _zk_election_callback(self, func, *args, **kwargs):
        self._zk.remove_listener(self._zk_listener)
        func(*args, **kwargs)
    # end

    def master_election(self, path, identifier, func, *args, **kwargs):
        self._zk.add_listener(self._zk_listener)
        while True:
            self._election = self._zk.Election(path, identifier)
            self._election.run(self._zk_election_callback, func, *args, **kwargs)
    # end master_election

    def create_node(self, path, value='', makepath=True, sequence=False):
        value = str(value)
        while True:
            try:
                return self._zk.set(path, value)
            except kazoo.exceptions.NoNodeException:
                self.syslog('create %s' % (path))
                return self._zk.create(path, value, makepath=makepath, sequence=sequence)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
    # end create_node

    def get_children(self, path):
        while True:
            try:
                return self._zk.get_children(path)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
            except Exception:
                return []
    # end get_children

    def read_node(self, path):
        while True:
            try:
                data, stat = self._zk.get(path)
                return data, stat
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
            except kazoo.exceptions.NoNodeException:
                self.syslog('exc read: node %s does not exist' % path)
                return (None, None)
    # end read_node

    def delete_node(self, path, recursive=False):
        while True:
            try:
                return self._zk.delete(path, recursive=recursive)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
            except kazoo.exceptions.NoNodeException:
                self.syslog('exc delete: node %s does not exist' % path)
                return None
    # end delete_node

    def exists_node(self, path):
        while True:
            try:
                return self._zk.exists(path)
            except (kazoo.exceptions.SessionExpiredError,
                    kazoo.exceptions.ConnectionLoss):
                self.restart()
    # end exists_node

    def service_entries(self):
        service_types = self.get_children('/services')
        for service_type in service_types:
            services = self.get_children('/services/%s' % (service_type))
            for service_id in services:
                data, stat = self.read_node(
                    '/services/%s/%s' % (service_type, service_id))
                entry = json.loads(data)
                yield entry

    def subscriber_entries(self):
        service_types = self.get_children('/clients')
        for service_type in service_types:
            subscribers = self.get_children('/clients/%s' % (service_type))
            for client_id in subscribers:
                cl_entry = self.lookup_client(service_type, client_id)
                if cl_entry:
                    yield (client_id, service_type)
    # end

    def update_service(self, service_type, service_id, data):
        path = '/services/%s/%s' % (service_type, service_id)
        self.create_node(path, value=json.dumps(data), makepath=True)
    # end

    def insert_service(self, service_type, service_id, data):

        # ensure election path for service type exists
        path = '/election/%s' % (service_type)
        self.create_node(path)

        # preclude duplicate service entry
        sid_set = set()

        # prevent background task from deleting node under our nose
        seq_list = self.get_children(path)
        # data for election node is service ID
        for sequence in seq_list:
            sid, stat = self.read_node(
                '/election/%s/%s' % (service_type, sequence))
            if sid is not None:
                sid_set.add(sid)
        if service_id not in sid_set:
            path = '/election/%s/node-' % (service_type)
            pp = self.create_node(
                path, service_id, makepath=True, sequence=True)
            pat = path + "(?P<id>.*$)"
            mch = re.match(pat, pp)
            seq = mch.group('id')
            data['sequence'] = seq
            self.syslog('ST %s, SID %s not found! Added with sequence %s' %
                        (service_type, service_id, seq))
    # end insert_service

    # forget service and subscribers
    def delete_service(self, service_type, service_id, recursive=False):
        #if self.lookup_subscribers(service_type, service_id):
        #    return

        path = '/services/%s/%s' % (service_type, service_id)
        self.delete_node(path, recursive=recursive)

        # delete service node if all services gone
        path = '/services/%s' % (service_type)
        if self.get_children(path):
            return
        self.delete_node(path)
    # end delete_service

    def lookup_service(self, service_type, service_id=None):
        if not self.exists_node('/services/%s' % (service_type)):
            return None
        if service_id:
            data = None
            path = '/services/%s/%s' % (service_type, service_id)
            datastr, stat = self.read_node(path)
            if datastr:
                data = json.loads(datastr)
                clients = self.get_children(path)
                data['in_use'] = len(clients)
            return data
        else:
            r = []
            services = self.get_children('/services/%s' % (service_type))
            for service_id in services:
                entry = self.lookup_service(service_type, service_id)
                r.append(entry)
            return r
    # end lookup_service

    def query_service(self, service_type):
        path = '/election/%s' % (service_type)
        if not self.exists_node(path):
            return None
        seq_list = self.get_children(path)
        seq_list = sorted(seq_list)

        r = []
        for sequence in seq_list:
            service_id, stat = self.read_node(
                '/election/%s/%s' % (service_type, sequence))
            entry = self.lookup_service(service_type, service_id)
            r.append(entry)
        return r
    # end

    # TODO use include_data available in new versions of kazoo
    # tree structure /services/<service-type>/<service-id>
    def get_all_services(self):
        r = []
        service_types = self.get_children('/services')
        for service_type in service_types:
            services = self.lookup_service(service_type)
            r.extend(services)
        return r
    # end

    def insert_client(self, service_type, service_id, client_id, blob, ttl):
        data = {'ttl': ttl, 'blob': blob}

        path = '/services/%s/%s/%s' % (service_type, service_id, client_id)
        self.create_node(path, value=json.dumps(data))

        path = '/clients/%s/%s/%s' % (service_type, client_id, service_id)
        self.create_node(path, value=json.dumps(data), makepath=True)
    # end insert_client

    def lookup_subscribers(self, service_type, service_id):
        path = '/services/%s/%s' % (service_type, service_id)
        if not self.exists_node(path):
            return None
        clients = self.get_children(path)
        return clients
    # end lookup_subscribers

    def lookup_client(self, service_type, client_id):
        try:
            datastr, stat = self.read_node(
                '/clients/%s/%s' % (service_type, client_id))
            data = json.loads(datastr) if datastr else None
        except ValueError:
            self.syslog('raise ValueError st=%s, cid=%s' %(service_type, client_id))
            data = None
        return data
    # end lookup_client

    def insert_client_data(self, service_type, client_id, cldata):
        path = '/clients/%s/%s' % (service_type, client_id)
        self.create_node(path, value=json.dumps(cldata), makepath=True)
    # end insert_client_data

    def lookup_subscription(self, service_type, client_id=None,
                            service_id=None, include_meta=False):
        if not self.exists_node('/clients/%s' % (service_type)):
            return None
        if client_id and service_id:
            try:
                datastr, stat = self.read_node(
                    '/clients/%s/%s/%s'
                    % (service_type, client_id, service_id))
                data = json.loads(datastr)
                blob = data['blob']
                if include_meta:
                    return (blob, stat, data['ttl'])
                else:
                    return blob
            except kazoo.exceptions.NoNodeException:
                return None
        elif client_id:
            # our version of Kazoo doesn't support include_data :-(
            try:
                services = self.get_children(
                    '/clients/%s/%s' % (service_type, client_id))
                r = []
                for service_id in services:
                    datastr, stat = self.read_node(
                        '/clients/%s/%s/%s'
                        % (service_type, client_id, service_id))
                    if datastr:
                        data = json.loads(datastr)
                        blob = data['blob']
                        r.append((service_id, blob, stat))
                # sort services in the order of assignment to this client
                # (based on modification time)
                rr = sorted(r, key=lambda entry: entry[2].last_modified)
                return [(service_id, blob) for service_id, blob, stat in rr]
            except kazoo.exceptions.NoNodeException:
                return None
        else:
            clients = self.get_children('/clients/%s' % (service_type))
            return clients
    # end lookup_subscription

    # delete client subscription. Cleanup path if possible
    def delete_subscription(self, service_type, client_id, service_id):
        path = '/clients/%s/%s/%s' % (service_type, client_id, service_id)
        self.delete_node(path)

        path = '/services/%s/%s/%s' % (service_type, service_id, client_id)
        self.delete_node(path)

        # delete client node if all subscriptions gone
        path = '/clients/%s/%s' % (service_type, client_id)
        if self.get_children(path):
            return
        self.delete_node(path)

        # purge in-memory cache - ideally we are not supposed to know about
        # this
        self._ds.delete_sub_data(client_id, service_type)

        # delete service node if all clients gone
        path = '/clients/%s' % (service_type)
        if self.get_children(path):
            return
        self.delete_node(path)
    # end

    # TODO use include_data available in new versions of kazoo
    # tree structure /clients/<service-type>/<client-id>/<service-id>
    # return tuple (service_type, client_id, service_id)
    def get_all_clients(self):
        r = []
        service_types = self.get_children('/clients')
        for service_type in service_types:
            clients = self.get_children('/clients/%s' % (service_type))
            for client_id in clients:
                services = self.get_children(
                    '/clients/%s/%s' % (service_type, client_id))
                rr = []
                for service_id in services:
                    (datastr, stat, ttl) = self.lookup_subscription(
                        service_type, client_id, service_id, include_meta=True)
                    rr.append(
                        (service_type, client_id, service_id,
                         stat.last_modified, ttl))
                rr = sorted(rr, key=lambda entry: entry[3])
                r.extend(rr)
        return r
    # end get_all_clients

    # expire client subscriptions whose TTL has elapsed
    def inuse_loop(self):
        while True:
            service_types = self.get_children('/clients')
            for service_type in service_types:
                clients = self.get_children('/clients/%s' % (service_type))
                for client_id in clients:
                    services = self.get_children(
                        '/clients/%s/%s' % (service_type, client_id))
                    for service_id in services:
                        path = '/clients/%s/%s/%s' % (
                            service_type, client_id, service_id)
                        datastr, stat = self.read_node(path)
                        data = json.loads(datastr)
                        now = time.time()
                        exp_t = (stat.last_modified + data['ttl'] +
                                 disc_consts.TTL_EXPIRY_DELTA)
                        if now > exp_t:
                            self.delete_subscription(
                                service_type, client_id, service_id)
                            self.syslog(
                                'Expiring st:%s sid:%s cid:%s'
                                % (service_type, service_id, client_id))
                            self._debug['subscription_expires'] += 1
            gevent.sleep(10)

    def service_oos_loop(self):
        if self._ds._args.hc_interval <= 0:
            return

        while True:
            for entry in self.service_entries():
                if not self._ds.service_expired(entry, include_down=False):
                    continue
                service_type = entry['service_type']
                service_id = entry['service_id']
                path = '/election/%s/node-%s' % (
                    service_type, entry['sequence'])
                if not self.exists_node(path):
                    continue
                self.syslog('Deleting sequence node %s for service %s:%s' %
                            (path, service_type, service_id))
                self.delete_node(path)
                entry['sequence'] = -1
                self.update_service(service_type, service_id, entry)
                self._debug['oos_delete'] += 1
            gevent.sleep(self._ds._args.hc_interval)
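
Every ZooKeeper accessor above has the same shape: loop, attempt the kazoo call, and on SessionExpiredError or ConnectionLoss call restart() before retrying. That shape can be factored into a single helper; a sketch, assuming kazoo (KazooClient.restart() stands in here for the class's own stop/close/start logic):

# Sketch: retry a kazoo operation across session expiry / connection loss.
# Assumes kazoo; non-retryable errors propagate to the caller, as above.
import kazoo.exceptions

def zk_retry(zk, op, *args, **kwargs):
    while True:
        try:
            return op(*args, **kwargs)
        except (kazoo.exceptions.SessionExpiredError,
                kazoo.exceptions.ConnectionLoss):
            zk.restart()    # reconnect, then retry the same operation

# usage, mirroring get_children() above:
# children = zk_retry(zk, zk.get_children, '/services')
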
コード例 #42
0
class Auction(ESCODBServiceMixin, RequestIDServiceMixin, EscoAuditServiceMixin,
              ESCOBiddersServiceMixin, DateTimeServiceMixin, EscoStagesMixin,
              EscoPostAuctionMixin):
    """ESCO Auction Worker Class"""
    def __init__(self,
                 tender_id,
                 worker_defaults,
                 auction_data={},
                 lot_id=None):
        self.generate_request_id()
        self.tender_id = tender_id
        self.lot_id = lot_id
        if lot_id:
            self.auction_doc_id = tender_id + "_" + lot_id
        else:
            self.auction_doc_id = tender_id
        self.tender_url = urljoin(
            worker_defaults["resource_api_server"],
            '/api/{}/{}/{}'.format(worker_defaults["resource_api_version"],
                                   worker_defaults["resource_name"],
                                   self.tender_id))
        if auction_data:
            self.debug = True
            LOGGER.setLevel(logging.DEBUG)
            self._auction_data = auction_data
        else:
            self.debug = False
        self._end_auction_event = Event()
        self.bids_actions = BoundedSemaphore()
        self.session = RequestsSession()
        self.worker_defaults = worker_defaults
        if self.worker_defaults.get('with_document_service', False):
            self.session_ds = RequestsSession()
        self._bids_data = {}
        self.db = Database(str(self.worker_defaults["COUCH_DATABASE"]),
                           session=Session(retry_delays=range(10)))
        self.audit = {}
        self.retries = 10
        self.bidders_count = 0
        self.bidders_data = []
        self.bidders_features = {}
        self.bidders_coeficient = {}
        self.features = None
        self.mapping = {}
        self.rounds_stages = []

    def schedule_auction(self):
        self.generate_request_id()
        self.get_auction_document()
        if self.debug:
            LOGGER.info("Get _auction_data from auction_document")
            self._auction_data = self.auction_document.get(
                'test_auction_data', {})
        self.get_auction_info()
        self.prepare_audit()
        self.prepare_auction_stages()
        self.save_auction_document()
        round_number = 0
        SCHEDULER.add_job(self.start_auction,
                          'date',
                          kwargs={"switch_to_round": round_number},
                          run_date=self.convert_datetime(
                              self.auction_document['stages'][0]['start']),
                          name="Start of Auction",
                          id="Start of Auction")
        round_number += 1

        SCHEDULER.add_job(self.end_first_pause,
                          'date',
                          kwargs={"switch_to_round": round_number},
                          run_date=self.convert_datetime(
                              self.auction_document['stages'][1]['start']),
                          name="End of Pause Stage: [0 -> 1]",
                          id="End of Pause Stage: [0 -> 1]")
        round_number += 1
        for index in xrange(2, len(self.auction_document['stages'])):
            if self.auction_document['stages'][index - 1]['type'] == 'bids':
                SCHEDULER.add_job(
                    self.end_bids_stage,
                    'date',
                    kwargs={"switch_to_round": round_number},
                    run_date=self.convert_datetime(
                        self.auction_document['stages'][index]['start']),
                    name="End of Bids Stage: [{} -> {}]".format(
                        index - 1, index),
                    id="End of Bids Stage: [{} -> {}]".format(
                        index - 1, index))
            elif self.auction_document['stages'][index - 1]['type'] == 'pause':
                SCHEDULER.add_job(
                    self.next_stage,
                    'date',
                    kwargs={"switch_to_round": round_number},
                    run_date=self.convert_datetime(
                        self.auction_document['stages'][index]['start']),
                    name="End of Pause Stage: [{} -> {}]".format(
                        index - 1, index),
                    id="End of Pause Stage: [{} -> {}]".format(
                        index - 1, index))
            round_number += 1
        LOGGER.info("Prepare server ...",
                    extra={
                        "JOURNAL_REQUEST_ID": self.request_id,
                        "MESSAGE_ID": AUCTION_WORKER_SERVICE_PREPARE_SERVER
                    })
        self.server = run_server(
            self,
            self.convert_datetime(
                self.auction_document['stages'][-2]['start']),
            LOGGER,
            form_handler=form_handler,
            bids_form=BidsForm,
            cookie_path="esco-tenders")

    def wait_to_end(self):
        self._end_auction_event.wait()
        LOGGER.info("Stop auction worker",
                    extra={
                        "JOURNAL_REQUEST_ID": self.request_id,
                        "MESSAGE_ID":
                        AUCTION_WORKER_SERVICE_STOP_AUCTION_WORKER
                    })

    def start_auction(self, switch_to_round=None):
        self.generate_request_id()
        self.audit['timeline']['auction_start']['time'] = datetime.now(
            tzlocal()).isoformat()
        LOGGER.info('---------------- Start auction ----------------',
                    extra={
                        "JOURNAL_REQUEST_ID": self.request_id,
                        "MESSAGE_ID": AUCTION_WORKER_SERVICE_START_AUCTION
                    })
        self.get_auction_info()
        self.get_auction_document()
        # Initial bids
        bids = deepcopy(self.bidders_data)
        self.auction_document["initial_bids"] = []
        bids_info = sorting_start_bids_by_amount(bids, features=self.features)
        for index, bid in enumerate(bids_info):
            amount = str(Fraction(bid["value"]["amountPerformance"]))
            audit_info = {
                "bidder": bid["id"],
                "date": bid["date"],
                "amount": amount,
                "contractDuration": {
                    "years": bid["value"]["contractDuration"]["years"],
                    "days": bid["value"]["contractDuration"]["days"],
                },
                "yearlyPaymentsPercentage":
                    bid["value"]["yearlyPaymentsPercentage"],
            }
            if self.features:
                amount_features = cooking(amount,
                                          self.features,
                                          self.bidders_features[bid["id"]],
                                          reverse=True)
                coeficient = self.bidders_coeficient[bid["id"]]
                audit_info["amount_features"] = str(amount_features)
                audit_info["coeficient"] = str(coeficient)
            else:
                coeficient = None
                amount_features = None

            self.audit['timeline']['auction_start']['initial_bids'].append(
                audit_info)
            self.auction_document["initial_bids"].append(
                prepare_initial_bid_stage(
                    time=bid["date"] if "date" in bid else self.startDate,
                    bidder_id=bid["id"],
                    bidder_name=self.mapping[bid["id"]],
                    amount=amount,
                    coeficient=coeficient,
                    amount_features=amount_features,
                    contractDurationDays=bid["value"]["contractDuration"]
                    ["days"],
                    contractDurationYears=bid["value"]["contractDuration"]
                    ["years"],
                    yearlyPaymentsPercentage=bid["value"]
                    ["yearlyPaymentsPercentage"],
                    annualCostsReduction=bid["value"]["annualCostsReduction"]))
        if isinstance(switch_to_round, int):
            self.auction_document["current_stage"] = switch_to_round
        else:
            self.auction_document["current_stage"] = 0

        all_bids = deepcopy(self.auction_document["initial_bids"])
        minimal_bids = []
        for bid_info in self.bidders_data:
            minimal_bids.append(
                get_latest_bid_for_bidder(all_bids, str(bid_info['id'])))

        minimal_bids = self.filter_bids_keys(
            sorting_by_amount(minimal_bids, reverse=False))
        self.update_future_bidding_orders(minimal_bids)
        self.save_auction_document()

    def end_first_pause(self, switch_to_round=None):
        self.generate_request_id()
        LOGGER.info('---------------- End First Pause ----------------',
                    extra={
                        "JOURNAL_REQUEST_ID": self.request_id,
                        "MESSAGE_ID": AUCTION_WORKER_SERVICE_END_FIRST_PAUSE
                    })
        self.bids_actions.acquire()
        self.get_auction_document()

        if isinstance(switch_to_round, int):
            self.auction_document["current_stage"] = switch_to_round
        else:
            self.auction_document["current_stage"] += 1

        self.save_auction_document()
        self.bids_actions.release()

    def end_auction(self):
        LOGGER.info('---------------- End auction ----------------',
                    extra={
                        "JOURNAL_REQUEST_ID": self.request_id,
                        "MESSAGE_ID": AUCTION_WORKER_SERVICE_END_AUCTION
                    })
        LOGGER.debug("Stop server",
                     extra={"JOURNAL_REQUEST_ID": self.request_id})
        if self.server:
            self.server.stop()
        LOGGER.debug("Clear mapping",
                     extra={"JOURNAL_REQUEST_ID": self.request_id})
        delete_mapping(self.worker_defaults, self.auction_doc_id)

        start_stage, end_stage = self.get_round_stages(ROUNDS)
        minimal_bids = deepcopy(
            self.auction_document["stages"][start_stage:end_stage])
        minimal_bids = self.filter_bids_keys(
            sorting_by_amount(minimal_bids, reverse=False))
        self.auction_document["results"] = []
        for item in minimal_bids:
            self.auction_document["results"].append(
                prepare_results_stage(**item))
        self.auction_document["current_stage"] = (
            len(self.auction_document["stages"]) - 1)
        LOGGER.debug(' '.join(
            ('Document in end_stage: \n',
             yaml_dump(json.loads(dumps(self.auction_document))))),
                     extra={"JOURNAL_REQUEST_ID": self.request_id})
        self.approve_audit_info_on_announcement()
        LOGGER.info('Audit data: \n {}'.format(
            yaml_dump(json.loads(dumps(self.audit)))),
                    extra={"JOURNAL_REQUEST_ID": self.request_id})
        if self.debug:
            LOGGER.debug('Debug: put_auction_data disabled !!!',
                         extra={"JOURNAL_REQUEST_ID": self.request_id})
            sleep(10)
            self.save_auction_document()
        else:
            if self.put_auction_data():
                self.save_auction_document()
        LOGGER.debug("Fire 'stop auction worker' event",
                     extra={"JOURNAL_REQUEST_ID": self.request_id})

    def cancel_auction(self):
        self.generate_request_id()
        if self.get_auction_document():
            LOGGER.info(
                "Auction {} canceled".format(self.auction_doc_id),
                extra={'MESSAGE_ID': AUCTION_WORKER_SERVICE_AUCTION_CANCELED})
            self.auction_document["current_stage"] = -100
            self.auction_document["endDate"] = datetime.now(
                tzlocal()).isoformat()
            LOGGER.info("Change auction {} status to 'canceled'".format(
                self.auction_doc_id),
                        extra={
                            'MESSAGE_ID':
                            AUCTION_WORKER_SERVICE_AUCTION_STATUS_CANCELED
                        })
            self.save_auction_document()
        else:
            LOGGER.info(
                "Auction {} not found".format(self.auction_doc_id),
                extra={'MESSAGE_ID': AUCTION_WORKER_SERVICE_AUCTION_NOT_FOUND})

    def reschedule_auction(self):
        self.generate_request_id()
        if self.get_auction_document():
            LOGGER.info(
                "Auction {} has not started and will be rescheduled".format(
                    self.auction_doc_id),
                extra={
                    'MESSAGE_ID': AUCTION_WORKER_SERVICE_AUCTION_RESCHEDULE
                })
            self.auction_document["current_stage"] = -101
            self.save_auction_document()
        else:
            LOGGER.info(
                "Auction {} not found".format(self.auction_doc_id),
                extra={'MESSAGE_ID': AUCTION_WORKER_SERVICE_AUCTION_NOT_FOUND})
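
bids_actions is a BoundedSemaphore used as a plain mutex: end_first_pause() holds it across the get-document / mutate / save-document cycle so a concurrently arriving bid cannot interleave with the stage switch. A minimal sketch of that read-modify-write locking, with illustrative names:

# Sketch: serialize read-modify-write on a shared document with a
# BoundedSemaphore(1), as end_first_pause() does above. Names are made up.
from gevent.lock import BoundedSemaphore

doc_lock = BoundedSemaphore(1)
auction_document = {'current_stage': 0}

def switch_stage(switch_to_round=None):
    doc_lock.acquire()
    try:
        # everything between acquire and release sees a consistent document
        if isinstance(switch_to_round, int):
            auction_document['current_stage'] = switch_to_round
        else:
            auction_document['current_stage'] += 1
    finally:
        doc_lock.release()

gevent semaphores also support the context manager protocol, so the acquire/try/finally above can be shortened to a with doc_lock: block.
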
コード例 #43
0
                    decoded['p']['v'] = Computation(decoded['p']['v'])
                except Exception as e:
                    decoded['p']['v'] = Nil
                    decoded['p']['h'] = True
                    decoded['p']['e'] = str(e) #''.join(traceback.format_exception( *sys.exc_info())[-2:]).strip().replace('\n',': ')
            encoded = json.dumps(decoded['p'])
            #print(resultKey)
            r.set(resultKey + decoded['i'], encoded)
            r.hdel(pipeline, pipelineId)
            #print(encoded)
            release()
            gevent.sleep(0)
            break  # finish loop
        else:
            #print("No messages " + str(getcurrent()))
            notification_semaphore.acquire(timeout=0.1)
            gevent.sleep(0)
    
while True:
    if not concurrency_semaphore.acquire(timeout=0.1):
        #print("cannot acquire semaphore")
        pass
    else:
        #print("acquired concurrency semaphore")
        gevent.Greenlet.spawn(process)
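
The dispatch loop above bounds concurrency with a semaphore: a slot is taken before spawning a worker and given back (the release() call inside process) once a message has been handled, while the 0.1 second acquire timeout keeps the loop from blocking forever. A self-contained sketch of that pattern, with illustrative names and a trivial worker:

# Sketch: bound the number of concurrent greenlets with a semaphore.
# Assumes gevent; the worker body is a stand-in for real message handling.
import gevent
from gevent.lock import BoundedSemaphore

MAX_WORKERS = 4
slots = BoundedSemaphore(MAX_WORKERS)

def worker(n):
    try:
        gevent.sleep(0.1)       # stand-in for handling one message
    finally:
        slots.release()         # give the slot back, like release() above

def dispatch_forever():
    n = 0
    while True:
        if slots.acquire(timeout=0.1):
            gevent.spawn(worker, n)
            n += 1
        else:
            gevent.sleep(0)     # no slot free; yield and try again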