Code example #1
File: library.py Project: mopidy/mopidy-gmusic
    def __init__(self, *args, **kwargs):
        super(GMusicLibraryProvider, self).__init__(*args, **kwargs)

        # tracks, albums, and artists here refer to what is explicitly
        # in our library.
        self.tracks = {}
        self.albums = {}
        self.artists = {}

        # aa_* caches are *only* used for temporary objects. Library
        # objects will never make it here.
        self.aa_artists = LRUCache(1024)
        self.aa_tracks = LRUCache(1024)
        self.aa_albums = LRUCache(1024)

        self._radio_stations_in_browse = (
            self.backend.config['gmusic']['radio_stations_in_browse'])
        self._radio_stations_count = (
            self.backend.config['gmusic']['radio_stations_count'])
        self._radio_tracks_count = (
            self.backend.config['gmusic']['radio_tracks_count'])

        self._top_tracks_count = (
            self.backend.config['gmusic']['top_tracks_count'])

        # Setup the root of library browsing.
        self._root = [
            Ref.directory(uri='gmusic:album', name='Albums'),
            Ref.directory(uri='gmusic:artist', name='Artists'),
            Ref.directory(uri='gmusic:track', name='Tracks')
        ]

        if self._radio_stations_in_browse:
            self._root.append(Ref.directory(uri='gmusic:radio',
                                            name='Radios'))
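
Most of the examples on this page use cachetools.LRUCache the same way as the aa_* caches above: a plain mapping with a fixed capacity. A minimal sketch of that dict-style usage (the key and value are made-up placeholders):

from cachetools import LRUCache

# A size-bounded, dict-like cache: once maxsize entries are stored, inserting a
# new key silently evicts the least recently used entry.
aa_tracks = LRUCache(maxsize=1024)

aa_tracks['T123'] = {'title': 'Some track'}   # write like a dict
track = aa_tracks.get('T123')                 # read like a dict; None once evicted
print(len(aa_tracks), aa_tracks.maxsize)      # current item count vs. capacity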
Code example #2
class CoapLRUCache(CoapCache):
    def __init__(self, max_dim):
        """

        :param max_dim:
        """
        self.cache = LRUCache(maxsize=max_dim)

    def update(self, key, element):
        """

        :param key:
        :param element:
        :return:
        """
        self.cache.update([(key.hashkey, element)])

    def get(self, key):
        """

        :param key:
        :return: CacheElement
        """
        try:
            response = self.cache[key.hashkey]
        except KeyError:
            print "problem here"
            response = None
        return response

    def is_full(self):
        """

        :return:
        """
        if self.cache.currsize == self.cache.maxsize:
            return True
        return False

    def is_empty(self):
        """

        :return:
        """

        if self.cache.currsize == 0:
            return True
        return False

    def debug_print(self):
        """

        :return:
        """
        print "size = ", self.cache.currsize
Code example #3
 def __init__(self, token, name):
   self.name = name
   self.token = token
   self.agent = Agent(reactor)
   self.last_update_id = -1
   self.message_handlers = []
   self.message_subscribers = LRUCache(maxsize=10000)
   self.message_prehandlers = []
   self.message_next_handlers = LRUCache(maxsize=1000)
   self.retry_update = 0
   self.running = False
   self.inline_query_handler = None
   self.callback_query_handler = None
   self.chosen_inline_result_handler = None
   self.botan = None
Code example #4
    def test_lru(self):
        cached = Cached(LRUCache(maxsize=2))

        self.assertEqual(cached.get(0), 0)
        self.assertEqual(cached.get(1), 1)
        self.assertEqual(cached.get(1), 1)
        self.assertEqual(cached.get(1.0), 1)
        self.assertEqual(cached.get(1.0), 1)

        cached.cache.clear()
        self.assertEqual(cached.get(1), 2)
Code example #5
 def __init__(self,
              capacity,
              cache_policy=None,
              ttl_policy=None,
              *args,
              **kwargs):
     self.cache_policy = cache_policy or self.DEFAULT_CACHE_POLICY
     self.ttl_policy = ttl_policy or self.DEFAULT_TTL_POLICY
     self.cache = LRUCache(maxsize=capacity, missing=self.missing)
     self.args = args
     self.kwargs = kwargs
Code example #6
 def __init__(self, shape, dtype=np.float):
     dtype = np.dtype(dtype)
     super().__init__()
     self.current_list_size = 0
     self.shape = shape
     self.dtype = dtype
     self.array_byte_size = reduce(lambda x, y: x * y,
                                   shape) * dtype.itemsize
     self.open()
     self.cache = LRUCache(maxsize=128)
     self.misses = 0
Code example #7
File: abstract.py Project: brahimkhalile/PyMISP
    class MISPFileCache(object):
        # cache up to 150 JSON structures in class attribute

        @staticmethod
        @cached(cache=LRUCache(maxsize=150))
        def _load_json(path):
            if not os.path.exists(path):
                return None
            with open(path, 'r') as f:
                data = load(f)
            return data
Code example #8
    def test_typed_lru(self):
        cached = Cached(LRUCache(maxsize=2))
        self.assertEqual(cached.cache, cached.get_typed.cache(cached))

        self.assertEqual(cached.get_typed(0), 0)
        self.assertEqual(cached.get_typed(1), 1)
        self.assertEqual(cached.get_typed(1), 1)
        self.assertEqual(cached.get_typed(1.0), 2)
        self.assertEqual(cached.get_typed(1.0), 2)
        self.assertEqual(cached.get_typed(0.0), 3)
        self.assertEqual(cached.get_typed(0), 4)
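
The Cached helper used by these two tests is not shown on this page. One plausible shape that reproduces the asserted values (each cache miss returns the current call count and then increments it; get_typed keys on argument type as well as value, so 1 and 1.0 miss separately) would be:

import operator

from cachetools import cachedmethod
from cachetools.keys import typedkey

class Cached:
    def __init__(self, cache, count=0):
        self.cache = cache          # e.g. LRUCache(maxsize=2)
        self.count = count          # bumped on every cache miss

    @cachedmethod(operator.attrgetter('cache'))
    def get(self, value):
        count = self.count
        self.count += 1
        return count

    # typedkey treats 1 and 1.0 as different keys, unlike the default hashkey.
    @cachedmethod(operator.attrgetter('cache'), key=typedkey)
    def get_typed(self, value):
        count = self.count
        self.count += 1
        return count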
Code example #9
class WxWork(object):
    def __init__(self, url):
        self.host = settings.get('WX_WORK_API_BASE_URL')
        self.url = f"{self.host}{url}"

    async def return_result(self, result):
        return result

    @cached(cache=LRUCache(maxsize=1024))
    async def get(self, *args, **kwargs):
        url = self.url
        payload = "{}"
        headers = {
            'Content-Type': "application/json",
            'cache-control': "no-cache"
        }

        response = requests.request("GET",
                                    url,
                                    data=payload,
                                    headers=headers,
                                    params=kwargs)
        result = await self.return_result(response)
        return result

    @cached(cache=LRUCache(maxsize=1024))
    async def post(self, *args, **kwargs):
        url = self.url
        payload = "{}"
        headers = {
            'Content-Type': "application/json",
            'cache-control': "no-cache"
        }

        response = requests.request("POST",
                                    url,
                                    data=payload,
                                    headers=headers,
                                    params=kwargs)
        result = await self.return_result(response)
        return result
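
A caveat with this pattern: cachetools' cached never awaits anything, so what ends up in the cache is the coroutine object returned by the first call, and a later cache hit hands back a coroutine that has already been awaited. A small hand-rolled alternative that caches the awaited result instead (the key and payload below are purely illustrative):

import asyncio

from cachetools import LRUCache

_result_cache = LRUCache(maxsize=1024)

async def cached_get(url):
    # Cache the awaited *result*, not the coroutine object itself.
    if url in _result_cache:
        return _result_cache[url]
    await asyncio.sleep(0)          # stand-in for the real HTTP round trip
    result = {"requested": url}     # placeholder payload
    _result_cache[url] = result
    return result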
Code example #10
File: demix_file.py Project: matchings/AllenSDK
class DemixFile(DataFile):
    """A DataFile which contains methods for accessing and loading
    demixed traces.
    """
    def __init__(self, filepath: Union[str, Path]):
        super().__init__(filepath=filepath)

    @classmethod
    @cached(cache=LRUCache(maxsize=10), key=from_json_cache_key)
    def from_json(cls, dict_repr: dict) -> "DemixFile":
        filepath = dict_repr["demix_file"]
        return cls(filepath=filepath)

    def to_json(self) -> Dict[str, str]:
        return {"demix_file": str(self.filepath)}

    @classmethod
    @cached(cache=LRUCache(maxsize=10), key=from_lims_cache_key)
    def from_lims(cls, db: PostgresQueryMixin,
                  ophys_experiment_id: Union[int, str]) -> "DemixFile":
        query = """
                SELECT wkf.storage_directory || wkf.filename AS demix_file
                FROM ophys_experiments oe
                JOIN well_known_files wkf ON wkf.attachable_id = oe.id
                JOIN well_known_file_types wkft
                ON wkft.id = wkf.well_known_file_type_id
                WHERE wkf.attachable_type = 'OphysExperiment'
                AND wkft.name = 'DemixedTracesFile'
                AND oe.id = {};
                """.format(ophys_experiment_id)
        filepath = db.fetchone(query, strict=True)
        return cls(filepath=filepath)

    @staticmethod
    def load_data(filepath: Union[str, Path]) -> pd.DataFrame:
        with h5py.File(filepath, 'r') as in_file:
            traces = in_file['data'][()]
            roi_id = in_file['roi_names'][()]
            idx = pd.Index(roi_id, name='cell_roi_id', dtype=int)
            return pd.DataFrame({'corrected_fluorescence': list(traces)},
                                index=idx)
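
from_json_cache_key and from_lims_cache_key are defined elsewhere in AllenSDK and are not shown here; a key= argument is needed because the default hashkey cannot hash the dict argument. A hypothetical key function in the same spirit (an assumption, not the project's actual implementation):

import json

from cachetools.keys import hashkey

def from_json_cache_key(cls, dict_repr: dict):
    # Collapse the unhashable dict argument into a stable, hashable cache key.
    return hashkey(cls, json.dumps(dict_repr, sort_keys=True))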
Code example #11
    def __init__(self):
        super(MetricCassandraRepository, self).__init__()

        self._lock = threading.RLock()

        LOG.debug("prepare cql statements...")

        self._measurement_insert_stmt = self._session.prepare(MEASUREMENT_INSERT_CQL)
        self._measurement_insert_stmt.is_idempotent = True

        self._measurement_update_stmt = self._session.prepare(MEASUREMENT_UPDATE_CQL)
        self._measurement_update_stmt.is_idempotent = True

        self._metric_insert_stmt = self._session.prepare(METRICS_INSERT_CQL)
        self._metric_insert_stmt.is_idempotent = True

        self._metric_update_stmt = self._session.prepare(METRICS_UPDATE_CQL)
        self._metric_update_stmt.is_idempotent = True

        self._dimension_stmt = self._session.prepare(DIMENSION_INSERT_CQL)
        self._dimension_stmt.is_idempotent = True

        self._dimension_metric_stmt = self._session.prepare(DIMENSION_METRIC_INSERT_CQL)
        self._dimension_metric_stmt.is_idempotent = True

        self._metric_dimension_stmt = self._session.prepare(METRIC_DIMENSION_INSERT_CQL)
        self._metric_dimension_stmt.is_idempotent = True

        self._retrieve_metric_dimension_stmt = self._session.prepare(RETRIEVE_METRIC_DIMENSION_CQL)

        self._metric_batch = MetricBatch(
            self._cluster.metadata,
            self._cluster.load_balancing_policy,
            self._max_batches)

        self._metric_id_cache = LRUCache(self._cache_size)
        self._dimension_cache = LRUCache(self._cache_size)
        self._metric_dimension_cache = LRUCache(self._cache_size)

        self._load_dimension_cache()
        self._load_metric_dimension_cache()
Code example #12
File: database.py Project: solomon1215/books_backend
class Database(object):
    def __init__(self):
        self.db_user = settings.get('DB_USER')
        self.db_pass = settings.get('DB_PASS')
        self.db_name = settings.get('DB_NAME')
        self.db_host = settings.get('DB_HOST')
        self.db_port = settings.get('DB_PORT')

    def get_db_host(self):
        return self.db_host

    def get_db_user(self):
        return self.db_user

    def get_db_pass(self):
        return self.db_pass

    def get_db_name(self):
        return self.db_name

    @cached(cache=LRUCache(maxsize=1024))
    async def execute(self, sql):
        conn = await asyncpg.connect(user=self.db_user,
                                     password=self.db_pass,
                                     database=self.db_name,
                                     host=self.db_host,
                                     port=self.db_port)
        values = await conn.fetch(sql)
        await conn.close()
        return values

    @cached(cache=LRUCache(maxsize=1024))
    async def fetchrow(self, sql):
        conn = await asyncpg.connect(user=self.db_user,
                                     password=self.db_pass,
                                     database=self.db_name,
                                     host=self.db_host,
                                     port=self.db_port)
        values = await conn.fetchrow(sql)
        await conn.close()
        return values
Code example #13
 def __init__(self,
              trading_calendar,
              reader,
              adjustment_reader,
              sid_cache_size=1000):
     self.trading_calendar = trading_calendar
     self._reader = reader
     self._adjustments_reader = adjustment_reader
     self._window_blocks = {
         field: ExpiringCache(LRUCache(maxsize=sid_cache_size))
         for field in self.FIELDS
     }
Code example #14
File: hkm_nn.py Project: RESDEC/AlgoritmosPy
    def load_model(model_dir, leaf_cache_size=10, points_cache_size=100000):
        """
        Loads a model from `index_dir` and returns an instance of `HKMNearestNeighbor`
        :param model_dir location where model is saved
        :param leaf_cache_size how many leaves to keep in the cache
        :param points_cache_size how many individual points to keep in cache
        """
        # load skeleton
        file_name = os.path.join(model_dir, 'skeleton.pickle')
        new_hkmnn_model = joblib.load(file_name)
        new_hkmnn_model.model_dir = model_dir
        # compute inverse index
        new_hkmnn_model.inverse_idx = new_hkmnn_model._get_idx_paths()

        # cache calls to get_vector and _get_nn_model
        get_nn_model_cache = LRUCache(maxsize=leaf_cache_size)
        get_vector_cache = LRUCache(maxsize=points_cache_size)
        new_hkmnn_model.get_vector = cached(get_vector_cache)(new_hkmnn_model.get_vector)
        new_hkmnn_model._get_nn_model = cached(get_nn_model_cache)(new_hkmnn_model._get_nn_model)

        return new_hkmnn_model
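
The rebinding above, fn = cached(cache)(fn), gives each loaded model its own per-instance cache instead of one shared at class level. The same move in isolation, applied to a throwaway function (expensive_lookup is a made-up stand-in):

from cachetools import LRUCache, cached

def expensive_lookup(point_id):
    # placeholder for the real disk or network read
    return {"id": point_id}

lookup_cache = LRUCache(maxsize=100000)
expensive_lookup = cached(lookup_cache)(expensive_lookup)   # wrap after the fact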
Code example #15
class ProfanityCommand(Command):
    extract = re.compile(r"1:.*?\s+warning\s+(.*?)\s\S+$")
    handlers = "!analyze", "!sjw", "!profanity"
    lru = LRUCache(maxsize=20)

    def handles(self, cmd):
        return True

    @staticmethod
    def alex(string):
        with Popen("alex -t".split(" "), stdout=PIPE, stdin=PIPE) as process:
            try:
                string = string.encode("utf-8")
                result = str(process.communicate(string, timeout=3)[0], "utf-8").split("\n")
            except TimeoutExpired:
                process.kill()
                raise
        result = (ProfanityCommand.extract.search(l.strip()) for l in result)
        result = set(m.group(1).strip() for m in result if m)
        return result

    def handle_cmd(self, cmd, remainder, msg):
        if cmd not in self.handlers:
            self.lru[msg.nick.casefold()] = (msg.nick, msg.msg)
            return False

        if not self.allowed(msg):
            return False
        try:
            if remainder:
                user = remainder.strip().casefold()
                user, text = self.lru.get(user, (None, None))
            else:
                user, text = self.lru.items()[0][1]
            if not text:
                return False
            anal = ", ".join(self.alex(text))
            if not anal:
                anal = "No issues found, SJW approved message"
            lanal = len(anal)
            if lanal > 220:
                anal = anal[0:219] + "…"
                lanal = len(anal)
            lrem = 295 - lanal
            quote = ">{user}: {text}".format(user=user, text=text)
            if len(quote) > lrem:
                quote = quote[0:lrem] + "…"
            self.post("{}\n{}", quote, anal)
            return True

        except Exception:
            LOGGER.exception("failed to analyze")
            return False
Code example #16
File: cache_sim_old2.py Project: ziel5122/rl-cache
def get_cache(cache_type, cache_size):
    caches = {
        'lfu': LFUCache(cache_size),
        'lru': LRUCache(cache_size),
        #'rl' : RLCache(cache_size),
        'rr': RRCache(cache_size)
    }

    try:
        return caches[cache_type]
    except KeyError:
        return default()
Code example #17
File: root_state.py Project: braveheart12/QuarkChain
    def __init__(self, db, quark_chain_config, count_minor_blocks=False):
        # TODO: evict old blocks from memory
        self.db = db
        self.quark_chain_config = quark_chain_config
        self.max_num_blocks_to_recover = (
            quark_chain_config.ROOT.MAX_ROOT_BLOCKS_IN_MEMORY)
        self.count_minor_blocks = count_minor_blocks
        # TODO: May store locally to save memory space (e.g., with LRU cache)
        self.tip_header = None

        self.__recover_from_db()
        self.rblock_cache = LRUCache(maxsize=1024)
Code example #18
    def load_media_cache(self, server):
        server, _ = Servers.get_or_create(name=server)
        media_cache = LRUCache(maxsize=MAX_LOADED_MEDIA)

        for i, m in enumerate(server.media):
            if i > MAX_LOADED_MEDIA:
                break

            media = MediaInfo(m.mxc_server, m.mxc_path, m.key, m.iv, m.hashes)
            media_cache[(m.mxc_server, m.mxc_path)] = media

        return media_cache
Code example #19
class RigidMotionTransformFile(DataFile):
    """A DataFile which contains methods for accessing and loading
    rigid motion transform output.
    """

    def __init__(self, filepath: Union[str, Path]):
        super().__init__(filepath=filepath)

    @classmethod
    @cached(cache=LRUCache(maxsize=10), key=from_json_cache_key)
    def from_json(cls, dict_repr: dict) -> "RigidMotionTransformFile":
        filepath = dict_repr["rigid_motion_transform_file"]
        return cls(filepath=filepath)

    def to_json(self) -> Dict[str, str]:
        return {"rigid_motion_transform_file": str(self.filepath)}

    @classmethod
    @cached(cache=LRUCache(maxsize=10), key=from_lims_cache_key)
    def from_lims(
        cls, db: PostgresQueryMixin,
        ophys_experiment_id: Union[int, str]
    ) -> "RigidMotionTransformFile":
        query = """
                SELECT wkf.storage_directory || wkf.filename AS transform_file
                FROM ophys_experiments oe
                JOIN well_known_files wkf ON wkf.attachable_id = oe.id
                JOIN well_known_file_types wkft
                ON wkft.id = wkf.well_known_file_type_id
                WHERE wkf.attachable_type = 'OphysExperiment'
                AND wkft.name = 'OphysMotionXyOffsetData'
                AND oe.id = {};
                """.format(ophys_experiment_id)
        filepath = safe_system_path(db.fetchone(query, strict=True))
        return cls(filepath=filepath)

    @staticmethod
    def load_data(filepath: Union[str, Path]) -> pd.DataFrame:
        motion_correction = pd.read_csv(filepath)
        return motion_correction[['x', 'y']]
Code example #20
File: miner.py Project: twelveze/pyquarkchain
    def __init__(
        self,
        consensus_type: ConsensusType,
        create_block_async_func: Callable[..., Awaitable[Optional[Block]]],
        add_block_async_func: Callable[[Block], Awaitable[None]],
        get_mining_param_func: Callable[[], Dict[str, Any]],
        get_header_tip_func: Callable[[], Header],
        remote: bool = False,
        root_signer_private_key: Optional[KeyAPI.PrivateKey] = None,
    ):
        """Mining will happen on a subprocess managed by this class

        create_block_async_func: takes no argument, returns a block (either RootBlock or MinorBlock)
        add_block_async_func: takes a block, add it to chain
        get_mining_param_func: takes no argument, returns the mining-specific params
        """
        self.consensus_type = consensus_type

        self.create_block_async_func = create_block_async_func
        self.add_block_async_func = add_block_async_func
        self.get_mining_param_func = get_mining_param_func
        self.get_header_tip_func = get_header_tip_func
        self.enabled = False
        self.process = None

        self.input_q = AioQueue()  # [(MiningWork, param dict)]
        self.output_q = AioQueue()  # [MiningResult]

        # header hash -> block under work
        # max size (tx max 258 bytes, gas limit 12m) ~= ((12m / 21000) * 258) * 128 = 18mb
        self.work_map = LRUCache(maxsize=128)

        if not remote and consensus_type != ConsensusType.POW_SIMULATE:
            Logger.warning("Mining locally, could be slow and error-prone")
        # remote miner specific attributes
        self.remote = remote
        # coinbase address -> header hash
        # key can be None, meaning default coinbase address from local config
        self.current_works = LRUCache(128)
        self.root_signer_private_key = root_signer_private_key
Code example #21
    def preprocess(self, current_file):
        """On-demand feature extraction

        Parameters
        ----------
        current_file : dict
            Generated by a pyannote.database.Protocol

        Returns
        -------
        current_file : dict
            Current file with additional "features" entry

        Notes
        -----
        Does nothing when self.feature_extraction is a
        pyannote.audio.features.Precomputed instance.
        """

        # if "features" are precomputed on disk, do nothing
        # as "process_segment" will load just the part we need
        if isinstance(self.feature_extraction, Precomputed):
            return current_file

        # if (by chance) current_file already contains "features"
        # do nothing.
        if 'features' in current_file:
            return current_file

        # if we get there, it means that we need to extract features
        # for current_file. let's create a cache to store them...
        if not hasattr(self, 'preprocessed_'):
            self.preprocessed_ = LRUCache(maxsize=CACHE_MAXSIZE)

        # this is the key that will be used to know if "features"
        # already exist in cache
        uri = get_unique_identifier(current_file)

        # if "features" are not cached for current file
        # compute and cache them...
        if uri not in self.preprocessed_:
            features = self.feature_extraction(current_file)
            self.preprocessed_[uri] = features

        # create copy of current_file to prevent "features"
        # from consuming increasing memory...
        preprocessed = dict(current_file)

        # add "features" key
        preprocessed['features'] = self.preprocessed_[uri]

        return preprocessed
Code example #22
def backend(worker_pool, **kwargs):
    app = Flask(__name__)
    app.config['SECRET_KEY'] = os.environ['SECRET_KEY']
    socketio = SocketIO(app)

    buf = LRUCache(1024)

    @socketio.on('message', namespace="/upload")
    def on_message(data):
        if request.sid not in buf:
            buf[request.sid] = []
        buf[request.sid].append(json.loads(data))

    @socketio.on('reset', namespace="/upload")
    def on_reset():
        if request.sid in buf:
            buf[request.sid] = []

    @app.route('/anim/<path:path>')
    def send_anim(path):
        return send_from_directory('anim', path)

    @app.route('/dist/<path:path>')
    def send_dist(path):
        return send_from_directory('dist', path)

    @socketio.on('render', namespace="/upload")
    def on_render():
        if request.sid not in buf or buf[request.sid] == []:
            emit("render_error", "empty_data")
        else:
            room = str(uuid4())
            path = f"anim/{room}.mp4"
            join_room(room)
            try:
                res = worker_pool.apply_async(
                    func=render,
                    args=(
                        path,
                        buf[request.sid],
                    ),
                    error_callback=lambda x: socketio.emit(
                        "render_error", "server_error", room=room
                    )  # Outside request context -> cannot use top-level emit()
                )
                res.get(timeout=60)
                emit("render_done", path),
            except:
                emit("render_error", "server_error")
                raise

    return socketio.run(app, **kwargs)
Code example #23
def _query(
        sparql, timeout, q, cache=LRUCache(maxsize=config.CACHE_SIZE), **_):
    """Cached low level function to perform a single SPARQL query.

    :param sparql: SPARQLWrapper endpoint
    :param timeout: a timeout in seconds. The endpoint 'timeout' parameter will
        be set to 3/4 this value in ms (Virtuoso seems to treat non zero
        timeouts < 1000ms as 1000ms), instructing the server to give us a
        partial result up to this soft limit. We also set a hard timeout via the
        socket to really cancel the request if there's no result after timeout
        seconds.
    :param q: The SPARQL Query as string
    :param cache: a cache object like cachetools.LRUCache or None
    :return: a (t, res) pair with querytime t as float and res as dict.
    """

    assert isinstance(sparql, SPARQLWrapper.SPARQLWrapper)
    assert isinstance(q, six.string_types)
    sparql.resetQuery()
    sparql.setTimeout(timeout)
    sparql.setReturnFormat(SPARQLWrapper.JSON)
    # sparql.setMethod(SPARQLWrapper.POST)
    # sparql.setRequestMethod(SPARQLWrapper.POSTDIRECTLY)

    # set query timeout parameter to half the hard timeout time
    sparql.addParameter('timeout', str(int(timeout * 1000 * 3 / 4)))

    logger.debug('performing sparql query: \n%s', q)
    c = cache.get(q) if cache is not None else None
    if c is None:
        logger.debug('cache miss')
        try:
            q_short = ' '.join((line.strip() for line in q.split('\n')))
            sparql.setQuery(q_short)
            c = time(sparql.queryAndConvert)
        except socket.timeout:
            c = (timeout, {})
        except ValueError:
            # e.g. if the endpoint gives us bad JSON for some unicode chars
            logger.warning(
                'Could not parse result for query, assuming empty result...\n'
                'Query:\n%s\nException:', q,
                exc_info=1,  # appends exception to message
            )
            c = (timeout, {})
        if cache is not None:
            cache[q] = c
    else:
        logger.debug('cache hit')
    t, res = c
    logger.debug('orig query took %.4f s, result:\n%s\n', t, res)
    return t, res
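
Because cache=LRUCache(maxsize=config.CACHE_SIZE) is evaluated once when the function is defined, every call that does not pass its own cache shares a single module-level cache, and cache=None disables caching for that call. The same idea in miniature (the names and the fake work are placeholders):

from cachetools import LRUCache

def lookup(q, cache=LRUCache(maxsize=256)):
    # The default cache is created once, when the def runs, so it survives
    # between calls and acts as a shared memo table.
    if cache is not None and q in cache:
        return cache[q]
    result = q.upper()      # stand-in for the real SPARQL round trip
    if cache is not None:
        cache[q] = result
    return result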
Code example #24
class DiscordMessage:
    @classmethod
    @CacheManager.attach_cachedmethod(self2cache=lambda x: LRUCache(maxsize=DiscordMessageCache.Constant.MAXSIZE),
                                      )
    def id2message(cls, channel_id, message_id):
        channel = DiscordChannel.id2channel(channel_id)
        return channel.messages.fetch(message_id)

    @classmethod
    def add_message2cache(cls, message):
        channel = message.channel
        DiscordChannel.add_channel2cache(channel)
        CacheManager.add2cache(cls.id2message, message, [channel.id, message.id])
Code example #25
class CommitFileMapper(ArtifactMapper):
    def __init__(self):
        super().__init__(CommitFile, foreign_key="commit_id")

    cache = LRUCache(512)

    def map(self, x: DataPoint) -> Optional[CommitFile]:
        return CommitFile(
            x.filename,
            x.status,
            x.patch if not isinstance(x.patch, float) else None,
            x.change if not isinstance(x.change, float) else None,
        )
Code example #26
    def init(self, args, db, hosts, logger):
        self.args = args
        self.db = db

        self.hosts = hosts
        self.logger = logger

        self.bind = args.bind
        self.forward = args.forward

        self.peers = {}
        self.requests = {}
        self.cache = LRUCache(maxsize=args.cachesize)

        if args.daemon:
            Daemon(args.pidfile).register(self)

        if args.debug:
            Debugger(events=args.verbose, logger=logger).register(self)

        self.transport = UDPServer(self.bind).register(self)
        self.protocol = DNS().register(self)
Code example #27
 def __init__(
     self,
     logger: Logger = None,
     num_of_attempts: int = 1,
     session: Session = Session(),
     verify: bool = True,
     cachesize: int = 128,
 ):
     self.logger = get_logger(
         self.__class__.__name__) if logger is None else logger
     self.num_of_attempts = num_of_attempts
     self.session = session
     self.verify = verify
     self.cache = LRUCache(maxsize=cachesize)
Code example #28
 def __init__(self,
              host="127.0.0.1",
              port=9033,
              model="en",
              embeddings_path=None,
              verbose=False,
              attributes=None):
     super(Client, self).__init__(model, embeddings_path)
     self.host = host
     self.port = port
     self.rpc = RPCClient(host, port)
     self.verbose = verbose
     self.attributes = attributes
     self.cache = LRUCache(maxsize=3000000)
Code example #29
File: base.py Project: ndl/pumaduct
 def __init__(self, conf, glib, matrix_client, clients, db_session,
              account_storage, message_storage):
     self.glib = glib
     self.matrix_client = matrix_client
     self.clients = clients
     self.db_session = db_session
     self.account_storage = account_storage
     self.message_storage = message_storage
     self.networks = conf["networks"]
     self.hs_host = _parse_hs_host(conf["hs_server"])
     self.users_blacklist = conf["users_blacklist"]
     self.users_whitelist = conf["users_whitelist"]
     self.accounts = defaultdict(list)
     self.rooms = defaultdict(Room)
     self.transaction_callbacks = defaultdict(list)
     self.clients_callbacks = defaultdict(list)
     self.mxids_to_ext_contacts = LRUCache(maxsize=conf["max_cache_items"])
     self.ext_contacts_to_mxids = LRUCache(maxsize=conf["max_cache_items"])
     self.senders_access = LRUCache(maxsize=conf["max_cache_items"])
     if "user_power_level" in conf:
         self.user_power_level = conf["user_power_level"]
     else:
         self.user_power_level = None
Code example #30
    def default():

        processor = getattr(AnalysisProcessor._context, 'analysis_processor', None)

        if processor is None:
            logger.warning("Default analysis processor not set for thread %s, using a simple one.", threading.current_thread().name)
            from vot.utilities import ThreadPoolExecutor
            from cachetools import LRUCache
            executor = ThreadPoolExecutor(1)
            cache = LRUCache(1000)
            processor = AnalysisProcessor(executor, cache)
            AnalysisProcessor._context.analysis_processor = processor

        return processor
Code example #31
File: distribution.py Project: dpohanlon/Glasnost
    def __init__(self, parameters=None, name=''):

        self.name = gl.utils.nameScope.rstrip(
            '/') if not name else gl.utils.nameScope + name

        if parameters:
            for k, p in parameters.items():
                if type(p) != gl.Parameter:
                    # Assume this is int, float, ...
                    parameters[k] = gl.Parameter(p, name=self.name + '/' + k)

        self.parameters = parameters

        self.cache = LRUCache(maxsize=128)
Code example #32
 def __init__(self, token, name, skip_offset=False, allowed_updates=None, agent=None, timeout=None):
   self.id = int(token.split(':')[0])
   self.name = name
   self.token = token
   self.agent = agent
   self.last_update_id = -2 if skip_offset else -1
   self.update_prehandlers = []
   self.message_handlers = []
   self.message_subscribers = LRUCache(maxsize=10000)
   self.message_prehandlers = []
   self.message_next_handlers = LRUCache(maxsize=1000)
   self.retry_update = 0
   self.allowed_updates = allowed_updates
   self.running = False
   self.inline_query_handler = None
   self.callback_query_handler = None
   self.chosen_inline_result_handler = None
   self.channel_post_handler = None
   self.on_updated_listener = None
   self.on_api_request_listener = None
   self.botan = None
   self.timeout = timeout
   self._noisy = False
Code example #33
class StimulusFile(DataFile):
    """A DataFile which contains methods for accessing and loading visual
    behavior stimulus *.pkl files.

    This file type contains a number of parameters collected during a behavior
    session including information about stimulus presentations, rewards,
    trials, and timing for all of the above.
    """

    def __init__(self, filepath: Union[str, Path]):
        super().__init__(filepath=filepath)

    @classmethod
    @cached(cache=LRUCache(maxsize=10), key=from_json_cache_key)
    def from_json(cls, dict_repr: dict) -> "StimulusFile":
        filepath = dict_repr["behavior_stimulus_file"]
        return cls(filepath=filepath)

    def to_json(self) -> Dict[str, str]:
        return {"behavior_stimulus_file": str(self.filepath)}

    @classmethod
    @cached(cache=LRUCache(maxsize=10), key=from_lims_cache_key)
    def from_lims(
        cls, db: PostgresQueryMixin,
        behavior_session_id: Union[int, str]
    ) -> "StimulusFile":
        query = STIMULUS_FILE_QUERY_TEMPLATE.format(
            behavior_session_id=behavior_session_id
        )
        filepath = db.fetchone(query, strict=True)
        return cls(filepath=filepath)

    @staticmethod
    def load_data(filepath: Union[str, Path]) -> dict:
        filepath = safe_system_path(file_name=filepath)
        return pd.read_pickle(filepath)
Code example #34
    def __init__(self, capp, io_loop):
        """Monitors events that are received from celery.

        capp - The celery app
        io_loop - The event loop to use for dispatch
        """
        super().__init__()

        self.capp = capp
        self.timer = PeriodicCallback(self.on_enable_events,
                                      self.events_enable_interval)

        self.monitor = EventMonitor(self.capp, io_loop)
        self.listeners = {}
        self.finished_tasks = LRUCache(self.max_finished_history)
Code example #35
File: mongodb.py Project: 0xDEC0DE8/openreil
    def __init__(self, arch, collection, db = None, host = None, port = None):
        
        self.arch = arch
        self.db_name = self.DEF_DB if db is None else db
        self.collection_name = collection

        self.host = self.DEF_HOST if host is None else host
        self.port = self.DEF_PORT if port is None else port

        # instructions cache
        self.cache = LRUCache(maxsize = self.CACHE_SIZE)

        # connect to the server
        self.client = pymongo.Connection(self.host, self.port)
        self.db = self.client[self.db_name]

        # get collection        
        self.collection = self.db[self.collection_name]
        self.collection.ensure_index(self.INDEX)     
Code example #36
File: texts.py Project: henningko/textacy
 def __init__(self, text, spacy_pipeline=None, lang='auto',
              metadata=None, max_cachesize=5):
     self.metadata = {} if metadata is None else metadata
     self.lang = text_utils.detect_language(text) if lang == 'auto' else lang
     if spacy_pipeline is None:
         self.spacy_pipeline = data.load_spacy_pipeline(lang=self.lang)
     else:
         # check for match between text and supplied spacy pipeline language
         if spacy_pipeline.lang != self.lang:
             msg = 'TextDoc.lang {} != spacy_pipeline.lang {}'.format(
                 self.lang, spacy_pipeline.lang)
             raise ValueError(msg)
         else:
             self.spacy_pipeline = spacy_pipeline
     self.spacy_vocab = self.spacy_pipeline.vocab
     self.spacy_stringstore = self.spacy_vocab.strings
     self.spacy_doc = self.spacy_pipeline(text)
     self._term_counts = Counter()
     self._cache = LRUCache(maxsize=max_cachesize)
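
The _cache created here is read and written by textacy's cached methods via cachetools.cachedmethod (see example #41 further down). The core of that pattern, reduced to a self-contained sketch:

from functools import partial
from operator import attrgetter

from cachetools import LRUCache, cachedmethod
from cachetools.keys import hashkey

class Doc:
    def __init__(self):
        self._cache = LRUCache(maxsize=5)

    # attrgetter('_cache') tells cachedmethod where each instance keeps its cache;
    # partial(hashkey, 'words') prefixes the key so methods sharing one cache
    # cannot collide with each other.
    @cachedmethod(attrgetter('_cache'), key=partial(hashkey, 'words'))
    def words(self, lowercase=True):
        return ['computed', 'once', 'per', 'argument', 'combination']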
Code example #37
File: server.py Project: n3storm/udns
    def init(self, args, db, hosts, logger):
        self.args = args
        self.db = db

        self.hosts = hosts
        self.logger = logger

        self.bind = args.bind
        self.forward = args.forward

        self.peers = {}
        self.requests = {}
        self.cache = LRUCache(maxsize=args.cachesize)

        if args.daemon:
            Daemon(args.pidfile).register(self)

        if args.debug:
            Debugger(events=args.verbose, logger=logger).register(self)

        self.transport = UDPServer(self.bind).register(self)
        self.protocol = DNS().register(self)
Code example #38
File: mongodb.py Project: 0xDEC0DE8/openreil
class CodeStorageMongo(REIL.CodeStorageMem):

    # mongodb host
    DEF_HOST = '127.0.0.1'
    DEF_PORT = 27017

    # defult database name
    DEF_DB = 'openreil'

    # index for instructions collection
    INDEX = [('addr', pymongo.ASCENDING), ('inum', pymongo.ASCENDING)]

    CACHE_SIZE = 1024

    def __init__(self, arch, collection, db = None, host = None, port = None):
        
        self.arch = arch
        self.db_name = self.DEF_DB if db is None else db
        self.collection_name = collection

        self.host = self.DEF_HOST if host is None else host
        self.port = self.DEF_PORT if port is None else port

        # instructions cache
        self.cache = LRUCache(maxsize = self.CACHE_SIZE)

        # connect to the server
        self.client = pymongo.Connection(self.host, self.port)
        self.db = self.client[self.db_name]

        # get collection        
        self.collection = self.db[self.collection_name]
        self.collection.ensure_index(self.INDEX)     

    def __iter__(self):

        for item in self.collection.find().sort(self.INDEX): 

            yield REIL.Insn(self._insn_from_item(item))

    def _insn_to_item(self, insn):

        insn = REIL.Insn(insn)        

        def _arg_in(arg):

            if arg.type == REIL.A_NONE:  

                return ()
            
            elif arg.type == REIL.A_CONST:

                return ( arg.type, arg.size, _U64IN(arg.val) )
            
            else:

                return ( arg.type, arg.size, arg.name )

        if insn.has_attr(REIL.IATTR_BIN):

            # JSON doesn't support binary data
            insn.set_attr(REIL.IATTR_BIN, base64.b64encode(insn.get_attr(REIL.IATTR_BIN)))

        # JSON doesn't support numeric keys
        attr = [ (key, val) for key, val in insn.attr.items() ]

        return {

            'addr': _U64IN(insn.addr), 'size': insn.size, 'inum': insn.inum, 'op': insn.op, \
            'a': _arg_in(insn.a), 'b': _arg_in(insn.b), 'c': _arg_in(insn.c), \
            'attr': attr
        }

    def _insn_from_item(self, item):

        attr, attr_dict = item['attr'], {}

        def _arg_out(arg):

            if len(arg) == 0: 

                return ()

            elif REIL.Arg_type(arg) == REIL.A_CONST:

                arg = ( REIL.Arg_type(arg), REIL.Arg_size(arg), _U64OUT(REIL.Arg_val(arg)) )

            return arg                

        for key, val in attr:

            attr_dict[key] = val

        if attr_dict.has_key(REIL.IATTR_BIN):

            # get instruction binary data from base64
            attr_dict[REIL.IATTR_BIN] = base64.b64decode(attr_dict[REIL.IATTR_BIN])

        return ( 

            ( _U64OUT(item['addr']), item['size'] ), item['inum'], item['op'], \
            ( _arg_out(item['a']), _arg_out(item['b']), _arg_out(item['c']) ), \
            attr_dict
        ) 

    def _get_key(self, ir_addr):

        return { 'addr': ir_addr[0], 'inum': ir_addr[1] }

    def _find(self, ir_addr):

        return self.collection.find_one(self._get_key(ir_addr))

    def _get_insn(self, ir_addr): 

        # get item from cache
        try: return self.cache[ir_addr]
        except KeyError: pass

        # get item from collection
        insn = self._find(ir_addr)
        if insn is not None: 

            insn = self._insn_from_item(insn)

            # update cache
            self.cache[ir_addr] = insn

            return insn

        else:

            raise REIL.StorageError(*ir_addr)

    def _del_insn(self, ir_addr):

        insn = self._find(ir_addr)
        if insn is not None: 

            # remove item from collection
            self.collection.remove(self._get_key(ir_addr))

            # remove item from cache
            try: del self.cache[ir_addr]
            except KeyError: pass

        else:

            raise REIL.StorageError(*ir_addr)

    def _put_insn(self, insn):

        ir_addr = REIL.Insn_ir_addr(insn)

        if self._find(ir_addr) is not None:

            # update existing item
            self.collection.update(self._get_key(ir_addr), self._insn_to_item(insn))            

        else:

            # add a new item
            self.collection.insert(self._insn_to_item(insn))

        # update cache
        self.cache[ir_addr] = insn

    def size(self): 

        return self.collection.find().count()

    def clear(self): 

        self.cache.clear()

        # remove all items of collection
        return self.collection.remove()
Code example #39
File: server.py Project: carriercomm/udns
class Server(Component):

    channel  = "server"

    def init(self, args, db, hosts, logger):
        self.args = args
        self.db = db

        self.hosts = hosts
        self.logger = logger

        self.bind = args.bind
        self.forward = args.forward

        self.peers = {}
        self.requests = {}
        self.cache = LRUCache(maxsize=args.cachesize)

        if args.daemon:
            Daemon(args.pidfile).register(self)

        if args.debug:
            Debugger(events=args.verbose, logger=logger).register(self)

        self.transport = UDPServer(
            self.bind, channel=self.channel
        ).register(self)
        self.protocol = DNS(channel=self.channel).register(self)

    def ready(self, server, bind):
        self.logger.info(
            "DNS Server Ready! Listening on {0:s}:{1:d}".format(*bind)
        )

        # Timer(1, Event.create("ttl"), persist=True, channel=self.channel).register(self)

    def ttl(self):
        for k, rrs in self.cache.items()[:]:
            if any(rr.ttl == 0 for rr in rrs):
                qname, qtype, qclass = k
                self.logger.info(
                    "Expired Entry: {0:s} {1:s} {2:s}".format(
                        CLASS.get(qclass), QTYPE.get(qtype), qname
                    )
                )
                del self.cache[k]
            else:
                for rr in rrs:
                    rr.ttl -= 1

    def request(self, peer, request):
        qname = str(request.q.qname)
        qtype = request.q.qtype
        qclass = request.q.qclass

        key = (qname, qtype, qclass)

        if key in self.cache:
            self.logger.info(
                "Cached Request ({0:s}): {1:s} {2:s} {3:s}".format(
                    "{0:s}:{1:d}".format(*peer),
                    CLASS.get(qclass), QTYPE.get(qtype), qname
                )
            )

            reply = request.reply()
            for rr in self.cache[key]:
                reply.add_answer(rr)
            self.fire(write(peer, reply.pack()))
            return

        if key in self.hosts:
            self.logger.info(
                "Local Hosts Request ({0:s}): {1:s} {2:s} {3:s}".format(
                    "{0:s}:{1:d}".format(*peer),
                    CLASS.get(qclass), QTYPE.get(qtype), qname
                )
            )

            reply = request.reply()
            for rdata in self.hosts[key]:
                rr = RR(
                    qname,
                    rclass=CLASS.IN,
                    rtype=QTYPE.AAAA if ":" in rdata else QTYPE.A,
                    rdata=AAAA(rdata) if ":" in rdata else A(rdata)
                )
                reply.add_answer(rr)

            self.cache[key] = rr

            self.fire(write(peer, reply.pack()))

            return

        records = Record.objects.filter(rname=qname)

        if not records:
            self.logger.info(
                "Request ({0:s}): {1:s} {2:s} {3:s} -> {4:s}:{5:d}".format(
                    "{0:s}:{1:d}".format(*peer),
                    CLASS.get(qclass), QTYPE.get(qtype), qname,
                    self.forward, 53
                )
            )

            lookup = DNSRecord(q=DNSQuestion(qname, qtype, qclass))
            id = lookup.header.id
            self.peers[id] = peer
            self.requests[id] = request

            self.fire(write((self.forward, 53), lookup.pack()))

            return

        self.logger.info(
            "Authoritative Request ({0:s}): {1:s} {2:s} {3:s}".format(
                "{0:s}:{1:d}".format(*peer),
                CLASS.get(qclass), QTYPE.get(qtype), qname
            )
        )

        rr = []
        reply = request.reply()

        if len(records) == 1 and records[0].rtype == CNAME:
            rr.append(records[0].rr)
            records = Record.objects.filter(rname=records[0].rdata)

        for record in records:
            rr.append(record.rr)

        reply.add_answer(*rr)

        self.cache[key] = rr

        self.fire(write(peer, reply.pack()))

    def response(self, peer, response):
        id = response.header.id

        qname = str(response.q.qname)
        qtype = response.q.qtype
        qclass = response.q.qclass

        if id not in self.peers:
            self.logger.info(
                "Unknown Response ({0:s}): {1:s} {2:s} {3:s}".format(
                    "{0:s}:{1:d}".format(*peer),
                    CLASS.get(qclass), QTYPE.get(qtype), qname
                )
            )

            return

        peer = self.peers[id]
        request = self.requests[id]

        key = (str(request.q.qname), request.q.qtype, request.q.qclass)

        reply = request.reply()

        reply.add_answer(*response.rr)

        self.cache[key] = reply.rr

        self.fire(write(peer, reply.pack()))

        del self.peers[id]
        del self.requests[id]
Code example #40
File: _core.py Project: sherwoodwang/dispatching-dns
    def __init__(self, upstreams, cache_size=None):
        super().__init__()
        self._upstreams = upstreams

        self._cache = LRUCache(cache_size) if cache_size else None
        self._cache_lock = RLock()
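
The separate RLock is needed because cachetools containers do no locking of their own. When the decorator form is used instead of manual cache access, the same protection can be requested through the lock= argument (a generic sketch, not this project's code):

import threading

from cachetools import LRUCache, cached

_lock = threading.RLock()

@cached(cache=LRUCache(maxsize=1024), lock=_lock)
def resolve(name):
    # cached() acquires _lock around every cache lookup and store.
    return name.lower()     # placeholder for the real upstream query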
Code example #41
File: texts.py Project: henningko/textacy
class TextDoc(object):
    """
    Class that tokenizes, tags, and parses a text document, and provides an easy
    interface to information extraction, alternative document representations,
    and statistical measures of the text.

    Args:
        text (str)
        spacy_pipeline (``spacy.<lang>.<Lang>()``, optional)
        lang (str, optional)
        metadata (dict, optional)
        max_cachesize (int, optional)
    """
    def __init__(self, text, spacy_pipeline=None, lang='auto',
                 metadata=None, max_cachesize=5):
        self.metadata = {} if metadata is None else metadata
        self.lang = text_utils.detect_language(text) if lang == 'auto' else lang
        if spacy_pipeline is None:
            self.spacy_pipeline = data.load_spacy_pipeline(lang=self.lang)
        else:
            # check for match between text and supplied spacy pipeline language
            if spacy_pipeline.lang != self.lang:
                msg = 'TextDoc.lang {} != spacy_pipeline.lang {}'.format(
                    self.lang, spacy_pipeline.lang)
                raise ValueError(msg)
            else:
                self.spacy_pipeline = spacy_pipeline
        self.spacy_vocab = self.spacy_pipeline.vocab
        self.spacy_stringstore = self.spacy_vocab.strings
        self.spacy_doc = self.spacy_pipeline(text)
        self._term_counts = Counter()
        self._cache = LRUCache(maxsize=max_cachesize)

    def __repr__(self):
        return 'TextDoc({} tokens: {})'.format(
            len(self.spacy_doc), repr(self.text[:50].replace('\n',' ').strip() + '...'))

    def __len__(self):
        return self.n_tokens

    def __getitem__(self, index):
        return self.spacy_doc[index]

    def __iter__(self):
        for tok in self.spacy_doc:
            yield tok

    @property
    def tokens(self):
        """Yield the document's tokens as tokenized by spacy; same as ``__iter__``."""
        for tok in self.spacy_doc:
            yield tok

    @property
    def sents(self):
        """Yield the document's sentences as segmented by spacy."""
        for sent in self.spacy_doc.sents:
            yield sent

    def merge(self, spans):
        """
        Merge spans *in-place* within doc so that each takes up a single token.
        Note: All cached methods on this doc will be cleared.

        Args:
            spans (iterable(``spacy.Span``)): for example, the results from
                :func:`extract.named_entities() <textacy.extract.named_entities>`
                or :func:`extract.pos_regex_matches() <textacy.extract.pos_regex_matches>`
        """
        with LOCK:
            self._cache.clear()
        spacy_utils.merge_spans(spans)

    ###############
    # DOC AS TEXT #

    @property
    def text(self):
        """Return the document's raw text."""
        return self.spacy_doc.text_with_ws

    @property
    def tokenized_text(self):
        """Return text as an ordered, nested list of tokens per sentence."""
        return [[token.text for token in sent]
                for sent in self.spacy_doc.sents]

    @property
    def pos_tagged_text(self):
        """Return text as an ordered, nested list of (token, POS) pairs per sentence."""
        return [[(token.text, token.pos_) for token in sent]
                for sent in self.spacy_doc.sents]

    #######################
    # DOC REPRESENTATIONS #

    def as_bag_of_terms(self, weighting='tf', normalized=True, binary=False,
                        idf=None, lemmatize='auto',
                        ngram_range=(1, 1),
                        include_nes=False, include_nps=False, include_kts=False):
        """
        Represent doc as a "bag of terms", an unordered set of (term id, term weight)
        pairs, where term weight may be by TF or TF*IDF.

        Args:
            weighting (str {'tf', 'tfidf'}, optional): weighting of term weights,
                either term frequency ('tf') or tf * inverse doc frequency ('tfidf')
            idf (dict, optional): if `weighting` = 'tfidf', idf's must be supplied
                externally, such as from a `TextCorpus` object
            lemmatize (bool or 'auto', optional): if True, lemmatize all terms
                when getting their frequencies
            ngram_range (tuple(int), optional): (min n, max n) values for n-grams
                to include in terms list; default (1, 1) only includes unigrams
            include_nes (bool, optional): if True, include named entities in terms list
            include_nps (bool, optional): if True, include noun phrases in terms list
            include_kts (bool, optional): if True, include key terms in terms list
            normalized (bool, optional): if True, normalize term freqs by the
                total number of unique terms
            binary (bool optional): if True, set all (non-zero) term freqs equal to 1

        Returns:
            :class:`collections.Counter <collections.Counter>`: mapping of term ids
                to corresponding term weights
        """
        term_weights = self.term_counts(
            lemmatize=lemmatize, ngram_range=ngram_range, include_nes=include_nes,
            include_nps=include_nps, include_kts=include_kts)

        if binary is True:
            term_weights = Counter({key: 1 for key in term_weights.keys()})
        elif normalized is True:
            # n_terms = sum(term_freqs.values())
            n_tokens = self.n_tokens
            term_weights = Counter({key: val / n_tokens
                                    for key, val in term_weights.items()})

        if weighting == 'tfidf' and idf:
            term_weights = Counter({key: val * idf[key]
                                    for key, val in term_weights.items()})

        return term_weights

    def as_bag_of_concepts(self):
        raise NotImplementedError()

    def as_semantic_network(self):
        raise NotImplementedError()

    ##########################
    # INFORMATION EXTRACTION #

    @cachedmethod(attrgetter('_cache'), key=partial(hashkey, 'words'))
    def words(self, **kwargs):
        """
        Extract an ordered list of words from a spacy-parsed doc, optionally
        filtering words by part-of-speech (etc.) and frequency.

        .. seealso:: :func:`extract.words() <textacy.extract.words>` for all function kwargs.
        """
        return list(extract.words(self.spacy_doc, **kwargs))

    @cachedmethod(attrgetter('_cache'), key=partial(hashkey, 'ngrams'))
    def ngrams(self, n, **kwargs):
        """
        Extract an ordered list of n-grams (``n`` consecutive words) from doc,
        optionally filtering n-grams by the types and parts-of-speech of the
        constituent words.

        Args:
            n (int): number of tokens to include in n-grams;
                1 => unigrams, 2 => bigrams

        .. seealso:: :func:`extract.ngrams() <textacy.extract.ngrams>` for all function kwargs.
        """
        return list(extract.ngrams(self.spacy_doc, n, **kwargs))

    @cachedmethod(attrgetter('_cache'), key=partial(hashkey, 'named_entities'))
    def named_entities(self, **kwargs):
        """
        Extract an ordered list of named entities (PERSON, ORG, LOC, etc.) from
        doc, optionally filtering by the entity types and frequencies.

        .. seealso:: :func:`extract.named_entities() <textacy.extract.named_entities>`
        for all function kwargs.
        """
        return list(extract.named_entities(self.spacy_doc, **kwargs))

    @cachedmethod(attrgetter('_cache'), key=partial(hashkey, 'noun_chunks'))
    def noun_chunks(self, **kwargs):
        """
        Extract an ordered list of noun phrases from doc, optionally
        filtering by frequency and dropping leading determiners.

        .. seealso:: :func:`extract.noun_chunks() <textacy.extract.noun_chunks>`
        for all function kwargs.
        """
        return list(extract.noun_chunks(self.spacy_doc, **kwargs))

    @cachedmethod(attrgetter('_cache'), key=partial(hashkey, 'pos_regex_matches'))
    def pos_regex_matches(self, pattern):
        """
        Extract sequences of consecutive tokens from a spacy-parsed doc whose
        part-of-speech tags match the specified regex pattern.

        Args:
            pattern (str): Pattern of consecutive POS tags whose corresponding words
                are to be extracted, inspired by the regex patterns used in NLTK's
                ``nltk.chunk.regexp``. Tags are uppercase, from the universal tag set;
                delimited by < and >, which are basically converted to parentheses
                with spaces as needed to correctly extract matching word sequences;
                white space in the input doesn't matter.

                Examples (see :obj:`POS_REGEX_PATTERNS <textacy.regexes_etc.POS_REGEX_PATTERNS>`):

                * noun phrase: r'<DET>? (<NOUN>+ <ADP|CONJ>)* <NOUN>+'
                * compound nouns: r'<NOUN>+'
                * verb phrase: r'<VERB>?<ADV>*<VERB>+'
                * prepositional phrase: r'<PREP> <DET>? (<NOUN>+<ADP>)* <NOUN>+'
        """
        return list(extract.pos_regex_matches(self.spacy_doc, pattern))

    @cachedmethod(attrgetter('_cache'), key=partial(hashkey, 'subject_verb_object_triples'))
    def subject_verb_object_triples(self):
        """
        Extract an *un*ordered list of distinct subject-verb-object (SVO) triples
        from doc.
        """
        return list(extract.subject_verb_object_triples(self.spacy_doc))

    @cachedmethod(attrgetter('_cache'), key=partial(hashkey, 'acronyms_and_definitions'))
    def acronyms_and_definitions(self, **kwargs):
        """
        Extract a collection of acronyms and their most likely definitions,
        if available, from doc. If multiple definitions are found for a given acronym,
        only the most frequently occurring definition is returned.

        .. seealso:: :func:`extract.acronyms_and_definitions() <textacy.extract.acronyms_and_definitions>`
        for all function kwargs.
        """
        return extract.acronyms_and_definitions(self.spacy_doc, **kwargs)

    @cachedmethod(attrgetter('_cache'), key=partial(hashkey, 'semistructured_statements'))
    def semistructured_statements(self, entity, **kwargs):
        """
        Extract "semi-structured statements" from doc, each as a (entity, cue, fragment)
        triple. This is similar to subject-verb-object triples.

        Args:
            entity (str): a noun or noun phrase of some sort (e.g. "President Obama",
                "global warming", "Python")

        .. seealso:: :func:`extract.semistructured_statements() <textacy.extract.semistructured_statements>`
        for all function kwargs.
        """
        return list(extract.semistructured_statements(
            self.spacy_doc, entity, **kwargs))

    @cachedmethod(attrgetter('_cache'), key=partial(hashkey, 'direct_quotations'))
    def direct_quotations(self):
        """
        Baseline, not-great attempt at direction quotation extraction (no indirect
        or mixed quotations) using rules and patterns. English only.
        """
        return list(extract.direct_quotations(self.spacy_doc))

    @cachedmethod(attrgetter('_cache'), key=partial(hashkey, 'key_terms'))
    def key_terms(self, algorithm='sgrank', n=10):
        """
        Extract key terms from a document using `algorithm`.

        Args:
            algorithm (str {'sgrank', 'textrank', 'singlerank'}, optional): name
                of algorithm to use for key term extraction
            n (int or float, optional): if int, number of top-ranked terms to return
                as keyterms; if float, must be in the open interval (0.0, 1.0),
                representing the fraction of top-ranked terms to return as keyterms

        Raises:
            ValueError: if ``algorithm`` not in {'sgrank', 'textrank', 'singlerank'}
        """
        if algorithm == 'sgrank':
            return keyterms.sgrank(self.spacy_doc, window_width=1500, n_keyterms=n)
        elif algorithm == 'textrank':
            return keyterms.textrank(self.spacy_doc, n_keyterms=n)
        elif algorithm == 'singlerank':
            return keyterms.singlerank(self.spacy_doc, n_keyterms=n)
        else:
            raise ValueError('algorithm {} not a valid option'.format(algorithm))

    ##############
    # STATISTICS #
    ##############

    @cachedmethod(attrgetter('_cache'), key=partial(hashkey, 'term_counts'))
    def term_counts(self, lemmatize='auto', ngram_range=(1, 1),
                    include_nes=False, include_nps=False, include_kts=False):
        """
        Get the number of occurrences ("counts") of each unique term in doc;
        terms may be words, n-grams, named entities, noun phrases, and key terms.

        Args:
            lemmatize (bool or 'auto', optional): if True, lemmatize all terms
                when getting their frequencies; if 'auto', lemmatize all terms
                that aren't proper nouns or acronyms
            ngram_range (tuple(int), optional): (min n, max n) values for n-grams
                to include in terms list; default (1, 1) only includes unigrams
            include_nes (bool, optional): if True, include named entities in terms list
            include_nps (bool, optional): if True, include noun phrases in terms list
            include_kts (bool, optional): if True, include key terms in terms list

        Returns:
            :class:`collections.Counter() <collections.Counter>`: mapping of unique
                term ids to corresponding term counts
        """
        if lemmatize == 'auto':
            get_id = lambda x: self.spacy_stringstore[spacy_utils.normalized_str(x)]
        elif lemmatize is True:
            get_id = lambda x: self.spacy_stringstore[x.lemma_]
        else:
            get_id = lambda x: self.spacy_stringstore[x.text]

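        # NOTE: Counter's `|` (union) keeps the element-wise maximum of the two
        # counters, so repeated calls don't double-count terms that are already
        # tallied in self._term_counts.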
        for n in range(ngram_range[0], ngram_range[1] + 1):
            if n == 1:
                self._term_counts = self._term_counts | Counter(
                    get_id(word) for word in self.words())
            else:
                self._term_counts = self._term_counts | Counter(
                    get_id(ngram) for ngram in self.ngrams(n))
        if include_nes is True:
            self._term_counts = self._term_counts | Counter(
                get_id(ne) for ne in self.named_entities())
        if include_nps is True:
            self._term_counts = self._term_counts | Counter(
                get_id(np) for np in self.noun_chunks())
        if include_kts is True:
            # HACK: key terms are currently returned as strings
            # TODO: cache key terms, and return them as spacy spans
            get_id = lambda x: self.spacy_stringstore[x]
            self._term_counts = self._term_counts | Counter(
                get_id(kt) for kt, _ in self.key_terms())

        return self._term_counts

    def term_count(self, term):
        """
        Get the number of occurrences ("count") of term in doc.

        Args:
            term (str or ``spacy.Token`` or ``spacy.Span``)

        Returns:
            int
        """
        # figure out what object we're dealing with here; convert as necessary
        if isinstance(term, str):
            term_text = term
            term_id = self.spacy_stringstore[term_text]
            term_len = term_text.count(' ') + 1
        elif isinstance(term, spacy_token):
            term_text = spacy_utils.normalized_str(term)
            term_id = self.spacy_stringstore[term_text]
            term_len = 1
        elif isinstance(term, spacy_span):
            term_text = spacy_utils.normalized_str(term)
            term_id = self.spacy_stringstore[term_text]
            term_len = len(term)
        else:
            raise ValueError('term must be a str, spacy Token, or spacy Span')

        term_count_ = self._term_counts[term_id]
        if term_count_ > 0:
            return term_count_
        # if n-grams of this length haven't been counted yet, count them now
        if not any(self.spacy_stringstore[t].count(' ') == term_len
                   for t in self._term_counts):
            get_id = lambda x: self.spacy_stringstore[spacy_utils.normalized_str(x)]
            if term_len == 1:
                self._term_counts += Counter(get_id(w) for w in self.words())
            else:
                self._term_counts += Counter(get_id(ng) for ng in self.ngrams(term_len))
            term_count_ = self._term_counts[term_id]
            if term_count_ > 0:
                return term_count_
        # last resort: try a regular expression
        return sum(1 for _ in re.finditer(re.escape(term_text), self.text))

    @property
    def n_tokens(self):
        """The number of tokens in the document -- including punctuation."""
        return len(self.spacy_doc)

    def n_words(self, filter_stops=False, filter_punct=True, filter_nums=False):
        """
        The number of words in the document, with optional filtering of stop words,
        punctuation (on by default), and numbers.
        """
        return len(self.words(filter_stops=filter_stops,
                              filter_punct=filter_punct,
                              filter_nums=filter_nums))

    @property
    def n_sents(self):
        """The number of sentences in the document."""
        return sum(1 for _ in self.spacy_doc.sents)

    def n_paragraphs(self, pattern=r'\n\n+'):
        """The number of paragraphs in the document, as delimited by ``pattern``."""
        return sum(1 for _ in re.finditer(pattern, self.text)) + 1

    @cachedmethod(attrgetter('_cache'), key=partial(hashkey, 'readability_stats'))
    def readability_stats(self):
        return text_stats.readability_stats(self)
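
All of the methods above share one caching idiom: a per-instance LRUCache plus a
method-specific prefix passed to hashkey, so results of different methods never collide
in the same cache. A minimal, self-contained sketch of that cachetools pattern (the
class and method names here are illustrative, not from textacy; recent cachetools
releases expose hashkey via cachetools.keys):

from collections import Counter
from functools import partial
from operator import attrgetter

from cachetools import LRUCache, cachedmethod
from cachetools.keys import hashkey


class ExpensiveDoc(object):
    def __init__(self, text):
        self.text = text
        self._cache = LRUCache(maxsize=32)

    @cachedmethod(attrgetter('_cache'), key=partial(hashkey, 'n_chars'))
    def n_chars(self):
        # computed once, then served from self._cache on later calls
        return len(self.text)

    @cachedmethod(attrgetter('_cache'), key=partial(hashkey, 'word_counts'))
    def word_counts(self):
        # cached under a different key prefix, so it never collides with n_chars
        return Counter(self.text.split())
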
コード例 #42
0
ファイル: detoursCache.py プロジェクト: akshah/netra
def __init__(self, cachefilename, CACHE_SIZE, logger=logger('detoursCache.log')):
    self.lock = threading.RLock()
    self.cachefilename = cachefilename
    self.entry = LRUCache(maxsize=CACHE_SIZE)
    self.logger = logger
    self.hitcount = 0
コード例 #43
0
ファイル: detoursCache.py プロジェクト: akshah/netra
class Cache():

    def __init__(self, cachefilename, CACHE_SIZE, logger=logger('detoursCache.log')):
        self.lock = threading.RLock()
        self.cachefilename = cachefilename
        self.entry = LRUCache(maxsize=CACHE_SIZE)
        self.logger = logger
        self.hitcount = 0
       
    def hit(self):
        self.lock.acquire(blocking=1)
        try:
            self.hitcount+=1
        finally:
            self.lock.release()
            
    def reset(self):
        self.lock.acquire(blocking=1)
        try:
            self.hitcount=0
        finally:
            self.lock.release()
            
    def push(self,key,val):
        self.lock.acquire(blocking=1)
        try:
            self.entry[key]=val
        except:
            return
        finally:
            self.lock.release()
    
    def get(self,key):
        self.lock.acquire(blocking=1)
        try:
            return self.entry[key]
        except:
            return False
        finally:
            self.lock.release()
            
    def write_to_disk(self):
        self.lock.acquire(blocking=1)
        try:
            with open(self.cachefilename, 'w') as cachefile:
                for key, val in self.entry.items():
                    print(key + '\t' + val, file=cachefile)
        finally:
            self.lock.release()
            
    def load_from_disk(self):
        self.lock.acquire(blocking=1)
        try:
            if os.path.exists(self.cachefilename):
                with open(self.cachefilename, 'r') as f:
                    for line in f:
                        if line == "":
                            continue
                        rline = line.strip()
                        splitvals=rline.split('\t')
                        if len(splitvals) == 2:
                            key=splitvals[0]
                            valstr=splitvals[1] 
                            self.entry[key]=valstr    
                        else:
                            continue
        except Exception:
            self.logger.error("Failed to read existing cache file")
            raise IOError("Error in loading previous cache file")
        
        finally:
            self.lock.release()
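
A short usage sketch for the wrapper above (hedged: the logger factory, the imports, and
the file/key names are assumptions for illustration, not shown in detoursCache.py):

cache = Cache('detours.cache', CACHE_SIZE=4096)
cache.load_from_disk()               # warm the LRU from a previous run, if the file exists

key = '203.0.113.0/24'
if cache.get(key):
    cache.hit()                      # count a lookup answered from the cache
else:
    cache.push(key, 'detour-free')   # remember the computed result

cache.write_to_disk()                # persist current entries as key<TAB>value lines
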
コード例 #44
0
ファイル: library.py プロジェクト: mopidy/mopidy-gmusic
class GMusicLibraryProvider(backend.LibraryProvider):
    root_directory = Ref.directory(uri='gmusic:directory', name='Google Music')

    def __init__(self, *args, **kwargs):
        super(GMusicLibraryProvider, self).__init__(*args, **kwargs)

        # tracks, albums, and artists here refer to what is explicitly
        # in our library.
        self.tracks = {}
        self.albums = {}
        self.artists = {}

        # aa_* caches are *only* used for temporary objects. Library
        # objects will never make it here.
        self.aa_artists = LRUCache(1024)
        self.aa_tracks = LRUCache(1024)
        self.aa_albums = LRUCache(1024)

        self._radio_stations_in_browse = (
            self.backend.config['gmusic']['radio_stations_in_browse'])
        self._radio_stations_count = (
            self.backend.config['gmusic']['radio_stations_count'])
        self._radio_tracks_count = (
            self.backend.config['gmusic']['radio_tracks_count'])

        self._top_tracks_count = (
            self.backend.config['gmusic']['top_tracks_count'])

        # Setup the root of library browsing.
        self._root = [
            Ref.directory(uri='gmusic:album', name='Albums'),
            Ref.directory(uri='gmusic:artist', name='Artists'),
            Ref.directory(uri='gmusic:track', name='Tracks')
        ]

        if self._radio_stations_in_browse:
            self._root.append(Ref.directory(uri='gmusic:radio',
                                            name='Radios'))

    @property
    def all_access(self):
        return self.backend.session.all_access

    def _browse_tracks(self):
        tracks = list(self.tracks.values())
        tracks.sort(key=lambda ref: ref.name)
        refs = []
        for track in tracks:
            refs.append(track_to_ref(track))
        return refs

    def _browse_albums(self):
        refs = []
        for album in self.albums.values():
            refs.append(album_to_ref(album))
        refs.sort(key=lambda ref: ref.name)
        return refs

    def _browse_album(self, uri):
        refs = []
        for track in self._lookup_album(uri):
            refs.append(track_to_ref(track, True))
        return refs

    def _browse_artists(self):
        refs = []
        for artist in self.artists.values():
            refs.append(artist_to_ref(artist))
        refs.sort(key=lambda ref: ref.name)
        return refs

    def _browse_artist(self, uri):
        refs = []
        for album in self._get_artist_albums(uri):
            refs.append(album_to_ref(album))
            refs.sort(key=lambda ref: ref.name)
        if len(refs) > 0:
            refs.insert(0, Ref.directory(uri=uri + ':all', name='All Tracks'))
            is_all_access = uri.startswith('gmusic:artist:A')
            if is_all_access:
                refs.insert(1, Ref.directory(uri=uri + ':top', name='Top Tracks'))
            return refs
        else:
            # Show all tracks if no album is available
            return self._browse_artist_all_tracks(uri)

    def _browse_artist_all_tracks(self, uri):
        artist_uri = ':'.join(uri.split(':')[:3])
        refs = []
        tracks = self._lookup_artist(artist_uri, True)
        for track in tracks:
            refs.append(track_to_ref(track))
        return refs

    def _browse_artist_top_tracks(self, uri):
        artist_uri = ':'.join(uri.split(':')[:3])
        refs = []
        tracks = self._get_artist_top_tracks(artist_uri)
        for track in tracks:
            refs.append(track_to_ref(track))
        return refs

    def _browse_radio_stations(self, uri):
        stations = self.backend.session.get_radio_stations(
            self._radio_stations_count)
        # create Ref objects
        refs = []
        for station in stations:
            refs.append(Ref.directory(uri='gmusic:radio:' + station['id'],
                                      name=station['name']))
        return refs

    def _browse_radio_station(self, uri):
        station_id = uri.split(':')[2]
        tracks = self.backend.session.get_station_tracks(
            station_id, self._radio_tracks_count)

        # create Ref objects
        refs = []
        for track in tracks:
            mopidy_track = self._to_mopidy_track(track)
            self.aa_tracks[mopidy_track.uri] = mopidy_track
            refs.append(track_to_ref(mopidy_track))
        return refs

    def browse(self, uri):
        logger.debug('browse: %s', str(uri))
        if not uri:
            return []
        if uri == self.root_directory.uri:
            return self._root

        parts = uri.split(':')

        # tracks
        if uri == 'gmusic:track':
            return self._browse_tracks()

        # albums
        if uri == 'gmusic:album':
            return self._browse_albums()

        # a single album
        # uri == 'gmusic:album:album_id'
        if len(parts) == 3 and parts[1] == 'album':
            return self._browse_album(uri)

        # artists
        if uri == 'gmusic:artist':
            return self._browse_artists()

        # a single artist
        # uri == 'gmusic:artist:artist_id'
        if len(parts) == 3 and parts[1] == 'artist':
            return self._browse_artist(uri)

        # all tracks of a single artist
        # uri == 'gmusic:artist:artist_id:all'
        if len(parts) == 4 and parts[1] == 'artist' and parts[3] == 'all':
            return self._browse_artist_all_tracks(uri)

        # top tracks of a single artist
        # uri == 'gmusic:artist:artist_id:top'
        if len(parts) == 4 and parts[1] == 'artist' and parts[3] == 'top':
            return self._browse_artist_top_tracks(uri)

        # all radio stations
        if uri == 'gmusic:radio':
            return self._browse_radio_stations(uri)

        # a single radio station
        # uri == 'gmusic:radio:station_id'
        if len(parts) == 3 and parts[1] == 'radio':
            return self._browse_radio_station(uri)

        logger.debug('Unknown uri for browse request: %s', uri)

        return []

    def lookup(self, uri):
        if uri.startswith('gmusic:track:'):
            return self._lookup_track(uri)
        elif uri.startswith('gmusic:album:'):
            return self._lookup_album(uri)
        elif uri.startswith('gmusic:artist:'):
            return self._lookup_artist(uri)
        else:
            return []

    def _lookup_track(self, uri):
        is_all_access = uri.startswith('gmusic:track:T')

        try:
            return [self.tracks[uri]]
        except KeyError:
            logger.debug('Track not a library track %r', uri)
            pass

        if is_all_access and self.all_access:
            track = self.aa_tracks.get(uri)
            if track:
                return [track]
            song = self.backend.session.get_track_info(uri.split(':')[2])
            if song is None:
                logger.warning('There is no song %r', uri)
                return []
            if 'artistId' not in song:
                logger.warning('Failed to lookup %r', uri)
                return []
            mopidy_track = self._to_mopidy_track(song)
            self.aa_tracks[mopidy_track.uri] = mopidy_track
            return [mopidy_track]
        else:
            return []

    def _lookup_album(self, uri):
        is_all_access = uri.startswith('gmusic:album:B')
        if self.all_access and is_all_access:
            tracks = self.aa_albums.get(uri)
            if tracks:
                return tracks
            album = self.backend.session.get_album_info(
                uri.split(':')[2], include_tracks=True)
            if album and album.get('tracks'):
                tracks = [self._to_mopidy_track(track)
                          for track in album['tracks']]
                for track in tracks:
                    self.aa_tracks[track.uri] = track
                tracks = sorted(tracks, key=lambda t: (t.disc_no, t.track_no))
                self.aa_albums[uri] = tracks
                return tracks

            logger.warning('Failed to lookup all access album %r: %r',
                           uri, album)

        # Even if the album has an all access ID, we need to look it
        # up here (as a fallback) because purchased tracks can have a
        # store ID, but only show up in your library.
        try:
            album = self.albums[uri]
        except KeyError:
            logger.debug('Failed to lookup %r', uri)
            return []

        tracks = self._find_exact(
            dict(album=album.name,
                 artist=[artist.name for artist in album.artists],
                 date=album.date)).tracks
        return sorted(tracks, key=lambda t: (t.disc_no, t.track_no))

    def _get_artist_top_tracks(self, uri):
        is_all_access = uri.startswith('gmusic:artist:A')
        artist_id = uri.split(':')[2]

        if not is_all_access:
            logger.debug("Top Tracks not available for non-all-access artists")
            return []

        artist_info = self.backend.session.get_artist_info(artist_id,
                                                           include_albums=False,
                                                           max_top_tracks=self._top_tracks_count,
                                                           max_rel_artist=0)
        top_tracks = []

        for track_dict in artist_info['topTracks']:
            top_tracks.append(self._to_mopidy_track(track_dict))

        return top_tracks

    def _get_artist_albums(self, uri):
        is_all_access = uri.startswith('gmusic:artist:A')

        artist_id = uri.split(':')[2]
        if is_all_access:
            # all access
            artist_infos = self.backend.session.get_artist_info(
                artist_id, max_top_tracks=0, max_rel_artist=0)
            if artist_infos is None or 'albums' not in artist_infos:
                return []
            albums = []
            for album in artist_infos['albums']:
                albums.append(
                    self._aa_search_album_to_mopidy_album({'album': album}))
            return albums
        elif self.all_access and artist_id in self.aa_artists:
            albums = self._get_artist_albums(
                'gmusic:artist:%s' % self.aa_artists[artist_id])
            if len(albums) > 0:
                return albums
            # else fall back to non aa albums
        if uri in self.artists:
            artist = self.artists[uri]
            return [album for album in self.albums.values()
                    if artist in album.artists]
        else:
            logger.debug('0 albums available for artist %r', uri)
            return []

    def _lookup_artist(self, uri, exact_match=False):
        def sorter(track):
            return (
                track.album.date,
                track.album.name,
                track.disc_no,
                track.track_no,
            )

        if self.all_access:
            try:
                all_access_id = self.aa_artists[uri.split(':')[2]]
                artist_infos = self.backend.session.get_artist_info(
                    all_access_id, max_top_tracks=0, max_rel_artist=0)
                if not artist_infos or not artist_infos['albums']:
                    logger.warning('Failed to lookup %r', artist_infos)
                tracks = [
                    self._lookup_album('gmusic:album:' + album['albumId'])
                    for album in artist_infos['albums']]
                tracks = reduce(lambda a, b: (a + b), tracks)
                return sorted(tracks, key=sorter)
            except KeyError:
                pass
        try:
            artist = self.artists[uri]
        except KeyError:
            logger.debug('Failed to lookup %r', uri)
            return []

        tracks = self._find_exact(
            dict(artist=artist.name)).tracks
        if exact_match:
            tracks = filter(lambda t: artist in t.artists, tracks)
        return sorted(tracks, key=sorter)

    def refresh(self, uri=None):
        self.tracks = {}
        self.albums = {}
        self.artists = {}

        album_tracks = {}
        for track in self.backend.session.get_all_songs():
            mopidy_track = self._to_mopidy_track(track)

            self.tracks[mopidy_track.uri] = mopidy_track
            self.albums[mopidy_track.album.uri] = mopidy_track.album

            # We don't care about the order because we're just using
            # this as a temporary variable to grab the proper album
            # artist out of the album.
            if mopidy_track.album.uri not in album_tracks:
                album_tracks[mopidy_track.album.uri] = []

            album_tracks[mopidy_track.album.uri].append(mopidy_track)

        # Yes, this is awful. No, I don't have a better solution. Yes,
        # I'm annoyed at Google for not providing album artist IDs.
        for album in self.albums.values():
            artist_found = False
            for album_artist in album.artists:
                for track in album_tracks[album.uri]:
                    for artist in track.artists:
                        if album_artist.name == artist.name:
                            artist_found = True
                            self.artists[artist.uri] = artist

            if not artist_found:
                for artist in album.artists:
                    self.artists[artist.uri] = artist

    def search(self, query=None, uris=None, exact=False):
        if exact:
            return self._find_exact(query=query, uris=uris)

        lib_tracks, lib_artists, lib_albums = self._search_library(query, uris)

        if query:
            aa_tracks, aa_artists, aa_albums = self._search(query, uris)
            for aa_artist in aa_artists:
                lib_artists.add(aa_artist)

            for aa_album in aa_albums:
                lib_albums.add(aa_album)

            lib_tracks = set(lib_tracks)

            for aa_track in aa_tracks:
                lib_tracks.add(aa_track)

        return SearchResult(uri='gmusic:search',
                            tracks=lib_tracks,
                            artists=lib_artists,
                            albums=lib_albums)

    def _find_exact(self, query=None, uris=None):
        # Find exact can only be done on gmusic library,
        # since one can't filter all access searches
        lib_tracks, lib_artists, lib_albums = self._search_library(query, uris)

        return SearchResult(uri='gmusic:search',
                            tracks=lib_tracks,
                            artists=lib_artists,
                            albums=lib_albums)

    def _search(self, query=None, uris=None):
        for (field, values) in query.iteritems():
            if not hasattr(values, '__iter__'):
                values = [values]

            # Since gmusic does not support search filters, just search for the
            # first 'searchable' filter
            if field in [
                    'track_name', 'album', 'artist', 'albumartist', 'any']:
                logger.info(
                    'Searching Google Play Music for: %s',
                    values[0])
                res = self.backend.session.search(values[0], max_results=50)
                if res is None:
                    return [], [], []

                albums = [
                    self._aa_search_album_to_mopidy_album(album_res)
                    for album_res in res['album_hits']]
                artists = [
                    self._aa_search_artist_to_mopidy_artist(artist_res)
                    for artist_res in res['artist_hits']]
                tracks = [
                    self._aa_search_track_to_mopidy_track(track_res)
                    for track_res in res['song_hits']]

                return tracks, artists, albums

        return [], [], []

    def _search_library(self, query=None, uris=None):
        if query is None:
            query = {}
        self._validate_query(query)
        result_tracks = self.tracks.values()

        for (field, values) in query.iteritems():
            if not hasattr(values, '__iter__'):
                values = [values]
            # FIXME this is bound to be slow for large libraries
            for value in values:
                if field == 'track_no':
                    q = self._convert_to_int(value)
                else:
                    q = value.strip().lower()

                def uri_filter(track):
                    return q in track.uri.lower()

                def track_name_filter(track):
                    return q in track.name.lower()

                def album_filter(track):
                    return q in getattr(track, 'album', Album()).name.lower()

                def artist_filter(track):
                    return (
                        any(q in a.name.lower() for a in track.artists) or
                        albumartist_filter(track))

                def albumartist_filter(track):
                    album_artists = getattr(track, 'album', Album()).artists
                    return any(q in a.name.lower() for a in album_artists)

                def track_no_filter(track):
                    return track.track_no == q

                def date_filter(track):
                    return track.date and track.date.startswith(q)

                def any_filter(track):
                    return any([
                        uri_filter(track),
                        track_name_filter(track),
                        album_filter(track),
                        artist_filter(track),
                        albumartist_filter(track),
                        date_filter(track),
                    ])

                if field == 'uri':
                    result_tracks = filter(uri_filter, result_tracks)
                elif field == 'track_name':
                    result_tracks = filter(track_name_filter, result_tracks)
                elif field == 'album':
                    result_tracks = filter(album_filter, result_tracks)
                elif field == 'artist':
                    result_tracks = filter(artist_filter, result_tracks)
                elif field == 'albumartist':
                    result_tracks = filter(albumartist_filter, result_tracks)
                elif field == 'track_no':
                    result_tracks = filter(track_no_filter, result_tracks)
                elif field == 'date':
                    result_tracks = filter(date_filter, result_tracks)
                elif field == 'any':
                    result_tracks = filter(any_filter, result_tracks)
                else:
                    raise LookupError('Invalid lookup field: %s' % field)

        result_artists = set()
        result_albums = set()
        for track in result_tracks:
            result_artists |= track.artists
            result_albums.add(track.album)

        return result_tracks, result_artists, result_albums

    def _validate_query(self, query):
        for (_, values) in query.iteritems():
            if not values:
                raise LookupError('Missing query')
            for value in values:
                if not value:
                    raise LookupError('Missing query')

    def _to_mopidy_track(self, song):
        track_id = song.get('id', song.get('nid'))
        if track_id is None:
            raise ValueError
        if track_id[0] != "T" and "-" not in track_id:
            track_id = "T"+track_id
        return Track(
            uri='gmusic:track:' + track_id,
            name=song['title'],
            artists=[self._to_mopidy_artist(song)],
            album=self._to_mopidy_album(song),
            track_no=song.get('trackNumber', 1),
            disc_no=song.get('discNumber', 1),
            date=unicode(song.get('year', 0)),
            length=int(song['durationMillis']),
            bitrate=320)

    def _to_mopidy_album(self, song):
        name = song.get('album', '')
        artist = self._to_mopidy_album_artist(song)
        date = unicode(song.get('year', 0))

        album_id = song.get('albumId')
        if album_id is None:
            album_id = create_id(artist.name + name + date)

        uri = 'gmusic:album:' + album_id
        images = get_images(song)
        return Album(
            uri=uri,
            name=name,
            artists=[artist],
            num_tracks=song.get('totalTrackCount'),
            num_discs=song.get('totalDiscCount'),
            date=date,
            images=images)

    def _to_mopidy_artist(self, song):
        name = song.get('artist', '')
        artist_id = song.get('artistId')
        if artist_id is not None:
            artist_id = artist_id[0]
        else:
            artist_id = create_id(name)
        uri = 'gmusic:artist:' + artist_id
        return Artist(uri=uri, name=name)

    def _to_mopidy_album_artist(self, song):
        name = song.get('albumArtist', '')
        if name.strip() == '':
            name = song.get('artist', '')
        uri = 'gmusic:artist:' + create_id(name)
        return Artist(uri=uri, name=name)

    def _aa_search_track_to_mopidy_track(self, search_track):
        track = search_track['track']

        aa_artist_id = create_id(track['artist'])
        if 'artistId' in track:
            aa_artist_id = track['artistId'][0]
        else:
            logger.warning('No artistId for Track %r', track)

        artist = Artist(
            uri='gmusic:artist:' + aa_artist_id,
            name=track['artist'])

        album = Album(
            uri='gmusic:album:' + track['albumId'],
            name=track['album'],
            artists=[artist],
            date=unicode(track.get('year', 0)))

        return Track(
            uri='gmusic:track:' + track['storeId'],
            name=track['title'],
            artists=[artist],
            album=album,
            track_no=track.get('trackNumber', 1),
            disc_no=track.get('discNumber', 1),
            date=unicode(track.get('year', 0)),
            length=int(track['durationMillis']),
            bitrate=320)

    def _aa_search_artist_to_mopidy_artist(self, search_artist):
        artist = search_artist['artist']
        uri = 'gmusic:artist:' + artist['artistId']
        return Artist(uri=uri, name=artist['name'])

    def _aa_search_album_to_mopidy_album(self, search_album):
        album = search_album['album']
        uri = 'gmusic:album:' + album['albumId']
        name = album['name']
        artist = self._aa_search_artist_album_to_mopidy_artist_album(album)
        date = unicode(album.get('year', 0))
        return Album(
            uri=uri,
            name=name,
            artists=[artist],
            date=date)

    def _aa_search_artist_album_to_mopidy_artist_album(self, album):
        name = album.get('albumArtist', '')
        if name.strip() == '':
            name = album.get('artist', '')
        uri = 'gmusic:artist:' + create_id(name)
        return Artist(uri=uri, name=name)

    def _convert_to_int(self, string):
        try:
            return int(string)
        except ValueError:
            return object()
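
The aa_* attributes above are plain cachetools LRUCache objects used as dict-like memo
tables keyed by Mopidy URIs: look in the cache first, fall back to the web service, then
store the result. A reduced sketch of that lookup-then-cache pattern, independent of
Mopidy (function and variable names are illustrative):

from cachetools import LRUCache

aa_tracks = LRUCache(maxsize=1024)

def lookup_track(uri, fetch_remote):
    """Return the track for `uri`, fetching and caching it on a miss."""
    track = aa_tracks.get(uri)
    if track is not None:
        return track
    track = fetch_remote(uri)        # e.g. a Google Music web-service call
    if track is not None:
        aa_tracks[uri] = track       # LRUCache supports plain item assignment
    return track
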
コード例 #45
0
ファイル: backend_z3.py プロジェクト: iamahuman/claripy
def popitem(self):
    key, val = LRUCache.popitem(self)
    if self._evict:
        self._evict(key, val)
    return key, val
コード例 #46
0
class TelegramBot:
  def __init__(self, token, name):
    self.name = name
    self.token = token
    self.agent = Agent(reactor)
    self.last_update_id = -1
    self.message_handlers = []
    self.message_subscribers = LRUCache(maxsize=10000)
    self.message_prehandlers = []
    self.message_next_handlers = LRUCache(maxsize=1000)
    self.retry_update = 0
    self.running = False
    self.inline_query_handler = None
    self.callback_query_handler = None
    self.chosen_inline_result_handler = None
    self.botan = None

  def method_url(self, method):
    return API_URL + 'bot' + self.token + '/' + method

  def start_update(self):
    self.running = True

    @inlineCallbacks
    def update_bot():
      if not self.running:
        return

      try:
        yield self.get_update()

        self.retry_update = 0
        reactor.callWhenRunning(update_bot)
      except:
        log.failure("Couldn't get updates. Delaying for %d seconds" % self.retry_update)
        reactor.callLater(self.retry_update, update_bot)
        self.retry_update = min(self.retry_update + 3, 20)

    reactor.callWhenRunning(update_bot)

  def stop_update(self):
    self.running = False

  @inlineCallbacks
  def get_update(self):
    payload = {'timeout': 20, 'offset': self.last_update_id + 1}
    updates = yield _request(self.token, 'getUpdates', params=payload, timeout=25)

    new_messages_ids = set()
    new_messages = []
    for update in updates:
      log.debug("New update. ID: {update_id}", update_id=update['update_id'])
      if update['update_id'] > self.last_update_id:
        self.last_update_id = update['update_id']

      if 'inline_query' in update.keys():
        inline_query = InlineQuery.de_json(update['inline_query'])
        self.process_inline_query(inline_query)
      elif 'chosen_inline_result' in update.keys():
        chosen_inline_result = ChosenInlineResult.de_json(update['chosen_inline_result'])
        self.process_chosen_inline_query(chosen_inline_result)
      elif 'message' in update.keys():
        msg = Message.de_json(update['message'])
        msg.bot_name = self.name
        if msg.from_user.id not in new_messages_ids:
          new_messages.append(msg)
          new_messages_ids.add(msg.from_user.id)
      elif 'callback_query' in update.keys():
        callback_query = CallbackQuery.de_json(update['callback_query'])
        self.process_callback_query(callback_query)
      else:
        log.debug("Unknown update type: {update}",
                  update=json.dumps(update, skipkeys=True, ensure_ascii=False, default=lambda o: o.__dict__))

    if len(new_messages) > 0:
      self.process_new_messages(new_messages)

  def process_callback_query(self, callback_query):
    if self.callback_query_handler:
      self.callback_query_handler(callback_query, self)

  def process_new_messages(self, new_messages):
    self._notify_message_prehandlers(new_messages)

    not_processed = []
    for message in new_messages:
      if not self._notify_message_next_handler(message):
        not_processed.append(message)
    new_messages = not_processed

    self._notify_command_handlers(new_messages)
    self._notify_message_subscribers(new_messages)

  def process_inline_query(self, inline_query):
    if self.inline_query_handler:
      self.inline_query_handler(inline_query, self)

  def process_chosen_inline_query(self, chosen_inline_result):
    if self.chosen_inline_result_handler:
      self.chosen_inline_result_handler(chosen_inline_result, self)

  def _notify_message_prehandlers(self, new_messages):
    for message in new_messages:
      for handler in self.message_prehandlers:
        handler(message, self)

  def _notify_command_handlers(self, new_messages):
    for message in new_messages:
      for message_handler in self.message_handlers:
        if self._test_message_handler(message_handler, message):
          message_handler['function'](message, self)
          break

  def _notify_message_subscribers(self, new_messages):
    for message in new_messages:
      if not hasattr(message, 'reply_to_message'):
        continue

      handler = self.message_subscribers.pop(message.reply_to_message.message_id, None)
      if handler is not None:
        handler(message, self)

  def _notify_message_next_handler(self, message):
    handler = self.message_next_handlers.pop(message.chat.id, None)
    if handler is not None:
      handler(message, self)
      return True
    return False

  def register_message_handler(self, fn, commands=None, regexp=None, func=None, content_types=None):
    if not content_types:
      content_types = ['text']
    func_dict = {'function': fn, 'content_types': content_types}
    if regexp:
      func_dict['regexp'] = regexp if 'text' in content_types else None
    if func:
      func_dict['lambda'] = func
    if commands:
      func_dict['commands'] = commands if 'text' in content_types else None
    self.message_handlers.append(func_dict)

  def message_handler(self, commands=None, regexp=None, func=None, content_types=None):
    def decorator(fn):
      self.register_message_handler(fn, commands, regexp, func, content_types)
      return fn

    return decorator

  @staticmethod
  def _test_message_handler(message_handler, message):
    if message.content_type not in message_handler['content_types']:
      return False
    if 'commands' in message_handler and message.content_type == 'text':
      cmd = extract_command(message.text)
      if cmd:
        for command_pattern in message_handler['commands']:
          if not command_pattern.endswith('$'):
            command_pattern += '$'
          if re.match(command_pattern, cmd):
            return True
        return False
    if 'regexp' in message_handler \
        and message.content_type == 'text' \
        and re.search(message_handler['regexp'], message.text):
      return True
    if 'lambda' in message_handler:
      return message_handler['lambda'](message)
    return False

  @inlineCallbacks
  def send_message(self, chat_id, text,
                   disable_web_page_preview=None,
                   reply_to_message_id=None,
                   reply_markup=None,
                   parse_mode=None):
    method = r'sendMessage'

    payload = {'chat_id': str(chat_id), 'text': text}
    if disable_web_page_preview:
      payload['disable_web_page_preview'] = disable_web_page_preview
    if reply_to_message_id:
      payload['reply_to_message_id'] = reply_to_message_id
    if reply_markup:
      if isinstance(reply_markup, JsonSerializable):
        payload['reply_markup'] = reply_markup.to_json()
      elif isinstance(reply_markup, dict):
        payload['reply_markup'] = json.dumps(reply_markup)
    if parse_mode:
      payload['parse_mode'] = parse_mode
    request = yield _request(self.token, method, 'POST', params=payload)
    returnValue(Message.de_json(request))

  @inlineCallbacks
  def answer_to_inline_query(self, query_id, results, personal=False):
    request = yield _request(self.token, 'answerInlineQuery', 'POST', params={
      'inline_query_id': str(query_id),
      'results': json.dumps(results, ensure_ascii=False),
      'is_personal': personal
    })
    returnValue(request)

  @inlineCallbacks
  def edit_message_text(self, chat_id, message_id, text,
                        parse_mode=None,
                        disable_web_page_preview=None,
                        reply_markup=None):
    method = r'editMessageText'

    payload = {'chat_id': str(chat_id), 'message_id': str(message_id), 'text': text}
    if disable_web_page_preview:
      payload['disable_web_page_preview'] = disable_web_page_preview
    if reply_markup:
      if isinstance(reply_markup, JsonSerializable):
        payload['reply_markup'] = reply_markup.to_json()
      elif isinstance(reply_markup, dict):
        payload['reply_markup'] = json.dumps(reply_markup)
    if parse_mode:
      payload['parse_mode'] = parse_mode
    request = yield _request(self.token, method, 'POST', params=payload)
    returnValue(Message.de_json(request))

  @inlineCallbacks
  def answer_callback_query(self, callback_query_id,
                            text=None,
                            show_alert=None):
    method = r'answerCallbackQuery'

    payload = {'callback_query_id': str(callback_query_id)}
    if text:
      payload['text'] = text
    if show_alert:
      payload['show_alert'] = show_alert
    request = yield _request(self.token, method, 'POST', params=payload)
    returnValue(request)

  @inlineCallbacks
  def get_file(self, file_id):
    method = r'getFile'

    payload = {'file_id': str(file_id)}

    request = yield _request(self.token, method, 'POST', params=payload)
    returnValue(File.de_json(request))


  def get_file_url(self, file):
    return "https://api.telegram.org/file/bot%s/%s" % (self.token, file.path)


  @inlineCallbacks
  def send_audio(self, chat_id, audio,
                 filename='audio',
                 duration=None,
                 performer=None,
                 title=None,
                 reply_to_message_id=None,
                 reply_markup=None,
                 timeout=30):
    method = r'sendAudio'

    payload = {'chat_id': chat_id}
    files = None
    if not is_string(audio):
      files = {'audio': (filename, audio)}
    else:
      payload['audio'] = audio
    if duration:
      payload['duration'] = duration
    if performer:
      payload['performer'] = performer
    if title:
      payload['title'] = title
    if reply_to_message_id:
      payload['reply_to_message_id'] = reply_to_message_id
    # if reply_markup:
    #   payload['reply_markup'] = _convert_markup(reply_markup)

    request = yield _request(self.token, method, 'POST', params=payload, files=files, timeout=timeout)
    returnValue(Message.de_json(request))

  def reply_to(self, message, text, **kwargs):
    return self.send_message(message.chat.id, text, reply_to_message_id=message.message_id, **kwargs)

  def send_chat_action(self, chat_id, action):
    method = r'sendChatAction'

    payload = {'chat_id': chat_id, 'action': action}
    return _make_request(self.token, method, 'POST', params=payload)

  def register_for_reply(self, message, callback):
    self.message_subscribers[message.message_id] = callback

  def register_next_chat_handler(self, chat_id, callback):
    self.message_next_handlers[chat_id] = callback
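
In the bot above, the two LRUCache attributes act as bounded registries of one-shot
callbacks: register_for_reply() files a handler under an outgoing message id, and
_notify_message_subscribers() pops it when a reply to that id arrives, so handlers that
never see a reply simply age out instead of leaking. A stripped-down sketch of that idea
without Twisted (names are illustrative):

from cachetools import LRUCache

reply_handlers = LRUCache(maxsize=10000)

def register_for_reply(message_id, callback):
    reply_handlers[message_id] = callback

def on_incoming(message):
    reply_to = message.get('reply_to_message_id')
    if reply_to is None:
        return
    handler = reply_handlers.pop(reply_to, None)   # one-shot: removed on use
    if handler is not None:
        handler(message)
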
コード例 #47
0
ファイル: _core.py プロジェクト: sherwoodwang/dispatching-dns
class ProxyResolver(BaseResolver):
    _Record = recordclass('_Record', 'expire data')

    def __init__(self, upstreams, cache_size=None):
        super().__init__()
        self._upstreams = upstreams

        self._cache = LRUCache(cache_size) if cache_size else None
        self._cache_lock = RLock()

    def _query_cache(self, key):
        if self._cache is None:
            # caching disabled (no cache_size given); treat every lookup as a miss
            return []
        with self._cache_lock:
            records = self._cache.get(key, None)
            if records is None:
                records = []
            else:
                records = [record for record in records if record.expire > time()]
                if len(records):
                    records = records[1:] + [records[0]]
                    self._cache[key] = records
                else:
                    del self._cache[key]
            return records

    def _add_to_cache(self, key, record):
        if self._cache is None:
            return

        with self._cache_lock:
            records = self._cache.get(key, None)
            if records is None:
                self._cache[key] = [record]
            else:
                for erecord in records:
                    if erecord.data == record.data:
                        erecord.expire = record.expire
                        break
                else:
                    records.append(record)

    def _resolve_in_cache(self, questions, oq, oa, now):
        for q in questions:
            key = (q.qname, QTYPE.CNAME, q.qclass)
            cnames = self._query_cache(key)
            if len(cnames):
                recursive_questions = []
                for cname in cnames:
                    oa.add_answer(RR(ttl=max(cname.expire - now, 0), **cname.data))
                    recursive_questions.append(DNSQuestion(
                        qname=cname.data['rdata'].label,
                        qtype=q.qtype,
                        qclass=q.qclass
                    ))

                    self._resolve_in_cache(recursive_questions, oq, oa, now)
            else:
                if q.qtype != QTYPE.CNAME:
                    key = (q.qname, q.qtype, q.qclass)
                    record_list = self._query_cache(key)
                    if len(record_list):
                        for record in record_list:
                            oa.add_answer(RR(ttl=max(record.expire - now, 0), **record.data))
                    else:
                        oq.add_question(q)
                else:
                    oq.add_question(q)

    def resolve(self, request, handler):
        now = int(time())
        a = request.reply()

        uq = DNSRecord()
        self._resolve_in_cache(request.questions, uq, a, now)

        if len(uq.questions):
            for upstream in self._upstreams:
                try:
                    ua_pkt = uq.send(
                        str(upstream.address),
                        upstream.port,
                        upstream.tcp,
                        upstream.timeout,
                        upstream.ipv6
                    )
                    ua = DNSRecord.parse(ua_pkt)
                except:
                    continue

                for rr in ua.rr:
                    key = (rr.rname, rr.rtype, rr.rclass)
                    cr = self._Record(now + rr.ttl, {
                        'rname': rr.rname,
                        'rtype': rr.rtype,
                        'rclass': rr.rclass,
                        'rdata': rr.rdata,
                    })
                    self._add_to_cache(key, cr)
                a.add_answer(*ua.rr)
                break
            else:
                raise IOError

        return a
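
ProxyResolver layers TTL handling on top of a plain LRUCache by storing each record with
an absolute expiry time and filtering stale entries on every read. The same "cache DNS
answers only for their TTL" idea, reduced to a dnslib-free sketch (key format and names
are illustrative):

from time import time
from cachetools import LRUCache

dns_cache = LRUCache(maxsize=512)

def cache_answer(key, data, ttl):
    dns_cache[key] = (time() + ttl, data)

def cached_answer(key):
    entry = dns_cache.get(key)
    if entry is None:
        return None
    expire, data = entry
    if expire <= time():
        del dns_cache[key]           # lazily evict the stale record
        return None
    return data
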
コード例 #48
0
ファイル: backend_z3.py プロジェクト: iamahuman/claripy
def __init__(self, maxsize, getsizeof=None, evict=None):
    LRUCache.__init__(self, maxsize, getsizeof)
    self._evict = evict
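
Examples #45 and #48 are two halves of the same subclass: an LRUCache whose constructor
takes an eviction callback and whose popitem() invokes it for every entry pushed out.
Put together as a self-contained sketch (claripy-specific details omitted; names are
illustrative):

from cachetools import LRUCache

class EvictingLRUCache(LRUCache):
    def __init__(self, maxsize, getsizeof=None, evict=None):
        LRUCache.__init__(self, maxsize, getsizeof)
        self._evict = evict

    def popitem(self):
        key, val = LRUCache.popitem(self)
        if self._evict:
            self._evict(key, val)    # tell the owner what just fell out of the cache
        return key, val

cache = EvictingLRUCache(maxsize=2, evict=lambda k, v: print('evicted', k, v))
cache['a'] = 1
cache['b'] = 2
cache['c'] = 3    # exceeds maxsize, so popitem() runs and the callback fires for 'a'
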
コード例 #49
0
class MetricCassandraRepository(abstract_repository.AbstractCassandraRepository):
    def __init__(self):
        super(MetricCassandraRepository, self).__init__()

        self._lock = threading.RLock()

        LOG.debug("prepare cql statements...")

        self._measurement_insert_stmt = self._session.prepare(MEASUREMENT_INSERT_CQL)
        self._measurement_insert_stmt.is_idempotent = True

        self._measurement_update_stmt = self._session.prepare(MEASUREMENT_UPDATE_CQL)
        self._measurement_update_stmt.is_idempotent = True

        self._metric_insert_stmt = self._session.prepare(METRICS_INSERT_CQL)
        self._metric_insert_stmt.is_idempotent = True

        self._metric_update_stmt = self._session.prepare(METRICS_UPDATE_CQL)
        self._metric_update_stmt.is_idempotent = True

        self._dimension_stmt = self._session.prepare(DIMENSION_INSERT_CQL)
        self._dimension_stmt.is_idempotent = True

        self._dimension_metric_stmt = self._session.prepare(DIMENSION_METRIC_INSERT_CQL)
        self._dimension_metric_stmt.is_idempotent = True

        self._metric_dimension_stmt = self._session.prepare(METRIC_DIMENSION_INSERT_CQL)
        self._metric_dimension_stmt.is_idempotent = True

        self._retrieve_metric_dimension_stmt = self._session.prepare(RETRIEVE_METRIC_DIMENSION_CQL)

        self._metric_batch = MetricBatch(
            self._cluster.metadata,
            self._cluster.load_balancing_policy,
            self._max_batches)

        self._metric_id_cache = LRUCache(self._cache_size)
        self._dimension_cache = LRUCache(self._cache_size)
        self._metric_dimension_cache = LRUCache(self._cache_size)

        self._load_dimension_cache()
        self._load_metric_dimension_cache()

    def process_message(self, message):
        (dimensions, metric_name, region, tenant_id, time_stamp, value,
         value_meta) = parse_measurement_message(message)

        with self._lock:
            dim_names = []
            dim_list = []
            for name in sorted(dimensions.iterkeys()):
                dim_list.append('%s\t%s' % (name, dimensions[name]))
                dim_names.append(name)

            hash_string = '%s\0%s\0%s\0%s' % (region, tenant_id, metric_name, '\0'.join(dim_list))
            metric_id = hashlib.sha1(hash_string.encode('utf8')).hexdigest()

            metric = Metric(id=metric_id,
                            region=region,
                            tenant_id=tenant_id,
                            name=metric_name,
                            dimension_list=dim_list,
                            dimension_names=dim_names,
                            time_stamp=time_stamp,
                            value=value,
                            value_meta=json.dumps(value_meta, ensure_ascii=False))

            id_bytes = bytearray.fromhex(metric.id)
            if self._metric_id_cache.get(metric.id, None):
                measurement_bound_stmt = self._measurement_update_stmt.bind((self._retention,
                                                                             metric.value,
                                                                             metric.value_meta,
                                                                             id_bytes,
                                                                             metric.time_stamp))
                self._metric_batch.add_measurement_query(measurement_bound_stmt)

                metric_update_bound_stmt = self._metric_update_stmt.bind((self._retention,
                                                                          metric.time_stamp,
                                                                          metric.region,
                                                                          metric.tenant_id,
                                                                          metric.name,
                                                                          metric.dimension_list,
                                                                          metric.dimension_names))
                self._metric_batch.add_metric_query(metric_update_bound_stmt)

                return metric

            self._metric_id_cache[metric.id] = metric.id

            metric_insert_bound_stmt = self._metric_insert_stmt.bind((self._retention,
                                                                      id_bytes,
                                                                      metric.time_stamp,
                                                                      metric.time_stamp,
                                                                      metric.region,
                                                                      metric.tenant_id,
                                                                      metric.name,
                                                                      metric.dimension_list,
                                                                      metric.dimension_names))
            self._metric_batch.add_metric_query(metric_insert_bound_stmt)

            for dim in metric.dimension_list:
                (name, value) = dim.split('\t')
                dim_key = self._get_dimnesion_key(metric.region, metric.tenant_id, name, value)
                if not self._dimension_cache.get(dim_key, None):
                    dimension_bound_stmt = self._dimension_stmt.bind((metric.region,
                                                                      metric.tenant_id,
                                                                      name,
                                                                      value))
                    self._metric_batch.add_dimension_query(dimension_bound_stmt)
                    self._dimension_cache[dim_key] = dim_key

                metric_dim_key = self._get_metric_dimnesion_key(
                    metric.region, metric.tenant_id, metric.name, name, value)
                if not self._metric_dimension_cache.get(metric_dim_key, None):
                    dimension_metric_bound_stmt = self._dimension_metric_stmt.bind(
                        (metric.region, metric.tenant_id, name, value, metric.name))
                    self._metric_batch.add_dimension_metric_query(dimension_metric_bound_stmt)

                    metric_dimension_bound_stmt = self._metric_dimension_stmt.bind(
                        (metric.region, metric.tenant_id, metric.name, name, value))
                    self._metric_batch.add_metric_dimension_query(metric_dimension_bound_stmt)

                    self._metric_dimension_cache[metric_dim_key] = metric_dim_key

            measurement_insert_bound_stmt = self._measurement_insert_stmt.bind(
                (self._retention,
                 metric.value,
                 metric.value_meta,
                 metric.region,
                 metric.tenant_id,
                 metric.name,
                 metric.dimension_list,
                 id_bytes,
                 metric.time_stamp))
            self._metric_batch.add_measurement_query(measurement_insert_bound_stmt)

            return metric

    def write_batch(self, metrics):

        with self._lock:
            batch_list = self._metric_batch.get_all_batches()

            results = execute_concurrent(self._session, batch_list, raise_on_first_error=True)

            self._handle_results(results)

            self._metric_batch.clear()

            LOG.info("flushed %s metrics", len(metrics))

    @staticmethod
    def _handle_results(results):
        for (success, result) in results:
            if not success:
                raise result

    def _load_dimension_cache(self):

        rows = self._session.execute(RETRIEVE_DIMENSION_CQL)

        if not rows:
            return

        for row in rows:
            key = self._get_dimnesion_key(row.region, row.tenant_id, row.name, row.value)
            self._dimension_cache[key] = key

        LOG.info(
            "loaded %s dimension entries cache from database into cache." %
            self._dimension_cache.currsize)

    @staticmethod
    def _get_dimnesion_key(region, tenant_id, name, value):
        return '%s\0%s\0%s\0%s' % (region, tenant_id, name, value)

    def _load_metric_dimension_cache(self):
        qm = token_range_query_manager.TokenRangeQueryManager(RETRIEVE_METRIC_DIMENSION_CQL,
                                                              self._process_metric_dimension_query)

        token_ring = self._cluster.metadata.token_map.ring

        qm.query(token_ring)

    def _process_metric_dimension_query(self, rows):

        cnt = 0
        for row in rows:
            key = self._get_metric_dimnesion_key(
                row.region,
                row.tenant_id,
                row.metric_name,
                row.dimension_name,
                row.dimension_value)
            self._metric_dimension_cache[key] = key
            cnt += 1

        LOG.info("loaded %s metric dimension entries from database into cache." % cnt)
        LOG.info(
            "total loaded %s metric dimension entries in cache." %
            self._metric_dimension_cache.currsize)

    @staticmethod
    def _get_metric_dimnesion_key(region, tenant_id, metric_name, dimension_name, dimension_value):

        return '%s\0%s\0%s\0%s\0%s' % (region, tenant_id, metric_name,
                                       dimension_name, dimension_value)
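
Here the three LRU caches are used as bounded "already written?" sets: the stored value
is just the key itself, and only a miss triggers the corresponding INSERT statements
before the key is recorded. That dedup pattern on its own, as a short sketch (names are
illustrative):

from cachetools import LRUCache

seen_dimensions = LRUCache(maxsize=100000)

def maybe_insert_dimension(key, insert):
    """Run `insert()` at most once per key while that key stays in the LRU."""
    if seen_dimensions.get(key) is not None:
        return False                 # written recently; skip the database work
    insert()
    seen_dimensions[key] = key       # value == key: the cache is just a bounded set
    return True
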
コード例 #50
0
    def __init__(self, max_dim):
        """

        :param max_dim:
        """
        self.cache = LRUCache(maxsize=max_dim)
コード例 #51
0
def popitem(self):
    key, val = LRUCache.popitem(self)
    val.Reset()
    return key, val
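
This last override resets the evicted value before the slot is reused, which suits a pool
of heavyweight reusable objects (in backend_z3.py these are presumably z3 objects, but
anything exposing a Reset()-style method fits). A hedged, library-agnostic sketch:

from cachetools import LRUCache

class PoolSlot(object):
    """Stand-in for an expensive reusable object with a Reset() method."""
    def __init__(self):
        self.state = []

    def Reset(self):
        self.state = []              # e.g. clear assertions, buffers, scratch data

class ResettingLRUCache(LRUCache):
    def popitem(self):
        key, val = LRUCache.popitem(self)
        val.Reset()                  # scrub the object on its way out of the cache
        return key, val

pool = ResettingLRUCache(maxsize=8)
pool['query-1'] = PoolSlot()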