Example #1
    def __init__(self):
        """ Cache of alias """
        # TODO: un-hardcode this
        self.alias_cache = LRU(50000)
        """ Cache of EE info """
        self.ee_info_map = None
        """ GRPC clients """
        self.ready_clients = dict()
        """ Cache of classes. TODO: is it used? -> Yes, in StubUtils and ClientObjectLoader"""
        self.local_available_classes = dict()
        """  Heap manager. Since it is abstract it must be initialized by sub-classes. 
        DataClay-Java uses abstract functions to get the field in the proper type (EE or client) 
        due to type-check. Not needed here. """
        self.dataclay_heap_manager = None
        """ Object loader. """
        self.dataclay_object_loader = None
        """  Locker Pool in runtime. This pool is used to provide thread-safe implementations in dataClay. """
        self.locker_pool = LockerPool()
        """ Indicates if runtime was initialized. TODO: check if same in dataclay.api -> NO """
        self.__initialized = False
        """ Indicates volatiles being send - to avoid race-conditions """
        self.volatile_parameters_being_send = set()

        # Local info of thread
        self.thread_local_info = threadLocal
        """ Cache of metadata """
        self.metadata_cache = LRU(50)
Example #2
    def __init__(self, pdffile=None, loadfile=None):
        GObject.GObject.__init__(self)

        self.pdffile = pdffile
        self.loadfile = loadfile
        self.header_text = "HEADER TEXT"
        self._boxes = []
        self._rendered_boxes = LRU(200)
        self._rendered_pages = LRU(5)

        self._box_render_process = GProcess(target=self._box_render_proc,
                                            childcb=self._box_rendered_wakeup)
        self._page_render_process = GProcess(
            target=self._page_render_proc, childcb=self._page_rendered_wakeup)

        # These are only for the main process, to prevent an item from being
        # queued twice.
        self._box_render_queue = []
        self._page_render_queue = []

        self._box_render_pipe_p, self._box_render_pipe_c = multiprocessing.Pipe()
        self._page_render_pipe_p, self._page_render_pipe_c = multiprocessing.Pipe()

        if loadfile:
            self._load_from_file()

        self.document = Poppler.Document.new_from_file(
            'file://' + self.pdffile, None)

        self._box_render_process.start()
        self._page_render_process.start()
Example #3
    def test_callback(self):

        counter = [0]

        first_key = 'a'
        first_value = 1

        def callback(key, value):
            self.assertEqual(key, first_key)
            self.assertEqual(value, first_value)
            counter[0] += 1

        l = LRU(1, callback=callback)
        l[first_key] = first_value
        l['b'] = 1  # test calling the callback

        self.assertEqual(counter[0], 1)
        self.assertEqual(l.keys(), ['b'])

        l['b'] = 2  # doesn't call callback
        self.assertEqual(counter[0], 1)
        self.assertEqual(l.keys(), ['b'])
        self.assertEqual(l.values(), [2])

        l = LRU(1, callback=callback)
        l[first_key] = first_value

        l.set_callback(None)
        l['c'] = 1  # doesn't call callback
        self.assertEqual(counter[0], 1)
        self.assertEqual(l.keys(), ['c'])

        l.set_callback(callback)
        del l['c']  # doesn't call callback
        self.assertEqual(l.keys(), [])
Example #4
    def test_lru(self):
        l = LRU(1)
        l['a'] = 1
        l['a']
        self.assertEqual(l.keys(), ['a'])
        l['b'] = 2
        self.assertEqual(l.keys(), ['b'])

        l = LRU(2)
        l['a'] = 1
        l['b'] = 2
        self.assertEqual(len(l), 2)
        l['a']                  # Testing the first one
        l['c'] = 3
        self.assertEqual(sorted(l.keys()), ['a', 'c'])
        l['c']
        self.assertEqual(sorted(l.keys()), ['a', 'c'])

        l = LRU(3)
        l['a'] = 1
        l['b'] = 2
        l['c'] = 3
        self.assertEqual(len(l), 3)
        l['b']                  # Testing the middle one
        l['d'] = 4
        self.assertEqual(sorted(l.keys()), ['b', 'c', 'd'])
        l['d']                  # Testing the last one
        self.assertEqual(sorted(l.keys()), ['b', 'c', 'd'])
        l['e'] = 5
        self.assertEqual(sorted(l.keys()), ['b', 'd', 'e'])
Example #5
    def __init__(self, db: AtomicDatabaseAPI) -> None:
        self.db = db
        self._block_cache = LRU(BLOCK_CACHE_SIZE)
        self._state_cache = LRU(STATE_CACHE_SIZE)
        self._state_bytes_written = 0

        self._genesis_time, self._genesis_validators_root = \
            self._get_genesis_data()
Example #6
    def __init__(self):
        '''
        '''
        self._views = LRU(50)
        # tile cache - enough for 1 MFOV for 10 parallel users
        self._tiles = LRU(61 * 10)

        self._client_tiles = {}
Example #7
    def __init__(self, config, attr):
        self.attr = attr
        self.api_key = config.register("api_key")
        self.osuapi = osuapi.OsuApi(
            self.api_key(),
            connector=osuapi.AHConnector(
                aiohttp.ClientSession(loop=asyncio.get_event_loop())))
        self._osu_presence_username_cache = LRU(4 << 10)

        self._beatmap_cache = LRU(256)
Example #8
 def __init__(self, integration_matchers: list[BluetoothMatcher]) -> None:
     """Initialize the matcher."""
     self._integration_matchers = integration_matchers
     # Some devices use a random address so we need to use
     # an LRU to avoid memory issues.
     self._matched: MutableMapping[str, IntegrationMatchHistory] = LRU(
         MAX_REMEMBER_ADDRESSES
     )
     self._matched_connectable: MutableMapping[str, IntegrationMatchHistory] = LRU(
         MAX_REMEMBER_ADDRESSES
     )
     self._index = BluetoothMatcherIndex()
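
The comment above carries the whole rationale: devices that randomize their address would grow a plain dict without bound, so the match history is held in an LRU instead. A minimal standalone sketch of that bounded-history idea; the size constant and names below are illustrative, not the project's real ones.

from lru import LRU

MAX_REMEMBER_ADDRESSES = 2048  # illustrative bound, not the original constant

# The least recently seen addresses fall out automatically, so memory stays
# bounded even if every advertisement carries a fresh random address.
matched_history = LRU(MAX_REMEMBER_ADDRESSES)

def remember_match(address, history_entry):
    matched_history[address] = history_entry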
Example #9
 def __init__(self,
              txn,
              tm,
              request,
              container,
              last_tid=-2,
              index_scroll='15m',
              hits_scroll='5m',
              use_tid_query=True):
     self.txn = txn
     self.tm = tm
     self.request = request
     self.container = container
     self.orphaned = set()
     self.missing = set()
     self.out_of_date = set()
     self.utility = getUtility(ICatalogUtility)
     self.migrator = Migrator(self.utility,
                              self.container,
                              full=True,
                              bulk_size=10)
     self.cache = LRU(200)
     self.last_tid = last_tid
     print(f'Last TID: {self.last_tid}')
     self.use_tid_query = use_tid_query
     self.last_zoid = None
     # for state tracking so we get boundaries right
     self.last_result_set = []
     self.index_scroll = index_scroll
     self.hits_scroll = hits_scroll
Example #10
    def __init__(self,
                 cache_path="cache",
                 serializer=PickleCacheSerializer,
                 redis_host="localhost",
                 redis_port=6379,
                 redis_db=0,
                 redis_password="",
                 enabled=True):
        """Connect to cache."""

        self.enabled = enabled
        try:
            if redis_password:
                self.redis = redis.StrictRedis(host=redis_host,
                                               port=redis_port,
                                               db=redis_db,
                                               password=redis_password)
            else:
                self.redis = redis.StrictRedis(host=redis_host,
                                               port=redis_port,
                                               db=redis_db)

            self.redis.get('x')
            logger.info("Cache connected to redis at %s:%s/%s", redis_host,
                        redis_port, redis_db)
        except Exception:
            self.redis = None
            logger.error("Failed to connect to redis at %s:%s/%s", redis_host,
                         redis_port, redis_db)
        self.cache_path = cache_path
        if not os.path.exists(self.cache_path):
            os.makedirs(self.cache_path)
        self.cache = LRU(1000)
        self.serializer = serializer()
Example #11
    def __init__(self, bot, session):
        self.bot = bot
        self.loop = bot.loop
        self.session = session

        self.lock = asyncio.Lock(loop=bot.loop)
        self.cache = LRU(64)
Example #12
    def __init__(self,
                 circle,
                 fcp,
                 total_chunks,
                 totalsize=0,
                 signature=False):
        BaseTask.__init__(self, circle)
        self.circle = circle
        self.fcp = fcp
        self.totalsize = totalsize
        self.signature = signature
        if self.signature:
            self.bfsign = BFsignature(total_chunks)

        # cache
        self.fd_cache = LRU(512)

        # failed
        self.failed = {}

        self.failcnt = 0

        # debug
        self.d = {"rank": "rank %s" % circle.rank}
        self.logger = utils.getLogger(__name__)

        # reduce
        self.vsize = 0

        assert len(circle.workq) == 0

        if self.circle.rank == 0:
            print("\nChecksum verification ...")
Example #13
    def __init__(self, size, type): # the number of items
        self.size = size # actual size of the cache
        self.lru = LRU(size)

        self.hits = 0.0
        self.reqs = 0.0
        self.cache_stack_size = 0 # how much of the cache is occupied
Example #14
    def test_stats(self):
        for size in SIZES:
            l = LRU(size)
            for i in range(size):
                l[i] = str(i)

            self.assertTrue(l.get_stats() == (0, 0))

            val = l[0]
            self.assertTrue(l.get_stats() == (1, 0))

            val = l.get(0, None)
            self.assertTrue(l.get_stats() == (2, 0))

            val = l.get(-1, None)
            self.assertTrue(l.get_stats() == (2, 1))

            try:
                val = l[-1]
            except KeyError:
                pass

            self.assertTrue(l.get_stats() == (2, 2))

            l.clear()
            self.assertTrue(len(l) == 0)
            self.assertTrue(l.get_stats() == (0, 0))
Example #15
    def __init__(self, network: AlexandriaNetworkAPI) -> None:
        self._network = network
        self._node_ad_radius: Dict[NodeID, int] = LRU(8129)  # ~ 0.5 mb

        self._ack_ready = trio.Event()
        self._ping_ready = trio.Event()
        self._pong_ready = trio.Event()
Example #16
    def decorator(func):
        if maxsize is None:
            cache = {}
            get_stats = lambda: (0, 0)
        else:
            cache = LRU(maxsize)
            get_stats = cache.get_stats

        def wrap_and_store(key, coro):
            async def func():
                value = await coro
                cache[key] = value
                return value

            return func()

        def wrap_new(value):
            async def new_coroutine():
                return value

            return new_coroutine()

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            key = make_key(args, kwargs)
            # try/except might be slow if the key is constantly not in the cache.
            # I wonder if it's faster to use cache.get and compare to a sentinel.
            try:
                value = cache[key]
            except KeyError:
                value = func(*args, **kwargs)

                if inspect.isawaitable(value):
                    return wrap_and_store(key, value)

                cache[key] = value
                return value
            else:
                if asyncio.iscoroutinefunction(func):
                    return wrap_new(value)
                return value

        def invalidate(*args, **kwargs):
            # LRU.pop isn't a thing :(
            # Implementation if LRU.pop existed would be much simpler:
            #
            # _sentinel = object()
            # return cache.pop(make_key(args, kwargs), _sentinel) is not _sentinel
            try:
                del cache[make_key(args, kwargs)]
            except KeyError:
                return False
            else:
                return True

        wrapper.cache = cache
        wrapper.get_key = lambda *a, **kw: make_key(a, kw)
        wrapper.invalidate = invalidate
        wrapper.get_stats = get_stats
        return wrapper
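
The comment inside `wrapper` wonders whether `cache.get` plus a sentinel would beat `try/except` when the key is usually absent. A minimal sketch of that alternative lookup, relying only on the `get(key, default)` method that lru-dict's LRU also provides; the helper name is illustrative.

_MISSING = object()  # unique sentinel that can never be a legitimate cached value

def cached_call(cache, key, compute):
    # No exception is raised on a miss, which avoids the try/except cost
    # when misses dominate.
    value = cache.get(key, _MISSING)
    if value is _MISSING:
        value = compute()
        cache[key] = value
    return value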
Example #17
    def __init__(
        self,
        local_private_key: keys.PrivateKey,
        local_node_id: NodeID,
        enr_db: ENRDatabaseAPI,
        outbound_envelope_send_channel: trio.abc.SendChannel[OutboundEnvelope],
        inbound_message_send_channel: trio.abc.SendChannel[AnyInboundMessage],
        session_cache_size: int,
        message_type_registry: MessageTypeRegistry = v51_registry,
        events: EventsAPI = None,
    ) -> None:
        self.local_private_key = local_private_key
        self.local_node_id = local_node_id

        self._enr_db = enr_db
        self._message_type_registry = message_type_registry

        if events is None:
            events = Events()
        self._events = events

        self._sessions = LRU(session_cache_size, callback=self._evict_session)
        self._sessions_by_endpoint = collections.defaultdict(set)

        self._outbound_packet_send_channel = outbound_envelope_send_channel
        self._inbound_message_send_channel = inbound_message_send_channel
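
The `callback=self._evict_session` argument is what keeps `_sessions_by_endpoint` from referencing sessions the LRU has already dropped. The original `_evict_session` body is not shown, so the sketch below is only a guess at the general pattern, with invented names (`SessionPool`, `add`).

import collections

from lru import LRU


class SessionPool:
    def __init__(self, cache_size):
        # lru-dict calls the callback with (key, value) whenever an entry is
        # evicted to make room for a new one.
        self._sessions = LRU(cache_size, callback=self._evict_session)
        self._sessions_by_endpoint = collections.defaultdict(set)

    def add(self, session_id, session, endpoint):
        self._sessions[session_id] = session
        self._sessions_by_endpoint[endpoint].add(session_id)

    def _evict_session(self, session_id, session):
        # Keep the secondary index consistent with the bounded cache.
        for ids in self._sessions_by_endpoint.values():
            ids.discard(session_id)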
Example #18
    def outer(func):
        @functools.wraps(func)
        async def inner(
            cls, query: Union[int, str]
        ):  # Very specific but it works and will work for most get_ methods
            query = _make_cache_key(query)

            for key, value in cache.items():
                if query in key:
                    return value

            val = await func(cls, query)
            if with_name:
                cache[(_make_cache_key(val.name),
                       _make_cache_key(val.id))] = val
            else:
                cache[(_make_cache_key(val.id), )] = val

            return val

        if LRU:
            cache = LRU(maxsize)
        else:
            cache = {}
            warnings.warn(
                "lru-dict is not installed, so the cache will not have a maxsize."
            )

        inner.cache = cache

        return inner
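
The `if LRU:` branch above implies that `LRU` was imported with a fallback, so the decorator still works (just without a size bound) when lru-dict is missing. A minimal sketch of that optional-import pattern, assuming the surrounding module does something along these lines.

import warnings

try:
    from lru import LRU  # provided by the lru-dict package
except ImportError:      # optional dependency not installed
    LRU = None

def make_cache(maxsize=1024):
    """Return a bounded LRU if available, otherwise a plain dict."""
    if LRU:
        return LRU(maxsize)
    warnings.warn(
        "lru-dict is not installed, so the cache will not have a maxsize.")
    return {}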
Example #19
 def test_add_multiple(self):
     for size in SIZES:
         l = LRU(size)
         for i in range(size):
             l[i] = str(i)
         l[size] = str(size)
         self._check_kvi(range(size,0,-1), l)
Example #20
    def __init__(self, start=start_default, end=end_default):
        # Midnight in UTC for each trading day.

        # In pandas 0.18.1, pandas calls into its own code here in a way that
        # fires a warning. The calling code in pandas tries to suppress the
        # warning, but does so incorrectly, causing it to bubble out here.
        # Actually catch and suppress the warning here:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            _all_days = date_range(start, end, freq=self.day, tz='UTC')

        # `DatetimeIndex`s of standard opens/closes for each day.
        self._opens = days_at_time(_all_days, self.open_time, self.tz,
                                   self.open_offset)
        self._closes = days_at_time(
            _all_days, self.close_time, self.tz, self.close_offset
        )

        # `DatetimeIndex`s of nonstandard opens/closes
        _special_opens = self._calculate_special_opens(start, end)
        _special_closes = self._calculate_special_closes(start, end)

        # Overwrite the special opens and closes on top of the standard ones.
        _overwrite_special_dates(_all_days, self._opens, _special_opens)
        _overwrite_special_dates(_all_days, self._closes, _special_closes)

        # In pandas 0.16.1 _opens and _closes will lose their timezone
        # information. This looks like it has been resolved in 0.17.1.
        # http://pandas.pydata.org/pandas-docs/stable/whatsnew.html#datetime-with-tz  # noqa
        self.schedule = DataFrame(
            index=_all_days,
            columns=['market_open', 'market_close'],
            data={
                'market_open': self._opens,
                'market_close': self._closes,
            },
            dtype='datetime64[ns]',
        )

        # Simple cache to avoid recalculating the same minute -> session in
        # "next" mode. Analysis of current zipline code paths show that
        # `minute_to_session_label` is often called consecutively with the same
        # inputs.
        self._minute_to_session_label_cache = LRU(1)

        self.market_opens_nanos = self.schedule.market_open.values.\
            astype(np.int64)

        self.market_closes_nanos = self.schedule.market_close.values.\
            astype(np.int64)

        self._trading_minutes_nanos = self.all_minutes.values.\
            astype(np.int64)

        self.first_trading_session = _all_days[0]
        self.last_trading_session = _all_days[-1]

        self._early_closes = pd.DatetimeIndex(
            _special_closes.map(self.minute_to_session_label)
        )
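
`LRU(1)` above is a deliberately tiny memo: `minute_to_session_label` tends to be called many times in a row with the same minute, so remembering just the last answer already removes most of the recomputation. A standalone sketch of that single-entry memo; the function names are illustrative.

from lru import LRU

_last_session = LRU(1)  # remembers only the most recent minute -> session pair

def minute_to_session(minute, compute_session):
    # Consecutive calls with the same minute hit the single cached entry;
    # a new minute simply overwrites it.
    if minute in _last_session:
        return _last_session[minute]
    session = compute_session(minute)
    _last_session[minute] = session
    return session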
Example #21
    def __init__(self,
                 datadir,
                 label_file,
                 batch_size=4,
                 n_frame=32,
                 crop_size=112,
                 scale_size=128,
                 train=True,
                 lru_buffer=120,
                 gpu_id=0):
        super(MeituClipBatchIter, self).__init__(batch_size)
        self.datadir = datadir
        self.label_file = label_file
        self.batch_size = batch_size
        self.n_frame = n_frame
        self.crop_size = crop_size
        self.scale_size = scale_size
        self.train = train
        self.max_label = 0
        self.clip_lst = []
        self.load_data()

        def evicted(k, v):
            print('pop shape', k)
            del v

        self.nvvl_loader_dict = LRU(lru_buffer, evicted)
        self.gpu_id = gpu_id
        self.data_size = len(self.clip_lst)
        self.process_pool = multiprocessing.Pool(processes=10)
Example #22
def cached_mnubo_object_exists(device_id):
    """ Method to wrap the object existence checking in a cached object
    :param device_id: The device id of the object
    :return: True if the object exists, False if it doesn't
    """
    global global_cache
    global config
    now = int(time.time())

    if not isinstance(global_cache, LRU):
        if not isinstance(config['cache_max_entries'], int):
            raise ValueError('cache_max_entries must be an integer')
        global_cache = LRU(config['cache_max_entries'])

    if not isinstance(config['cache_validity_period'], int):
        raise ValueError('cache_validity_period must be an integer')

    found = global_cache.get(device_id, None)
    if found and found > now:
        rc = True
    else:
        rc = mnubo_object_exists(device_id)
        if rc:
            global_cache[device_id] = now + config['cache_validity_period']
    return rc
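
Restated generically, the function above layers a validity period on top of the LRU: the cached values are expiry timestamps, and a hit only counts while it is still fresh. A minimal sketch of that pattern with illustrative names and defaults; it is not the original module's API.

import time

from lru import LRU

def make_cached_exists(exists, max_entries=10000, validity_period=300):
    """Wrap a boolean `exists(key)` check so positive answers are cached."""
    cache = LRU(max_entries)  # key -> epoch second after which the entry is stale

    def cached_exists(key):
        now = int(time.time())
        expiry = cache.get(key, None)
        if expiry and expiry > now:   # fresh positive result
            return True
        found = exists(key)
        if found:                     # negative results are never cached
            cache[key] = now + validity_period
        return found

    return cached_exists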
Example #23
 def __init__(self,
              trading_calendar,
              reader,
              equity_adjustment_reader,
              asset_finder,
              roll_finders=None,
              sid_cache_size=1000,
              prefetch_length=0):
     self.trading_calendar = trading_calendar
     self._asset_finder = asset_finder
     self._reader = reader
     self._adjustment_readers = {}
     if equity_adjustment_reader is not None:
         self._adjustment_readers[Equity] = \
             HistoryCompatibleUSEquityAdjustmentReader(
                 equity_adjustment_reader)
     if roll_finders:
         self._adjustment_readers[ContinuousFuture] =\
             ContinuousFutureAdjustmentReader(trading_calendar,
                                              asset_finder,
                                              reader,
                                              roll_finders,
                                              self._frequency)
     self._window_blocks = {
         field: ExpiringCache(LRU(sid_cache_size))
         for field in self.FIELDS
     }
     self._prefetch_length = prefetch_length
Example #24
class BaseTreeherderSource(DataSource, ABC):
    lock = threading.Lock()
    groups_cache: Dict[str, List[str]] = LRU(5000)

    @abstractmethod
    def get_push_test_groups(self, branch: str,
                             rev: str) -> Dict[str, List[str]]:
        ...

    def run_test_task_groups(self, branch, rev, task):
        # Use a lock since push.py invokes this across many threads (which is
        # useful for the 'errorsummary' data source, but not here). This ensures
        # we don't make more than one request to Treeherder.
        with self.lock:
            if task.id not in self.groups_cache:
                self.groups_cache.update(self.get_push_test_groups(
                    branch, rev))

        try:
            # TODO: Once https://github.com/mozilla/mozci/issues/662 is fixed, we should return the actual duration instead of None.
            return {
                group: (status, None)
                for group, status in self.groups_cache.pop(task.id).items()
            }
        except KeyError:
            raise ContractNotFilled(self.name, "test_task_groups",
                                    "groups are missing")
Example #25
 def test_unhashable(self):
     l = LRU(1)
     self.assertRaises(TypeError, lambda: l[{'a': 'b'}])
     with self.assertRaises(TypeError):
         l[['1']] = '2'
     with self.assertRaises(TypeError):
         del l[{'1': '1'}]
Example #26
    def __init__(self,
                 pop_size=5000,
                 size_next_gen=300,
                 lucky_per=0.10,
                 unique_pop=False,
                 add_naive=False,
                 **kwargs):
        """
        Create a genetic programming class that will help solve the circuit minimization problem

        :param pop_size: population max size
        :param size_next_gen: num of offspring in each generation
        :param kwargs: passed on to Util
        """
        self.pop_size = pop_size
        self.size_next_gen = size_next_gen

        self.size_best_parents = (1 - lucky_per) * self.size_next_gen
        if self.size_best_parents % 2 == 1: self.size_best_parents += 1
        self.size_lucky_parents = self.size_next_gen - self.size_best_parents

        self.unique_pop = unique_pop
        self.add_naive = add_naive
        self.util = Util(**kwargs)
        self.cache = LRU(10000)
Example #27
    def __init__(self,
                 db: AtomicDatabaseAPI,
                 state_root: Hash32 = BLANK_ROOT_HASH) -> None:
        r"""
        Internal implementation details (subject to rapid change):
        Database entries go through several pipes, like so...

        .. code::

            db > _batchdb ---------------------------> _journaldb ----------------> code lookups
             \
              -> _batchtrie -> _trie -> _trie_cache -> _journaltrie --------------> account lookups

        Journaling sequesters writes at the _journal* attrs ^, until persist is called.

        _batchtrie enables us to prune all trie changes while building
        state,  without deleting old trie roots.

        _batchdb and _batchtrie together enable us to make the state root,
        without saving everything to the database.

        _journaldb is a journaling of the keys and values used to store
        code and account storage.

        _trie is a hash-trie, used to generate the state root

        _trie_cache is a cache tied to the state root of the trie. It
        is important that this cache is checked *after* looking for
        the key in _journaltrie, because the cache is only invalidated
        after a state root change.

        _journaltrie is a journaling of the accounts (an address->rlp mapping,
        rather than the nodes stored by the trie). This enables
        a squashing of all account changes before pushing them into the trie.

        .. NOTE:: StorageDB works similarly

        AccountDB synchronizes the snapshot/revert/persist of both of the
        journals.
        """
        self._raw_store_db = KeyAccessLoggerAtomicDB(db,
                                                     log_missing_keys=False)
        self._batchdb = BatchDB(self._raw_store_db)
        self._batchtrie = BatchDB(self._raw_store_db,
                                  read_through_deletes=True)
        self._journaldb = JournalDB(self._batchdb)
        self._trie = HashTrie(
            HexaryTrie(self._batchtrie, state_root, prune=True))
        self._trie_logger = KeyAccessLoggerDB(self._trie,
                                              log_missing_keys=False)
        self._trie_cache = CacheDB(self._trie_logger)
        self._journaltrie = JournalDB(self._trie_cache)
        self._account_cache = LRU(2048)
        self._account_stores: Dict[Address, AccountStorageDatabaseAPI] = {}
        self._dirty_accounts: Set[Address] = set()
        self._root_hash_at_last_persist = state_root
        self._accessed_accounts: Set[Address] = set()
        self._accessed_bytecodes: Set[Address] = set()
        # Track whether an account or slot have been accessed during a given transaction:
        self._reset_access_counters()
Example #28
    def __init__(
        self,
        model_type: t.Type[T],
        order_by: QueryableAttribute,
        *,
        columns: t.Optional[t.Sequence[ConvertibleColumn]] = None,
        page_size: int = 64,
        auto_commit: bool = True,
    ):
        super().__init__()
        self._model_type = model_type
        self._order_by_column = order_by
        self._page_size = page_size
        self._auto_commit = auto_commit

        if columns is not None:
            self._columns = columns

        if not self._columns:
            raise ValueError('Specify at least one column')

        self._header_names = tuple(' '.join(v.capitalize()
                                            for v in c.column.name.split('_'))
                                   for c in self._columns)

        self._cache = LRU(int(page_size * 2))
        self._cached_size = None
Example #29
 def test_contains(self):
     for size in SIZES:
         l = LRU(size)
         for i in range(size):
             l[i] = str(i)
         for i in range(size):
             self.assertTrue(i in l)
Example #30
def lru(size):
    # `trace` (the sequence of requested keys) and `dict` (key -> object size)
    # are globals defined elsewhere in the original script.
    hitCounter = missCounter = 0
    # Size is given in GB; convert to bytes.
    size = spaceLeft = int(size) * 1024 * 1024 * 1024
    hashmap = {}
    # The LRU only tracks recency; the byte budget is enforced manually below.
    cache = LRU(size)
    for key in trace:
        if key in hashmap:
            hitCounter += int(dict[key])
            cache[key] = dict[key]
        else:
            missCounter += int(dict[key])
            if int(dict[key]) <= spaceLeft:
                # Miss, no eviction needed.
                cache[key] = dict[key]
                hashmap[key] = dict[key]
                spaceLeft -= int(dict[key])
            else:
                # Miss - evict least recently used items until the new one fits.
                while int(dict[key]) > spaceLeft:
                    evicted = cache.peek_last_item()[0]
                    spaceLeft += int(hashmap[evicted])
                    del cache[evicted]
                    del hashmap[evicted]
                hashmap[key] = dict[key]
                cache[key] = dict[key]
                spaceLeft -= int(dict[key])
    now = datetime.datetime.now()
    print("Hit_Counter:" + str(hitCounter) + ",Miss_Counter:" + str(missCounter))
    print("Miss_Ratio:" + str(float(missCounter) / float(hitCounter + missCounter)))
    logging.info(str(now)[:19] + "\tHit_Counter:" + str(hitCounter) +
                 ",Miss_Counter:" + str(missCounter))
    logging.info(str(now)[:19] + "\tMiss_Ratio:" +
                 str(float(missCounter) / float(hitCounter + missCounter)))