def setup_nodes(g):
    for cdn in cdns:
        g.node[cdn]["storage"] = CDNStorage()
        g.node[cdn]["color"] = "#ff0000"
        g.node[cdn]["capacity"] = cdn_capacity
        g.node[cdn]["size"] = 50
        g.node[cdn]["type"] = "CDN"
    for vcdn_node in vcdns:
        g.node[vcdn_node]["storage"] = pylru.lrucache(vcdn_cache_size)
        #g.node[vcdn_node]["storage"] = CDNStorage()
        g.node[vcdn_node]["capacity"] = vcdn_capacity
        g.node[vcdn_node]["type"] = "VCDN"
        g.node[vcdn_node]["color"] = "#00ff00"
        g.node[vcdn_node]["size"] = 20
    for mucdn_node in mucdns:
        g.node[mucdn_node]["storage"] = pylru.lrucache(mucdn_cache_size)
        #g.node[mucdn_node]["storage"] = CDNStorage()
        g.node[mucdn_node]["capacity"] = mucdn_capacity
        g.node[mucdn_node]["size"] = 10
        g.node[mucdn_node]["type"] = "MUCDN"
        g.node[mucdn_node]["color"] = "#aaaaff"
    for consumer in consumers:
        g.node[consumer]["color"] = "#000000"
        g.node[consumer]["size"] = 5
        g.node[consumer]["type"] = "CONSUMER"  # fixed: was mistakenly indexed with mucdn_node
def __init__(self, left, right, target, max_size=128):
    super().__init__()
    self.__cache_left = pylru.lrucache(size=max_size)
    self.__cache_right = pylru.lrucache(size=max_size)
    self.__left = left
    self.__right = right
    self.__target = target
    self.__max_size = max_size
def __init__(self, available_registers, spill_memory_base_label, spill_memory_size, word_size=4):
    """ The constructor.

    Creates a RegisterUseTable with its allowed set of registers and available spill memory.
    Arbitrarily sized words are allowed, but (sensible) 4 byte words are used by default.

    :param available_registers: A sequence (preferably set or tuple) of the registers the table
        is allowed to use without reservation.
    :param spill_memory_base_label: The MIPS label where the spill memory is located.
    :param spill_memory_size: The amount of spill memory available in bytes.
    :param word_size: The size of a word (register capacity) in bytes.
    :return: A constructed RegisterUseTable object.
    """
    self.spill_mem_base_label = spill_memory_base_label
    self.spill_mem_size = spill_memory_size

    self.available_registers = list(reversed(available_registers))
    self.available_spill_memory_words = list(reversed(range(0, spill_memory_size, word_size)))

    self.lru_cache = pylru.lrucache(size=len(available_registers), callback=self._spill)
    self.spilled_registers = {}

    self._spill_code = []
    self._freed_physical_register = None
def __init__(self):
    self.table_array = [
        "aka_name", "aka_title", "cast_info", "char_name", "comp_cast_type",
        "company_name", "company_type", "complete_cast", "employee", "info_type",
        "keyword", "kind_type", "link_type", "movie_companies", "movie_info",
        "movie_info_idx", "movie_keyword", "movie_link", "name", "person_info",
        "role_type", "title"
    ]
    self.query_array = [
        f for f in listdir("src/dopamine/envs/env/job")
        if ((isfile(join("src/dopamine/envs/env/job", f))) and re.match("^[0-9]+.*sql$", f))
    ]
    self.action_to_table = dict()
    self.CREATE_VIEW_ACTION = 1
    print(self.query_array)
    super(DatabaseEnv, self).__init__()
    self.env = self

    # Number of actions that the database can take
    # { Create View, Do nothing }
    self.N_DISCRETE_ACTIONS = 2

    # Form a dictionary that gives a number (between 0 and 210) to each pair of tables.
    temp = [list(x) for x in itertools.combinations(self.table_array, 2)]
    for i in range(0, len(temp)):
        self.action_to_table[i] = temp[i]

    # Number of tables in the database being considered
    self.N_TABLES = 21  # args['n_tables']
    self.N_JOIN_COMBINATIONS = int(
        math.factorial(self.N_TABLES) /
        (math.factorial(self.N_TABLES - 2) * math.factorial(2)))

    # Maximum number of steps in an episode
    self.N_MAX_STEPS = 100  # args['max_steps']
    self.N_MAX_VIEWS = 15
    #N_MAX_JOINS = 10
    #Max mat. views
    self.current_candidate = 0

    # Define action and observation space
    # They must be gym.spaces objects
    self.action_space = spaces.Discrete(self.N_DISCRETE_ACTIONS)
    self.observation_space = spaces.Box(low=0, high=1, shape=(
        2,
        self.N_JOIN_COMBINATIONS,
    ), dtype=np.uint8)

    # Capture information about episode to replay the same
    # on the real database
    self.history = {}
    self.current_step = 0
    self.max_steps = self.N_MAX_STEPS
    self.current_views = []
    self.candidate_cost = 100
    self.obs_space = np.zeros((2, self.N_JOIN_COMBINATIONS))  # fixed: np.zeros takes a shape tuple
    self.cache = pylru.lrucache(size)
    self.candidate_pool = Queue.Queue()
    self._candidate_picker()
def LoadCacheFromFile(filename):
    """ This function returns a pylru.lrucache object containing (key, value) pairs.
    The keys are strings containing the URLs of submissions. The values are just
    dummy values to be ignored.

    We try to read the file whose name is given as an argument. If we can, we return
    a pylru.lrucache object containing its contents, with the top line of the file
    being the most recently used entry and the last line of the file being the least
    recently used entry. If we cannot read the file, we return an empty
    pylru.lrucache object.

    This function should return a cache containing the same state as the cache last
    passed to StoreCacheToFile().
    """
    cache = pylru.lrucache(MAX_SUBMISSIONS)
    try:
        f = open(filename)
        contents = f.readlines()
        f.close()
    except:
        # Can't read the file; give up and start from scratch.
        print "WARNING: Unable to load cache. Starting over with an empty cache."
        return cache
    # The most recently used line is first in the file, which means it should be
    # inserted into the cache last.
    for line in reversed(contents):
        cache[line.strip()] = True  # Dummy value
    return cache
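# The docstring above refers to a companion StoreCacheToFile(), which is not part of
# this snippet. A minimal sketch of what it could look like (an assumption, not the
# original author's code), relying on the fact that iterating a pylru.lrucache yields
# keys from most to least recently used -- exactly the MRU-first file layout that
# LoadCacheFromFile() expects:
def StoreCacheToFile(cache, filename):
    with open(filename, "w") as f:
        for key in cache:  # MRU first, LRU last
            f.write(key + "\n")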
def __init__(self, basefolder, create=False):
    """
    Initializes a ``LocalFileStorage`` instance under the given ``basefolder``, creating the
    necessary folder if necessary and ``create`` is set to ``True``.

    :param string basefolder: the path to the folder under which to create the storage
    :param bool create: ``True`` if the folder should be created if it doesn't exist yet, ``False`` otherwise
    """
    self._logger = logging.getLogger(__name__)
    self.basefolder = os.path.realpath(os.path.abspath(basefolder))
    if not os.path.exists(self.basefolder) and create:
        os.makedirs(self.basefolder)
    if not os.path.exists(self.basefolder) or not os.path.isdir(self.basefolder):
        raise RuntimeError("{basefolder} is not a valid directory".format(**locals()))

    import threading
    self._metadata_lock = threading.Lock()

    self._metadata_cache = pylru.lrucache(10)

    self._old_metadata = None
    self._initialize_metadata()
def register_cache(cls, key, size=512):
    """Create a new cache for this entity, keyed by attribute.

    Currently these caches are not searched, they merely serve as another reference
    to keep their entries in _instances alive. There is support for caching UIDs and
    Attribute values when they change; if you want to register anything else (such as
    bare properties not tied to an Attribute) then you'll need to make sure to update
    the cache yourself when their values change.

    :param str key: The attribute name to use as a key
    :param int size: The size of the cache to create
    :returns None:
    :raises AlreadyExists: If a cache already exists for `key`

    """
    if key in cls._caches:
        raise AlreadyExists(key, cls._caches[key])
    cache = lrucache(size, cls._cache_eject_callback)
    cls._caches[key] = cache
    # Fill the cache with any existing entity data.
    for entity in cls._instances.values():
        attr_value = getattr(entity, key)
        if attr_value not in (None, Unset):
            if attr_value not in cache:
                cache[attr_value] = {entity}
            else:
                cache[attr_value].add(entity)
def load_cache(self):
    '''Create a pylru.lrucache object prepopulated with saved data.'''
    cache = pylru.lrucache(self.size)
    # There should be a more efficient way to do this, by hooking into
    # the json module directly.
    self._populate_cache_from_file(self.filename, cache)
    return cache
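# The helper called above is not shown in this snippet. One possible implementation,
# assuming the file holds a JSON array of [key, value] pairs saved from least to most
# recently used (both the helper body and that file format are assumptions, not taken
# from the original source):
def _populate_cache_from_file(self, filename, cache):
    import json
    try:
        with open(filename) as f:
            items = json.load(f)
    except (IOError, ValueError):
        return  # no saved data; leave the cache empty
    for key, value in items:
        cache[key] = value  # inserting LRU-first leaves the last item as most recently used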
def run(self):
    config_file = open(os.path.dirname(os.path.abspath(__file__)) + '/config.yaml')
    config = yaml.safe_load(config_file)
    config_file.close()
    name = 'yisa_face_info_to_yitu'
    logging.basicConfig(level=logging.DEBUG)
    handler = RotatingFileHandler('/var/log/%s.log' % name, maxBytes=134217728, backupCount=7)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(lineno)d - %(message)s')
    handler.setFormatter(formatter)
    logging.getLogger().addHandler(handler)
    requests_log = logging.getLogger("requests.packages.urllib3")
    requests_log.setLevel(logging.ERROR)
    logging.warning('Starting [%s]', name)
    logging.warning('Main thread pid [%d]', os.getpid())
    self.cache = pylru.lrucache(5000)
    process = []
    #cpu_count = multiprocessing.cpu_count()
    #cpu_count = min(config['worker']['max_process'], cpu_count)
    cpu_count = 4
    logging.warning('CPU thread count [%d]', cpu_count)
    for x in xrange(0, cpu_count):
        process.append(Dispatcher(config, cpu_count))
    for p in process:
        p.start()
    try:
        for p in process:
            p.join()
    except KeyboardInterrupt:
        for p in process:
            p.terminate()
        logging.warning('Ctrl+C received, terminating')
def __init__(self, memory_blocks=10):
    """
    Initialize the LRU storage with the maximum number of key/value pairs
    you want the storage to hold.

    :param memory_blocks: Integer size of the storage.
    """
    self.storage = lrucache(memory_blocks)
def start():
    PORXY_DNS.deq_cache = Queue(maxsize=deq_size) if deq_size > 0 else Queue()
    PORXY_DNS.dns_cache = pylru.lrucache(lru_size)
    gevent.spawn(_init_cache_queue)
    print 'Start DNS server at %s:%d\n' % (server_ip, server_port)
    dns_server = SocketServer.UDPServer((server_ip, server_port), DNSHandler)
    dns_server.serve_forever()
def __init__(self, file_cache_count=200, threshold=1000, buckets=60):
    self.file_cache = pylru.lrucache(file_cache_count)
    self.buckets = buckets
    self.threshold = threshold
    self.event_index = 0
    self.event_counts = {}
    self.event_total = 0
def __init__(self, basefolder, create=False):
    """
    Initializes a ``LocalFileStorage`` instance under the given ``basefolder``, creating the
    necessary folder if necessary and ``create`` is set to ``True``.

    :param string basefolder: the path to the folder under which to create the storage
    :param bool create: ``True`` if the folder should be created if it doesn't exist yet, ``False`` otherwise
    """
    self._logger = logging.getLogger(__name__)
    self.basefolder = os.path.realpath(os.path.abspath(basefolder))
    if not os.path.exists(self.basefolder) and create:
        os.makedirs(self.basefolder)
    if not os.path.exists(self.basefolder) or not os.path.isdir(self.basefolder):
        raise RuntimeError("{basefolder} is not a valid directory".format(**locals()))

    import threading
    self._metadata_lock = threading.Lock()

    self._metadata_cache = pylru.lrucache(10)

    from slugify import Slugify
    self._slugify = Slugify()
    self._slugify.safe_chars = "-_.() "

    self._old_metadata = None
    self._initialize_metadata()
def __init__(self, env):
    '''Initialize everything that doesn't require the event loop.'''
    super().__init__(env)
    if aiorpcx_version < self.AIORPCX_MIN:
        raise RuntimeError('ElectrumX requires aiorpcX >= '
                           f'{version_string(self.AIORPCX_MIN)}')
    self.logger.info(f'software version: {self.VERSION}')
    self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}')
    self.logger.info(f'supported protocol versions: '
                     f'{self.PROTOCOL_MIN}-{self.PROTOCOL_MAX}')
    self.logger.info(f'event loop policy: {env.loop_policy}')
    self.coin = env.coin
    self.servers = {}
    self.tasks = TaskSet()
    self.sessions = set()
    self.cur_group = SessionGroup(0)
    self.txs_sent = 0
    self.next_log_sessions = 0
    self.state = self.CATCHING_UP
    self.max_sessions = env.max_sessions
    self.low_watermark = self.max_sessions * 19 // 20
    self.max_subs = env.max_subs
    # Cache some idea of room to avoid recounting on each subscription
    self.subs_room = 0
    self.next_stale_check = 0
    self.history_cache = pylru.lrucache(256)
    self.header_cache = pylru.lrucache(8)
    self.cache_height = 0
    self.cache_mn_height = 0
    self.mn_cache = pylru.lrucache(256)
    env.max_send = max(350000, env.max_send)
    # Set up the RPC request handlers
    cmds = ('add_peer daemon_url disconnect getinfo groups log peers '
            'reorg sessions stop'.split())
    self.rpc_handlers = {cmd: getattr(self, 'rpc_' + cmd) for cmd in cmds}
    self.loop = asyncio.get_event_loop()
    self.executor = ThreadPoolExecutor()
    self.loop.set_default_executor(self.executor)
    # The complex objects. Note PeerManager references self.loop (ugh)
    self.daemon = self.coin.DAEMON(env)
    self.bp = self.coin.BLOCK_PROCESSOR(env, self, self.daemon)
    self.mempool = MemPool(self.bp, self)
    self.peer_mgr = PeerManager(env, self)
def __init__(self, size=10):
    """
    :param size: int, size of the cache; should match the actual number of concurrent
        users, as we should only assign one key-value pair per user
    :return:
    """
    self.cache = pylru.lrucache(size)
    self.size = size
def lrucache(size):
    try:
        #raise ImportError("no pylru")
        import pylru
        return pylru.lrucache(size)
    except ImportError:
        warnings.warn("pylru not available; using simple cache with no size limit")
        return {}
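# A quick usage sketch for the factory above (the call site and names here are
# illustrative, not from the original code): callers get either a bounded pylru
# cache or a plain dict, and can use both the same way via item access.
cache = lrucache(128)
cache["answer"] = 42          # works for both pylru.lrucache and the dict fallback
if "answer" in cache:
    print(cache["answer"])    # 42; with pylru, entries are evicted once 128 items are exceeded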
def __init__(self, cuda=True):
    self.net = self.load_net()
    self.cuda = cuda
    if self.cuda:
        self.net = self.net.cuda()
    self.net.eval()
    self.cache = pylru.lrucache(CACHE)
    self.prefetch = {}
def __init__(self, options):
    """Initializes an LruBackend.

    Args:
        options: a dictionary that contains configuration options.
    """
    capacity = options["capacity"] if "capacity" in options else 200
    self._cache = pylru.lrucache(capacity)
def __init__(self, size=1024):
    try:
        import pylru
    except ImportError:
        raise ImportError("Could not find pylru. This package is " +
                          "required when using ores.score_caches.LRU.")
    self.lru = pylru.lrucache(size)
def __init__(self, config):
    self.conf = config
    self.lru_size = int(config.GetItem("lru_size"))
    # LRU cache: least-recently-used entries are evicted first
    self.dns_cache = pylru.lrucache(self.lru_size)
    # backend
    self.bktype = config.GetItem("backend")
    self.bkend = backends.CreateBackend(self.conf, self.bktype)
def __init__(self, root, cache_size=1000, cached_max_size=80000):
    self.root = root
    self.cache = pylru.lrucache(cache_size)
    self.cached_max_size = cached_max_size
    self.incoming_dir = os.path.join(self.root, 'incoming')
    ensure_directory(self.incoming_dir)
    self.in_db = Dbf(incoming_meta, self.incoming_dir + '.db')
    self.in_db.ensure_db()
def __init__(self, config=None):
    self.config = config or {}
    self.db: Database = self.config.get('db') or Database(
        os.path.join(self.path, "blockchain.db")
    )
    self.db.ledger = self
    self.headers: Headers = self.config.get('headers') or self.headers_class(
        os.path.join(self.path, "headers")
    )
    self.headers.checkpoints = self.checkpoints
    self.network: Network = self.config.get('network') or Network(self)
    self.network.on_header.listen(self.receive_header)
    self.network.on_status.listen(self.process_status_update)
    self.network.on_connected.listen(self.join_network)
    self.accounts = []
    self.fee_per_byte: int = self.config.get('fee_per_byte', self.default_fee_per_byte)

    self._on_transaction_controller = StreamController()
    self.on_transaction = self._on_transaction_controller.stream
    self.on_transaction.listen(
        lambda e: log.debug(
            '(%s) on_transaction: address=%s, height=%s, is_verified=%s, tx.id=%s',
            self.get_id(), e.address, e.tx.height, e.tx.is_verified, e.tx.id
        )
    )

    self._on_address_controller = StreamController()
    self.on_address = self._on_address_controller.stream
    self.on_address.listen(
        lambda e: log.info('(%s) on_address: %s', self.get_id(), e.addresses)
    )

    self._on_header_controller = StreamController()
    self.on_header = self._on_header_controller.stream
    self.on_header.listen(
        lambda change: log.info(
            '%s: added %s header blocks, final height %s',
            self.get_id(), change, self.headers.height
        )
    )
    self._download_height = 0

    self._on_ready_controller = StreamController()
    self.on_ready = self._on_ready_controller.stream

    self._tx_cache = pylru.lrucache(self.config.get("tx_cache_size", 100_000))
    self._update_tasks = TaskGroup()
    self._other_tasks = TaskGroup()  # that we don't need to start
    self._utxo_reservation_lock = asyncio.Lock()
    self._header_processing_lock = asyncio.Lock()
    self._address_update_locks: DefaultDict[str, asyncio.Lock] = defaultdict(asyncio.Lock)

    self.coin_selection_strategy = None
    self._known_addresses_out_of_sync = set()

    self.fee_per_name_char = self.config.get('fee_per_name_char', self.default_fee_per_name_char)
    self._balance_cache = pylru.lrucache(100000)
def __init__(self, root="::", bitlen=0, max_nodes=16):
    self.max_nodes = max_nodes - 2
    self.free_nodes = self.max_nodes
    self.maxbits = 128
    self.packet_count = 0
    self.cache = pylru.lrucache(self.max_nodes)
    self.last_leaf_cache = self.cache.head.prev
    self._init_lru()
    self._init_head(root, bitlen)
def __init__(self, repo, git_dir, template_env):
    """Initialisation."""
    if not os.path.isdir(git_dir):
        Git().clone(repo, git_dir)
    Git(git_dir).fetch()  # this introduces a slight delay if done in index. May be acceptable
    self.git_dir = git_dir
    self.fs_lock = threading.Lock()
    self.tag_cache = pylru.lrucache(50)
    self.template = template_env.get_template("html/gittags.html")
def main():
    print "local_addr:%s dns:%s" % (local_addr, dns)
    size = 1024
    global cache
    cache = pylru.lrucache(size)
    server = ThreadingUDPServer((local_addr, 53), ThreadedUDPRequestHandler)
    server.serve_forever()
def __init__(self, via=None):
    self.via = via
    self.removed = None

    def callback(key, value):
        self.removed = (key, value)

    import pylru
    self.addr2client = pylru.lrucache(256, callback)
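# Several snippets here (this one and the UDP relay constructors further down) pass a
# callback as the second argument to pylru.lrucache; pylru invokes it with the evicted
# (key, value) pair whenever an entry is pushed out. A minimal standalone sketch of that
# behavior (the names `evicted` and `on_evict` are illustrative, not from the original):
import pylru

evicted = []

def on_evict(key, value):
    # called by pylru when an entry falls off the LRU end
    evicted.append((key, value))

cache = pylru.lrucache(2, on_evict)
cache["a"] = 1
cache["b"] = 2
cache["c"] = 3        # capacity is 2, so "a" is evicted here
print(evicted)        # [('a', 1)]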
def __init__(self, env):
    '''Initialize everything that doesn't require the event loop.'''
    super().__init__(env)
    if _version < (0, 5, 5):
        raise RuntimeError('ElectrumX requires aiorpcX 0.5.5')
    self.logger.info(f'software version: {self.VERSION}')
    self.logger.info(f'supported protocol versions: '
                     f'{self.PROTOCOL_MIN}-{self.PROTOCOL_MAX}')
    self.logger.info(f'event loop policy: {env.loop_policy}')
    # roughly -> {'NAME': coin_name, 'NET': network, 'TX_COUNT': '', 'TX_COUNT_HEIGHT': '', 'TX_PER_BLOCK': '', REORG_LIMIT: ''}
    self.coin = env.coin
    self.servers = {}
    self.tasks = TaskSet()  # TODO: TaskSet?
    self.sessions = set()  # TODO: set?
    self.cur_group = SessionGroup(0)  # {gid: 0, semaphore: class [limits concurrency to 20]}
    self.txs_sent = 0
    self.next_log_sessions = 0
    self.state = self.CATCHING_UP  # 0
    self.max_sessions = env.max_sessions  # the smaller of the session limits from the environment and the config file
    self.low_watermark = self.max_sessions * 19 // 20
    self.max_subs = env.max_subs
    # Cache some idea of room to avoid recounting on each subscription
    self.subs_room = 0
    self.next_stale_check = 0
    self.history_cache = pylru.lrucache(256)
    self.header_cache = pylru.lrucache(8)
    self.cache_height = 0
    env.max_send = max(350000, env.max_send)
    # Set up the RPC request handlers
    cmds = ('add_peer daemon_url disconnect getinfo groups log peers reorg '
            'sessions stop'.split())
    self.rpc_handlers = {cmd: getattr(self, 'rpc_' + cmd) for cmd in cmds}
    self.loop = asyncio.get_event_loop()
    self.executor = ThreadPoolExecutor()
    self.loop.set_default_executor(self.executor)
    # The complex objects. Note PeerManager references self.loop (ugh)
    self.daemon = self.coin.DAEMON(env)
    self.bp = self.coin.BLOCK_PROCESSOR(env, self, self.daemon)
    self.mempool = MemPool(self.bp, self)
    self.peer_mgr = PeerManager(env, self)
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    settings = get_project_settings()
    topic = settings.get('SNIPPETSPIDER_URL_TOPIC')
    bs = settings.get('ONEURL_KAFKA_BOOTSTRAP')
    grp_id = settings.get('SNIPPETSPIDER_CONSUMER_GROUP')
    self.logger.info("Reading URLs from {}".format(topic))
    self.logger.info("Consumer group for URLs {}".format(grp_id))
    self.logger.info("Bootstrapping via {}".format(bs))
    self.consumer = KafkaConsumer(
        topic,
        bootstrap_servers=bs,
        group_id=grp_id,
        value_deserializer=json_value_deserializer(),
        max_poll_interval_ms=30000000
    )  # crank max poll to ensure no kafkapython timeout
    self.producer = KafkaProducer(value_serializer=json_value_serializer(), bootstrap_servers=bs)
    host = settings.get('MONGO_HOST')
    port = settings.get('MONGO_PORT')
    user = settings.get('SNIPPETSPIDER_MONGO_USER')
    password = str(Password(Password.DEFAULT))  # NB: read from environment variable iff specified
    mongo = pymongo.MongoClient(host, port, username=user, password=password)
    self.db = mongo[settings.get('MONGO_DB')]
    self.recent_cache = pylru.lrucache(10 * 1024)
    self.cache = pylru.lrucache(500)
    self.update_blacklist()
    self.disinterest_topic = settings.get('OVERREPRESENTED_HOSTS_TOPIC')
    self.overrepresented_hosts = self.init_overrepresented_hosts(self.disinterest_topic, bs)
    save_pidfile('pid.snippetspider')
    self.host_rejection_criteria = (
        # ignore hosts outside scope or which are manually deemed irrelevant
        ('host_blacklisted', self.is_blacklisted),
        # we obey long-term bans to avoid contacting the site in concert across spiders
        ('host_long_term_ban', self.is_long_term_banned),
    )
def __init__(self, x, y, p, depth, norm='l1', root_node=(0, 0), bc=4, plot_2d=True,
             pvar_dist_mem=None, cache_size=1024, allow_randomization=True, min_step=3, max_step=5):
    """Inputs:
       - x, y: input paths
       - p: p for p-variation
       - depth: signature truncation depth
       - norm: norm for pairwise signature distance
       - root_node: node at the start of the tree
       - bc: boundary condition for starting using the tight bound
       - plot_2d: whether to plot results of 1-d or 2-d paths
       - pvar_dist_mem: memoization dictionary for p-var distances
    """

    # input paths
    self.x = np.array(x)
    self.y = np.array(y)

    self.m = len(self.x)
    self.n = len(self.y)
    assert self.m > 0
    assert self.n > 0

    self.p = p  # p for p-variation
    self.depth = depth  # signature depth
    self.norm = norm  # l1 or l2 norm
    self.path = [(0, 0), (0, 0)]  # lattice path
    self.best_node_value = math.inf  # keep track of best bound
    self.i0, self.j0 = root_node  # tuple of indices of root node
    self.plot_2d = plot_2d  # flag to plot paths in 1-d or 2-d case
    self.cache_size = cache_size

    # boundary condition, i.e. necessary min distance from root node to start using tight bound
    self.bc = bc

    if pvar_dist_mem is None:
        #self.pvar_dist_mem = defaultdict(float)
        self.pvar_dist_mem = pylru.lrucache(self.cache_size)
    else:
        # feed to the class called recursively the current state of the mem dictionary
        self.pvar_dist_mem = pvar_dist_mem

    self.allow_randomization = allow_randomization
    self.min_step = min_step
    self.max_step = max_step
def reindex_all():
    events_fns = sorted(glob.glob(os.path.join(config["data_dir"], "watch", "date", "*")))
    sym_dir = os.path.join(config["data_dir"], "watch", "syms-" + pendulum.now().isoformat()[:10])
    if os.path.exists(sym_dir):
        raise Exception(sym_dir + " exists!")
    ensure_dirs([sym_dir])
    cache = pylru.lrucache(5000)
    dup_cnt = 0
    too_many_cnt = 0
    corrupt_cnt = 0
    syms_seen = set()
    for fn in tqdm.tqdm(events_fns):
        with open(fn, "r") as f:
            for line in f.readlines():
                if len(line) == 0:
                    continue
                try:
                    evt = json.loads(line.strip())
                except Exception as e:
                    corrupt_cnt += 1
                    continue  # fixed: skip corrupt lines rather than reusing the previous event
                evt_type = evt.get("type", "unk")
                evt_symbols = evt.get("symbols", [])
                if evt_type == "error":
                    continue
                if not evt_symbols:  # fixed: was `len(evt_symbols) is None`, which is never true
                    syms = ["_NONE"]
                else:
                    syms = [s for s in evt_symbols if '.' not in s]
                if len(syms) >= 5:
                    # discard events that have too many labels
                    too_many_cnt += 1
                    continue
                cache_key = (
                    ",".join(syms) + evt_type + evt.get("source", "") + evt.get("title", "") +
                    evt.get("text", "")[:100] + evt.get("name", "") + str(evt.get("value", ""))
                )
                if cache_key in cache:
                    dup_cnt += 1
                    continue
                cache[cache_key] = evt
                for sym in syms:
                    syms_seen.add(sym)
                    try:
                        with open(os.path.join(sym_dir, sym), "a") as f2:
                            f2.write(line)
                    except Exception as e:
                        print(os.path.join(sym_dir, sym), e)
    print("Duplicates", dup_cnt)
    print("Corruptions", corrupt_cnt)
    print("Too many labels", too_many_cnt)
    print("Unique Symbols", len(syms_seen))
def get_cache(size=10000):
    if memcache_host:
        host = memcache_host.split(":")
        kw = {
            "binary": True,
            "behaviors": {"tcp_nodelay": True, "ketama": True},
        }
        return pylibmc.Client(host, **kw)
    else:
        return pylru.lrucache(size)
def __init__(self, name, maxSize=1000000):
    self._store = pylru.lrucache(maxSize)
    self._name = name
    self._start = time.time()
    self._numWrites = 0
    self._numReads = 0
    self._numHits = 0
    self._numMisses = 0
    self.getStats()
def __init__(self, target_addr, bind_addr, via=None):
    self.target_addr = target_addr
    self.bind_addr = bind_addr
    self.via = via or ViaNamespace(ClientClass=UDPClient)
    self.removed = None

    def callback(key, value):
        self.removed = (key, value)

    self.via_clients = pylru.lrucache(256, callback)
def pmemoize(obj):
    cache = obj.cache = pylru.lrucache(500)

    @functools.wraps(obj)
    def memoizer(*args, **kwargs):
        key = marshal.dumps([args, kwargs])
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer
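# A usage sketch for the pmemoize decorator above (slow_square is a hypothetical
# function, not from the original source): the first call computes and caches the
# result; repeated calls with the same marshal-able arguments hit the 500-entry LRU cache.
@pmemoize
def slow_square(x):
    print("computing", x)
    return x * x

slow_square(4)   # prints "computing 4", returns 16
slow_square(4)   # cache hit: returns 16 without printing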
def __init__(self, bind_addr, via=None):
    self.bind_addr = bind_addr
    self.via = via or ViaNamespace(ClientClass=UDPClient)
    self.removed = None

    def callback(key, value):
        self.removed = (key, value)

    self.via_clients = pylru.lrucache(256, callback)
    self.bind_socks = weakref.WeakValueDictionary()
def __init__(self, cache_dir, cache_name, cache_size, cache_callback=None, log=None):
    self._cache = pylru.lrucache(cache_size, callback=cache_callback)
    self._cache_filename = mozpath.join(cache_dir, cache_name + '-cache.pickle')
    self._log = log
def __init__(self, videoFilepath):
    self._vidcap = cv2.VideoCapture(videoFilepath)
    self.__imagesCache = pylru.lrucache(4)  # cache holds up to 4 decoded frames
    print("cv2 version", cv2.__version__)
    print("num_of_frames", self.num_of_frames())
    print("frame_height", self.frame_height())
    print("frame_width", self.frame_width())
    print("frames_per_second", self.frames_per_second())
def __init__(self, index_dir):
    self.index_dir = index_dir
    self.catalog_map = None
    self.doc_id_map = None
    self.id_doc_map = dict()
    self.dlen_map = None
    self.token_id_map = None
    self.index_file = None
    self.mem_cache = pylru.lrucache(10000)
    self.load_meta()
def __init__(self, coin, url, max_workqueue=10, init_retry=0.25, max_retry=4.0):
    self.coin = coin
    self.logger = class_logger(__name__, self.__class__.__name__)
    self.set_url(url)
    # Limit concurrent RPC calls to this number.
    # See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
    self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue)
    self.init_retry = init_retry
    self.max_retry = max_retry
    self._height = None
    self.available_rpcs = {}
    self.connector = aiohttp.TCPConnector()
    self._block_hash_cache = lrucache(1000000)
    self._block_cache = lrucache(100000)
def main(args, consumer=None, producer=None, db=None, cache=None):
    if args.v:
        print(args)
    if consumer is None:
        consumer = KafkaConsumer(
            args.consume_from,
            bootstrap_servers=args.bootstrap,
            group_id=args.group,
            auto_offset_reset=args.start,
            # 10s or we terminate consumption since we have caught up with all available messages
            consumer_timeout_ms=10 * 1000,
            value_deserializer=json_value_deserializer())
    if producer is None:
        producer = KafkaProducer(value_serializer=json_value_serializer(), bootstrap_servers=args.bootstrap)
    if cache is None:
        cache = pylru.lrucache(500)
    mongo = None
    if db is None:
        mongo = pymongo.MongoClient(args.db, args.port, username=args.dbuser, password=str(args.dbpassword))
        db = mongo[args.dbname]

    from requests.packages.urllib3.exceptions import InsecureRequestWarning
    requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    random_ua = random_user_agent()
    print("Using random user agent for all requests: {}".format(random_ua))

    n_good = n_bad = 0
    for artefact_url, cited_on, http_status in iterate(consumer, args.n, verbose=args.v, cache=cache):
        artefact = None
        for strategy in [strategy_1_pyrequests, strategy_2_wget]:
            up = urlparse(cited_on)
            artefact = strategy(db, producer, artefact_url, cited_on,
                                ua=random_ua, referrer=up.hostname, to=args.to)
            if artefact is not None:
                print("Successfully retrieved: {}".format(artefact))
                n_good += 1
                break
        if artefact is None:
            n_bad += 1
        sleep(5)  # 5s delay between requests

    if mongo is not None:  # fixed: only close the client if we created it here
        mongo.close()
    consumer.close()
    print("Processed {} artefacts successfully, {} could not be downloaded".format(n_good, n_bad))
    return 0
def __init__(self, cipher, bind_addr, via=None, **kwargs):
    self.cipher = cipher
    self.bind_addr = bind_addr
    self.via = via or ViaNamespace(ClientClass=UDPClient)
    self.removed = None
    self.kwargs = kwargs

    def callback(key, value):
        self.removed = (key, value)

    self.via_clients = pylru.lrucache(256, callback)
def __init__(self, **kwargs):
    """
    Initializer

    ARGS:
        kwargs
            - MAX_MSG_TYPE_CACHE_SIZE: Maximum size of the LRU cache around msg_types
    """
    _msg_type_cache_size = kwargs.get('MAX_MSG_TYPE_CACHE_SIZE', 1024)
    self._msg_type_cache = pylru.lrucache(_msg_type_cache_size)
def run(weights, num_bins, capacity=1, lower_bound=-1):
    """Finds the best feasible upper bound within given limits

    Keyword arguments:
    weights -- the set of allowed weights
    num_bins -- number of bins
    capacity -- bins capacity

    Return maximal capacity required (stretching factor = ret / capacity)
    """
    bins = bin_factory(num_bins, capacity)

    # Keep only feasible weights
    ws = []
    for w in weights:
        if w <= capacity:
            ws.append(w)

    # Sort items by decreasing order of their weights
    ws.sort(reverse=True)
    # WARNING: if the order is changed then, feasibility has to be verified
    # for every item ! cf boolean feasibilityVerified in branch()

    init_solver(SOLVER, jarpath)
    if SOLVER == "CHOCO" or SOLVER == "CP":
        run_jvm(jvmpath, jarpath)

    # Create cache
    if cache_size <= 0:
        d = {}
    else:
        d = lrucache(cache_size)

    if lower_bound < 0:
        lower_bound = capacity

    root = TreeNode()
    root.attr['Name'] = "Root"
    val = branch(ws, bins, num_bins * capacity, lower_bound, 26. * capacity / 17., d, backtrack=root)
    root.set_input()

    #strftime("%Y-%m-%d %H:%M:%S", gmtime())
    f = open('backtrack.dot', 'w')
    f.write(root.dot())
    f.close()

    terminate_solver(SOLVER)

    """ Memory profiling
    from guppy import hpy
    h = hpy()
    print h.heap()
    """
    return val
def __init__(self, properties):
    settings = zc.mappingobject.mappingobject(properties)
    self.cache_size = settings.cache_size
    self.cache = pylru.lrucache(self.cache_size)
    self.hitrates = Sample()

    @properties
    def changed(*a):
        if settings.cache_size != self.cache_size:
            self.cache_size = settings.cache_size
            self.cache.size(self.cache_size)
def register_cache(cls, key, size=512):
    """Create a new cache for this entity, keyed by attribute.

    :param str key: The attribute name to use as a key
    :param int size: The size of the cache to create
    :returns None:
    :raises KeyError: If a cache already exists for `key`

    """
    if key in cls._caches:
        raise KeyError(joins("entity already has cache:", key))
    cls._caches[key] = lrucache(size)
def __init__(self, email="*****@*****.**", user_count=1000):
    """
    email       alert email
    users       number of users to track
    files       number of recent files to track
    threshold   threshold for generating an alert
    bucket      time frame in which to track file writes
    windows     number of buckets to track; window in seconds = windows * bucket
    """
    self.user_cache = pylru.lrucache(user_count)
    self.logger = logging.getLogger()
def __init__(self, user_agent, auth, delay, fetch_limit, cache_size=0, dry_run=False):
    self.user_agent = user_agent
    self.auth = auth
    self.delay = delay
    self.fetch_limit = fetch_limit
    self.cache_size = cache_size
    self.dry_run = dry_run
    self.cache = pylru.lrucache(self.cache_size) if self.cache_size > 0 else None
    self.api_request_delay = 1.0 if self.__is_oauth() else 2.0
    self.r = praw.Reddit(self.user_agent, cache_timeout=0, api_request_delay=self.api_request_delay)
    self.expires = -1
    self.__auth()
def __init__(self, basefolder, create=False):
    self._logger = logging.getLogger(__name__)
    self.basefolder = os.path.realpath(os.path.abspath(basefolder))
    if not os.path.exists(self.basefolder) and create:
        os.makedirs(self.basefolder)
    if not os.path.exists(self.basefolder) or not os.path.isdir(self.basefolder):
        raise RuntimeError("{basefolder} is not a valid directory".format(**locals()))

    import threading
    self._metadata_lock = threading.Lock()

    self._metadata_cache = pylru.lrucache(10)
def register(self, name, controller, config):
    self.host = config.get(name, 'host')
    self.url = Kannel.URL % self.host
    self.name = name
    self.username = config.get(name, 'username')
    self.password = config.get(name, 'password')
    self.controller = controller
    controller.register_backend(name, self)
    self.seen = pylru.lrucache(10000)
def start():
    # Buffer queue: incoming requests are placed here first, then taken off for processing
    DNSServer.deq_cache = Queue(maxsize=deq_size) if deq_size > 0 else Queue()
    # LRU cache: least-recently-used entries are evicted first
    DNSServer.dns_cache = pylru.lrucache(lru_size)
    # Spawn a greenlet to keep draining the buffer queue
    gevent.spawn(_init_cache_queue)
    # Start the DNS server
    print 'Start DNS server at %s:%d\n' % (ip, port)
    dns_server = SocketServer.UDPServer((ip, port), DNSHandler)
    dns_server.serve_forever()
def run_server(confvars):
    if 'editdir' in confvars:
        try:
            for dir in ['wiki', 'wiki.orig']:
                fdirpath = os.path.join(confvars['editdir'], dir)
                if not os.path.exists(fdirpath):
                    os.mkdir(fdirpath)
        except:
            logging.error("Error setting up directories:")
            logging.debug("%s must be a writable directory" % confvars['editdir'])

    blacklistpath = os.path.join(os.path.dirname(confvars['path']), 'template_blacklist')
    logging.debug("Reading template_blacklist %s" % blacklistpath)
    blacklist = set()
    if os.path.exists(blacklistpath):
        with open(blacklistpath, 'r') as f:
            for line in f.readlines():
                blacklist.add(line.rstrip().decode('utf8'))
        logging.debug("Read %d blacklisted templates" % len(blacklist))
    confvars['templateblacklist'] = blacklist

    confvars['lang'] = confvars['path'][0:2]
    confvars['flang'] = os.path.basename(confvars['path'])[0:5]

    wikidb = WPWikiDB(confvars['path'], confvars['lang'],
                      confvars['templateprefix'], confvars['templateblacklist'])

    links_cache = pylru.lrucache(10)

    confvars['xocolor'] = profile.get_color()

    httpd = MyHTTPServer(
        ('', confvars['port']),
        lambda *args: WikiRequestHandler(wikidb, confvars, links_cache, *args))

    if confvars['comandline']:
        httpd.serve_forever()
    else:
        from threading import Thread
        server = Thread(target=httpd.serve_forever)
        server.setDaemon(True)
        logging.debug("Before start server")
        server.start()
        logging.debug("After start server")

    # Tell the world that we're ready to accept request.
    logging.debug('Ready')
def __init__(self, vn=None, fn=None, orig=False):
    fn = fn or self.FNS[vn]
    if vn is None and fn in self.FNS:
        vn = self.FNS.index(fn)
    self.vn, self.fn, self.orig = vn, fn, orig
    self.cap, self.fps = self._capFps(fn)
    self.imgHW = getFrame(self.cap, 0).shape[:2]
    fnO = re.sub(r" AD\.avi$", ".avi", fn)  # original video
    if os.path.isfile(fnO):
        self.capO, self.fpsO = self._capFps(fnO)
        self._setFctrs()
    else:
        self.capO = self.fpsO = self.fpsFctr = self.sizeFctr = None
    self.x, self.y, self.a, self.b, self.theta = self.loadCtraxMatFile(fn, self.cap)
    self.fiCache = pylru.lrucache(100)
def __init__(self, db, opts, file_name, data_set_name, deflated_text):
    self.db = db
    self.opts = opts
    self.file_name = file_name
    self.data_set_name = data_set_name
    if data_set_name == 'crawl':
        self.collection_name = 'documents'
    else:
        self.collection_name = data_set_name
    self.batcher = MongoBatcher(db, self.collection_name, opts.batch_size)
    self.cache = pylru.lrucache(10000)
    self.crawl_url = ''
    self.crawl_html = []
    self._import(deflated_text)
def init(self, X, Y):
    #assert X.shape[0] == Y.shape[0]
    self.N = X.shape[0]
    column_size = self.N * 4
    cacheMB = self.cache_size * 1024 * 1024  # cache size given in MB, converted to bytes
    # how many kernel columns will be stored in cache
    cache_items = np.floor(cacheMB / column_size).astype(int)
    cache_items = min(self.N, cache_items)
    self.kernel_cache = pylru.lrucache(cache_items)
    self.X = X
    self.Y = Y
    self.compute_diag()
def run_server(confvars):
    if "editdir" in confvars:
        try:
            for dir in ["wiki", "wiki.orig"]:
                fdirpath = os.path.join(confvars["editdir"], dir)
                if not os.path.exists(fdirpath):
                    os.mkdir(fdirpath)
        except:
            logging.error("Error setting up directories:")
            logging.debug("%s must be a writable directory" % confvars["editdir"])

    blacklistpath = os.path.join(os.path.dirname(confvars["path"]), "template_blacklist")
    logging.debug("Reading template_blacklist %s" % blacklistpath)
    blacklist = set()
    if os.path.exists(blacklistpath):
        with open(blacklistpath, "r") as f:
            for line in f.readlines():
                blacklist.add(line.rstrip().decode("utf8"))
        logging.debug("Read %d blacklisted templates" % len(blacklist))
    confvars["templateblacklist"] = blacklist

    confvars["lang"] = confvars["path"][0:2]
    confvars["flang"] = os.path.basename(confvars["path"])[0:5]

    wikidb = WPWikiDB(confvars["path"], confvars["lang"],
                      confvars["templateprefix"], confvars["templateblacklist"])

    links_cache = pylru.lrucache(10)

    httpd = MyHTTPServer(
        ("", confvars["port"]),
        lambda *args: WikiRequestHandler(wikidb, confvars, links_cache, *args))

    if confvars["comandline"]:
        httpd.serve_forever()
    else:
        from threading import Thread
        server = Thread(target=httpd.serve_forever)
        server.setDaemon(True)
        logging.debug("Before start server")
        server.start()
        logging.debug("After start server")

    # Tell the world that we're ready to accept request.
    logging.debug("Ready")