def test_memoize(self):
    """Two caches built over the same namespace must derive identical memoize keys."""
    def func(a, b):
        return a + b + random.randrange(0, 100000)

    # Build a second, independent cache over the same namespace.
    other = Cache(namespace=self.namespace, **get_cache_config('simple'))

    make_key = self.cache._memoize_make_cache_key()
    other_make_key = other._memoize_make_cache_key()
    nt.assert_equal(make_key(func), other_make_key(func))
def test_timeout(self):
    """An explicit memoize timeout (50s) must outlive the 1s default timeout."""
    self.cache = Cache(**get_cache_config('simple', CACHE_DEFAULT_TIMEOUT=1))

    @self.cache.memoize(50)
    def func(a, b):
        return a + b + random.randrange(0, 100000)

    first = func(5, 2)
    time.sleep(2)  # well past the 1-second default, well inside the 50s override
    nt.assert_equal(func(5, 2), first)
def setup_func(*args, **kwargs):
    """Build and return a Cache, honoring optional 'namespace' and
    'client_name' kwargs; everything else goes to get_cache_config.
    """
    namespace = kwargs.pop('namespace', None)
    preferred = kwargs.pop('client_name', None)

    if preferred:
        # Route the client preference through CACHE_OPTIONS for the config.
        options = kwargs.setdefault('CACHE_OPTIONS', {})
        options['preferred_memcache'] = preferred

    return Cache(namespace=namespace, **get_cache_config(*args, **kwargs))
def memoize(*args, **kwargs):
    """Return a memoizing decorator backed by a freshly configured Cache.

    Kwargs consumed here (not forwarded): cache_type ('auto' resolves via
    get_cache_type), namespace, CACHE_TIMEOUT, CACHE_THRESHOLD. Remaining
    args/kwargs are passed to Cache.memoize.
    """
    requested = kwargs.pop('cache_type', 'simple')
    namespace = kwargs.pop('namespace', DEF_NS)

    if requested == 'auto':
        requested = get_cache_type()

    config = merge([MEMOIZE_DEFAULTS, CACHE_CONFIGS[requested]])

    # Per-call overrides take precedence over the merged defaults.
    for option in ('CACHE_TIMEOUT', 'CACHE_THRESHOLD'):
        if option in kwargs:
            config[option] = kwargs.pop(option)

    return Cache(namespace=namespace, **config).memoize(*args, **kwargs)
import random
from os import environ

from mezmorize import Cache

# Use memcached when MEMCACHE_SERVERS is configured; otherwise fall back to
# the in-process 'simple' backend. (Previously the fallback was dead code
# behind `if True:`, and an unset env var produced a server list of [None].)
MEMCACHE_SERVERS = environ.get('MEMCACHE_SERVERS')

if MEMCACHE_SERVERS:
    config = {
        'DEBUG': True,
        'CACHE_TYPE': 'memcached',
        'CACHE_MEMCACHED_SERVERS': [MEMCACHE_SERVERS],
    }
else:
    config = {'CACHE_TYPE': 'simple'}

cache = Cache(**config)


@cache.memoize(60)
def add(a, b):
    """Return a + b plus a random offset; result cached 60s per (a, b)."""
    return a + b + random.randrange(0, 1000)


@cache.memoize(60)
def sub(a, b):
    """Return a - b minus a random offset; result cached 60s per (a, b)."""
    return a - b - random.randrange(0, 1000)


def delete_cache():
    """Purge all memoized results for add() and sub()."""
    cache.delete_memoized(add)
    cache.delete_memoized(sub)
# NOTE(review): chunk is mid-file — imports for os, Config, Flask, CORS,
# Cache and youtube_dl presumably appear in an earlier chunk.
from epifilter import filterVideo

os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'auth.json'
# NOTE(review): AWS credentials are set to empty strings here — verify these
# are intentional placeholders and not redacted values the code relies on.
os.environ['AWS_ACCESS_KEY_ID'] = ''
os.environ['AWS_SECRET_ACCESS_KEY'] = ''
os.environ['AWS_ROLE'] = ''
os.environ['AWS_DEFAULT_REGION'] = ''

# Aggressive retry policy (presumably a botocore Config — confirm import).
config = Config(retries=dict(max_attempts=1000))

app = Flask(__name__)
CORS(app)  # allow cross-origin requests to all routes

# Filesystem-backed cache under ./cache; entries live for 24 hours.
cache = Cache(CACHE_TYPE='filesystem', CACHE_DIR='cache',
              CACHE_DEFAULT_TIMEOUT=3600 * 24)


def downloadVideo(videoCode):
    """Download two streams of a YouTube video into the working directory.

    Fetches format itag 135 and then itag 251 (presumably the video-only
    and audio-only streams — confirm), each saved as '<videoCode>.<ext>'.
    """
    ydl_opts = {"format": "135", 'outtmpl': f'{videoCode}.%(ext)s'}
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download([f'https://www.youtube.com/watch?v={videoCode}'])
    ydl_opts = {"format": "251", 'outtmpl': f'{videoCode}.%(ext)s'}
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download([f'https://www.youtube.com/watch?v={videoCode}'])


# NOTE(review): definition truncated at this chunk boundary; the body
# continues in the next chunk.
def extractFrames(videoCode, n):
    try:
# Full datetime format built from the (externally defined) DATE_FORMAT.
DATETIME_FORMAT = '{0} %H:%M:%S'.format(DATE_FORMAT)

# Characters left unescaped when quoting URLs.
URL_SAFE = "%/:=&?~#+!$,;'@()*[]"

# Fallback date formats, presumably tried in order when parsing fails.
ALTERNATIVE_DATE_FORMATS = (
    "%m-%d-%Y",
    "%m/%d/%y",
    "%m/%d/%Y",
    "%m-%d-%y",
    "%Y-%m-%dt%H:%M:%Sz",
    # todo more: whatever Yahoo can accept
)

# leave option to substitute with multiprocessing
# NOTE(review): imap, dict.iteritems and dict.itervalues below are
# Python 2 only — this module will not run on Python 3 as written.
_map_func = imap

# Merge any number of dicts into one (later dicts win on key collisions).
combine_dicts = lambda *d: dict(chain.from_iterable(imap(dict.iteritems, d)))

cache = Cache(**cache_config)
timeout = 60 * 60 * 1  # one hour, in seconds


class Objectify:
    # Wraps keyword entries as instance attributes, with dict-like iteration.

    def __init__(self, **entries):
        self.__dict__.update(entries)

    def __iter__(self):
        # Iterate over the attribute values (Python 2 itervalues).
        return self.__dict__.itervalues()

    def iteritems(self):
        # Mirror the dict.iteritems API over the attributes.
        return self.__dict__.iteritems()


# NOTE(review): definition truncated at this chunk boundary; the body
# continues in the next chunk.
def _apply_func(funcs, items, map_func=starmap):
def memoize(*args, **kwargs):
    """Return a Cache.memoize decorator for the requested backend.

    The optional 'cache_type' kwarg (default 'simple') selects an entry
    from CACHE_CONFIGS; all other arguments pass through to memoize().
    """
    backend = kwargs.pop('cache_type', 'simple')
    return Cache(**CACHE_CONFIGS[backend]).memoize(*args, **kwargs)
def set_cache(cache_config):
    """Replace the module-level CACHE with one built from cache_config."""
    global CACHE
    CACHE = Cache(**cache_config)
# NOTE(review): this chunk opens mid-function — everything through
# `return CONFIGS[cache_type]` is the tail of a config-lookup function whose
# header (and the CONFIGS / cache_type names) appear in an earlier chunk.
        'memcached': {
            'DEBUG': True,
            'CACHE_TYPE': 'memcached',
            # NOTE(review): an unset MEMCACHE_SERVERS env var yields [None].
            'CACHE_MEMCACHED_SERVERS': [environ.get('MEMCACHE_SERVERS')]
        }
    }

    return CONFIGS[cache_type]


def set_cache(cache_config):
    """Replace the module-level CACHE with one built from cache_config."""
    global CACHE
    CACHE = Cache(**cache_config)


# Module-level default cache, built from the default configuration.
CACHE = Cache(**get_cache_config())

# Example endpoints this module targets:
# http://api.stackexchange.com/2.2/tags?
# page=1&pagesize=100&order=desc&sort=popular&site=stackoverflow
# http://api.stackexchange.com/2.2/tags?
# page=1&pagesize=100&order=desc&sort=popular&site=graphicdesign


def memoize(*args, **kwargs):
    """Memoize through the module-level CACHE."""
    return CACHE.memoize(*args, **kwargs)


# NOTE(review): definition truncated at this chunk boundary; the docstring
# and body continue in the next chunk.
def remove_keys(content, *args):
    """
    Remove keys from a dict and return new dict

    Args:
        content (dict): The dict to remove keys from
import subprocess
import os
from mezmorize import Cache
import pandas as pd
import json
import requests
from concurrent.futures import ThreadPoolExecutor, as_completed

# Filesystem cache in ./cache; large threshold, entries live 9999 seconds.
cache = Cache(DEBUG=True, CACHE_TYPE='filesystem', CACHE_DIR='cache',
              CACHE_THRESHOLD=1024*1024, CACHE_DEFAULT_TIMEOUT=9999)


def get_products(query, count=50):
    """Scrape products for `query` via the CLI scraper and return a DataFrame.

    Args:
        query (str): search keyword passed to the scraper CLI.
        count (int): maximum number of products to request.
    """
    params = ["products", "--keyword=" + query, "-n=" + str(count)]
    products_dict = run_scraper(params)
    products_df = pd.json_normalize(products_dict)
    adjust_reviews(products_df)  # presumably defined elsewhere in this file
    # NOTE(review): str.lstrip strips a *character set*, not a prefix — this
    # can eat leading letters of legitimate titles (e.g. "Sony...").
    # Consider Series.str.removeprefix instead.
    products_df['title'] = products_df['title'].str.lstrip("Sponsored Ad - ")
    return products_df


@cache.memoize()
def run_scraper(parameters):
    """Run the node scraper CLI, load its JSON output file, and return it.

    The output filename is parsed from the CLI's stdout and the file is
    deleted after loading. Results are memoized per `parameters`.
    """
    print("Running scraper: ", parameters)
    default_param = ["node", "amazon-product-api/bin/cli.js", "--random-ua",
                     "--filetype=json"]
    output = subprocess.check_output(default_param + parameters,
                                     universal_newlines=True)
    # The CLI reports 'was saved to: <path>'; raises IndexError if absent.
    filename = output.split("was saved to: ")[1].rstrip() + ".json"
    with open(filename) as f:
        result = json.load(f)
    os.remove(filename)
    return result


# NOTE(review): definition truncated at this chunk boundary; the body
# continues in the next chunk.
def isfloat(value):