Code example #1
def movie(trakt_slug, people_needed=False):
    cache_key = 'movie-{0}'.format(trakt_slug)
    cache_init_func = functools.partial(__movie, trakt_slug=trakt_slug)
    cache_init_exception_handler = network.http_get_init_exception_handler
    cache_init_failure_handler = network.http_get_init_failure_handler
    cache_update_func = functools.partial(__movie,
                                          trakt_slug=trakt_slug,
                                          timeout=network.TIMEOUT_CONNECT)
    cache_data_expiration = cache.WEEK

    result = cache.cache(cache_key, cache_init_func,
                         cache_init_exception_handler,
                         cache_init_failure_handler, cache_update_func,
                         cache_data_expiration)

    if people_needed:
        cache_key = 'movie-{0}-people'.format(trakt_slug)
        cache_init_func = functools.partial(__movie_people,
                                            trakt_slug=trakt_slug)
        cache_init_exception_handler = network.http_get_init_exception_handler
        cache_init_failure_handler = network.http_get_init_failure_handler
        cache_update_func = functools.partial(__movie_people,
                                              trakt_slug=trakt_slug,
                                              timeout=network.TIMEOUT_CONNECT)
        cache_data_expiration = cache.WEEK

        result['people'] = cache.cache(cache_key, cache_init_func,
                                       cache_init_exception_handler,
                                       cache_init_failure_handler,
                                       cache_update_func,
                                       cache_data_expiration)

    return result
Code example #2
def show(trakt_slug, seasons_needed=False):
    cache_key = 'show-{0}'.format(trakt_slug)
    cache_init_func = functools.partial(__show, trakt_slug=trakt_slug)
    cache_init_exception_handler = network.http_get_init_exception_handler
    cache_init_failure_handler = network.http_get_init_failure_handler
    cache_update_func = functools.partial(__show,
                                          trakt_slug=trakt_slug,
                                          timeout=network.TIMEOUT_CONNECT)
    cache_data_expiration = cache.DAY

    result = cache.cache(cache_key, cache_init_func,
                         cache_init_exception_handler,
                         cache_init_failure_handler, cache_update_func,
                         cache_data_expiration)

    if seasons_needed:
        cache_key = 'show-{0}-seasons'.format(trakt_slug)
        cache_init_func = functools.partial(__show_seasons,
                                            trakt_slug=trakt_slug,
                                            show_info=result)
        cache_init_exception_handler = network.http_get_init_exception_handler
        cache_init_failure_handler = network.http_get_init_failure_handler
        cache_update_func = functools.partial(__show_seasons,
                                              trakt_slug=trakt_slug,
                                              show_info=result,
                                              timeout=network.TIMEOUT_CONNECT)
        cache_data_expiration = cache.DAY

        result['seasons'] = cache.cache(cache_key, cache_init_func,
                                        cache_init_exception_handler,
                                        cache_init_failure_handler,
                                        cache_update_func,
                                        cache_data_expiration)

    return result
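Both functions above drive the same scrapyard helper: cache.cache takes a key, an init function, HTTP exception/failure handlers, an update function, and an expiration period. The scrapyard cache module itself is not part of this listing, so the following is only a rough sketch of a compatible wrapper, with an in-memory store and guessed handler semantics; the real implementation almost certainly differs:

import time

_STORE = {}  # assumed in-memory store; the real backend is unknown

def cache_sketch(key, init_func, init_exception_handler,
                 init_failure_handler, update_func, data_expiration):
    """Return the cached value for key, refreshing it once it has expired."""
    now = time.time()
    if key not in _STORE:
        # First request: build the value, routing errors to the handlers.
        try:
            data = init_func()
        except Exception as exc:
            data = init_exception_handler(exc)
        if data is None:
            data = init_failure_handler()
        _STORE[key] = (data, now)
    else:
        data, stored_at = _STORE[key]
        if now - stored_at > data_expiration:
            try:
                _STORE[key] = (update_func(), now)
            except Exception:
                pass  # keep serving the stale value if the refresh fails
    return _STORE[key][0]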
Code example #3
File: assembly.py Project: grahamfinney/ybd
def assemble(target):
    '''Assemble dependencies and contents recursively until target exists.'''
    if cache.get_cache(target):
        return

    defs = Definitions()
    this = defs.get(target)

    with app.timer(this, 'Starting assembly'):
        with sandbox.setup(this):
            for it in this.get('build-depends', []):
                dependency = defs.get(it)
                assemble(dependency)
                sandbox.install(this, dependency)

            for it in this.get('contents', []):
                component = defs.get(it)
                if component.get('build-mode') == 'bootstrap':
                    continue
                assemble(component)
                sandbox.install(this, component)

            if this.get('build-mode') != 'bootstrap':
                sandbox.ldconfig(this)
            else:
                app.log(this, "No ldconfig because bootstrap mode is engaged")

            build(this)

            if this.get('devices'):
                sandbox.create_devices(this)
            cache.cache(this)
Code example #4
def main():
    """
    entry point
    """
    url = ('https://en.wikipedia.org/wiki/'
           'Category:Newspapers_published_in_Kansas')
    obj = cache(url)
    parser = WikipediaCatParser()
    strd = str(obj)
    parser.feed(strd)

    for idx in parser.index:
        obj = parser.index[idx]
        for field in ("page", 'siteurl'):
            if field in obj:
                val = obj[field]
                if val:
                    obj2 = cache(val)
                    p = WikipediaPageParser()
                    p.feed(str(obj2))
                    p.obj['url'] = val
                    #print("FOUND:%s" % str(p.obj))


    with open('Category_Newspapers_published_in_Kansas.yaml', 'w') as output:
        output.write(yaml.dump(parser.index, indent=4, default_flow_style=False))
Code example #5
File: assembly.py Project: mwilliams-ct/ybd
def do_build(defs, component):
    app.config['counter'].increment()
    with app.timer(component, 'build of %s' % component['cache']):
        build(defs, component)

    with app.timer(component, 'artifact creation'):
        do_manifest(component)
        cache(defs, component)
Code example #6
File: assembly.py Project: locallycompact/ybd
def do_build(defs, component):
    app.config['counter'].increment()
    with app.timer(component, 'build of %s' % component['cache']):
        build(defs, component)

    with app.timer(component, 'artifact creation'):
        do_manifest(component)
        cache(defs, component)
Code example #7
File: assembly.py Project: nowster/ybd
def assemble(defs, target):
    '''Assemble dependencies and contents recursively until target exists.'''

    if cache.get_cache(defs, target):
        # needed for artifact splitting
        load_manifest(defs, target)
        return cache.cache_key(defs, target)

    random.seed(datetime.datetime.now())
    component = defs.get(target)

    if component.get('arch') and component['arch'] != app.config['arch']:
        app.log(target, 'Skipping assembly for', component.get('arch'))
        return None

    def assemble_system_recursively(system):
        assemble(defs, system['path'])

        for subsystem in system.get('subsystems', []):
            assemble_system_recursively(subsystem)

    with app.timer(component, 'assembly'):
        sandbox.setup(component)

        systems = component.get('systems', [])
        random.shuffle(systems)
        for system in systems:
            assemble_system_recursively(system)

        dependencies = component.get('build-depends', [])
        random.shuffle(dependencies)
        for it in dependencies:
            dependency = defs.get(it)
            assemble(defs, dependency)
            sandbox.install(defs, component, dependency)

        contents = component.get('contents', [])
        random.shuffle(contents)
        for it in contents:
            subcomponent = defs.get(it)
            if subcomponent.get('build-mode') != 'bootstrap':
                assemble(defs, subcomponent)
                splits = None
                if component.get('kind') == 'system':
                    splits = subcomponent.get('artifacts')
                sandbox.install(defs, component, subcomponent, splits)

        app.config['counter'] += 1
        if 'systems' not in component:
            with app.timer(component, 'build'):
                build(defs, component)
        with app.timer(component, 'artifact creation'):
            do_manifest(defs, component)
            cache.cache(defs, component,
                        full_root=component.get('kind') == "system")
        sandbox.remove(component)

    return cache.cache_key(defs, component)
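The ybd assemble variants in this listing (examples #3, #7, #14, #32, #48) share one control flow: return the cache key on a hit, otherwise recurse into build-depends and contents, build, cache the artifact, and return its key. Stripped of sandboxing and timers, the skeleton looks roughly like this; it reuses the names from the excerpts above and is meant for orientation only, not as ybd code:

def assemble_sketch(defs, target):
    # Memoized recursive build: a cache hit short-circuits the whole subtree.
    key = cache.get_cache(defs, target)
    if key:
        return key
    component = defs.get(target)
    for dep in component.get('build-depends', []):
        assemble_sketch(defs, dep)
    for item in component.get('contents', []):
        if defs.get(item).get('build-mode') != 'bootstrap':
            assemble_sketch(defs, item)
    build(defs, component)         # produce the artifact
    cache.cache(defs, component)   # store it for every later run
    return cache.cache_key(defs, component)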
Code example #8
File: query.py Project: andrew-w-sherman/eduwiki
def info(topic):
    name = topic+" info"
    fetched = fetch(name)
    if fetched:
        return fetched
    art = wiki.search(topic)
    info = {}
    info['name'] = art.title
    info['text'] = art.sections[0].string
    cache(name, info)
    return info
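Examples #8, #10, #13, and #15 all use the same look-aside idiom from eduwiki's query.py: try fetch(name) first, and on a miss compute the value and cache(name, value) before returning it. The fetch/cache helpers themselves are not shown in this listing; a minimal dict-backed sketch of compatible helpers (an assumption, not eduwiki's actual code) would be:

_store = {}

def cache(name, value):
    # Remember a computed value under its lookup name.
    _store[name] = value

def fetch(name):
    # Return the cached value, or None on a miss.
    return _store.get(name)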
Code example #9
File: merge.py Project: h4ck3rm1k3/letter-to-editor
def index_object(filename, data, i):
    d = data[i]
    for k in d.keys() :
        if k not in (
            'A',
            'C',
            'contact_page',
            'F', 
            'facebook', 
            'ksa_site', 
            'named' ,
            'src_url', 
            'T', 
            'twitter', 
            'user_forum', 
            'V', 
            'W', 
            'website', 
            'Website', 
            'wikipedia'):
            continue
        v = d[k]

        if not isinstance(v,str):
            continue

        if v.startswith('No Website') :
            continue

        v = v.strip()
        v = v.rstrip("/")
        v = v.replace("http://", "")
        v = v.replace("https://", "")

        # skip empty or placeholder values
        if not v:
            continue
        if v == "NONE":
            continue
        if v[0] == "?":
            continue

        if v not in index:
            index[v] = {}
        if k not in index[v]:

            # now let's make sure we can cache this all
            cache.cache("http://%s" % v)

            index[v][k] = []
            print("adding key:'%s' val:'%s'" % (k, v))
            index[v][k].append(d)
Code example #10
File: query.py Project: andrew-w-sherman/eduwiki
def prereqs(topic):
    name = topic+" prereqs"
    fetched = fetch(name)
    if fetched:
        return fetched
    art = wiki.search(topic)
    reqs = getReqs(art)
    for req in reqs:
        review(req) # without grabbing return, works as pure caching function
    cache(name, reqs)
    return reqs
Code example #11
File: assembly.py Project: rdale/ybd
def build(defs, component):
    '''Create an artifact for a single component and add it to the cache'''

    with claim(defs, component):
        app.config['counter'].increment()
        with app.timer(component, 'build of %s' % component['cache']):
            run_build(defs, component)

        with app.timer(component, 'artifact creation'):
            do_manifest(component)
            cache(defs, component)
Code example #12
File: analysis.py Project: dostos/nct_dav_20fall
def get_cached_count_dicts(field: str,
                           key_column: str) -> Dict[str, Dict[str, int]]:
    if len(list(
            Path(f'{CACHED_DIR}/{field}/{key_column}').glob('*.json'))) == 0:
        cache(field, key_column, reset=True)
    key_count_dict = {}
    for cached_filepath in Path(f'{CACHED_DIR}/{field}/{key_column}').glob(
            '*.json'):
        key = cached_filepath.stem
        with cached_filepath.open('r') as f:
            key_count_dict[key] = json.load(f)
    return key_count_dict
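Here cache(field, key_column, reset=True) is expected to populate CACHED_DIR/<field>/<key_column>/ with one JSON file per key, which the function then reads back into a dict of dicts. A hypothetical call, with made-up field and column names purely for illustration:

# hypothetical field/column names, for illustration only
counts = get_cached_count_dicts('condition', 'country')
print(counts.get('US', {}))  # e.g. {'diabetes': 42, ...}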
Code example #13
File: query.py Project: andrew-w-sherman/eduwiki
def quiz(topic):
    name = topic+" quiz"
    fetched = fetch(name)
    if fetched:
        return fetched
    art = wiki.search(topic)
    quiz = {}
    quiz['name'] = art.title
    quiz['description'] = getDescription(art)
    quiz['distractors'] = getDists(art)
    quiz['prereqs'] = getReqs(art)
    cache(name, quiz)
    return quiz
Code example #14
File: assembly.py Project: padrigali/ybd
def assemble(defs, target):
    '''Assemble dependencies and contents recursively until target exists.'''

    component = defs.get(target)
    if get_cache(defs, component) or get_remote(defs, component):
        return cache_key(defs, component)

    random.seed(datetime.datetime.now())

    if component.get('arch') and component['arch'] != app.config['arch']:
        app.log(target, 'Skipping assembly for', component.get('arch'))
        return None

    sandbox.setup(component)

    systems = component.get('systems', [])
    random.shuffle(systems)
    for system in systems:
        assemble(defs, system['path'])
        for subsystem in system.get('subsystems', []):
            assemble(defs, subsystem)

    dependencies = component.get('build-depends', [])
    for it in dependencies:
        preinstall(defs, component, it)

    contents = component.get('contents', [])
    random.shuffle(contents)
    for it in contents:
        subcomponent = defs.get(it)
        if subcomponent.get('build-mode', 'staging') != 'bootstrap':
            preinstall(defs, component, subcomponent)

    if 'systems' not in component:
        if is_building(defs, component):
            import time
            time.sleep(10)
            raise Exception

        app.config['counter'] += 1
        if not get_cache(defs, component):
            with app.timer(component, 'build of %s' % component['cache']):
                with claim(defs, component):
                    build(defs, component)

    with app.timer(component, 'artifact creation'):
        do_manifest(component)
        cache(defs, component)
    sandbox.remove(component)

    return cache_key(defs, component)
Code example #15
File: query.py Project: andrew-w-sherman/eduwiki
def review(topic):
    name = topic+" review"
    fetched = fetch(name)
    if fetched:
        return fetched
    art = wiki.search(topic)
    rev = {}
    if art.disambiguation:
        raise DisambiguationError()
    rev['name'] = art.title
    rev['description'] = getDescription(art)
    rev['distractors'] = getDists(art)
    cache(name, rev)
    return rev
Code example #16
File: assembly.py Project: leeming/ybd
def build(defs, component):
    '''Create an artifact for a single component and add it to the cache'''

    if get_cache(defs, component):
        return

    with claim(defs, component):
        if component.get('kind', 'chunk') == 'chunk':
            install_dependencies(defs, component)
        with timer(component, 'build of %s' % component['cache']):
            run_build(defs, component)

        with timer(component, 'artifact creation'):
            write_metadata(defs, component)
            cache(defs, component)
Code example #17
File: assembly.py Project: leeming/ybd
def build(defs, component):
    '''Create an artifact for a single component and add it to the cache'''

    if get_cache(defs, component):
        return

    with claim(defs, component):
        if component.get('kind', 'chunk') == 'chunk':
            install_dependencies(defs, component)
        with timer(component, 'build of %s' % component['cache']):
            run_build(defs, component)

        with timer(component, 'artifact creation'):
            write_metadata(defs, component)
            cache(defs, component)
Code example #18
def transfer_values_cache(cache_path, model, images=None, image_paths=None):
    """
    This function either loads the transfer-values if they have
    already been calculated, otherwise it calculates the values
    and saves them to a file that can be re-loaded again later.
    Because the transfer-values can be expensive to compute, it can
    be useful to cache the values through this function instead
    of calling transfer_values() directly on the Inception model.
    See Tutorial #08 for an example on how to use this function.
    :param cache_path:
        File containing the cached transfer-values for the images.
    :param model:
        Instance of the Inception model.
    :param images:
        4-dim array with images. [image_number, height, width, colour_channel]
    :param image_paths:
        Array of file-paths for images (must be jpeg-format).
    :return:
        The transfer-values from the Inception model for those images.
    """

    # Helper-function for processing the images if the cache-file does not exist.
    # This is needed because we cannot supply both fn=process_images
    # and fn=model.transfer_values to the cache()-function.
    def fn():
        return process_images(fn=model.transfer_values,
                              images=images,
                              image_paths=image_paths)

    # Read the transfer-values from a cache-file, or calculate them if the file does not exist.
    transfer_values = cache(cache_path=cache_path, fn=fn)

    return transfer_values
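As a usage sketch for the function above (the inception module, the model construction, and the file paths follow the Hvass-Labs tutorial layout, which is an assumption here):

# a usage sketch; module layout and paths are assumptions
import inception

model = inception.Inception()
transfer_values = transfer_values_cache(cache_path='inception_train.pkl',
                                        model=model,
                                        image_paths=['img/cat.jpg', 'img/dog.jpg'])
print(transfer_values.shape)  # one transfer-value vector per input image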
Code example #19
def main(args=None, axes=None):
    global ARGS  # without this, ARGS is an unbound local when args is None
    if args is not None:
        ARGS = args
    steps, metrics = cache(ARGS)

    # create plot
    print(">> Plotting...")
    plt.rcParams["font.size"] = 18
    if axes is None:
        fig, axes = plt.subplots(figsize=(15, 15))

    if "performance" in metrics.keys():
        performance_plot(axes, steps, metrics["performance"])
    elif "network" in ARGS.viz:
        network_plot(ARGS, axes, metrics["empirical"])
    else:
        empirical_plot(ARGS, axes, metrics)

    if ARGS.legend:
        axes.legend()

    # save plot
    if ARGS.plot_dir is None:
        plot_path = f"{ARGS.save_dir}/{ARGS.experiment}/{ARGS.expid}/img"
    else:
        plot_path = f"{ARGS.plot_dir}/img"
    helper.makedir_quiet(plot_path)
    plot_file = f"{plot_path}/{ARGS.viz}{ARGS.suffix}.pdf"
    plt.savefig(plot_file)
    print(f">> Saving figure to {plot_file}")
Code example #20
File: datacenter.py Project: Fangui/hash_code
    def __init__(self, path):
        f = open(path)
        self.V, self.E, self.R, self.C, self.X = (
            int(x) for x in f.readline().split())
        self.videos = []
        self.vidreq = [(0, 0)] * self.V  # (id vid, nbreq)
        self.endPoints = []
        self.requests = []

        self.caches = []
        for i in range(self.C):
            self.caches.append(cache.cache(self.X))

        vi = f.readline().split(' ')
        for i in range(len(vi)):
            self.videos.append(video.video(int(vi[i]), i, 0))

        for i in range(self.E):
            Ld, K = f.readline().split(' ')
            Ld, K = int(Ld), int(K)
            e = endPoint.endPoint(Ld)

            for j in range(K):
                c, Lc = f.readline().split(' ')
                e.addCacheLat(self.caches[int(c)], int(Lc))
            self.endPoints.append(e)

        for i in range(self.R):
            Rv, Re, Rn = f.readline().split(' ')
            self.requests.append(request.request(int(Rv), int(Re), int(Rn)))

        for r in self.requests:
            self.videos[r.idVid].req += r.nbCall

        Length = len(self.videos)
        k = 0
        while k < Length:
            if self.videos[k].req == 0:
                self.videos.pop(k)
                Length -= 1
            else:
                k += 1

        self.videos = sorted(self.videos, key=lambda v: v.size / v.req)

        for e in self.endPoints:
            e.savingLat()

        for e in self.endPoints:
            for c in e.list:
                k = 0
                while k < Length:
                    if c[0].capacity >= c[0].size + self.videos[k].size:
                        c[0].addVideo(self.videos.pop(k))
                        Length -= 1
                    else:
                        k += 1

        f.close()
Code example #21
def trove(name):
    headers = {}
    key = "wpt2_" + name
    #print key
    f = lambda: fetch(name)
    #    cache.delcache(key)
    return cache.cache(key, f)
Code example #22
File: bloattrack.py Project: Esteban-Rocha/digsby
def get_win32_symbols(filepath):
    assert os.path.isfile(SIZER_EXE)
    return cache(
        lambda: run(SIZER_EXE, filepath),
        dirname="symanalysis",
        hashelems=("get_win32_symbols", file_contents(SIZER_EXE), file_contents(filepath)),
    )["val"]
Code example #23
def transfer_values_cache(cache_path, model, images=None, image_paths=None):
    """
    This function either loads the transfer-values if they have
    already been calculated, otherwise it calculates the values
    and saves them to a file that can be re-loaded again later.
    Because the transfer-values can be expensive to compute, it can
    be useful to cache the values through this function instead
    of calling transfer_values() directly on the Inception model.
    See Tutorial #08 for an example on how to use this function.
    :param cache_path:
        File containing the cached transfer-values for the images.
    :param model:
        Instance of the Inception model.
    :param images:
        4-dim array with images. [image_number, height, width, colour_channel]
    :param image_paths:
        Array of file-paths for images (must be jpeg-format).
    :return:
        The transfer-values from the Inception model for those images.
    """

    # Helper-function for processing the images if the cache-file does not exist.
    # This is needed because we cannot supply both fn=process_images
    # and fn=model.transfer_values to the cache()-function.
    def fn():
        return process_images(fn=model.transfer_values, images=images, image_paths=image_paths)

    # Read the transfer-values from a cache-file, or calculate them if the file does not exist.
    transfer_values = cache(cache_path=cache_path, fn=fn)

    return transfer_values
Code example #24
File: coco.py Project: samvram/RNN_Caption_Generator
def load_records(train=True):
    """
    Load the data-records for the data-set. This returns the image ids,
    filenames and text-captions for either the training-set or validation-set.
    
    This wraps _load_records() above with a cache, so if the cache-file already
    exists then it is loaded instead of processing the original data-file.
    
    :param train:
        Bool whether to load the training-set (True) or validation-set (False).
    :return: 
        ids, filenames, captions for the images in the data-set.
    """

    if train:
        # Cache-file for the training-set data.
        cache_filename = "records_train.pkl"
    else:
        # Cache-file for the validation-set data.
        cache_filename = "records_val.pkl"

    # Path for the cache-file.
    cache_path = os.path.join(data_dir, cache_filename)

    # If the data-records already exist in a cache-file then load it,
    # otherwise call the _load_records() function and save its
    # return-values to the cache-file so it can be loaded the next time.
    records = cache(cache_path=cache_path, fn=_load_records, train=train)

    return records


########################################################################
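A usage sketch for load_records, assuming the COCO data layout of the surrounding tutorial code; the three returned sequences line up by index:

# a usage sketch; assumes the data-set has been downloaded as in the tutorial
ids_train, filenames_train, captions_train = load_records(train=True)
print(filenames_train[0], captions_train[0])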
Code example #25
def load_cached(cache_path, in_dir):

    print("Criando conjuntos de dados a partir dos arquivos em: " + in_dir)

    dataset = cache(cache_path=cache_path, fn=DataSet, in_dir=in_dir)

    return dataset
Code example #26
def load_cached(cache_path, in_dir):
    """
    이미 존재하면 캐쉬 파일을 불러오고, 그렇지 않으면 새로운 객체를 만들고 캐쉬파일에 저장하는
    데이터셋 객체를 만들기 위한 Wrapper 함수

    만약 파일명의 이름이 매번 데이터셋을 불러올 때 일치하는 것을 보장하기를 원한다면 유용하다
    예를 들면, 다른 캐쉬파일에 저장된 변환 값을 갖는 결합 데이터셋 객체를 사용하는 경우,
    e.g. 튜토리얼 #09 에서 이 예제를 볼 수 있다.

    :param cache_path:
        캐쉬파일에 대한 경로

    :param in_dir:
        데이터셋 안에 파일들에 대한 최상위 경로.
        데이터셋 init 메소드에 대한 인자

    :return:
        데이터셋 객체
    """

    print("Creating dataset from the files in: " + in_dir)

    # If the object-instance for DataSet(in_dir=data_dir) already
    # exists in the cache-file then reload it, otherwise create
    # an object instance and save it to the cache-file for next time.
    dataset = cache(cache_path=cache_path,
                    fn=DataSet, in_dir=in_dir)

    return dataset
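The load_cached variants in this listing (examples #25, #26, #29, #30, #45) are identical apart from comment language; a hypothetical call, with made-up paths, is simply:

# hypothetical paths, for illustration only
dataset = load_cached(cache_path='my_dataset.pkl', in_dir='data/my_images/')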
Code example #27
File: execute.py Project: dilip640/RISC-V-Simulator
 def __init__(self):
     self.RegisterFile = register()
     self.Memory = cache()
     self.sp = 0x7ffffffc
     self.PC = 0
     self.IR = 0
     self.total_control_ins = 0
Code example #28
def trove(name):
    headers = {}
    key = "wpt2_" + name
    #print key
    f = lambda: fetch(name)
    #    cache.delcache(key)
    return cache.cache(key, f)
Code example #29
def load_cached(cache_path, in_dir):
    """
    Wrapper-function for creating a DataSet-object, which will be
    loaded from a cache-file if it already exists, otherwise a new
    object will be created and saved to the cache-file.

    This is useful if you need to ensure the ordering of the
    filenames is consistent every time you load the data-set,
    for example if you use the DataSet-object in combination
    with Transfer Values saved to another cache-file, see e.g.
    Tutorial #09 for an example of this.

    :param cache_path:
        File-path for the cache-file.

    :param in_dir:
        Root-dir for the files in the data-set.
        This is an argument for the DataSet-init function.

    :return:
        The DataSet-object.
    """

    print("Creating dataset from the files in: " + in_dir)

    # If the object-instance for DataSet(in_dir=data_dir) already
    # exists in the cache-file then reload it, otherwise create
    # an object instance and save it to the cache-file for next time.
    dataset = cache(cache_path=cache_path, fn=DataSet, in_dir=in_dir)

    return dataset


########################################################################
Code example #30
def load_cached(cache_path, in_dir):
    """
    Wrapper-function for creating a DataSet-object, which will be
    loaded from a cache-file if it already exists, otherwise a new
    object will be created and saved to the cache-file.

    This is useful if you need to ensure the ordering of the
    filenames is consistent every time you load the data-set,
    for example if you use the DataSet-object in combination
    with Transfer Values saved to another cache-file, see e.g.
    Tutorial #09 for an example of this.

    :param cache_path:
        File-path for the cache-file.

    :param in_dir:
        Root-dir for the files in the data-set.
        This is an argument for the DataSet-init function.

    :return:
        The DataSet-object.
    """

    print("Creating dataset from the files in: " + in_dir)

    # If the object-instance for DataSet(in_dir=data_dir) already
    # exists in the cache-file then reload it, otherwise create
    # an object instance and save it to the cache-file for next time.
    dataset = cache(cache_path=cache_path,
                    fn=DataSet, in_dir=in_dir)

    return dataset
Code example #31
def load_records(train=True):
    """
    Load the data-records for the data-set. This returns the image ids,
    filenames and text-captions for either the training-set or validation-set.
    
    This wraps _load_records() above with a cache, so if the cache-file already
    exists then it is loaded instead of processing the original data-file.
    
    :param train:
        Bool whether to load the training-set (True) or validation-set (False).

    :return: 
        ids, filenames, captions for the images in the data-set.
    """

    if train:
        # Cache-file for the training-set data.
        cache_filename = "records_train.pkl"
    else:
        # Cache-file for the validation-set data.
        cache_filename = "records_val.pkl"

    # Path for the cache-file.
    cache_path = os.path.join(data_dir, cache_filename)

    # If the data-records already exist in a cache-file then load it,
    # otherwise call the _load_records() function and save its
    # return-values to the cache-file so it can be loaded the next time.
    records = cache(cache_path=cache_path,
                    fn=_load_records,
                    train=train)

    return records
Code example #32
File: assembly.py Project: devcurmudgeon/cida
def assemble(defs, target):
    '''Assemble dependencies and contents recursively until target exists.'''

    if cache.get_cache(defs, target):
        return cache.cache_key(defs, target)

    component = defs.get(target)

    if component.get('arch') and component['arch'] != app.settings['arch']:
        app.log(target, 'Skipping assembly for', component.get('arch'))
        return None

    def assemble_system_recursively(system):
        assemble(defs, system['path'])
        for subsystem in system.get('subsystems', []):
            assemble_system_recursively(subsystem)

    with app.timer(component, 'Starting assembly'):
        sandbox.setup(component)
        for system_spec in component.get('systems', []):
            assemble_system_recursively(system_spec)

        dependencies = component.get('build-depends', [])
        random.shuffle(dependencies)
        for it in dependencies:
            dependency = defs.get(it)
            assemble(defs, dependency)
            sandbox.install(defs, component, dependency)

        contents = component.get('contents', [])
        random.shuffle(contents)
        for it in contents:
            subcomponent = defs.get(it)
            if subcomponent.get('build-mode') != 'bootstrap':
                assemble(defs, subcomponent)
                sandbox.install(defs, component, subcomponent)

        app.settings['counter'] += 1
        if 'systems' not in component:
            build(defs, component)
        do_manifest(component)
        cache.cache(defs, component,
                    full_root=component.get('kind') == "system")
        sandbox.remove(component)

    return cache.cache_key(defs, component)
Code example #33
File: core.py Project: santigr17/multicore-simulator
 def __init__(self, coreID, bus, clock, update):
     self.ID = coreID
     self.myBus = bus
     self.update = update
     self.myCache = cache.cache(coreID, update)
     self.controller = cacheController.controller(self.myCache, self.myBus, self.change_state, update, coreID)
     self.myBus.add_ctrl(self.controller)
     self.processor = core.processor(coreID, clock, self.controller, update)
Code example #34
File: wazuhtoken.py Project: wazuh/wazuh-splunk
 def __init__(self):
     """Constructor."""
     try:
         self.logger = log()
         self.session = requestsbak.Session()
         self.session.trust_env = False
         self.cache = cache()
     except Exception as e:
         self.logger.error("token: Error in token module constructor: %s" % (e))
Code example #35
def transfer_values_cache(cache_path, model, images=None, image_paths=None):
    def fn():
        return process_images(fn=model.transfer_values,
                              images=images,
                              image_paths=image_paths)

    transfer_values = cache(cache_path=cache_path, fn=fn)

    return transfer_values
Code example #36
def load_weight(filenames, captions):
    cache_filename = 'weight.pkl'
    cache_path = os.path.join(dir_path, cache_filename)

    weight = cache(cache_path=cache_path,
                   fn=_tf_idf,
                   filenames=filenames,
                   captions=captions)
    return weight
Code example #37
File: trakt.py Project: abisiaux/scrapyard
def show_season(trakt_slug, season_index):
    cache_key                    = 'show-{0}-{1}'.format(trakt_slug, season_index)
    cache_init_func              = functools.partial(__show_season, trakt_slug=trakt_slug, season_index=season_index)
    cache_init_exception_handler = network.http_get_init_exception_handler
    cache_init_failure_handler   = network.http_get_init_failure_handler
    cache_update_func            = functools.partial(__show_season, trakt_slug=trakt_slug, season_index=season_index, timeout=network.TIMEOUT_CONNECT)
    cache_data_expiration        = cache.HOUR * 6

    return cache.cache(cache_key, cache_init_func, cache_init_exception_handler, cache_init_failure_handler, cache_update_func, cache_data_expiration)
Code example #38
File: trakt.py Project: abisiaux/scrapyard
def shows_search(query):
    cache_key                    = 'shows-search-{0}'.format(query)
    cache_init_func              = functools.partial(__shows_search, query=query)
    cache_init_exception_handler = network.http_get_init_exception_handler
    cache_init_failure_handler   = network.http_get_init_failure_handler
    cache_update_func            = functools.partial(__shows_search, query=query, timeout=network.TIMEOUT_CONNECT)
    cache_data_expiration        = cache.HOUR

    return cache.cache(cache_key, cache_init_func, cache_init_exception_handler, cache_init_failure_handler, cache_update_func, cache_data_expiration)
Code example #39
def load_vector(filenames, captions):
    cache_filename = "data_vector.pkl"
    cache_path = os.path.join(dir_path, cache_filename)

    vectors = cache(cache_path=cache_path,
                    fn=_load_vector,
                    filenames=filenames,
                    captions=captions)
    return vectors
Code example #40
def process_images_train():        # define the function that processes the training-set images
    print("Processing {0} images in training-set ...".format(len(filenames_train)))
    # Path for the cache-file.
    cache_path = os.path.join(coco.data_dir, 'inception_coco_train.pkl')
    # If the cache already exists, load it directly; this gets us to the model much faster.
    transfer_values = cache(cache_path=cache_path,
                            fn=process_images,
                            data_dir=coco.train_dir,
                            filenames=filenames_train)
    return transfer_values
Code example #41
File: assembly.py Project: jjardon/ybd
def build(dn):
    '''Create an artifact for a single component and add it to the cache'''

    if get_cache(dn):
        return

    with claim(dn):
        if dn.get('kind', 'chunk') == 'chunk':
            install_dependencies(dn)
        with timer(dn, 'build of %s' % dn['cache']):
            run_build(dn)

        with timer(dn, 'artifact creation'):

            if dn.get('kind', 'chunk') == 'system':
                install_split_artifacts(dn)

            write_metadata(dn)
            cache(dn)
Code example #42
File: assembly.py Project: JanderJLR/ybd
def build(dn):
    '''Create an artifact for a single component and add it to the cache'''

    if get_cache(dn):
        return

    with claim(dn):
        if dn.get('kind', 'chunk') == 'chunk':
            install_dependencies(dn)
        with timer(dn, 'build of %s' % dn['cache']):
            run_build(dn)

        with timer(dn, 'artifact creation'):

            if dn.get('kind', 'chunk') == 'system':
                install_split_artifacts(dn)

            write_metadata(dn)
            cache(dn)
Code example #43
def process_link(l):
    f_name_link = l.get("href")
    f_name = l.text.strip()

    callit = lambda: extract_links(f_name_link)
    #           cache.delcache("wp"+ f_name)
    extract = cache.cache("wp" + f_name, callit)

    (name, party, state) = parse_name(f_name)
    (link, link2) = parse_link(f_name_link)

    ####
    callit = lambda: runtrove(name)
    #            troveid = runtrove(name)
    #            cache.delcache("wpt"+ f_name)
    troveid = cache.cache("wpt" + f_name, callit)

    wiki = scan_links(extract, l, f_name_link, f_name, name, party, state,
                      link, link2, troveid)
Code example #44
def load_records(train=True):
    if train:
        cache_filename = "records_train.pkl"
    else:
        cache_filename = "records_val.pkl"

    cache_path = os.path.join(data_dir, cache_filename)
    records = cache(cache_path=cache_path, fn=_load_records, train=train)

    return records
Code example #45
def load_cached(cache_path, in_dir):

    print("Creating dataset from the files in: " + in_dir)

    # If the object-instance for DataSet(in_dir=data_dir) already
    # exists in the cache-file then reload it, otherwise create
    # an object instance and save it to the cache-file for next time.
    dataset = cache(cache_path=cache_path, fn=DataSet, in_dir=in_dir)

    return dataset
Code example #46
 def __init__(self, remoteShell, domainAdmin="admin", domain=None):
     self.remoteShell = remoteShell
     self.vastoolPath = "/opt/quest/bin/vastool"     
     self.domainAdmin = domainAdmin
     self.defaultDomain = domain
     
     self.info = info.info(self.run)
     self.flush = flush.flush(self.run)
     self.create = create.create(self.run, self.defaultDomain)
     self.delete = delete.delete(self.run)
     self.timesync = timesync.timesync(self.run)
     self.nss = nss.nss(self.run)
     self.group = group.group(self.run)
     self.isvas = isvas.isvas(self.run)
     self.list = list.list(self.run)
     self.auth = auth.auth(self.run, self.defaultDomain)
     self.cache = cache.cache(self.run)
     self.configure = configure.configure(self.run)
     self.configureVas = configureVas.configureVas(self.run)
     self.schema = schema.schema(self.run)
     self.merge = merge.merge(self.run)
     self.unmerge = unmerge.unmerge(self.run)
     self.user = User.user(self.run)
     self.ktutil = ktutil.ktutil(self.run)
     self.load = load.load(self.run)
     self._license = License.License(self.run)
     self.License = self._license.License
     self.parseLicense = self._license.parseLicense
     self.compareLicenses = self._license.compareLicenses
     #self.vasUtilities = vasUtilities.vasUtilities(self.remoteShell)
     self.unconfigure = unconfigure.unconfigure(self.run)
     self.nssdiag = nssdiag(self.run)
     
     # These isinstance() calls are no-ops at runtime; they only hint the attribute types to IDEs.
     isinstance(self.info, info.info)
     isinstance(self.flush, flush.flush)
     isinstance(self.create, create.create)
     isinstance(self.delete, delete.delete)
     isinstance(self.timesync, timesync.timesync)
     isinstance(self.nss, nss.nss)
     isinstance(self.group, group.group)
     isinstance(self.isvas, isvas.isvas)
     isinstance(self.list, list.list)
     isinstance(self.auth, auth.auth)
     isinstance(self.cache, cache.cache)
     isinstance(self.configure, configure.configure)
     isinstance(self.configureVas, configureVas.configureVas)
     isinstance(self.schema, schema.schema)
     isinstance(self.merge, merge.merge)
     isinstance(self.unmerge, unmerge.unmerge)
     isinstance(self.user, User.user)
     isinstance(self.ktutil, ktutil.ktutil)
     isinstance(self.load, load.load)
     #isinstance(self.vasUtilities, vasUtilities.vasUtilities)
     isinstance(self.unconfigure, unconfigure.unconfigure)
     isinstance(self.nssdiag, nssdiag)
Code example #47
File: analex.py Project: abougouffa/mishkal
    def __init__(self, allowTagGuessing=True, allowDisambiguation=True):
        """
		Create Analex instance.
		"""

        self.nounstemmer = stem_noun.nounStemmer()
        # to stem nouns
        self.verbstemmer = stem_verb.verbStemmer()
        # to stem verbs
        self.unknownstemmer = stem_unknown.unknownStemmer()
        # to stem unknown
        self.stopwordsstemmer = stem_stopwords.stopWordStemmer()
        # to stem stopwords

        self.allowTagGuessing = allowTagGuessing  # allow guessing tags by naftawayh before analysis
        # if tagging is disabled, the disambiguation is also disabled
        self.allowDisambiguation = allowDisambiguation and allowTagGuessing  # allow disambiguation before analysis
        # enable the last mark (Harakat Al-I3rab)
        self.allowSyntaxLastMark = True
        if self.allowTagGuessing:
            self.tagger = naftawayh.wordtag.WordTagger()
        if self.allowDisambiguation:
            self.disambiguator = disambig.disambiguator()
        self.debug = False
        # to allow to print internal data
        self.limit = 10000
        # limit words in the text
        self.wordcounter = 0
        # the words contain arabic letters and harakat.
        # the unicode considers arabic harakats as marks not letters,
        # then we add harakat to the regular expression to tokenize
        marks = u"".join(
            araby.TASHKEEL
        )  # contains [FATHA,DAMMA,KASRA,SUKUN,DAMMATAN,KASRATAN,FATHATAN,SHADDA])
        # used to tokenize arabic text
        self.token_pat = re.compile(u"([\w%s]+)" % marks, re.UNICODE)
        #used to split text into clauses
        self.Clause_pattern = re.compile(
            u"([\w%s\s]+)" % (u"".join(araby.TASHKEEL), ), re.UNICODE)

        # allow partial vocalization support;
        # the text is analyzed as partially or fully vocalized.
        self.partial_vocalization_support = True

        #word frequency dictionary
        self.wordfreq = wordfreqdictionaryclass.wordfreqDictionary(
            'wordfreq', wordfreqdictionaryclass.wordfreq_DICTIONARY_INDEX)

        # added to avoid duplicated search in the word frequency database
        # used as cache to reduce database access
        # added as a global variable to avoid duplicated searches in multiple calls of analex
        # cache used to avoid duplicates
        self.allowCacheUse = True
        if self.allowCacheUse:
            self.cache = cache.cache()
Code example #48
File: assembly.py Project: jamespthomas/ybd
def assemble(target):
    '''Assemble dependencies and contents recursively until target exists.'''

    if cache.get_cache(target):
        return cache.cache_key(target)

    defs = Definitions()
    this = defs.get(target)

    if this.get('arch') and this['arch'] != app.settings['arch']:
        app.log(target, 'Skipping assembly for', this['arch'])
        return None

    with app.timer(this, 'Starting assembly'):
        sandbox.setup(this)
        for it in this.get('systems', []):
            system = defs.get(it)
            assemble(system)
            for subsystem in this.get('subsystems', []):
                assemble(subsystem)

        dependencies = this.get('build-depends', [])
        random.shuffle(dependencies)
        for it in dependencies:
            dependency = defs.get(it)
            assemble(dependency)
            sandbox.install(this, dependency)

        contents = this.get('contents', [])
        random.shuffle(contents)
        for it in contents:
            component = defs.get(it)
            if component.get('build-mode') != 'bootstrap':
                assemble(component)
                sandbox.install(this, component)

        build(this)
        do_manifest(this)
        cache.cache(this, full_root=this.get('kind', None) == "system")
        sandbox.remove(this)

    return cache.cache_key(this)
Code example #49
File: analex.py Project: ATouhou/mishkal
    def __init__(self, allowTagGuessing=True, allowDisambiguation=True):
        """
		Create Analex instance.
		"""

        self.nounstemmer = stem_noun.nounStemmer()
        # to stem nouns
        self.verbstemmer = stem_verb.verbStemmer()
        # to stem verbs
        self.unknownstemmer = stem_unknown.unknownStemmer()
        # to stem unknown
        self.stopwordsstemmer = stem_stopwords.stopWordStemmer()
        # to stem stopwords

        self.allowTagGuessing = allowTagGuessing  # allow guessing tags by naftawayh before analysis
        # if tagging is disabled, the disambiguation is also disabled
        self.allowDisambiguation = allowDisambiguation and allowTagGuessing  # allow disambiguation before analysis
        # enable the last mark (Harakat Al-I3rab)
        self.allowSyntaxLastMark = True
        if self.allowTagGuessing:
            self.tagger = naftawayh.wordtag.WordTagger()
        if self.allowDisambiguation:
            self.disambiguator = disambig.disambiguator()
        self.debug = False
        # to allow to print internal data
        self.limit = 10000
        # limit words in the text
        self.wordcounter = 0
        # the words contain arabic letters and harakat.
        # the unicode considers arabic harakats as marks not letters,
        # then we add harakat to the regular expression to tokenize
        marks = u"".join(araby.TASHKEEL)  # contains [FATHA,DAMMA,KASRA,SUKUN,DAMMATAN,KASRATAN,FATHATAN,SHADDA])
        # used to tokenize arabic text
        self.token_pat = re.compile(u"([\w%s]+)" % marks, re.UNICODE)
        # used to split text into clauses
        self.Clause_pattern = re.compile(u"([\w%s\s]+)" % (u"".join(araby.TASHKEEL),), re.UNICODE)

        # allow partial vocalization support;
        # the text is analyzed as partially or fully vocalized.
        self.partial_vocalization_support = True

        # word frequency dictionary
        self.wordfreq = wordfreqdictionaryclass.wordfreqDictionary(
            "wordfreq", wordfreqdictionaryclass.wordfreq_DICTIONARY_INDEX
        )

        # added to avoid duplicated search in the word frequency database
        # used as cache to reduce database access
        # added as a global variable to avoid duplicated searches in multiple calls of analex
        # cache used to avoid duplicates
        self.allowCacheUse = True
        if self.allowCacheUse:
            self.cache = cache.cache()
Code example #50
File: trakt.py Project: abisiaux/scrapyard
def movie(trakt_slug, people_needed=False):
    cache_key                    = 'movie-{0}'.format(trakt_slug)
    cache_init_func              = functools.partial(__movie, trakt_slug=trakt_slug)
    cache_init_exception_handler = network.http_get_init_exception_handler
    cache_init_failure_handler   = network.http_get_init_failure_handler
    cache_update_func            = functools.partial(__movie, trakt_slug=trakt_slug, timeout=network.TIMEOUT_CONNECT)
    cache_data_expiration        = cache.WEEK

    result = cache.cache(cache_key, cache_init_func, cache_init_exception_handler, cache_init_failure_handler, cache_update_func, cache_data_expiration)

    if people_needed:
        cache_key                    = 'movie-{0}-people'.format(trakt_slug)
        cache_init_func              = functools.partial(__movie_people, trakt_slug=trakt_slug)
        cache_init_exception_handler = network.http_get_init_exception_handler
        cache_init_failure_handler   = network.http_get_init_failure_handler
        cache_update_func            = functools.partial(__movie_people, trakt_slug=trakt_slug, timeout=network.TIMEOUT_CONNECT)
        cache_data_expiration        = cache.WEEK

        result['people'] = cache.cache(cache_key, cache_init_func, cache_init_exception_handler, cache_init_failure_handler, cache_update_func, cache_data_expiration)

    return result
Code example #51
 def __init__(self):
     print('This simulator simulates an L1 cache with a writeback policy. There are 2 modes, 0 and 1, for reading from a file and for receiving inputs from the command line.')
     self.mode = int(input('Enter simulator mode (0 for reading a file and 1 for reading from the command line):'))
     association = int(input('Enter the associativity for the cache to be built:'))
     num_sets = int(input('Enter the number of sets for the cache to be built:'))
     line_size = int(input('Enter the line size for the cache to be built:'))
     self.cache_sim = c.cache(num_sets, association, line_size)
     
     if self.mode:
         print('Entering command line mode . . .')
     else:
         print('Entering read file mode . . .')
Code example #52
File: trakt.py Project: abisiaux/scrapyard
def show(trakt_slug, seasons_needed=False):
    cache_key                    = 'show-{0}'.format(trakt_slug)
    cache_init_func              = functools.partial(__show, trakt_slug=trakt_slug)
    cache_init_exception_handler = network.http_get_init_exception_handler
    cache_init_failure_handler   = network.http_get_init_failure_handler
    cache_update_func            = functools.partial(__show, trakt_slug=trakt_slug, timeout=network.TIMEOUT_CONNECT)
    cache_data_expiration        = cache.DAY

    result = cache.cache(cache_key, cache_init_func, cache_init_exception_handler, cache_init_failure_handler, cache_update_func, cache_data_expiration)

    if seasons_needed:
        cache_key                    = 'show-{0}-seasons'.format(trakt_slug)
        cache_init_func              = functools.partial(__show_seasons, trakt_slug=trakt_slug, show_info=result)
        cache_init_exception_handler = network.http_get_init_exception_handler
        cache_init_failure_handler   = network.http_get_init_failure_handler
        cache_update_func            = functools.partial(__show_seasons, trakt_slug=trakt_slug, show_info=result, timeout=network.TIMEOUT_CONNECT)
        cache_data_expiration        = cache.DAY

        result['seasons'] = cache.cache(cache_key, cache_init_func, cache_init_exception_handler, cache_init_failure_handler, cache_update_func, cache_data_expiration)

    return result
Code example #53
File: vgg16.py Project: JadeBlue96/Machine-Learning
def transfer_values_cache(cache_path, model, images=None, image_paths=None):

    # Helper-function for processing the images if the cache-file does not exist.
    def fn():
        return process_images(fn=model.transfer_values,
                              images=images,
                              image_paths=image_paths)

    # Read the transfer-values from a cache-file, or calculate them if the file does not exist.
    transfer_values = cache(cache_path=cache_path, fn=fn)

    return transfer_values
Code example #54
def tastekid_lookup(title, check_cache=True, load_rec_content=False, use_key=True):
    if check_cache and cache.in_cache(title):
        ret = cache.retrieve_valid(title)
        tkterms = config.tk_terms.keys()
        ret = {k: v for k, v in ret.items() if k in tkterms}
        if ((not load_rec_content) or ('suggestions' in ret)) and ret.keys() == tkterms:
            return ret
    tkjson = tastekid_json(title, use_key)
    if 'Error' in tkjson:
        print 'Yarr there be an error fetching tk; trying sneakily'
        try:
            tkjson = tastekid_json(title, False)['Similar']
        except Exception:
            print 'Yarr there be another bloody error; grabbing everything from cache'
            ret = cache.retrieve_cached(title) #ignoring cache limits
            if ret is not None and 'info' in ret:
                ret['info']['suggestions'] = ret['suggestions'] if 'suggestions' in ret else None
                ret = ret['info']
                print 'check ret:', ret
                for k in ret.keys():
                    if k not in config.tk_terms.keys():
                        del ret[k]
                for k in config.tk_terms.keys():
                    if k not in ret:
                        ret[k] = None
                return ret
            return None
    else:
        tkjson = tkjson['Similar']
    item = extract_terms(tkjson['Info'][0], config.tk_terms) #actual movie
    suggestions = [extract_terms(tk, config.tk_terms) for tk in tkjson['Results']]
    rec_titles = [r['title'] for r in suggestions]
    tk = {'title':item['title'], 'suggestions':rec_titles}
    cache.upsert_properties(item)
    cache.cache(tk)
    for r in suggestions:
        cache.upsert_properties(r)
    for k in config.tk_terms:
        tk[k] = item[k]
    return tk
Code example #55
def load_cache():
    print ("lobbyist")
    cache_lobbist = cache.cache("fec_lobbist", lobbyist)
    print ("lobbyist bundle")
    cache_lobbist_bundle = cache.cache("fec_lobbist_bundle", lobbyist_bundle)
    print ("expenses")
    cache_expenses = cache.cache("fec_expenses", expenses)

    print ("candidate_disbursement")
    #    cache_can_disburse = cache.cache(
    #        "fec_all_candidate_disbursement",
    #        all_candidate_disbursement)

    print ("Committee Summary")
    cache_committee_summary = cache.cache("CommitteeSummary", committee_summary)

    print ("CampaignAndCommitteeSummary")
    cache_camp_comm_sum = cache.cache(
        "CampaignAndCommitteeSummary",
        campaign_and_committee_summary)

    print ("CandidateSummary")
    cache_candidate_summary = cache.cache("CandidateSummary", candidate_summary)
Code example #56
File: trakt.py Project: abisiaux/scrapyard
def shows_trending(page):
    if page < 1 or page > 10:
        raise exceptions.HTTPError(404)

    cache_key                    = 'shows-trending-{0}'.format(page)
    cache_init_func              = functools.partial(__shows_list_page, page='/shows/trending', page_index=page)
    cache_init_exception_handler = network.http_get_init_exception_handler
    cache_init_failure_handler   = network.http_get_init_failure_handler
    cache_update_func            = functools.partial(__shows_list_page, page='/shows/trending', page_index=page, timeout=network.TIMEOUT_CONNECT)
    cache_data_expiration        = cache.HOUR
    cache_expiration             = None

    return cache.cache(cache_key, cache_init_func, cache_init_exception_handler, cache_init_failure_handler, cache_update_func, cache_data_expiration, cache_expiration)
Code example #57
File: trakt.py Project: abisiaux/scrapyard
def movies_popular(page):
    if page < 1 or page > 10:
        raise exceptions.HTTPError(404)

    cache_key                    = 'movies-popular-{0}'.format(page)
    cache_init_func              = functools.partial(__movies_list_page, page='/movies/popular', page_index=page)
    cache_init_exception_handler = network.http_get_init_exception_handler
    cache_init_failure_handler   = network.http_get_init_failure_handler
    cache_update_func            = functools.partial(__movies_list_page, page='/movies/popular', page_index=page, timeout=network.TIMEOUT_CONNECT)
    cache_data_expiration        = cache.DAY
    cache_expiration             = None

    return cache.cache(cache_key, cache_init_func, cache_init_exception_handler, cache_init_failure_handler, cache_update_func, cache_data_expiration, cache_expiration)
Code example #58
File: Spider.py Project: sandin/tvspider
 def findNewEd2k(self, url):
     '''
     find and cache all ed2k links on a page, but only return new links
     '''
     links = self.findEd2k(url)
     print 'found %i ed2k links' % len(links)
     self.ed2k.extend(links)
     cache_id = hash(url)
     if cache.has_cache(cache_id):
         cacheList = cache.load(cache_id)
          if cacheList == self.ed2k:
              print 'nothing changed. ' + url
          else:
              print 'you have new links ' + url
             newLinks = zip(*self.ed2k)[0]
             oldLinks = zip(*cacheList)[0]
              diff = list(set(newLinks).difference(set(oldLinks)))  # lists difference
             for link in diff:
                 print link
                 pyperclip.copy(link) # TODO
     else:
         print 'just cache the links ' + url
     cache.cache(self.ed2k, cache_id)
Code example #59
def shows_search(query):
    cache_key = 'shows-search-{0}'.format(query)
    cache_init_func = functools.partial(__shows_search, query=query)
    cache_init_exception_handler = network.http_get_init_exception_handler
    cache_init_failure_handler = network.http_get_init_failure_handler
    cache_update_func = functools.partial(__shows_search,
                                          query=query,
                                          timeout=network.TIMEOUT_CONNECT)
    cache_data_expiration = cache.HOUR

    return cache.cache(cache_key, cache_init_func,
                       cache_init_exception_handler,
                       cache_init_failure_handler, cache_update_func,
                       cache_data_expiration)