Example #1
def completedefault(self, text, line, start, end):
    # Nothing to complete once the line already holds two or more words.
    if re.match(r'([a-z-]+ +){2,}', line):
        return []
    # Candidate names come from both the watch log and the completion cache.
    lists = set(utils.read_log().keys()).union(
        set(utils.read_cache(complete=True)))
    match = filter(lambda t: t.startswith(text), lists)
    return utils.completion_list(match)
Example #2
def complete_url(self, text, line, *ignored):
    # Known anime names from the watch log and the completion cache.
    lists = set(utils.read_log().keys()).union(
        set(utils.read_cache(complete=True)))
    # Map each name to its episode URL before prefix-matching.
    urls = map(
        lambda name: commands.anime_source_module.get_episode_url(
            name, ''), lists)
    match = filter(lambda t: t.startswith(text), urls)
    return utils.completion_list(match)
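Both completers follow the hook signature of Python's standard cmd.Cmd shell (completedefault runs when no command-specific completer matches). A minimal, self-contained sketch of the pattern; the hard-coded name list is a stand-in for the utils.read_log / utils.read_cache lookup above:

import cmd


class AnimeShell(cmd.Cmd):
    prompt = '(anime) '

    def do_watch(self, arg):
        """Pretend to play the requested anime."""
        print('watching', arg)

    def completedefault(self, text, line, start, end):
        # Stand-in for the log/cache lookup in the examples above.
        names = ['one-piece', 'one-punch-man', 'naruto']
        return [n for n in names if n.startswith(text)]


if __name__ == '__main__':
    AnimeShell().cmdloop()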
Example #3
def read_args(args, episodes=True, verbose=True):
    """Resolve the anime name, and optionally an episode range, from CLI args."""
    if len(args) == 0:
        name = utils.read_cache()
    elif len(args) == 1 and args[0].isnumeric():
        name = utils.read_cache(int(args[0]))
        if verbose:
            outputs.prompt_val("Name", name)
    elif "/" in args[0]:
        name = args[0].strip("/").split("/")[-1]
    else:
        name = anime_source_module.process_anime_name(args[0].strip('"'))
        if not anime_source_module.verify_anime_exists(name):
            outputs.prompt_val("Anime with the name doesn't exist", args[0],
                               "error")
            raise SystemExit

    if not name:
        outputs.error_info("Numbers choice invalid, or invalid context.")
        raise SystemExit

    if len(args) <= 1:
        if episodes:
            if verbose:
                outputs.warning_info("Episodes range not given defaulting to all")
            available_rng = anime_source_module.get_episodes_range(
                anime_source_module.get_anime_url(name))
            if verbose:
                outputs.prompt_val("Available episodes", available_rng)
            eps = utils.extract_range(available_rng)
        else:
            eps = None
    elif len(args) == 2:
        eps = utils.extract_range(args[1])
    else:
        outputs.error_info("Too many arguments.\n")
        outputs.normal_info(__doc__)
        raise SystemExit
    return name, eps
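A hedged usage sketch of read_args as a command entry point; the sys.argv slicing and the download loop are illustrative assumptions, not part of the original project:

import sys

# e.g. invoked as: anime download one-piece 1:5
name, eps = read_args(sys.argv[2:])
for ep in eps:
    print('would fetch episode', ep, 'of', name)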
Example #4
def GET(self):
    params = web.input(reflash='true')
    res = {'actual_on': None, 'tests': []}

    if params.reflash == 'true':
        # Fresh run: compute the results and cache them for later requests.
        results = fit.check_results(team=params.team)
        utils.write_cache(results)
        res['tests'] = results
    elif params.reflash == 'false' and utils.has_cache():
        # Serve the cached results instead of re-running the checks.
        res['tests'] = json.loads(utils.read_cache())

    res['actual_on'] = utils.cache_time()
    return json.dumps(res)
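The handler leans on four cache helpers from utils whose bodies are not shown. A minimal file-based sketch of what they might look like; the JSON round-trip and the cache path are assumptions:

import json
import os
import time

CACHE_FILE = '/tmp/results_cache.json'  # assumed location


def write_cache(data):
    # Persist results as JSON so read_cache can hand them back verbatim.
    with open(CACHE_FILE, 'w') as f:
        json.dump(data, f)


def read_cache():
    with open(CACHE_FILE) as f:
        return f.read()


def has_cache():
    return os.path.exists(CACHE_FILE)


def cache_time():
    # Modification time of the cache file as a human-readable stamp.
    return time.ctime(os.path.getmtime(CACHE_FILE))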
Example #5
def read_args(args, episodes=True, verbose=True):
    """As above, but returns just the name when episodes is False."""
    if len(args) == 0:
        name = utils.read_cache()
    elif args[0].isnumeric():
        name = utils.read_cache(int(args[0]))
        if verbose:
            outputs.prompt_val('Name', name)
    elif '/' in args[0]:
        name = args[0].strip('/').split('/')[-1]
    else:
        name = gogoanime.process_anime_name(args[0])
        if not gogoanime.verify_anime_exists(name):
            outputs.prompt_val("Anime with the name doesn't exist", args[0],
                               'error')
            raise SystemExit

    if not name:
        outputs.error_info('Numbers choice invalid, or invalid context.')
        raise SystemExit

    if not episodes:
        return name
    if len(args) <= 1:
        if verbose:
            outputs.warning_info('Episodes range not given defaulting to all')
        available_rng = gogoanime.get_episodes_range(
            gogoanime.get_anime_url(name))
        if verbose:
            outputs.prompt_val('Available episodes', available_rng)
        episodes = utils.extract_range(available_rng)
    elif len(args) == 2:
        episodes = utils.extract_range(args[1])
    else:
        outputs.error_info('Too many arguments.\n')
        outputs.normal_info(__doc__)
        raise SystemExit
    return name, episodes
Example #6
def verify_anime_exists(anime_name, verbose=False):
    """Look for the name in the log, then the cache, then the site itself."""
    if utils.read_log(anime_name) is not None:
        if verbose:
            outputs.normal_info(anime_name, 'LOG', reverse=True)
        return True
    elif anime_name in utils.read_cache(complete=True):
        if verbose:
            outputs.normal_info(anime_name, 'CACHE', reverse=True)
        return True
    elif utils.get_soup(get_anime_url(anime_name)) is not None:
        if verbose:
            outputs.normal_info(anime_name, 'SITE', reverse=True)
        return True
    else:
        return False
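Usage is a plain boolean check; the slug below is made up for illustration:

if verify_anime_exists('one-piece', verbose=True):
    print('ready to download')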
Example #7
path_model = 'MODEL'
# path_result and path_cache are defined earlier in the source script
if not os.path.isdir(path_result):
    os.mkdir(path_result)
if not os.path.isdir(path_model):
    os.mkdir(path_model)
if CACHEDATA and not os.path.isdir(path_cache):
    os.mkdir(path_cache)

# load data
print("loading data...")
fname = os.path.join(path_cache, 'BikeNYC_C{}_P{}_T{}.h5'.format(
    len_closeness, len_period, len_trend))
if os.path.exists(fname) and CACHEDATA:
    X_train_all, Y_train_all, X_train, Y_train, \
    X_val, Y_val, X_test, Y_test, mmn, external_dim, \
    timestamp_train_all, timestamp_train, timestamp_val, timestamp_test = read_cache(
        fname, 'preprocessing_nyc.pkl')
    print("load %s successfully" % fname)
else:
    if model_name == 'model3':
        load_data = BikeNYC3d.load_data
    else:
        load_data = BikeNYC.load_data
    X_train_all, Y_train_all, X_train, Y_train, \
    X_val, Y_val, X_test, Y_test, mmn, external_dim, \
    timestamp_train_all, timestamp_train, timestamp_val, timestamp_test = load_data(
        T=T, nb_flow=nb_flow, len_closeness=len_closeness, len_period=len_period, len_trend=len_trend, len_test=len_test,
        len_val=len_val, preprocess_name='preprocessing_nyc.pkl', meta_data=True, datapath=DATAPATH)
    if CACHEDATA:
        cache(fname, X_train_all, Y_train_all, X_train, Y_train, X_val, Y_val, X_test, Y_test,
              external_dim, timestamp_train_all, timestamp_train, timestamp_val, timestamp_test)
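Here read_cache and cache round-trip the preprocessed arrays through an HDF5 file so the expensive load_data call runs only once. A hedged sketch of the pattern with h5py; the key naming and the pickle handling are assumptions inferred from the calls above:

import pickle

import h5py


def cache(fname, *arrays):
    # Store each array under a zero-padded key to keep insertion order.
    with h5py.File(fname, 'w') as f:
        for i, arr in enumerate(arrays):
            f.create_dataset('data_%02d' % i, data=arr)


def read_cache(fname, preprocess_name):
    # The fitted scaler (mmn) is restored from the preprocessing pickle.
    with open(preprocess_name, 'rb') as f:
        mmn = pickle.load(f)
    with h5py.File(fname, 'r') as f:
        arrays = [f[key][()] for key in sorted(f.keys())]
    return arrays, mmn  # the real scripts unpack a fixed 14-element tuple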
Example #8
path_cache = os.path.join(DATAPATH, 'CACHE', cache_folder)  # cache path
if CACHEDATA and not os.path.isdir(path_cache):
    os.mkdir(path_cache)
if not os.path.isdir('results_roma_bergamo'):
    os.mkdir('results_roma_bergamo')

# load data
print("loading data...")
preprocess_name = 'preprocess_rome_2.pkl'
fname = os.path.join(
    path_cache, 'Rome_C{}_P{}_T{}_2.h5'.format(len_closeness, len_period,
                                               len_trend))
if os.path.exists(fname) and CACHEDATA:
    X_train_all, Y_train_all, X_train, Y_train, \
    X_val, Y_val, X_test, Y_test, mmn, external_dim, \
    timestamp_train_all, timestamp_train, timestamp_val, timestamp_test = read_cache(
        fname, preprocess_name)
    print("load %s successfully" % fname)
else:
    X_train_all, Y_train_all, X_train, Y_train, \
    X_val, Y_val, X_test, Y_test, mmn, external_dim, \
    timestamp_train_all, timestamp_train, timestamp_val, timestamp_test = load_data(
        T=T, nb_flow=nb_flow, len_closeness=len_closeness, len_period=len_period, len_trend=len_trend, len_test=len_test,
        len_val=len_val, preprocess_name=preprocess_name, meta_data=True, datapath=DATAPATH)
    if CACHEDATA:
        cache(fname, X_train_all, Y_train_all, X_train, Y_train, X_val, Y_val,
              X_test, Y_test, external_dim, timestamp_train_all,
              timestamp_train, timestamp_val, timestamp_test)

print(external_dim)
print("\n days (test): ", [v[:8] for v in timestamp_test[0::T]])
def taxibj_evaluation():
    # parameters
    DATAPATH = '../data'
    T = 48  # number of time intervals in one day
    CACHEDATA = True  # cache data or NOT

    len_closeness = 4  # length of closeness dependent sequence
    len_period = 0  # length of period dependent sequence
    len_trend = 0  # length of trend dependent sequence

    nb_flow = 2  # there are two types of flows: new-flow and end-flow
    days_test = 4 * 7  # 4 weeks
    len_test = T * days_test
    len_val = 2 * len_test

    map_height, map_width = 32, 32  # grid size

    cache_folder = 'Autoencoder/model3'
    path_cache = os.path.join(DATAPATH, 'CACHE', cache_folder)  # cache path
    if CACHEDATA and not os.path.isdir(path_cache):
        os.mkdir(path_cache)

    # load data
    print("loading data...")
    fname = os.path.join(
        path_cache,
        'TaxiBJ_withMeteo_C{}_P{}_T{}.h5'.format(len_closeness, len_period,
                                                 len_trend))
    if os.path.exists(fname) and CACHEDATA:
        X_train_all, Y_train_all, X_train, Y_train, \
        X_val, Y_val, X_test, Y_test, mmn, external_dim, \
        timestamp_train_all, timestamp_train, timestamp_val, timestamp_test = read_cache(
            fname, 'preprocessing_bj.pkl')
        print("load %s successfully" % fname)
    else:
        X_train_all, Y_train_all, X_train, Y_train, \
        X_val, Y_val, X_test, Y_test, mmn, external_dim, \
        timestamp_train_all, timestamp_train, timestamp_val, timestamp_test = TaxiBJ3d.load_data(
            T=T, nb_flow=nb_flow, len_closeness=len_closeness, len_period=len_period, len_trend=len_trend, len_test=len_test,
            len_val=len_val, preprocess_name='preprocessing_bj.pkl', meta_data=True, meteorol_data=True, holiday_data=True, datapath=DATAPATH)
        if CACHEDATA:
            cache(fname, X_train_all, Y_train_all, X_train, Y_train, X_val,
                  Y_val, X_test, Y_test, external_dim, timestamp_train_all,
                  timestamp_train, timestamp_val, timestamp_test)

    print(external_dim)
    print("\n days (test): ", [v[:8] for v in timestamp_test[0::T]])

    # build model
    model = build_model(len_closeness,
                        len_period,
                        len_trend,
                        nb_flow,
                        map_height,
                        map_width,
                        external_dim=external_dim,
                        encoder_blocks=3,
                        filters=[64, 64, 64, 64, 16],
                        kernel_size=3,
                        num_res=2)

    model_fname = 'model3resunit_doppia_attention.TaxiBJ9.c4.p0.t0.encoderblocks_3.kernel_size_3.lr_0.0007.batchsize_16.best.h5'
    model.load_weights(os.path.join('../best_models', 'model3', model_fname))

    # evaluate and save results
    dict_multi_score = multi_step_2D(model,
                                     X_test,
                                     Y_test,
                                     mmn,
                                     len_closeness,
                                     step=5)

    for i in range(len(dict_multi_score)):
        csv_name = os.path.join('results', f'taxibj_step{i+1}.csv')
        save_to_csv(dict_multi_score[i], csv_name)


def bikenyc_evaluation():
    DATAPATH = '../data'
    T = 24  # number of time intervals in one day
    CACHEDATA = True  # cache data or NOT

    len_closeness = 4  # length of closeness dependent sequence
    len_period = 0  # length of period dependent sequence
    len_trend = 0  # length of trend dependent sequence

    nb_flow = 2
    days_test = 10
    len_test = T * days_test
    len_val = 2 * len_test

    map_height, map_width = 16, 8

    cache_folder = 'Autoencoder/model3'
    path_cache = os.path.join(DATAPATH, 'CACHE', cache_folder)
    if CACHEDATA and not os.path.isdir(path_cache):
        os.mkdir(path_cache)

    # load data
    print("loading data...")
    fname = os.path.join(
        path_cache, 'BikeNYC_C{}_P{}_T{}.h5'.format(len_closeness, len_period,
                                                    len_trend))
    if os.path.exists(fname) and CACHEDATA:
        X_train_all, Y_train_all, X_train, Y_train, \
        X_val, Y_val, X_test, Y_test, mmn, external_dim, \
        timestamp_train_all, timestamp_train, timestamp_val, timestamp_test = read_cache(
            fname, 'preprocessing_bikenyc.pkl')
        print("load %s successfully" % fname)
    else:
        X_train_all, Y_train_all, X_train, Y_train, \
        X_val, Y_val, X_test, Y_test, mmn, external_dim, \
        timestamp_train_all, timestamp_train, timestamp_val, timestamp_test = BikeNYC3d.load_data(
            T=T, nb_flow=nb_flow, len_closeness=len_closeness, len_period=len_period, len_trend=len_trend,
            len_test=len_test,
            len_val=len_val, preprocess_name='preprocessing_bikenyc.pkl', meta_data=True, datapath=DATAPATH)
        if CACHEDATA:
            cache(fname, X_train_all, Y_train_all, X_train, Y_train, X_val,
                  Y_val, X_test, Y_test, external_dim, timestamp_train_all,
                  timestamp_train, timestamp_val, timestamp_test)

    # build model
    model = build_model(len_closeness,
                        len_period,
                        len_trend,
                        nb_flow,
                        map_height,
                        map_width,
                        external_dim=external_dim,
                        encoder_blocks=2,
                        filters=[64, 64, 64, 16],
                        kernel_size=3,
                        num_res=2)

    model_fname = 'model3resunit_doppia_attention.BikeNYC6.c4.p0.t0.encoderblocks_2.kernel_size_3.lr_0.0001.batchsize_16.best2.h5'
    model.load_weights(os.path.join('../best_models', 'model3', model_fname))

    # evaluate and save results
    dict_multi_score = multi_step_2D(model,
                                     X_test,
                                     Y_test,
                                     mmn,
                                     len_closeness,
                                     step=5)

    for i in range(len(dict_multi_score)):
        csv_name = os.path.join('results', f'bikenyc_step{i+1}.csv')
        save_to_csv(dict_multi_score[i], csv_name)
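Both evaluation functions write into a local results directory; a minimal runner, assuming the imports at the top of the original script, could look like:

if __name__ == '__main__':
    os.makedirs('results', exist_ok=True)  # save_to_csv targets this directory
    taxibj_evaluation()
    bikenyc_evaluation()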