Example No. 1

from learning_to_learn.environment import Environment
from learning_to_learn.pupils.lstm_for_meta import Lstm, LstmFastBatchGenerator as BatchGenerator
from learning_to_learn.useful_functions import create_vocabulary, compose_hp_confs, get_num_exps_and_res_files, \
    get_optimizer_evaluation_results, get_best, print_hps, get_hp_names_from_conf_file

from learning_to_learn.optimizers.l2l import L2L
import os
import sys

ROOT_HEIGHT = 2  # depth of this script below the repository root; assumed value, defined elsewhere in the original

parameter_set_file_name = sys.argv[1]


base = parameter_set_file_name.split('.')[0]
save_path = base + '/evaluation'
confs, _ = compose_hp_confs(parameter_set_file_name, save_path, chop_last_experiment=False)
confs.reverse()  # start with small configs
print("confs:", confs)

abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
dataset_path = os.path.join(*(['..']*ROOT_HEIGHT + ['datasets', 'text8.txt']))
with open(dataset_path, 'r') as f:
    text = f.read()

valid_size = 2000
test_size = 100000
test_text = text[:test_size]
valid_text = text[test_size:valid_size+test_size]
train_text = text[valid_size+test_size:]
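The three slices above partition text8 into test, validation, and train segments with no overlap. A minimal stdlib sketch of the same split (the split_corpus helper is illustrative, not part of learning_to_learn), with assertions that make the disjoint-and-exhaustive property explicit:

def split_corpus(text, test_size, valid_size):
    # Same order as above: test first, then validation, then the rest as train.
    test = text[:test_size]
    valid = text[test_size:test_size + valid_size]
    train = text[test_size + valid_size:]
    assert test + valid + train == text  # disjoint and exhaustive
    return test, valid, train

test_text, valid_text, train_text = split_corpus('abcdefgh' * 1000, 100, 20)
assert (len(test_text), len(valid_text)) == (100, 20)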
Example No. 2
from learning_to_learn.pupils.lstm_for_meta import Lstm, LstmFastBatchGenerator as BatchGenerator
from learning_to_learn.useful_functions import create_vocabulary, compose_hp_confs, get_combs_and_num_exps

from learning_to_learn.optimizers.res_net_opt import ResNet4Lstm

import os
import sys

pretrain_step = sys.argv[1]
parameter_set_file_name = sys.argv[2]
# if len(sys.argv) > 3:
#     initial_experiment_counter_value = int(sys.argv[3])
# else:
#     initial_experiment_counter_value = 0
eval_dir = 'run1/evaluation'
confs, _ = compose_hp_confs(parameter_set_file_name,
                            eval_dir,
                            chop_last_experiment=True)
confs.reverse()  # start with small configs
print("(test2/run)confs:", confs)

save_path = os.path.join('.'.join(parameter_set_file_name.split('.')[:-1]),
                         'evaluation')

abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
with open('../../../datasets/text8.txt', 'r') as f:
    text = f.read()

valid_size = 500
valid_text = text[:valid_size]
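save_path above strips the config file's extension by splitting the whole name on dots, which misbehaves when a directory in the path contains a dot. A small sketch (helper name hypothetical) of the same derivation using os.path.splitext, which only touches the final extension:

import os

def config_to_save_path(config_file, subdir='evaluation'):
    # os.path.splitext removes only the final extension and ignores dots
    # that appear inside directory names.
    base, _ = os.path.splitext(config_file)
    return os.path.join(base, subdir)

assert config_to_save_path('params.conf') == os.path.join('params', 'evaluation')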
Example No. 3
import os
import sys

import helmo.util.dataset
import helmo.util.path_help
from learning_to_learn.useful_functions import compose_hp_confs  # import path assumed from the sibling snippets

dataset_name = 'valid'

parameter_set_file_name = sys.argv[1]
if len(sys.argv) > 2:
    # bool('False') is True, so parse the flag value explicitly.
    chop_last_experiment = sys.argv[2].lower() in ('1', 'true', 'yes')
else:
    chop_last_experiment = False
conf_name = os.path.join(*parameter_set_file_name.split('.')[:-1])
results_dir = helmo.util.path_help.move_path_postfix_within_repo(
    path_to_smth_in_separator=__file__)
results_dir = os.path.split(results_dir)[0]
save_path = os.path.join(results_dir, conf_name)
results_file_name = os.path.join(save_path, dataset_name + '.txt')
confs, _ = compose_hp_confs(parameter_set_file_name,
                            results_file_name,
                            chop_last_experiment=chop_last_experiment,
                            model='pupil')
confs.reverse()  # start with small configs
print("confs:", confs)

dataset_file_name = 'enwiki1G.txt'
text = helmo.util.dataset.get_text(dataset_file_name)

test_size, valid_size = int(6.4e6), int(6.4e5)
train_size = len(text) - test_size - valid_size
test_text, valid_text, train_text = helmo.util.dataset.split_text(
    text, test_size, valid_size, train_size)

voc_file_name = 'enwiki1G_voc.txt'
vocabulary, vocabulary_size = helmo.util.dataset.get_vocab(voc_file_name, text)
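helmo.util.dataset.get_vocab presumably returns a character vocabulary and its size, caching it under voc_file_name. A stdlib sketch of that behavior (an assumption about the helper, not its actual source):

import os

def build_char_vocab(text, cache_path=None):
    # Collect the sorted set of characters; optionally cache to a file so
    # repeated runs agree on character ordering.
    if cache_path is not None and os.path.exists(cache_path):
        with open(cache_path, 'r') as f:
            vocabulary = list(f.read())
    else:
        vocabulary = sorted(set(text))
        if cache_path is not None:
            with open(cache_path, 'w') as f:
                f.write(''.join(vocabulary))
    return vocabulary, len(vocabulary)

vocab, vocab_size = build_char_vocab('hello world')
assert vocab_size == 8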
Example No. 4
from learning_to_learn.environment import Environment
from learning_to_learn.pupils.mlp_for_meta import MlpForMeta as Mlp
from learning_to_learn.image_batch_gens import CifarBatchGenerator
from learning_to_learn.useful_functions import compose_hp_confs

import os
import sys

ROOT_HEIGHT = 2  # depth below the repository root; assumed value, defined elsewhere in the original script

parameter_set_file_name = sys.argv[1]
if len(sys.argv) > 2:
    # bool('False') is True, so parse the flag value explicitly.
    chop_last_experiment = sys.argv[2].lower() in ('1', 'true', 'yes')
else:
    chop_last_experiment = False
save_path = os.path.join(parameter_set_file_name.split('.')[0], 'evaluation')
confs, _ = compose_hp_confs(
    parameter_set_file_name,
    os.path.join(save_path, 'valid.txt'),
    chop_last_experiment=chop_last_experiment,
    model='pupil'
)
confs.reverse()  # start with small configs
print("confs:", confs)

abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
data_dir = os.path.join(*(['..']*ROOT_HEIGHT + ['datasets', 'mnist']))

env = Environment(Mlp, CifarBatchGenerator)

add_metrics = ['bpc', 'perplexity', 'accuracy']
train_add_feed = [
    {'placeholder': 'dropout', 'value': .9},
]
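train_add_feed pairs placeholder names with the values to feed during training. A minimal sketch (function name hypothetical, not the Environment API) of collapsing such a list into a plain name-to-value mapping:

def as_feed_dict(add_feed):
    # One entry per placeholder: {'placeholder': name, 'value': v} -> {name: v}.
    return {item['placeholder']: item['value'] for item in add_feed}

assert as_feed_dict([{'placeholder': 'dropout', 'value': .9}]) == {'dropout': .9}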
Example No. 5
#     helmo.util.path_help.move_path_postfix_within_repo(
#         path_to_smth_in_separator=save_path_relative_to_expres,
#         separator="experiments",
#         new_prefix_within_repo="expres",
#     ),
#     os.path.split(save_path_relative_to_expres)[-1]
# )
# # print(results_dir)
# save_path = results_dir

# `args` and `config_path` are defined earlier in the original script
# (argument parsing elided from this excerpt).
dir_with_confs, results_directory_rel_to_repo_root = \
    ('tests', 'testres') if args.test else ('experiments', 'expres')
save_path = helmo.util.path_help.get_save_path_from_config_path(
    config_path, dir_with_confs, results_directory_rel_to_repo_root)
results_file_name = os.path.join(save_path, 'test.txt')
confs, _ = compose_hp_confs(
    config_path, results_file_name, chop_last_experiment=False, model='pupil')
confs.reverse()  # start with small configs
print("confs:", confs)

# `config` (the parsed configuration), `Net`, `BatchGenerator`, and
# get_positions_in_vocabulary are defined/imported earlier in the original script.
text = helmo.util.dataset.get_text(config['dataset']['path'])
test_size = int(config['dataset']['test_size'])
valid_size = int(config['dataset']['valid_size'])
train_size = len(text) - test_size - valid_size
test_text, valid_text, train_text = helmo.util.dataset.split_text(text, test_size, valid_size, train_size)

vocabulary, vocabulary_size = helmo.util.dataset.get_vocab(config['dataset']['vocab_path'], text)

env = Environment(Net, BatchGenerator, vocabulary=vocabulary)

cpiv = get_positions_in_vocabulary(vocabulary)
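get_positions_in_vocabulary presumably maps each character to its index so that characters can be encoded numerically; a stdlib sketch of that mapping (an assumption about the helper's behavior, not its source):

def positions_in_vocabulary(vocabulary):
    # Invert the vocabulary list: character -> index.
    return {character: index for index, character in enumerate(vocabulary)}

cpiv = positions_in_vocabulary([' ', 'a', 'b', 'c'])
assert cpiv['c'] == 3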