def fixed_hps_from_str(string):
    """Parse a bracketed, comma-separated string of fixed hyperparameters.

    `string` is expected to look like "(v1,v2,...)" (the first and last
    characters — the surrounding brackets — are stripped before parsing).
    Each comma-separated token is converted with the project's
    `convert(..., "float")` helper.

    Returns:
        tuple: the converted hyperparameter values; `()` for an empty or
        bracket-only string.
    """
    if not string:
        return ()
    inner = string[1:-1]
    if not inner:
        # "()" / "[]" — nothing between the brackets.
        return ()
    # BUG FIX: the original comprehension iterated `inner` character by
    # character, which mangles any multi-character number (e.g. "0.1");
    # the values are comma-separated, so split on ',' instead.
    return tuple(convert(token, "float") for token in inner.split(','))
from learning_to_learn.useful_functions import remove_empty_strings_from_list, convert

# Script setup: read the experiment configuration file named on the command
# line and prepare an Environment for MLP training on CIFAR.
# NOTE(review): assumes `sys`, `os`, `Environment`, `Mlp` and
# `CifarBatchGenerator` are imported earlier in the file — confirm.
conf_file = sys.argv[1]
# Save under a directory named after the config file (extension dropped);
# presumably the '%s' placeholder is formatted later per run — TODO confirm.
save_path = os.path.join(conf_file.split('.')[0], '%s')

# Resolve all relative paths from the directory containing this script.
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)

with open(conf_file, 'r') as f:
    lines = remove_empty_strings_from_list(f.read().split('\n'))

# Config layout: line 0 — optimizer name, line 1 — number of runs,
# remaining lines — "<hp_name> <value>" pairs of fixed hyperparameters.
opt = lines[0]
num_runs = int(lines[1])
hps = dict()
for line in lines[2:]:
    spl = line.split()
    hps[spl[0]] = float(convert(spl[1], 'float'))

env = Environment(Mlp, CifarBatchGenerator)

# Dropout keep-probability .9 while training; momentum is fed only when the
# config provided one.
train_add_feed = [{'placeholder': 'dropout', 'value': .9}]
if 'momentum' in hps:
    train_add_feed.append({
        'placeholder': 'momentum',
        'value': hps['momentum']
    })
# No dropout at validation time (keep probability 1).
valid_add_feed = [{'placeholder': 'dropout', 'value': 1.}]

add_metrics = ['bpc', 'perplexity', 'accuracy']

VALID_SIZE = 1000
BATCH_SIZE = 32
def convert_sel_line_name(x):
    """Interpret a selection-line token.

    Numeric-looking strings (per the project's `is_float`) are converted via
    `convert`; the literal string 'None' maps to None; anything else is
    returned unchanged.
    """
    if is_float(x):
        return convert(x)
    return None if x == 'None' else x
# Configure a meta-optimizer (learning-to-learn) experiment: read the config
# file, build the grid of varying optimizer hyperparameters, load the text8
# dataset, and construct the Environment.
# NOTE(review): assumes `os`, `conf_file`, `dname`, `ROOT_HEIGHT`, `convert`,
# `create_vocabulary`, `Environment`, `Lstm` and `ResNet4Lstm` are
# defined/imported earlier in the file — confirm.
os.chdir(dname)
with open(conf_file, 'r') as f:
    lines = f.read().split('\n')

# Config layout: line 0 — model name, line 1 — number of training steps,
# line 2 — base value (literal 'None' allowed, otherwise a float),
# line 3 — names of the varying optimizer hyperparameters,
# line 4 — their types, lines 5+ — one line of candidate values per name.
model = lines[0]
steps = int(lines[1])
base = lines[2]
if base == 'None':
    base = None
else:
    base = float(base)
names = lines[3].split()
types = lines[4].split()
optimizer_varying = dict()
for name, type_, line in zip(names, types, lines[5:]):
    # Convert each whitespace-separated candidate value to its declared type.
    optimizer_varying[name] = [convert(v, type_) for v in line.split()]

# text8 lives ROOT_HEIGHT directories above this script.
dataset_path = os.path.join(*(['..'] * ROOT_HEIGHT + ['datasets', 'text8.txt']))
with open(dataset_path, 'r') as f:
    text = f.read()

train_text = text

vocabulary = create_vocabulary(text)
vocabulary_size = len(vocabulary)
print(vocabulary_size)

# NOTE(review): this constructor call continues past the end of the visible
# chunk — remaining keyword arguments are out of view.
env = Environment(
    pupil_class=Lstm,
    meta_optimizer_class=ResNet4Lstm,
# pupil_names_by_ed, dataset_names_by_ed, labels_by_ed, hp_orders_by_ed, num_lines) for eval_dir, ed_lines, ed_fixed_hps, ed_regimes, ed_pupil_names, ed_dataset_names, ed_labels, hp_order, nlines in \ zip( eval_dirs, lines_by_ed, fixed_hp_by_ed, regimes_by_ed, pupil_names_by_ed, dataset_names_by_ed, labels_by_ed, hp_orders_by_ed, num_lines ): [ed_lines, ed_fixed_hps, ed_regimes, ed_pupil_names, ed_dataset_names, ed_labels] = split_strings_by_char( [ed_lines, ed_fixed_hps, ed_regimes, ed_pupil_names, ed_dataset_names, ed_labels], ',' ) [ed_regimes, ed_pupil_names, ed_dataset_names, ed_fixed_hps] = broadcast_many_lists( [ed_regimes, ed_pupil_names, ed_dataset_names, ed_fixed_hps], nlines, ) ed_lines = [convert(x, 'float') for x in ed_lines] ed_fixed_hps = [fixed_hps_from_str(x) for x in ed_fixed_hps] hp_plot_order = hp_order.split(',') changing_hps.append(hp_plot_order[-1]) line_retr_inf = list() if args.model == 'pupil': for line_hp, fixed_hps, dataset_name, label in zip( ed_lines, ed_fixed_hps, ed_dataset_names, ed_labels ): line_retr_inf.append( dict( line_hp=line_hp, fixed_hps=fixed_hps, dataset_name=dataset_name, label=label,