def train_all_mixrule_softplus(seed=0, root_dir='mixrule_softplus'):
    """Training of all tasks."""
    model_dir = os.path.join(DATAPATH, root_dir, str(seed))
    hp = {'activation': 'softplus',
          'w_rec_init': 'diag',
          'use_separate_input': True,
          'mix_rule': True}
    rule_prob_map = {'contextdm1': 5, 'contextdm2': 5}
    train.train(model_dir, hp=hp, ruleset='all',
                rule_prob_map=rule_prob_map, seed=seed)

    # Analyses
    variance.compute_variance(model_dir)
    log = tools.load_log(model_dir)
    analysis = clustering.Analysis(model_dir, 'rule')
    log['n_cluster'] = analysis.n_cluster
    tools.save_log(log)

    setups = [1, 2, 3]
    for setup in setups:
        taskset.compute_taskspace(model_dir, setup,
                                  restore=False,
                                  representation='rate')
        taskset.compute_replacerule_performance(model_dir, setup, False)
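# Usage sketch (an assumption, not part of the original experiment list):
# each training function here writes to its own DATAPATH/<root_dir>/<seed>
# directory, so independent seeds can simply be launched in a loop, e.g.:
#
#     for seed in range(5):
#         train_all_mixrule_softplus(seed=seed)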
def train_all_tanhgru(seed=0, model_dir='tanhgru'):
    """Training of all tasks with Tanh GRUs."""
    model_dir = os.path.join(DATAPATH, model_dir, str(seed))
    hp = {'activation': 'tanh', 'rnn_type': 'LeakyGRU'}
    rule_prob_map = {'contextdm1': 5, 'contextdm2': 5}
    train.train(model_dir, hp=hp, ruleset='all',
                rule_prob_map=rule_prob_map, seed=seed)

    # Analyses
    variance.compute_variance(model_dir)
    log = tools.load_log(model_dir)
    analysis = clustering.Analysis(model_dir, 'rule')
    log['n_cluster'] = analysis.n_cluster
    tools.save_log(log)
    data_analysis.compute_var_all(model_dir)

    setups = [1, 2, 3]
    for setup in setups:
        taskset.compute_taskspace(model_dir, setup,
                                  restore=False,
                                  representation='rate')
        taskset.compute_replacerule_performance(model_dir, setup, False)
def mante_tanh(seed=0, model_dir='mante_tanh'):
    """Training of only the Mante task."""
    hp = {'activation': 'tanh', 'target_perf': 0.9}
    model_dir = os.path.join(DATAPATH, model_dir, str(seed))
    train.train(model_dir, hp=hp, ruleset='mante', seed=seed)

    # Analyses
    variance.compute_variance(model_dir)
    log = tools.load_log(model_dir)
    analysis = clustering.Analysis(model_dir, 'rule')
    log['n_cluster'] = analysis.n_cluster
    tools.save_log(log)
    data_analysis.compute_var_all(model_dir)
def train_vary_hp(i):
    """Vary the hyperparameters.

    This experiment loops over a set of hyperparameters.

    Args:
        i: int, the index into the hyperparameter grid
    """
    # Ranges of hyperparameters to loop over
    hp_ranges = OrderedDict()
    # hp_ranges['activation'] = ['softplus', 'relu', 'tanh', 'retanh']
    # hp_ranges['rnn_type'] = ['LeakyRNN', 'LeakyGRU']
    # hp_ranges['w_rec_init'] = ['diag', 'randortho']
    hp_ranges['activation'] = ['softplus']
    hp_ranges['rnn_type'] = ['LeakyRNN']
    hp_ranges['w_rec_init'] = ['randortho']
    hp_ranges['l1_h'] = [0, 1e-9, 1e-8, 1e-7, 1e-6]  # TODO(gryang): Change this?
    hp_ranges['l2_h'] = [0]
    hp_ranges['l1_weight'] = [0, 1e-7, 1e-6, 1e-5]
    # TODO(gryang): add the level of overtraining

    # Unravel the input index into one index per hyperparameter
    keys = list(hp_ranges.keys())
    dims = [len(hp_ranges[k]) for k in keys]
    n_max = np.prod(dims)
    indices = np.unravel_index(i % n_max, dims)

    # Set up the new hyperparameters
    hp = dict()
    for key, index in zip(keys, indices):
        hp[key] = hp_ranges[key][index]

    model_dir = os.path.join(DATAPATH, 'varyhp_reg2', str(i))
    rule_prob_map = {'contextdm1': 5, 'contextdm2': 5}
    train.train(model_dir, hp, ruleset='all',
                rule_prob_map=rule_prob_map, seed=i // n_max)

    # Analyses
    variance.compute_variance(model_dir)
    log = tools.load_log(model_dir)
    analysis = clustering.Analysis(model_dir, 'rule')
    log['n_cluster'] = analysis.n_cluster
    tools.save_log(log)
    data_analysis.compute_var_all(model_dir)
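# A minimal sketch (not part of the original experiments) of how the flat job
# index in `train_vary_hp` maps to one hyperparameter combination plus a seed.
# `demo_index_to_hp` is a hypothetical helper used only for illustration; it
# reuses the same OrderedDict/np imports as the functions above.
def demo_index_to_hp(i):
    hp_ranges = OrderedDict()
    hp_ranges['activation'] = ['softplus']
    hp_ranges['rnn_type'] = ['LeakyRNN']
    hp_ranges['w_rec_init'] = ['randortho']
    hp_ranges['l1_h'] = [0, 1e-9, 1e-8, 1e-7, 1e-6]
    hp_ranges['l2_h'] = [0]
    hp_ranges['l1_weight'] = [0, 1e-7, 1e-6, 1e-5]

    keys = list(hp_ranges.keys())
    dims = [len(hp_ranges[k]) for k in keys]
    n_max = np.prod(dims)  # 1 * 1 * 1 * 5 * 1 * 4 = 20 combinations
    indices = np.unravel_index(i % n_max, dims)
    seed = i // n_max  # indices past one full grid repeat it with a new seed
    return {k: hp_ranges[k][idx] for k, idx in zip(keys, indices)}, seed

# For example, i = 23 maps to grid position 23 % 20 == 3 (l1_h = 0,
# l1_weight = 1e-5) trained with seed 23 // 20 == 1.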
def train_all_analysis(seed=0, root_dir='train_all'):
    """Analyses of a network trained on all tasks."""
    model_dir = os.path.join(DATAPATH, root_dir, str(seed))

    # Analyses
    variance.compute_variance(model_dir)
    variance.compute_variance(model_dir, random_rotation=True)
    log = tools.load_log(model_dir)
    analysis = clustering.Analysis(model_dir, 'rule')
    log['n_cluster'] = analysis.n_cluster
    tools.save_log(log)
    data_analysis.compute_var_all(model_dir)

    for rule in ['dm1', 'contextdm1', 'multidm']:
        performance.compute_choicefamily_varytime(model_dir, rule)

    setups = [1, 2, 3]
    for setup in setups:
        taskset.compute_taskspace(model_dir, setup,
                                  restore=False,
                                  representation='rate')
        taskset.compute_replacerule_performance(model_dir, setup, False)
def _base_vary_hp_mante(i, hp_ranges, base_name):
    """Vary hyperparameters for the Mante task."""
    # Unravel the input index into one index per hyperparameter
    keys = list(hp_ranges.keys())
    dims = [len(hp_ranges[k]) for k in keys]
    n_max = np.prod(dims)
    indices = np.unravel_index(i % n_max, dims)

    # Set up the new hyperparameters
    hp = dict()
    for key, index in zip(keys, indices):
        hp[key] = hp_ranges[key][index]

    model_dir = os.path.join(DATAPATH, base_name, str(i))
    train.train(model_dir, hp, ruleset='mante',
                max_steps=1e7, seed=i // n_max)

    # Analyses
    variance.compute_variance(model_dir)
    log = tools.load_log(model_dir)
    analysis = clustering.Analysis(model_dir, 'rule')
    log['n_cluster'] = analysis.n_cluster
    tools.save_log(log)
    data_analysis.compute_var_all(model_dir)
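# A hypothetical wrapper (not one of the original experiments) illustrating
# how a concrete sweep would call `_base_vary_hp_mante`. The hyperparameter
# ranges and the directory name 'vary_l2init_mante_demo' are illustrative
# assumptions only.
def vary_l2init_mante_demo(i):
    """Demo: vary L2 weight-anchor regularization on the Mante task."""
    hp_ranges = OrderedDict()
    hp_ranges['activation'] = ['softplus', 'tanh']
    hp_ranges['l2_weight_init'] = [0, 1e-5, 1e-4, 1e-3]
    _base_vary_hp_mante(i, hp_ranges, base_name='vary_l2init_mante_demo')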
mpl.rcParams.update({'font.size': 7})

FIGPATH = os.path.join(os.getcwd(), 'figure')

HP_NAME = {'activation': 'Activation Fun.',
           'rnn_type': 'Network type',
           'w_rec_init': 'Initialization',
           'l1_h': 'L1 rate',
           'l1_weight': 'L1 weight',
           'l2_weight_init': 'L2 weight anchor',
           'target_perf': 'Target perf.'}

# maddy added: check tanh fig 4
# root_dir = './data/debug/8'  # 0, 33 './data/train_all'
"""
variance.compute_variance(root_dir)
variance.plot_hist_varprop_selection(root_dir)
variance.plot_hist_varprop_all(root_dir)
analysis = clustering.Analysis(root_dir, 'rule')
analysis.plot_variance()
"""
"""
standard_analysis.easy_connectivity_plot(root_dir)
rule = 'contextdm1'
standard_analysis.easy_activity_plot(root_dir, rule)
print("easy_connectivity_plot" + root_dir)
"""


def compute_n_cluster(model_dirs):
    for model_dir in model_dirs: