def run_lowd(param_set, train_params, i0, multiprocess_lock=None):
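    """Plot dimensionality and clustering/holdout accuracy over layers for the
    low-dimensional-input task, comparing g_radius 20 vs 250 and trained vs
    untrained networks. Starts from the module-level low_d_params (the
    train_params argument is not used here)."""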
    plots.USE_ERRORBARS = True
    # time.sleep(i0)
    tps = low_d_params.copy()
    # tps['table_path'] = 'output_lowd/output_table.csv'
    subdir_prefix = Path('Win_orth_lowd/')
    for i0, key in enumerate(keys_lowd):
        tps[key] = param_set[i0]
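    # Four variants: g_radius 20 vs 250 (tps_11 / tps_12), each paired with an
    # untrained (num_epochs = 0) counterpart (tps_21 / tps_22).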
    tps_11 = tps.copy()
    tps_11['g_radius'] = 20
    tps_21 = tps_11.copy()
    tps_21['num_epochs'] = 0
    tps_12 = tps.copy()
    tps_12['g_radius'] = 250
    tps_22 = tps_12.copy()
    tps_22['num_epochs'] = 0

    figname = ''.join(key + '_' + str(val) + '_'
                      for key, val in zip(keys_abbrev_lowd, param_set))
    figname = figname[:-1]
    # print(figname)
    # figname = "lr_{}_clust_{}_layers_{}".format(lr, x_cluster, n_lag)
    subdir = subdir_prefix / 'dim_over_layers'
    plots.dim_over_layers([tps_11, tps_12], [tps_21, tps_22],
                          seeds=[0, 1, 2],
                          hue_key='g_radius',
                          style_key='num_epochs',
                          figname=figname + '_g_{}'.format(tps_11['g_radius']),
                          subdir=subdir,
                          multiprocess_lock=multiprocess_lock,
                          style_order=[80, 0],
                          palette=chaos_palette)
    subdir = subdir_prefix / 'clust_holdout_over_layers'
    plots.clust_holdout_over_layers([tps_11, tps_12], [tps_21, tps_22],
                                    seeds=[0, 1, 2],
                                    hue_key='g_radius',
                                    style_key='num_epochs',
                                    figname=figname +
                                    '_g_{}'.format(tps_11['g_radius']),
                                    subdir=subdir,
                                    multiprocess_lock=multiprocess_lock,
                                    style_order=[80, 0],
                                    palette=chaos_palette)
    # subdir = subdir_prefix/'ashok_compression_metric'
    # plots.ashok_compression_metric([tps_12], [tps_21, tps_22], seeds=[0, 1, 2],
    #                                style_key='num_epochs',
    #                                figname=figname + '_g_{}'.format(
    #                                    tps_12['g_radius']), subdir=subdir,
    #                                multiprocess_lock=multiprocess_lock,
    #                                style_order=[80, 0])
    plots.USE_ERRORBARS = False


def run_lowd_chaos(param_set, train_params, i0, multiprocess_lock=None):
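    """Plot dimensionality over layers for the low-dimensional-input task
    across a sweep of g_radius values (20 to 260), before and after
    training."""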
    plots.USE_ERRORBARS = True
    # time.sleep(i0)
    tps = train_params.copy()
    subdir_prefix = Path('Win_orth_lowd_chaos/')
    for i0, key in enumerate(keys_lowd_chaos):
        tps[key] = param_set[i0]

    tp_list_0 = []
    tp_list_1 = []
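    # Sweep g_radius from 20 to 260 in steps of 40; tp_list_1 collects the
    # trained parameter sets and tp_list_0 their untrained (num_epochs = 0)
    # copies.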
    for g in range(20, 261, 40):
        tp = tps.copy()
        tp['g_radius'] = g
        tp_list_1.append(tp)
        tp = tp.copy()
        tp['num_epochs'] = 0
        tp_list_0.append(tp)

    figname = ''.join(key + '_' + str(val) + '_'
                      for key, val in zip(keys_abbrev_lowd_chaos, param_set))
    figname = figname[:-1]
    seeds = list(range(5))
    subdir = subdir_prefix / 'dim_over_layers'
    plots.dim_over_layers(tp_list_0,
                          None,
                          seeds=seeds,
                          hue_key='g_radius',
                          style_key='num_epochs',
                          figname=figname + '_before',
                          subdir=subdir,
                          use_error_bars=True,
                          multiprocess_lock=multiprocess_lock,
                          palette='viridis')
    plots.dim_over_layers(tp_list_1,
                          None,
                          seeds=seeds,
                          hue_key='g_radius',
                          style_key='num_epochs',
                          figname=figname,
                          use_error_bars=True,
                          subdir=subdir,
                          multiprocess_lock=multiprocess_lock,
                          palette='viridis')
    plots.USE_ERRORBARS = False


def run_rnn_noisy_units(param_set, multiprocess_lock=None):
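    """Dimensionality-over-layers plots for the vanilla RNN with noisy units,
    comparing g_radius 20 vs 250 for high-dimensional input, low-dimensional
    input, and low-dimensional input with Win = 'diagonal_first_two'."""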
    subdir_prefix2 = Path('vanilla_rnn')
    tps_high_d = high_d_input_edge_of_chaos_params.copy()
    for i0, key in enumerate(params_rnn_noisy_units_keys):
        tps_high_d[key] = param_set[i0]
    seeds = list(range(5))
    tps_11_high_d = tps_high_d.copy()
    tps_11_high_d['g_radius'] = 20
    tps_12_high_d = tps_high_d.copy()
    tps_12_high_d['g_radius'] = 250
    figname = ''.join(
        key + '_' + str(val) + '_'
        for key, val in zip(params_rnn_noisy_units_keys_abbrev, param_set))
    figname = figname[:-1]
    subdir = subdir_prefix2 / 'dim_noisy_units'
    plots.dim_over_layers([tps_11_high_d, tps_12_high_d],
                          None,
                          seeds,
                          'g_radius',
                          None,
                          figname + '_X_dim_200',
                          subdir=subdir_prefix / subdir,
                          multiprocess_lock=multiprocess_lock,
                          palette=chaos_palette)

    tps_low_d = low_d_params.copy()
    for i0, key in enumerate(params_rnn_noisy_units_keys):
        tps_low_d[key] = param_set[i0]
    tps_11_low_d = tps_low_d.copy()
    tps_11_low_d['g_radius'] = 20
    tps_12_low_d = tps_low_d.copy()
    tps_12_low_d['g_radius'] = 250
    plots.dim_over_layers([tps_11_low_d, tps_12_low_d],
                          None,
                          seeds,
                          'g_radius',
                          None,
                          figname + '_X_dim_2',
                          subdir=subdir_prefix / subdir,
                          multiprocess_lock=multiprocess_lock,
                          palette=chaos_palette)

    tps_low_d_2neurons_11 = tps_low_d.copy()
    tps_low_d_2neurons_11['Win'] = 'diagonal_first_two'
    tps_low_d_2neurons_11['g_radius'] = 20
    tps_low_d_2neurons_12 = tps_low_d.copy()
    tps_low_d_2neurons_12['Win'] = 'diagonal_first_two'
    tps_low_d_2neurons_12['g_radius'] = 250
    plots.dim_over_layers([tps_low_d_2neurons_11, tps_low_d_2neurons_12],
                          None,
                          seeds,
                          'g_radius',
                          None,
                          figname + '_X_dim_2_ident',
                          subdir=subdir_prefix / subdir,
                          multiprocess_lock=multiprocess_lock,
                          palette=chaos_palette)


def run_rnn_high_d_input(param_set, multiprocess_lock=None):
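    """Dimensionality-over-layers plot for the vanilla RNN with
    high-dimensional input, comparing g_radius 20 vs 250."""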
    subdir_prefix2 = Path('vanilla_rnn')
    tps = high_d_input_edge_of_chaos_params.copy()
    for i0, key in enumerate(params_rnn_high_d_keys):
        tps[key] = param_set[i0]
    seeds = list(range(5))
    tps_11 = tps.copy()
    tps_11['g_radius'] = 20
    tps_12 = tps.copy()
    tps_12['g_radius'] = 250
    figname = ''.join(
        key + '_' + str(val) + '_'
        for key, val in zip(params_rnn_high_d_keys_abbrev, param_set))
    figname = figname[:-1]
    subdir = subdir_prefix2 / 'dim_high_d_experiments'
    plots.dim_over_layers([tps_11, tps_12],
                          None,
                          seeds,
                          'g_radius',
                          None,
                          figname,
                          subdir=subdir_prefix / subdir,
                          multiprocess_lock=multiprocess_lock,
                          palette=chaos_palette)


def run_shallow_1(param_set, train_params, i0, multiprocess_lock=None):
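    """Feedforward (n_lag = 0) runs comparing batch_size = 1 with full-batch
    training, with and without trained output weights; generates
    dimensionality, orthogonal-compression, accuracy, and loss plots."""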
    # time.sleep(i0)
    print(multiprocess_lock)
    train_params = train_params.copy()
    for i0, key in enumerate(keys_shallow):
        train_params[key] = param_set[i0]
    train_params['network'] = 'feedforward'
    n_lag = 0
    train_params['n_lag'] = n_lag
    full_batch_size = train_params['num_train_samples_per_epoch']
    tps_11 = train_params.copy()
    tps_11['batch_size'] = 1
    # tps_11['num_epochs'] = 200
    # tps_11['saves_per_epoch'] = 1/20
    tps_11['num_epochs'] = 100
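    # Per-epoch save schedule: two saves per epoch for the first ten epochs,
    # then one save at epochs 11, 21, ..., 91.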
    tps_11['saves_per_epoch'] = [0] * 100
    for k in range(0, 10):
        tps_11['saves_per_epoch'][k] = 2
    for k in range(11, 100, 10):
        tps_11['saves_per_epoch'][k] = 1
    # tps_11['num_epochs'] = 100
    # tps_11['saves_per_epoch'] = 1/10
    if train_params['scheduler'] in ('onecyclelr_4e4', 'onecyclelr'):
        tps_11['learning_patience'] = 20
        tps_11['scheduler_factor'] = 10
        tps_11['patience_before_stopping'] = tps_11['num_epochs']
    tps_12 = train_params.copy()
    tps_12['batch_size'] = full_batch_size
    tps_12['num_epochs'] = 1000
    tps_12['saves_per_epoch'] = 1 / 100
    if train_params['scheduler'] in ('onecyclelr_4e4', 'onecyclelr'):
        tps_12['learning_patience'] = 100
        tps_12['scheduler_factor'] = 10
        tps_12['patience_before_stopping'] = tps_12['num_epochs']
    tps_21 = tps_11.copy()
    tps_21['train_output_weights'] = False
    tps_22 = tps_12.copy()
    tps_22['train_output_weights'] = False
    figname = ''.join(key + '_' + str(val) + '_'
                      for key, val in zip(keys_shallow_abbrev, param_set))
    figname = figname[:-1]

    subdir_prefix2 = Path('{}/'.format(tps_11['network']))
    subdir_suffix = Path('nlag_{}_g_{}_l2_{}/'.format(
        n_lag, tps_11['g_radius'], train_params['l2_regularization']))
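    # Positional arguments shared by the plotting calls below:
    # (param sets, train_output_weights=False comparisons, seeds, hue_key,
    #  style_key, figname).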
    plot_ps = ([tps_11, tps_12], [tps_21, tps_22], [0, 1, 2], 'batch_size',
               'train_output_weights', figname)
    subdir = subdir_prefix2 / 'dim_over_training' / subdir_suffix
    plots.dim_through_training(*plot_ps,
                               subdir=subdir_prefix / subdir,
                               multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2 / 'dim_over_layers' / subdir_suffix
    plots.dim_over_layers(*plot_ps,
                          subdir=subdir_prefix / subdir,
                          multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2 / 'orth_compression_through_training' / subdir_suffix
    plots.orth_compression_through_training(
        *plot_ps,
        subdir=subdir_prefix / subdir,
        multiprocess_lock=multiprocess_lock)
    subdir = (subdir_prefix2 / 'orth_compression_through_training_input_sep'
              / subdir_suffix)
    plots.orth_compression_through_training_input_sep(
        *plot_ps,
        subdir=subdir_prefix / subdir,
        multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2 / 'acc_over_training' / subdir_suffix
    plots.acc_over_training(*plot_ps,
                            subdir=subdir_prefix / subdir,
                            multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2 / 'loss_over_training' / subdir_suffix
    plots.loss_over_training(*plot_ps,
                             subdir=subdir_prefix / subdir,
                             multiprocess_lock=multiprocess_lock)


def run_recurrent_1(param_set, train_params, i0, multiprocess_lock=None):
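    """Recurrent (n_lag = 10) runs comparing batch_size = 1 with full-batch
    training; generates dimensionality, orthogonal-compression, accuracy, and
    loss plots over training and over layers."""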
    # time.sleep(i0)
    print(multiprocess_lock)
    train_params = train_params.copy()
    for i0, key in enumerate(keys_deep):
        train_params[key] = param_set[i0]
    # if train_params['loss'] == 'mse_scalar':
    #     train_params['rerun'] = True
    train_params['n_lag'] = 10
    lr = train_params['learning_rate']
    optimizer = train_params['optimizer']
    n_lag = train_params['n_lag']
    figname = ''.join(key + '_' + str(val) + '_'
                      for key, val in zip(keys_abbrev, param_set))
    figname = figname[:-1]
    full_batch_size = train_params['num_train_samples_per_epoch']
    tps_11 = train_params.copy()
    tps_11['batch_size'] = 1
    tps_11['num_epochs'] = 200
    tps_11['saves_per_epoch'] = 1 / 20
    # tps_11['num_epochs'] = 100
    # tps_11['saves_per_epoch'] = [0]*100
    # for k in range(0, 10):
    #     tps_11['saves_per_epoch'][k] = 2
    # for k in range(11, 100, 10):
    #     tps_11['saves_per_epoch'][k] = 1
    if train_params['scheduler'] in ('onecyclelr_4e4', 'onecyclelr'):
        tps_11['learning_patience'] = 20
        tps_11['scheduler_factor'] = 10
    tps_11['patience_before_stopping'] = tps_11['num_epochs']
    tps_12 = train_params.copy()
    tps_12['batch_size'] = full_batch_size
    tps_12['num_epochs'] = 1000
    tps_12['saves_per_epoch'] = 1 / 100
    if train_params['scheduler'] in ('onecyclelr_4e4', 'onecyclelr'):
        tps_12['learning_patience'] = 20
        tps_12['scheduler_factor'] = 10
        tps_12['patience_before_stopping'] = tps_12['num_epochs']
    # figname = "lr_{}_opt_{}_l2_{}".format(lr, optimizer,
    #                                       train_params['l2_regularization'])

    subdir_prefix2 = Path('{}'.format(train_params['network']))
    subdir_suffix = Path('nlag_{}_g_{}_l2_{}'.format(
        n_lag, tps_11['g_radius'], train_params['l2_regularization']))
    plot_ps = ([tps_11, tps_12], None, [0, 1], 'batch_size', None, figname)
    subdir = subdir_prefix2 / 'dim_over_training' / subdir_suffix
    plots.dim_through_training(*plot_ps,
                               subdir=subdir_prefix / subdir,
                               multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2 / 'dim_over_layers' / subdir_suffix
    plots.dim_over_layers(*plot_ps,
                          subdir=subdir_prefix / subdir,
                          multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2 / 'orth_compression_through_training' / subdir_suffix
    plots.orth_compression_through_training(
        *plot_ps,
        subdir=subdir_prefix / subdir,
        multiprocess_lock=multiprocess_lock)
    subdir = (subdir_prefix2 / 'orth_compression_through_training_input_sep'
              / subdir_suffix)
    plots.orth_compression_through_training_input_sep(
        *plot_ps,
        subdir=subdir_prefix / subdir,
        multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2 / 'acc_over_training' / subdir_suffix
    plots.acc_over_training(*plot_ps,
                            subdir=subdir_prefix / subdir,
                            multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2 / 'loss_over_training' / subdir_suffix
    plots.loss_over_training(*plot_ps,
                             subdir=subdir_prefix / subdir,
                             multiprocess_lock=multiprocess_lock)


# %% Figure 1d
hue_dictionary = {'g_radius': [20, 250]}
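# Overlay both g_radius settings (20 and 250) as hues in the same figure.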
acc_and_loss_params = high_d_input_edge_of_chaos_params.copy()
acc_and_loss_params['num_epochs'] = 10
acc_and_loss_params['num_train_samples_per_epoch'] = 400
epochs = list(range(acc_and_loss_params['num_epochs'] + 1))
figname = 'fig_1d_acc_and_loss_over_training'
plots.acc_and_loss_over_training(acc_and_loss_params,
                                 seeds,
                                 epochs=epochs,
                                 hue_dictionary=hue_dictionary,
                                 figname=figname)

# %% Figure 1e
plots.dim_over_layers(seeds, [20, 250],
                      high_d_input_edge_of_chaos_params,
                      figname="fig_1e_dim_over_time")
# %% Figures 1f and 1g
plots.clust_holdout_over_layers(seeds, [20, 250],
                                high_d_input_edge_of_chaos_params,
                                figname="fig_1f_1g_clust_holdout_over_time")

# %% Figure 2b (top)
g = low_d_input_edge_of_chaos_params['g_radius']
plots.lyaps([0],
            low_d_input_edge_of_chaos_params,
            lyap_epochs_edge_of_chaos,
            figname="fig_2b_top_lyaps_g_{}".format(g))

# %% Figure 2b (bottom)