Code example #1
0
def main():
    """Train the node-perturbation autoencoder (FA-auto variant) on MNIST.

    Builds the model N times; each build is trained M times, each repeat in
    a fresh TF session. Test losses and per-epoch logger metrics are
    accumulated and pickled to disk after every outer run.

    Returns:
        np.ndarray of shape (N, M, T, n_tags) with the logged metrics.
    """
    args = get_args()
    #model_name = 'nodepert_ae5_sgd_correctgeom'
    model_name = 'nodepert_ae5_faauto'
    #model_name = 'nodepert_ae5_bpauto'

    #Model = AENPModel5
    #Model = AENPModel5_ExactLsq
    #Model = AENPModel5_ExactLsq_BPAuto
    #Model = AENPModel5_ExactLsq_BPSelf
    Model = AENPModel5_ExactLsq_FAAuto
    #Model = AENPModel5_ExactLsq_FASelf
    Data = MNISTDataGenerator
    Trainer = AESFTrainer

    config = process_config('./configs/np_optimized.json', model_name)
    create_dirs([config.summary_dir, config.checkpoint_dir])

    #var_vals = [1e-2]
    N = 1                      # independent model builds
    #M = 5
    M = 10                     # training repeats per build
    T = config.num_epochs + 1  # logged time points (incl. epoch 0)
    n_tags = 13                # number of tags the logger records
    test_losses = np.zeros((N, M))
    isnan = np.zeros((N, M))
    metrics = np.zeros((N, M, T, n_tags))

    for n in range(N):
        tf.reset_default_graph()
        model = Model(config)
        data = Data(config)
        for m in range(M):
            with tf.Session() as sess:
                logger = LoggerNumpy(sess, config, model)
                model.load(sess)
                trainer = Trainer(sess, model, data, config, logger)
                try:
                    trainer.train()
                except ValueError:
                    # Mark the run as diverged but still record its metrics.
                    print("Method fails to converge for these parameters")
                    isnan[n, m] = 1
                loss, acc = trainer.test()
                metric = logger.get_data()
                tags = logger.get_tags()
                test_losses[n, m] = loss
                metrics[n, m, :, :] = metric
        #Save after each run
        # NOTE(review): the string concat assumes summary_dir ends with a
        # path separator -- confirm against process_config's output.
        fn = os.path.join(
            config.summary_dir) + "3_autoencoder_correctbatch.npz"
        to_save = {
            'test_losses': test_losses,
            'metrics': metrics,
            'isnan': isnan,
            'tags': tags
        }
        # Context manager closes the handle promptly; the original leaked
        # the file object returned by open().
        with open(fn, "wb") as f:
            pickle.dump(to_save, f)
    return metrics
Code example #2
0
def main():
    """Sweep the node-perturbation variance `var_xi` on the feedforward model.

    For each value in `var_vals`, sets the hyperparameter on the config,
    trains the exact-least-squares NP model once, and records test loss and
    logger metrics. All results are pickled at the end.

    Returns:
        np.ndarray of shape (N, M, T, n_tags) with the logged metrics.
    """
    args = get_args()
    model_name = 'nodepert4_fixedw_exact'
    Model = NPModel4_ExactLsq
    Data = MNISTDataGenerator
    Trainer = SFTrainer

    config = process_config('./configs/np.json', model_name)
    create_dirs([config.summary_dir, config.checkpoint_dir])

    #Param search parameters
    attr = ['var_xi']
    #var_vals = [1e-3, 1e-2, 1e-1, 1, 10]
    var_vals = [1e-1]
    N = len(var_vals)          # one outer run per hyperparameter value
    M = 1                      # training repeats per value
    T = config.num_epochs + 1  # logged time points (incl. epoch 0)

    n_tags = 8
    test_losses = np.zeros((N, M))
    isnan = np.zeros((N, M))
    metrics = np.zeros((N, M, T, n_tags))

    for n in range(N):
        var_val = [var_vals[n]]
        set_hyperparameters(config, attr, var_val)
        model = Model(config)
        data = Data(config)
        print('Hyperparameters: ' + attr[0] + ' = %f' % var_vals[n])
        for m in range(M):
            with tf.Session() as sess:
                logger = LoggerNumpy(sess, config, model)
                model.load(sess)
                trainer = Trainer(sess, model, data, config, logger)
                try:
                    trainer.train()
                except ValueError:
                    # Mark the run as diverged but still record its metrics.
                    print("Method fails to converge for these parameters")
                    isnan[n, m] = 1
                loss, acc = trainer.test()
                metric = logger.get_data()
                tags = logger.get_tags()
                test_losses[n, m] = loss
                metrics[n, m, :] = metric

    # NOTE(review): the string concat assumes summary_dir ends with a
    # path separator -- confirm against process_config's output.
    fn = os.path.join(
        config.summary_dir) + "2_establish_convergence_feedforward_output.npz"
    to_save = {
        'test_losses': test_losses,
        'metrics': metrics,
        'isnan': isnan,
        'tags': tags
    }
    # Context manager closes the handle promptly; the original leaked
    # the file object returned by open().
    with open(fn, "wb") as f:
        pickle.dump(to_save, f)

    #np.savez(fn, test_losses=test_losses, metrics = metrics, isnan = isnan, tags = tags)
    return metrics
def main():
    """Train the small feedback-alignment model M times and record metrics.

    A single model build is trained M times, each repeat in a fresh TF
    session (GPU memory growth enabled). Results are pickled at the end.

    Returns:
        np.ndarray of shape (N, M, T, n_tags) with the logged metrics.
    """
    args = get_args()
    model_name = 'feedbackalignment4_small'
    Model = FAModel4_Small
    #Model = FAModel4
    Data = MNISTDataGenerator
    Trainer = SFTrainer

    config = process_config('./configs/sf.json', model_name)
    create_dirs([config.summary_dir, config.checkpoint_dir])

    N = 1                      # single model build
    M = 5                      # training repeats
    T = config.num_epochs + 1  # logged time points (incl. epoch 0)

    n_tags = 10
    test_losses = np.zeros((N, M))
    isnan = np.zeros((N, M))
    metrics = np.zeros((N, M, T, n_tags))

    # Grow GPU memory on demand instead of claiming it all up front.
    tfconfig = tf.ConfigProto()
    tfconfig.gpu_options.allow_growth = True

    n = 0
    model = Model(config)
    data = Data(config)
    for m in range(M):
        with tf.Session(config=tfconfig) as sess:
            logger = LoggerNumpy(sess, config, model)
            model.load(sess)
            trainer = Trainer(sess, model, data, config, logger)
            try:
                trainer.train()
            except ValueError:
                # Mark the run as diverged but still record its metrics.
                print("Method fails to converge for these parameters")
                isnan[n, m] = 1
            loss, acc = trainer.test()
            metric = logger.get_data()
            tags = logger.get_tags()
            test_losses[n, m] = loss
            metrics[n, m, :, :] = metric

    #Save after each run
    # NOTE(review): the string concat assumes summary_dir ends with a
    # path separator -- confirm against process_config's output.
    fn = os.path.join(
        config.summary_dir
    ) + "2b_establish_convergence_feedforward_feedbackalignment_output.npz"
    to_save = {
        'test_losses': test_losses,
        'metrics': metrics,
        'isnan': isnan,
        'tags': tags
    }
    # Context manager closes the handle promptly; the original leaked
    # the file object returned by open().
    with open(fn, "wb") as f:
        pickle.dump(to_save, f)

    return metrics
def main():
    """Random hyperparameter search for the ADAM node-perturbation autoencoder.

    Draws N random settings of (var_xi, learning_rate, lmda_learning_rate)
    on log scales, trains each setting M times, and pickles the accumulated
    losses/metrics (plus the sampled parameters) after every outer run.

    Returns:
        np.ndarray of shape (N, M, T, n_tags) with the logged metrics.
    """
    args = get_args()
    #model_name = 'nodepert_ae5_sgd'
    #model_name = 'nodepert_ae5_bpauto'
    model_name = 'nodepert_ae5_adam'
    #model_name = 'nodepert_ae5_bpself'
    #model_name = 'nodepert_ae5_faauto'
    #model_name = 'nodepert_ae5_faself'
    #Model = AENPModel5
    Model = AENPModel5_ADAM
    #Model = AENPModel5_ExactLsq
    #Model = AENPModel5_ExactLsq_BPAuto
    #Model = AENPModel5_ExactLsq_BPSelf
    #Model = AENPModel5_ExactLsq_FAAuto
    #Model = AENPModel5_ExactLsq_FASelf
    Data = MNISTDataGenerator
    Trainer = AESFTrainer

    config = process_config('./configs/np.json', model_name)
    create_dirs([config.summary_dir, config.checkpoint_dir])

    #Param search parameters
    attr = ['var_xi', 'learning_rate', 'lmda_learning_rate']
    attr_ranges = [[-4, -1], [-6, -3], [-6, -3]]  # log10 sampling ranges
    log_scale = [True, True, True]
    N = 20                     # random hyperparameter draws
    #M = 5
    M = 1                      # training repeats per draw
    T = config.num_epochs + 1  # logged time points (incl. epoch 0)
    n_tags = 13
    test_losses = np.zeros((N, M))
    isnan = np.zeros((N, M))
    metrics = np.zeros((N, M, T, n_tags))
    params = []                # sampled hyperparameter values, per draw

    for n in range(N):
        param = set_random_hyperparameters(config, attr, attr_ranges,
                                           log_scale)
        params.append(param)
        tf.reset_default_graph()
        model = Model(config)
        data = Data(config)
        print('Hyperparameters: ' + ' '.join(
            [attr[ii] + ' = %f' % param[ii] for ii in range(len(attr))]))
        for m in range(M):
            with tf.Session() as sess:
                logger = LoggerNumpy(sess, config, model)
                model.load(sess)
                trainer = Trainer(sess, model, data, config, logger)
                try:
                    trainer.train()
                except ValueError:
                    # Mark the run as diverged but still record its metrics.
                    print("Method fails to converge for these parameters")
                    isnan[n, m] = 1
                loss, acc = trainer.test()
                metric = logger.get_data()
                tags = logger.get_tags()
                test_losses[n, m] = loss
                metrics[n, m, :, :] = metric
        #Save after each run
        # NOTE(review): the string concat assumes summary_dir ends with a
        # path separator -- confirm against process_config's output.
        fn = os.path.join(
            config.summary_dir) + "3_autoencoder_correctbatch_hyperparam.npz"
        to_save = {
            'attr': attr,
            'params': params,
            'test_losses': test_losses,
            'metrics': metrics,
            'isnan': isnan,
            'tags': tags
        }
        # Context manager closes the handle promptly; the original leaked
        # the file object returned by open().
        with open(fn, "wb") as f:
            pickle.dump(to_save, f)
    return metrics
Code example #5
0
def main():
    """Train the 4-layer weight-mirror model on MNIST and collect metrics.

    One model build is trained several times, each repeat in its own TF
    session (GPU memory growth enabled). On-disk saving is currently
    disabled; the collected metrics array is returned to the caller.
    """
    args = get_args()
    model_name = 'WM_4layer'
    Model = WM_4layer
    Data = MNISTDataGenerator
    Trainer = SFTrainer

    config = process_config('/home/prashanth/synthfeedback/configs/wm.json',
                            model_name)
    create_dirs([config.summary_dir, config.checkpoint_dir])

    n_builds = 1                  # independent model builds
    n_repeats = 3                 # training repeats per build
    n_steps = config.num_epochs + 1  # logged time points (incl. epoch 0)
    n_tags = 10

    test_losses = np.zeros((n_builds, n_repeats))
    isnan = np.zeros((n_builds, n_repeats))
    metrics = np.zeros((n_builds, n_repeats, n_steps, n_tags))

    # Grow GPU memory on demand instead of claiming it all up front.
    tfconfig = tf.ConfigProto()
    tfconfig.gpu_options.allow_growth = True

    for build_idx in range(n_builds):
        tf.reset_default_graph()
        model = Model(config)
        data = Data(config)
        for rep_idx in range(n_repeats):
            with tf.Session(config=tfconfig) as sess:
                logger = LoggerNumpy(sess, config, model)
                model.load(sess)
                trainer = Trainer(sess, model, data, config, logger)
                try:
                    trainer.train()
                except ValueError:
                    print("Method fails to converge for these parameters")
                    isnan[build_idx, rep_idx] = 1
                loss, acc = trainer.test()
                metric = logger.get_data()
                tags = logger.get_tags()
                test_losses[build_idx, rep_idx] = loss
                metrics[build_idx, rep_idx, :, :] = metric

        # Assemble the results dict; pickling it to disk is intentionally
        # disabled in this variant of the experiment.
        to_save = {
            'test_losses': test_losses,
            'metrics': metrics,
            'isnan': isnan,
            'tags': tags
        }

    return metrics
def main():
    """Sweep `var_xi` for the correct-batch exact NP model, M repeats each.

    For each variance value, sets the hyperparameter, rebuilds the graph,
    trains M times in fresh sessions, and pickles the accumulated results
    after every hyperparameter value.

    Returns:
        np.ndarray of shape (N, M, T, n_tags) with the logged metrics.
    """
    args = get_args()
    model_name = 'nodepert4_exact'
    Model = NPModel4_ExactLsq_CorrectBatch
    Data = MNISTDataGenerator
    Trainer = SFTrainer

    config = process_config('./configs/np.json', model_name)
    create_dirs([config.summary_dir, config.checkpoint_dir])

    #Param search parameters
    attr = ['var_xi']
    var_vals = [1e-4, 1e-3, 1e-2, 1e-1]
    #var_vals = [1e-3]
    N = len(var_vals)          # one outer run per hyperparameter value
    M = 5                      # training repeats per value
    #M = 1
    T = config.num_epochs + 1  # logged time points (incl. epoch 0)

    n_tags = 10
    test_losses = np.zeros((N, M))
    isnan = np.zeros((N, M))
    metrics = np.zeros((N, M, T, n_tags))

    # Grow GPU memory on demand instead of claiming it all up front.
    tfconfig = tf.ConfigProto()
    tfconfig.gpu_options.allow_growth = True

    for n in range(N):
        var_val = [var_vals[n]]
        set_hyperparameters(config, attr, var_val)
        tf.reset_default_graph()
        model = Model(config)
        data = Data(config)
        print('Hyperparameters: ' + attr[0] + ' = %f' % var_vals[n])
        for m in range(M):
            with tf.Session(config=tfconfig) as sess:
                logger = LoggerNumpy(sess, config, model)
                model.load(sess)
                trainer = Trainer(sess, model, data, config, logger)
                try:
                    trainer.train()
                except ValueError:
                    # Mark the run as diverged but still record its metrics.
                    print("Method fails to converge for these parameters")
                    isnan[n, m] = 1
                loss, acc = trainer.test()
                metric = logger.get_data()
                tags = logger.get_tags()
                test_losses[n, m] = loss
                metrics[n, m, :, :] = metric

        #Save after each run
        # NOTE(review): the string concat assumes summary_dir ends with a
        # path separator -- confirm against process_config's output.
        fn = os.path.join(
            config.summary_dir
        ) + "2b_establish_convergence_feedforward_output_correctbatch.npz"
        to_save = {
            'test_losses': test_losses,
            'metrics': metrics,
            'isnan': isnan,
            'tags': tags
        }
        # Context manager closes the handle promptly; the original leaked
        # the file object returned by open().
        with open(fn, "wb") as f:
            pickle.dump(to_save, f)

    return metrics
Code example #7
0
def main():
    """Sweep `var_xi` for the fixed-weight correct-batch exact NP model.

    For each variance value, sets the hyperparameter, rebuilds the graph,
    trains M times in fresh sessions, and pickles the accumulated results
    after every hyperparameter value.

    Returns:
        np.ndarray of shape (N, M, T, n_tags) with the logged metrics.
    """
    args = get_args()
    model_name = 'nodepert4_fixedw_exact'
    Model = NPModel4_ExactLsq_CorrectBatch
    Data = MNISTDataGenerator
    Trainer = SFTrainer

    config = process_config('./configs/np.json', model_name)
    create_dirs([config.summary_dir, config.checkpoint_dir])

    #Param search parameters
    attr = ['var_xi']
    var_vals = [1e-3, 1e-2, 1e-1]
    #var_vals = [1e-1]
    N = len(var_vals)          # one outer run per hyperparameter value
    M = 3                      # training repeats per value
    T = config.num_epochs + 1  # logged time points (incl. epoch 0)

    n_tags = 10
    test_losses = np.zeros((N, M))
    isnan = np.zeros((N, M))
    metrics = np.zeros((N, M, T, n_tags))

    # Grow GPU memory on demand instead of claiming it all up front.
    tfconfig = tf.ConfigProto()
    tfconfig.gpu_options.allow_growth = True

    for n in range(N):
        var_val = [var_vals[n]]
        set_hyperparameters(config, attr, var_val)
        tf.reset_default_graph()
        model = Model(config)
        data = Data(config)
        print('Hyperparameters: ' + attr[0] + ' = %f' % var_vals[n])
        for m in range(M):
            with tf.Session(config=tfconfig) as sess:
                logger = LoggerNumpy(sess, config, model)
                model.load(sess)
                trainer = Trainer(sess, model, data, config, logger)
                try:
                    trainer.train()
                except ValueError:
                    # Mark the run as diverged but still record its metrics.
                    print("Method fails to converge for these parameters")
                    isnan[n, m] = 1
                loss, acc = trainer.test()
                metric = logger.get_data()
                tags = logger.get_tags()
                test_losses[n, m] = loss
                metrics[n, m, :, :] = metric

        #Save after each run
        # NOTE(review): the string concat assumes summary_dir ends with a
        # path separator -- confirm against process_config's output.
        fn = os.path.join(config.summary_dir) + "2_establish_convergence_feedforward_output_correctbatches.npz"
        to_save = {
            'test_losses': test_losses,
            'metrics': metrics,
            'isnan': isnan,
            'tags': tags
        }
        # Context manager closes the handle promptly; the original leaked
        # the file object returned by open().
        with open(fn, "wb") as f:
            pickle.dump(to_save, f)

    return metrics