def test_models(nnet_type):
    batch_size = 6
    n_epoch = 1

    data = random.Random('probability')

    nnet = nnet_for_testing(nnet_type)

    nnet = initializers.init_standard(nnet, data)
    optimizer = optimizers.SGD()
    trainer = training.CD(nnet, nb_pos_steps=2, nb_neg_steps=2)
    model = Model(nnet, optimizer, trainer)

    # test train_on_batch
    out = model.train_on_batch(data.train.data)
    assert out.size == 1

    # test predict_on_batch
    out = model.predict_on_batch(data.valid.data)
    assert out.size == 1

    # test fit
    out = model.fit(data.train.data, n_epoch=n_epoch, batch_size=batch_size)
    assert isinstance(out, History)

    # test validation data
    out = model.fit(data.train.data, n_epoch=n_epoch, batch_size=batch_size,
                    validation_data=data.valid.data)
    assert isinstance(out, History)
Example #2
    def make_model(dbm, data):
        optimizer = optimizers.SGD()
        trainer = training.CD(dbm)
        model = Model(dbm, optimizer, trainer)
        return model
Example #3
def run_rbm(nnet_type='rbm'):

    # parameters
    batch_size = 100
    n_epoch = 2
    results_folder = 'fast_demo'

    # just makes a tiny neural network
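    # topology_dict presumably maps each layer index to the set of layers it
    # connects to, e.g. {0: {1}} wires the visible layer to a single hidden layer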
    if nnet_type == 'rbm':
        layer_size_list = [100, 10]
        topology_dict = {0: {1}}
    elif nnet_type == 'dbm':
        layer_size_list = [100, 10, 5]
        topology_dict = {0: {1}, 1: {2}}
    elif nnet_type == 'dbm_complex':
        layer_size_list = [100, 10, 5, 2]
        topology_dict = {0: {1, 3}, 1: {2}, 2: {3}}
    else:
        raise NotImplementedError

    # this is a small dataset useful for demos
    data = fast.Fast('probability')

    # create and initialize the network (RBM or DBM, depending on nnet_type)
    dbm = DBM(layer_size_list, topology_dict)
    dbm = init_standard(dbm, data)

    # make the model
    optimizer = optimizers.SGD()
    trainer = training.CD(dbm)
    model = Model(dbm, optimizer, trainer)

    # prepare output paths
    filepath = os.path.dirname(os.path.abspath(__file__))
    save_folder = os.path.abspath(
        os.path.join(filepath, '..', 'results', results_folder))
    save_dict = standard_save_folders(save_folder, overwrite=True)

    # these callbacks monitor progress
    cb_csv = callbacks.CSVLogger(save_dict['csv'], separator='\t')
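    # AIS-based log-likelihood estimate; the two positional arguments are
    # presumably the number of AIS runs and the number of intermediate betas
    # (cf. the n_runs/n_betas passed to AISCallback in the train_check example below)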
    cb_ais = callbacks.AISCallback(dbm, 100, 1000, epoch_ls=[0, 1])
    cb_period = callbacks.PeriodicSave(save_dict['weights'], [0, 1],
                                       opt_path=save_dict['opt_weights'])
    cb_plot = callbacks.PlotCallback(save_dict['plots'], save_dict['csv'])
    cb_summary = callbacks.SummaryCallback(save_folder, save_dict['csv'])
    callbacks_ls = [cb_csv, cb_ais, cb_period, cb_plot, cb_summary]

    # do the actual training
    history = model.fit(data.train.data,
                        batch_size=batch_size,
                        n_epoch=n_epoch,
                        callbacks=callbacks_ls,
                        validation_data=data.valid.data)
Example #4
def test_regularization_fit(nnet_type):
    batch_size = 100
    n_epoch = 1
    W_reg_type = 'l1_l2'
    b_reg_type = 'l1_l2'

    data = random.Random('probability')

    nnet = nnet_for_testing(nnet_type, W_reg_type, b_reg_type)

    nnet = initializers.init_standard(nnet, data)
    optimizer = optimizers.SGD()
    trainer = training.CD(nnet)
    model = Model(nnet, optimizer, trainer)

    # test fit
    out = model.fit(data.train.data, n_epoch=n_epoch, batch_size=batch_size)
    assert isinstance(out, History)
Example #5
def run_rbm():

    # parameters
    batch_size = 100
    n_epoch = 1000  # 1000 for a true literature comparison, but 250 should be good
    results_folder = 'long_demo'

    # needed datasets; the field standard is 'probability' for training and 'sampled' for testing
    data_train = mnist.MNIST('probability')
    data_valid = mnist.MNIST('sampled')
    
    # this is a large neural network
    layer_size_list = [784, 500]
    topology_dict = {0: {1}}

    # training parameters:
    # the learning rate decays from 0.01 to 0.001 over the training epochs;
    # momentum is ramped up (controlled by schedule_decay) to its maximum
    # and then ramped back down to a smaller value for the last 50 epochs
    n_batch = np.floor(data_train.train.num_samples/batch_size)
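    # assuming the optimizer uses inverse-time decay, lr_t = lr / (1 + decay * t),
    # this decay value takes the learning rate from 0.01 down to 0.001 over training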
    decay = (1/0.1-1)/(n_epoch*data_train.train.num_samples/batch_size)
    optimizer_kwargs = {'lr': 0.01,
                        'momentum': 0.9,
                        'nesterov': True,
                        'decay': decay,
                        'schedule_decay': 0.004,
                        'mom_iter_max': n_batch * (n_epoch - 50)}
   
    # create and initialize the rbm
    W_regularizer = regularizers.l2(l=2e-4)
    dbm = DBM(layer_size_list, topology_dict, W_regularizer=W_regularizer)
    dbm = init_standard(dbm, data_train)
    
    # make the model    
    optimizer = optimizers.SGD(**optimizer_kwargs)
    trainer = training.PCD(dbm, nb_pos_steps=25, nb_neg_steps=25, batch_size=batch_size)
    model = Model(dbm, optimizer, trainer)
    
    # prepare output paths
    filepath = os.path.dirname(os.path.abspath(__file__))
    save_folder = os.path.abspath(os.path.join(filepath, '..', 'results', results_folder))
    save_dict = standard_save_folders(save_folder, overwrite=True)
  
    # epochs to monitor: a few at the start, their mirrors at the end, and every 25th in between
    temp = [0, 1, 5, 10]
    fixed_ls = temp + [n_epoch-t for t in temp] + [n_epoch-2]
    epoch_ls = list(set(list(range(0, n_epoch, 25)) + fixed_ls))
    
    # these callbacks monitor progress
    cb_csv = callbacks.CSVLogger(save_dict['csv'], separator='\t')
    cb_ais = callbacks.AISCallback(dbm, 1000, 30000, epoch_ls=epoch_ls)
    cb_period = callbacks.PeriodicSave(save_dict['weights'], epoch_ls,
                                       opt_path=save_dict['opt_weights'])
    cb_opt = callbacks.OptimizerSpy()
    cb_plot = callbacks.PlotCallback(save_dict['plots'], save_dict['csv'])
    cb_summary = callbacks.SummaryCallback(save_folder, save_dict['csv'])
    callbacks_ls = [cb_csv, cb_ais, cb_period, cb_opt, cb_plot, cb_summary]

    # do the actual training
    history = model.fit(data_train.train.data,
                        batch_size=batch_size,
                        n_epoch=n_epoch,
                        callbacks=callbacks_ls,
                        validation_data=data_valid.valid.data)
Example #6
def test_clipvalue():
    sgd = optimizers.SGD(lr=0.01, momentum=0.9, clipvalue=0.5)
    _test_optimizer(sgd)
Example #7
def test_clipnorm():
    sgd = optimizers.SGD(lr=0.01, momentum=0.9, clipnorm=0.5)
    _test_optimizer(sgd)
Example #8
def test_sgd():
    sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
    _test_optimizer(sgd)
Example #9
def train_check():
    """
    This is a full implementation test. On the fast dataset with the training
    setup below, the final log likelihood on the validation data should be
    around -34.5 nats.
    
    I left this out of the automatic pytests since this takes around 
    2 minutes to run on a CPU and really should be run only if all the pytest
    tests were successful.
    """

    # needed datasets, standard in field is probability for train, sampled for test
    data_prob = fast.Fast('probability')
    data_sampled = fast.Fast('sampled')

    # rbm setup
    layer_size_list = [data_prob.train.num_pixels, 16]
    topology_dict = {0: {1}}
    W_regularizer = regularizers.l1_l2(l1=1e-5, l2=1e-5)

    # training parameters
    batch_size = 100
    n_epoch = 100
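    # minibatches per epoch; mom_iter_max below presumably stops the momentum ramp
    # 50 epochs before the end of training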
    n_batch = np.floor(data_prob.train.num_samples / batch_size)
    optimizer_kwargs = {
        'lr': 0.01,
        'momentum': 0.9,
        'nesterov': True,
        'decay': 1.8e-5,
        'schedule_decay': 0.004,
        'mom_iter_max': n_batch * (n_epoch - 50)
    }

    # initialize the model
    dbm = DBM(layer_size_list, topology_dict, W_regularizer=W_regularizer)
    dbm = init_standard(dbm, data_prob)
    optimizer = optimizers.SGD(**optimizer_kwargs)
    trainer = training.PCD(dbm, batch_size=batch_size)
    model = Model(dbm, optimizer, trainer)

    # epochs at which to check performance; include a few at the start and end
    temp = [0, 1]
    fixed_ls = temp + [n_epoch - t for t in temp] + [n_epoch - 2]
    n_runs = 100
    n_betas = 10000

    # prep the callbacks
    filepath = os.path.dirname(os.path.abspath(__file__))
    save_folder = os.path.abspath(
        os.path.join(filepath, '..', '..', 'results', 'fast_checks'))
    save_dict = standard_save_folders(save_folder, overwrite=True)
    cb_csv = callbacks.CSVLogger(save_dict['csv'], separator='\t')
    cb_ais = callbacks.AISCallback(dbm, n_runs, n_betas, epoch_ls=fixed_ls)
    callbacks_ls = [cb_csv, cb_ais]

    # do the actual training
    history = model.fit(data_prob.train.data,
                        batch_size=batch_size,
                        n_epoch=n_epoch,
                        callbacks=callbacks_ls,
                        validation_data=data_sampled.valid.data)

    # check how training went
    val_prob = history.history['val_prob']
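    # nanmax because val_prob is presumably only populated at the AIS epochs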
    best_val_prob = np.nanmax(val_prob)

    # Bare minimum goal
    assert best_val_prob >= -40.0

    # Ultimate goal
    assert best_val_prob >= -35.0