def test_models(nnet_type):
    """Smoke-test the Model API (train/predict/fit) on a random dataset.

    Covers train_on_batch, predict_on_batch, plain fit, and fit with
    validation data for the given network type.
    """
    dataset = random.Random('probability')

    network = nnet_for_testing(nnet_type)
    network = initializers.init_standard(network, dataset)

    sgd = optimizers.SGD()
    cd_trainer = training.CD(network, nb_pos_steps=2, nb_neg_steps=2)
    model = Model(network, sgd, cd_trainer)

    # a single training step should reduce to a scalar cost
    cost = model.train_on_batch(dataset.train.data)
    assert cost.size == 1

    # likewise a single prediction step yields a scalar
    prediction = model.predict_on_batch(dataset.valid.data)
    assert prediction.size == 1

    # a full fit returns a History record
    hist = model.fit(dataset.train.data, n_epoch=1, batch_size=6)
    assert isinstance(hist, History)

    # fitting with held-out validation data also returns a History
    hist = model.fit(dataset.train.data, n_epoch=1, batch_size=6,
                     validation_data=dataset.valid.data)
    assert isinstance(hist, History)
def make_model(dbm, data):
    """Bundle *dbm* with an SGD optimizer and a CD trainer into a Model.

    NOTE(review): *data* is accepted but unused here — kept for interface
    compatibility with callers; confirm whether it can be dropped.
    """
    return Model(dbm, optimizers.SGD(), training.CD(dbm))
def run_rbm(nnet_type='rbm'):
    """Train a small demo network and save progress artifacts.

    Builds a tiny RBM/DBM on the 'fast' probability dataset, trains it for
    two epochs with CD + SGD, and writes CSV logs, periodic weight saves,
    plots, and a summary under ``results/fast_demo``.

    Parameters
    ----------
    nnet_type : str
        One of 'rbm', 'dbm', or 'dbm_complex'; selects layer sizes and
        connection topology.

    Returns
    -------
    History
        The training history produced by ``model.fit``.

    Raises
    ------
    NotImplementedError
        If *nnet_type* is not one of the supported values.
    """
    # training parameters
    batch_size = 100
    n_epoch = 2
    results_folder = 'fast_demo'

    # just makes a tiny neural network
    if nnet_type == 'rbm':
        layer_size_list = [100, 10]
        topology_dict = {0: {1}}
    elif nnet_type == 'dbm':
        layer_size_list = [100, 10, 5]
        topology_dict = {0: {1}, 1: {2}}
    elif nnet_type == 'dbm_complex':
        layer_size_list = [100, 10, 5, 2]
        topology_dict = {0: {1, 3}, 1: {2}, 2: {3}}
    else:
        raise NotImplementedError

    # this is a small dataset useful for demos
    data = fast.Fast('probability')

    # create and initialize the network
    dbm = DBM(layer_size_list, topology_dict)
    dbm = init_standard(dbm, data)

    # make the model (shared factory keeps optimizer/trainer setup consistent)
    model = make_model(dbm, data)

    # prepare output paths
    filepath = os.path.dirname(os.path.abspath(__file__))
    save_folder = os.path.abspath(
        os.path.join(filepath, '..', 'results', results_folder))
    save_dict = standard_save_folders(save_folder, overwrite=True)

    # these callbacks monitor progress
    cb_csv = callbacks.CSVLogger(save_dict['csv'], separator='\t')
    cb_ais = callbacks.AISCallback(dbm, 100, 1000, epoch_ls=[0, 1])
    cb_period = callbacks.PeriodicSave(save_dict['weights'], [0, 1],
                                       opt_path=save_dict['opt_weights'])
    cb_plot = callbacks.PlotCallback(save_dict['plots'], save_dict['csv'])
    cb_summary = callbacks.SummaryCallback(save_folder, save_dict['csv'])
    callbacks_ls = [cb_csv, cb_ais, cb_period, cb_plot, cb_summary]

    # do the actual training; return the history so callers can inspect it
    # (previously the history was computed but silently discarded)
    history = model.fit(data.train.data,
                        batch_size=batch_size,
                        n_epoch=n_epoch,
                        callbacks=callbacks_ls,
                        validation_data=data.valid.data)
    return history
def _init_training(training_type, nnet, nb_pos_steps, nb_neg_steps, batch_size):
    """Construct the trainer selected by *training_type* for *nnet*.

    'cd' builds a contrastive-divergence trainer; 'pcd' builds a persistent
    variant that additionally needs the batch size. Any other value raises
    NotImplementedError.
    """
    if training_type == 'cd':
        return training.CD(nnet,
                           nb_pos_steps=nb_pos_steps,
                           nb_neg_steps=nb_neg_steps)
    if training_type == 'pcd':
        return training.PCD(nnet,
                            nb_pos_steps=nb_pos_steps,
                            nb_neg_steps=nb_neg_steps,
                            batch_size=batch_size)
    raise NotImplementedError
def test_regularization_fit(nnet_type):
    """Smoke-test Model.fit on a network built with l1_l2 regularization."""
    dataset = random.Random('probability')

    # build the network with both weight and bias regularizers enabled
    network = nnet_for_testing(nnet_type, 'l1_l2', 'l1_l2')
    network = initializers.init_standard(network, dataset)

    model = Model(network, optimizers.SGD(), training.CD(network))

    # a single-epoch fit should complete and return a History
    result = model.fit(dataset.train.data, n_epoch=1, batch_size=100)
    assert isinstance(result, History)