def train():
    # build dataset
    batch_size = 64
    data = Mnist(batch_size=batch_size, train_valid_test_ratio=[5, 1, 1])

    # build model
    model = Sequential(input_var=T.matrix(), output_var=T.matrix())
    model.add(Linear(prev_dim=28 * 28, this_dim=200))
    model.add(RELU())
    model.add(Linear(prev_dim=200, this_dim=100))
    model.add(RELU())
    model.add(Dropout(0.5))
    model.add(Linear(prev_dim=100, this_dim=10))
    model.add(Softmax())

    # build learning method
    decay_batch = int(data.train.X.shape[0] * 2 / batch_size)
    learning_method = SGD(learning_rate=0.1, momentum=0.9,
                          lr_decay_factor=0.9, decay_batch=decay_batch)

    # build logger
    log = Log(experiment_name='MLP',
              description='This is a tutorial',
              save_outputs=True,      # log all the outputs from the screen
              save_model=True,        # save the best model
              save_epoch_error=True,  # log error at every epoch
              save_to_database={'name': 'Example.sqlite3',
                                'records': {'Batch_Size': batch_size,
                                            'Learning_Rate': learning_method.learning_rate,
                                            'Momentum': learning_method.momentum}})

    # put everything into the train object
    train_object = TrainObject(model=model,
                               log=log,
                               dataset=data,
                               train_cost=mse,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 100,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01})
    # finally run the code
    train_object.setup()
    train_object.run()

    # test the model on the test set
    ypred = model.fprop(data.get_test().X)
    ypred = np.argmax(ypred, axis=1)
    y = np.argmax(data.get_test().y, axis=1)
    accuracy = np.equal(ypred, y).astype('f4').sum() / len(y)
    print('test accuracy:', accuracy)
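# The examples in this file omit their import sections. Below is a minimal
# sketch of the imports and entry point the MLP example above assumes. The
# module paths follow the usual Mozi project layout but are an assumption;
# adjust them to match your installation.
import numpy as np
import theano.tensor as T

from mozi.datasets.mnist import Mnist       # assumed path
from mozi.model import Sequential           # assumed path
from mozi.layers.linear import Linear       # assumed path
from mozi.layers.activation import RELU, Softmax  # assumed path
from mozi.layers.noise import Dropout       # assumed path
from mozi.log import Log                    # assumed path
from mozi.train_object import TrainObject   # assumed path
from mozi.cost import mse, error            # assumed path
from mozi.learning_method import SGD        # assumed path

if __name__ == '__main__':
    train()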
def train():
    data = Cifar10(batch_size=32, train_valid_test_ratio=[4, 1, 1])

    model = Sequential(input_var=T.tensor4(), output_var=T.matrix())
    model.add(Convolution2D(input_channels=3, filters=8, kernel_size=(3, 3),
                            stride=(1, 1), border_mode='full'))
    model.add(RELU())
    model.add(Convolution2D(input_channels=8, filters=16, kernel_size=(3, 3),
                            stride=(1, 1)))
    model.add(RELU())
    model.add(Pooling2D(poolsize=(4, 4), stride=(4, 4), mode='max'))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Linear(16 * 8 * 8, 512))
    model.add(RELU())
    model.add(Dropout(0.5))
    model.add(Linear(512, 10))
    model.add(Softmax())

    # build learning method
    learning_method = SGD(learning_rate=0.01, momentum=0.9,
                          lr_decay_factor=0.9, decay_batch=5000)

    # build logger
    log = Log(experiment_name='cifar10_cnn',
              description='This is a tutorial',
              save_outputs=True,      # log all the outputs from the screen
              save_model=True,        # save the best model
              save_epoch_error=True,  # log error at every epoch
              save_to_database={'name': 'hyperparam.sqlite3',
                                'records': {'Batch_Size': data.batch_size,
                                            'Learning_Rate': learning_method.learning_rate,
                                            'Momentum': learning_method.momentum}})

    # put everything into the train object
    train_object = TrainObject(model=model,
                               log=log,
                               dataset=data,
                               train_cost=entropy,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 30,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01})
    # finally run the code
    train_object.setup()
    train_object.run()

    # test the model on the test set
    ypred = model.fprop(data.get_test().X)
    ypred = np.argmax(ypred, axis=1)
    y = np.argmax(data.get_test().y, axis=1)
    accuracy = np.equal(ypred, y).astype('f4').sum() / len(y)
    print('test accuracy:', accuracy)
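# Why Linear(16 * 8 * 8, 512) in the example above? A quick check of the spatial
# dimensions, assuming the standard output-size conventions for 'full' and 'valid'
# 2D convolutions. This arithmetic is illustrative only, not part of the Mozi API.
h = w = 32                       # CIFAR-10 images are 32x32
h, w = h + 3 - 1, w + 3 - 1      # 3x3 'full' convolution: 32 -> 34
h, w = h - 3 + 1, w - 3 + 1      # 3x3 'valid' convolution: 34 -> 32
h, w = h // 4, w // 4            # 4x4 max pooling with stride 4: 32 -> 8
assert (h, w) == (8, 8)          # 16 feature maps of 8x8 -> 16 * 8 * 8 flattened inputs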
def train():
    batch_size = 128
    data = Cifar10(batch_size=batch_size, train_valid_test_ratio=[4, 1, 1])
    _, c, h, w = data.train.X.shape

    model = Sequential(input_var=T.tensor4(), output_var=T.matrix())
    model.add(Convolution2D(input_channels=c, filters=8, kernel_size=(3, 3),
                            stride=(1, 1), border_mode='full'))
    h, w = full(h, w, kernel=3, stride=1)
    model.add(RELU())
    model.add(Convolution2D(input_channels=8, filters=16, kernel_size=(3, 3),
                            stride=(1, 1), border_mode='valid'))
    h, w = valid(h, w, kernel=3, stride=1)
    model.add(RELU())
    model.add(Pooling2D(poolsize=(4, 4), stride=(4, 4), mode='max'))
    h, w = valid(h, w, kernel=4, stride=4)
    model.add(Flatten())
    model.add(Linear(16 * h * w, 512))
    model.add(BatchNormalization((512,), short_memory=0.9))
    model.add(RELU())
    model.add(Linear(512, 10))
    model.add(Softmax())

    learning_method = RMSprop(learning_rate=0.01)

    # build logger
    log = Log(experiment_name='cifar10_cnn_example',
              description='This is a tutorial',
              save_outputs=True,      # log all the outputs from the screen
              save_model=True,        # save the best model
              save_epoch_error=True,  # log error at every epoch
              save_to_database={'name': 'hyperparam.sqlite3',
                                'records': {'Batch_Size': batch_size,
                                            'Learning_Rate': learning_method.learning_rate}})

    # put everything into the train object
    train_object = TrainObject(model=model,
                               log=log,
                               dataset=data,
                               train_cost=entropy,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 30,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01})
    # finally run the code
    train_object.setup()
    train_object.run()

    # test the model on the test set
    ypred = model.fprop(data.get_test().X)
    ypred = np.argmax(ypred, axis=1)
    y = np.argmax(data.get_test().y, axis=1)
    accuracy = np.equal(ypred, y).astype('f4').sum() / len(y)
    print('test accuracy:', accuracy)
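# The full() and valid() helpers used above track the feature-map size through the
# network so that the first Linear layer receives the right input dimension. They
# are presumably provided by the library; the sketch below shows what such helpers
# compute under standard convolution arithmetic. The names and signatures mirror
# the calls above, but the bodies are an illustrative assumption.
def valid(h, w, kernel, stride):
    # 'valid' convolution / pooling: no padding
    return (h - kernel) // stride + 1, (w - kernel) // stride + 1

def full(h, w, kernel, stride):
    # 'full' convolution: pad by kernel - 1 on each side
    return (h + kernel - 2) // stride + 1, (w + kernel - 2) // stride + 1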
def train():
    data = Cifar10(batch_size=32, train_valid_test_ratio=[4, 1, 1])

    model = Sequential(input_var=T.tensor4(), output_var=T.matrix())
    model.add(Convolution2D(input_channels=3, filters=32, kernel_size=(3, 3),
                            stride=(1, 1), border_mode='full'))
    model.add(RELU())
    model.add(Convolution2D(input_channels=32, filters=32, kernel_size=(3, 3),
                            stride=(1, 1)))
    model.add(RELU())
    model.add(Pooling2D(poolsize=(2, 2), mode='max'))
    model.add(Dropout(0.25))

    model.add(Convolution2D(input_channels=32, filters=64, kernel_size=(3, 3),
                            stride=(1, 1), border_mode='full'))
    model.add(RELU())
    model.add(Convolution2D(input_channels=64, filters=64, kernel_size=(3, 3),
                            stride=(1, 1)))
    model.add(RELU())
    model.add(Pooling2D(poolsize=(2, 2), mode='max'))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Linear(64 * 8 * 8, 512))
    model.add(RELU())
    model.add(Dropout(0.5))
    model.add(Linear(512, 10))
    model.add(Softmax())

    # build learning method
    learning_method = SGD(learning_rate=0.01, momentum=0.9,
                          lr_decay_factor=0.9, decay_batch=5000)

    # put everything into the train object
    train_object = TrainObject(model=model,
                               log=None,
                               dataset=data,
                               train_cost=entropy,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 10,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01})
    # finally run the code
    train_object.setup()
    train_object.run()

    # test the model on the test set
    ypred = model.fprop(data.get_test().X)
    ypred = np.argmax(ypred, axis=1)
    y = np.argmax(data.get_test().y, axis=1)
    accuracy = np.equal(ypred, y).astype('f4').sum() / len(y)
    print('test accuracy:', accuracy)
def train():
    # build dataset
    data = Mnist(batch_size=64, train_valid_test_ratio=[5, 1, 1])

    # build model
    model = Sequential()
    model.add(Linear(prev_dim=28 * 28, this_dim=200))
    model.add(RELU())
    model.add(Linear(prev_dim=200, this_dim=100))
    model.add(RELU())
    model.add(Dropout(0.5))
    model.add(Linear(prev_dim=100, this_dim=10))
    model.add(Softmax())

    # build learning method
    learning_method = AdaGrad(learning_rate=0.1, momentum=0.9,
                              lr_decay_factor=0.9, decay_batch=10000)

    # put everything into the train object
    train_object = TrainObject(model=model,
                               log=None,
                               dataset=data,
                               train_cost=mse,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 10,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01})
    # finally run the code
    train_object.setup()
    train_object.run()

    # test the model on the test set (accuracy_score comes from sklearn.metrics)
    ypred = model.fprop(data.get_test().X)
    ypred = np.argmax(ypred, axis=1)
    y = np.argmax(data.get_test().y, axis=1)
    print('test accuracy:', accuracy_score(y, ypred))
def train():
    # create a fake dataset saved as numpy blocks on disk
    X1 = np.random.rand(100000, 1000)
    y1 = np.random.rand(100000, 10)
    with open('X1.npy', 'wb') as xin, open('y1.npy', 'wb') as yin:
        np.save(xin, X1)
        np.save(yin, y1)

    X2 = np.random.rand(100000, 1000)
    y2 = np.random.rand(100000, 10)
    with open('X2.npy', 'wb') as xin, open('y2.npy', 'wb') as yin:
        np.save(xin, X2)
        np.save(yin, y2)

    X3 = np.random.rand(100000, 1000)
    y3 = np.random.rand(100000, 10)
    with open('X3.npy', 'wb') as xin, open('y3.npy', 'wb') as yin:
        np.save(xin, X3)
        np.save(yin, y3)

    # now we can create the data by putting the paths ('X1.npy', 'y1.npy'),
    # ('X2.npy', 'y2.npy') and ('X3.npy', 'y3.npy') into DataBlocks
    data = DataBlocks(data_paths=[('X1.npy', 'y1.npy'),
                                  ('X2.npy', 'y2.npy'),
                                  ('X3.npy', 'y3.npy')],
                      batch_size=100,
                      train_valid_test_ratio=[3, 2, 0],
                      allow_preload=False)

    model = Sequential(input_var=T.matrix(), output_var=T.matrix())
    model.add(Linear(prev_dim=1000, this_dim=200))
    model.add(RELU())
    model.add(Linear(prev_dim=200, this_dim=100))
    model.add(RELU())
    model.add(Dropout(0.5))
    model.add(Linear(prev_dim=100, this_dim=10))
    model.add(Softmax())

    # build learning method
    learning_method = SGD(learning_rate=0.01, momentum=0.9,
                          lr_decay_factor=0.9, decay_batch=5000)

    # put everything into the train object
    train_object = TrainObject(model=model,
                               log=None,
                               dataset=data,
                               train_cost=entropy,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 10,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01})
    # finally run the code
    train_object.setup()
    train_object.run()

    # test the model block by block
    for X_path, y_path in [('X1.npy', 'y1.npy'), ('X2.npy', 'y2.npy')]:
        with open(X_path, 'rb') as Xin, open(y_path, 'rb') as yin:
            ypred = model.fprop(np.load(Xin))
            ypred = np.argmax(ypred, axis=1)
            y = np.argmax(np.load(yin), axis=1)
            accuracy = np.equal(ypred, y).astype('f4').sum() / len(y)
            print('combined accuracy for blk %s:' % X_path, accuracy)
def train():
    batch_size = 256
    short_memory = 0.9
    learning_rate = 0.005

    data = Cifar10(batch_size=batch_size, train_valid_test_ratio=[4, 1, 1])
    _, c, h, w = data.train.X.shape

    model = Sequential(input_var=T.tensor4(), output_var=T.matrix())
    model.add(Convolution2D(input_channels=c, filters=8, kernel_size=(3, 3),
                            stride=(1, 1), border_mode='full'))
    h, w = full(h, w, kernel=3, stride=1)
    model.add(BatchNormalization(dim=8, layer_type='conv', short_memory=short_memory))
    model.add(RELU())
    model.add(Convolution2D(input_channels=8, filters=16, kernel_size=(3, 3),
                            stride=(1, 1), border_mode='valid'))
    h, w = valid(h, w, kernel=3, stride=1)
    model.add(BatchNormalization(dim=16, layer_type='conv', short_memory=short_memory))
    model.add(RELU())
    model.add(Pooling2D(poolsize=(4, 4), stride=(4, 4), mode='max'))
    h, w = valid(h, w, kernel=4, stride=4)
    model.add(Flatten())
    model.add(Linear(16 * h * w, 512))
    model.add(BatchNormalization(dim=512, layer_type='fc', short_memory=short_memory))
    model.add(RELU())
    model.add(Linear(512, 10))
    model.add(Softmax())

    # learning_method = RMSprop(learning_rate=learning_rate)
    learning_method = Adam(learning_rate=learning_rate)
    # learning_method = SGD(learning_rate=0.001)

    # build logger
    log = Log(experiment_name='cifar10_cnn_tutorial',
              description='This is a tutorial',
              save_outputs=True,      # log all the outputs from the screen
              save_model=True,        # save the best model
              save_epoch_error=True,  # log error at every epoch
              save_to_database={'name': 'hyperparam.sqlite3',
                                'records': {'Batch_Size': batch_size,
                                            'Learning_Rate': learning_method.learning_rate}})

    # put everything into the train object
    train_object = TrainObject(model=model,
                               log=log,
                               dataset=data,
                               train_cost=entropy,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 100,
                                              'epoch_look_back': 10,
                                              'percent_decrease': 0.01})
    # finally run the code
    train_object.setup()
    train_object.run()

    # test the model on the test set
    ypred = model.fprop(data.get_test().X)
    ypred = np.argmax(ypred, axis=1)
    y = np.argmax(data.get_test().y, axis=1)
    accuracy = np.equal(ypred, y).astype('f4').sum() / len(y)
    print('test accuracy:', accuracy)
def train():
    # create a fake image dataset saved as numpy blocks on disk
    X1 = np.random.rand(1000, 3, 32, 32)
    y1 = np.random.rand(1000, 10)
    with open('X1.npy', 'wb') as xin, open('y1.npy', 'wb') as yin:
        np.save(xin, X1)
        np.save(yin, y1)

    X2 = np.random.rand(1000, 3, 32, 32)
    y2 = np.random.rand(1000, 10)
    with open('X2.npy', 'wb') as xin, open('y2.npy', 'wb') as yin:
        # save the second block (the original saved X1/y1 here by mistake)
        np.save(xin, X2)
        np.save(yin, y2)

    # now we can create the data by putting the paths
    # ('X1.npy', 'y1.npy') and ('X2.npy', 'y2.npy') into DataBlocks
    data = DataBlocks(data_paths=[('X1.npy', 'y1.npy'), ('X2.npy', 'y2.npy')],
                      batch_size=100,
                      train_valid_test_ratio=[3, 1, 1])

    model = Sequential(input_var=T.tensor4(), output_var=T.matrix())
    model.add(Convolution2D(input_channels=3, filters=32, kernel_size=(3, 3),
                            stride=(1, 1), border_mode='full'))
    model.add(RELU())
    model.add(Convolution2D(input_channels=32, filters=32, kernel_size=(3, 3),
                            stride=(1, 1)))
    model.add(RELU())
    model.add(Pooling2D(poolsize=(2, 2), mode='max'))
    model.add(Dropout(0.25))

    model.add(Convolution2D(input_channels=32, filters=64, kernel_size=(3, 3),
                            stride=(1, 1), border_mode='full'))
    model.add(RELU())
    model.add(Convolution2D(input_channels=64, filters=64, kernel_size=(3, 3),
                            stride=(1, 1)))
    model.add(RELU())
    model.add(Pooling2D(poolsize=(2, 2), mode='max'))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Linear(64 * 8 * 8, 512))
    model.add(RELU())
    model.add(Dropout(0.5))
    model.add(Linear(512, 10))
    model.add(Softmax())

    # build learning method
    learning_method = SGD(learning_rate=0.01, momentum=0.9,
                          lr_decay_factor=0.9, decay_batch=5000)

    # put everything into the train object
    train_object = TrainObject(model=model,
                               log=None,
                               dataset=data,
                               train_cost=entropy,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 10,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01})
    # finally run the code
    train_object.setup()
    train_object.run()

    # test the model block by block
    for X_path, y_path in [('X1.npy', 'y1.npy'), ('X2.npy', 'y2.npy')]:
        with open(X_path, 'rb') as Xin, open(y_path, 'rb') as yin:
            ypred = model.fprop(np.load(Xin))
            ypred = np.argmax(ypred, axis=1)
            y = np.argmax(np.load(yin), axis=1)
            accuracy = np.equal(ypred, y).astype('f4').sum() / len(y)
            print('combined accuracy for blk %s:' % X_path, accuracy)