def __init__(self, input_shape, output_dim):
    '''
    FIELDS:
        self.params: any params from the layer that need to be updated
                     by backpropagation can be put inside self.params
    PARAMS:
        input_shape: tuple
            shape of the input image in the format (channel, height, width)
        output_dim: int
            the output dimension of the model
    '''
    assert len(input_shape) == 3, 'input_shape must be a tuple or list of (channel, height, width)'
    c, h, w = input_shape

    # feature map size after a 'valid' or 'full' convolution
    # (floor division keeps the sizes integral)
    valid = lambda x, y, kernel, stride: ((x - kernel) // stride + 1, (y - kernel) // stride + 1)
    full = lambda x, y, kernel, stride: ((x + kernel) // stride - 1, (y + kernel) // stride - 1)

    self.layers = []
    self.layers.append(Convolution2D(input_channels=c, filters=96, kernel_size=(11,11), stride=(4,4), border_mode='valid'))
    nh, nw = valid(h, w, 11, 4)
    self.layers.append(RELU())
    self.layers.append(LRN())
    self.layers.append(Pooling2D(poolsize=(3,3), stride=(2,2), mode='max'))
    nh, nw = valid(nh, nw, 3, 2)
    self.layers.append(Convolution2D(input_channels=96, filters=256, kernel_size=(5,5), stride=(1,1), border_mode='full'))
    nh, nw = full(nh, nw, 5, 1)
    self.layers.append(RELU())
    self.layers.append(LRN())
    self.layers.append(Pooling2D(poolsize=(3,3), stride=(2,2), mode='max'))
    nh, nw = valid(nh, nw, 3, 2)
    self.layers.append(Convolution2D(input_channels=256, filters=384, kernel_size=(3,3), stride=(1,1), border_mode='full'))
    nh, nw = full(nh, nw, 3, 1)
    self.layers.append(RELU())
    self.layers.append(Convolution2D(input_channels=384, filters=384, kernel_size=(3,3), stride=(1,1), border_mode='full'))
    nh, nw = full(nh, nw, 3, 1)
    self.layers.append(RELU())
    self.layers.append(Convolution2D(input_channels=384, filters=256, kernel_size=(3,3), stride=(1,1), border_mode='full'))
    nh, nw = full(nh, nw, 3, 1)
    self.layers.append(RELU())
    self.layers.append(Pooling2D(poolsize=(3,3), stride=(2,2), mode='max'))
    nh, nw = valid(nh, nw, 3, 2)
    self.layers.append(Flatten())
    self.layers.append(Linear(256 * nh * nw, 4096))
    self.layers.append(RELU())
    self.layers.append(Dropout(0.5))
    self.layers.append(Linear(4096, 4096))
    self.layers.append(RELU())
    self.layers.append(Dropout(0.5))
    self.layers.append(Linear(4096, output_dim))
    self.layers.append(Softmax())

    # collect the trainable params from every layer for backpropagation
    self.params = []
    for layer in self.layers:
        self.params += layer.params
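
# A quick sanity check of the shape bookkeeping above. A minimal sketch: the
# 227x227 input size is an illustrative assumption chosen so that every stride
# divides evenly; chaining the same valid/full formulas yields the flattened
# dimension fed into the first Linear layer.
valid = lambda x, y, kernel, stride: ((x - kernel) // stride + 1, (y - kernel) // stride + 1)
full = lambda x, y, kernel, stride: ((x + kernel) // stride - 1, (y + kernel) // stride - 1)

nh, nw = valid(227, 227, 11, 4)  # conv 11x11 stride 4   -> (55, 55)
nh, nw = valid(nh, nw, 3, 2)     # max pool 3x3 stride 2 -> (27, 27)
nh, nw = full(nh, nw, 5, 1)      # conv 5x5 'full'       -> (31, 31)
nh, nw = valid(nh, nw, 3, 2)     # max pool 3x3 stride 2 -> (15, 15)
nh, nw = full(nh, nw, 3, 1)      # conv 3x3 'full'       -> (17, 17)
nh, nw = full(nh, nw, 3, 1)      # conv 3x3 'full'       -> (19, 19)
nh, nw = full(nh, nw, 3, 1)      # conv 3x3 'full'       -> (21, 21)
nh, nw = valid(nh, nw, 3, 2)     # max pool 3x3 stride 2 -> (10, 10)
print(256 * nh * nw)             # 25600, the input dim of the first Linear layer
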
def _right_model(img_input_dim, merged_dim):
    c, h, w = img_input_dim

    # feature map size after a 'valid' or 'full' convolution
    valid = lambda x, y, kernel, stride: ((x - kernel) // stride + 1, (y - kernel) // stride + 1)
    full = lambda x, y, kernel, stride: ((x + kernel) // stride - 1, (y + kernel) // stride - 1)

    right_model = Sequential(input_var=T.tensor4(), output_var=T.matrix())
    right_model.add(Convolution2D(input_channels=c, filters=8, kernel_size=(3,3), stride=(1,1), border_mode='full'))
    h, w = full(h, w, 3, 1)
    right_model.add(RELU())
    right_model.add(Convolution2D(input_channels=8, filters=8, kernel_size=(3,3), stride=(1,1), border_mode='valid'))
    h, w = valid(h, w, 3, 1)
    right_model.add(RELU())
    right_model.add(Pooling2D(poolsize=(2, 2), stride=(1,1), mode='max'))
    h, w = valid(h, w, 2, 1)
    right_model.add(Dropout(0.25))

    right_model.add(Convolution2D(input_channels=8, filters=8, kernel_size=(3,3), stride=(1,1), border_mode='full'))
    h, w = full(h, w, 3, 1)
    right_model.add(RELU())
    right_model.add(Convolution2D(input_channels=8, filters=8, kernel_size=(3,3), stride=(1,1), border_mode='valid'))
    h, w = valid(h, w, 3, 1)
    right_model.add(RELU())
    right_model.add(Pooling2D(poolsize=(2, 2), stride=(1,1), mode='max'))
    h, w = valid(h, w, 2, 1)
    right_model.add(Dropout(0.25))

    right_model.add(Flatten())
    right_model.add(Linear(8 * h * w, 512))
    right_model.add(Linear(512, 512))
    right_model.add(RELU())
    right_model.add(Dropout(0.5))
    right_model.add(Linear(512, merged_dim))
    return right_model
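
# A minimal usage sketch for _right_model. The 32x32 RGB input, merged_dim of
# 64, and batch of 5 are illustrative assumptions, and it assumes
# Sequential.fprop can be called on an untrained model as in the other examples.
import theano
right_model = _right_model(img_input_dim=(3, 32, 32), merged_dim=64)
X = np.random.rand(5, 3, 32, 32).astype(theano.config.floatX)
out = right_model.fprop(X)
print(out.shape)  # expected (5, 64), i.e. (batch_size, merged_dim)
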
def train():
    data = Cifar10(batch_size=32, train_valid_test_ratio=[4, 1, 1])

    model = Sequential(input_var=T.tensor4(), output_var=T.matrix())
    model.add(Convolution2D(input_channels=3, filters=32, kernel_size=(3,3), stride=(1,1), border_mode='full'))
    model.add(RELU())
    model.add(Convolution2D(input_channels=32, filters=32, kernel_size=(3,3), stride=(1,1)))
    model.add(RELU())
    model.add(Pooling2D(poolsize=(2, 2), mode='max'))
    model.add(Dropout(0.25))

    model.add(Convolution2D(input_channels=32, filters=64, kernel_size=(3,3), stride=(1,1), border_mode='full'))
    model.add(RELU())
    model.add(Convolution2D(input_channels=64, filters=64, kernel_size=(3,3), stride=(1,1)))
    model.add(RELU())
    model.add(Pooling2D(poolsize=(2, 2), mode='max'))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Linear(64 * 8 * 8, 512))
    model.add(RELU())
    model.add(Dropout(0.5))
    model.add(Linear(512, 10))
    model.add(Softmax())

    # build learning method
    learning_method = SGD(learning_rate=0.01, momentum=0.9,
                          lr_decay_factor=0.9, decay_batch=5000)

    # put everything into the train object
    train_object = TrainObject(model=model,
                               log=None,
                               dataset=data,
                               train_cost=entropy,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 10,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01})

    # finally run the code
    train_object.setup()
    train_object.run()

    # test the model on the test set
    ypred = model.fprop(data.get_test().X)
    ypred = np.argmax(ypred, axis=1)
    y = np.argmax(data.get_test().y, axis=1)
    accuracy = np.equal(ypred, y).astype('f4').sum() / len(y)
    print('test accuracy:', accuracy)
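
# A per-class breakdown is often more informative than a single accuracy
# number. A minimal numpy-only sketch, meant to sit at the end of train()
# where ypred and y are in scope (CIFAR-10 class indices 0-9):
for c in range(10):
    mask = (y == c)
    class_acc = np.equal(ypred[mask], y[mask]).astype('f4').sum() / mask.sum()
    print('class %d accuracy: %.4f' % (c, class_acc))
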
def train():
    # build dataset
    batch_size = 64
    data = Mnist(batch_size=batch_size, train_valid_test_ratio=[5, 1, 1])

    # build model
    model = Sequential(input_var=T.matrix(), output_var=T.matrix())
    model.add(Linear(prev_dim=28 * 28, this_dim=200))
    model.add(RELU())
    model.add(Linear(prev_dim=200, this_dim=100))
    model.add(RELU())
    model.add(Dropout(0.5))
    model.add(Linear(prev_dim=100, this_dim=10))
    model.add(Softmax())

    # build learning method
    decay_batch = int(data.train.X.shape[0] * 2 / batch_size)
    learning_method = SGD(learning_rate=0.1, momentum=0.9,
                          lr_decay_factor=0.9, decay_batch=decay_batch)

    # build logger
    log = Log(experiment_name='MLP',
              description='This is a tutorial',
              save_outputs=True,      # log all the outputs from the screen
              save_model=True,        # save the best model
              save_epoch_error=True,  # log error at every epoch
              save_to_database={'name': 'Example.sqlite3',
                                'records': {'Batch_Size': batch_size,
                                            'Learning_Rate': learning_method.learning_rate,
                                            'Momentum': learning_method.momentum}})

    # put everything into the train object
    train_object = TrainObject(model=model,
                               log=log,
                               dataset=data,
                               train_cost=mse,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 100,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01})

    # finally run the code
    train_object.setup()
    train_object.run()

    # test the model on the test set
    ypred = model.fprop(data.get_test().X)
    ypred = np.argmax(ypred, axis=1)
    y = np.argmax(data.get_test().y, axis=1)
    accuracy = np.equal(ypred, y).astype('f4').sum() / len(y)
    print('test accuracy:', accuracy)
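
# The decay_batch above is expressed in minibatches. A minimal sketch (pure
# arithmetic, meant to sit inside train() after decay_batch is computed) of
# how it maps back to epochs:
batches_per_epoch = data.train.X.shape[0] // batch_size
decay_every_epochs = float(decay_batch) / batches_per_epoch
print('learning rate decays by 0.9 roughly every %.1f epochs' % decay_every_epochs)  # ~2.0 by construction
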
def train():
    # create a fake dataset and save each block to disk
    X1 = np.random.rand(100000, 1000)
    y1 = np.random.rand(100000, 10)
    with open('X1.npy', 'wb') as xin, open('y1.npy', 'wb') as yin:
        np.save(xin, X1)
        np.save(yin, y1)

    X2 = np.random.rand(100000, 1000)
    y2 = np.random.rand(100000, 10)
    with open('X2.npy', 'wb') as xin, open('y2.npy', 'wb') as yin:
        np.save(xin, X2)
        np.save(yin, y2)

    X3 = np.random.rand(100000, 1000)
    y3 = np.random.rand(100000, 10)
    with open('X3.npy', 'wb') as xin, open('y3.npy', 'wb') as yin:
        np.save(xin, X3)
        np.save(yin, y3)

    # now we can create the data by putting the paths ('X1.npy', 'y1.npy'),
    # ('X2.npy', 'y2.npy') and ('X3.npy', 'y3.npy') into DataBlocks
    data = DataBlocks(data_paths=[('X1.npy', 'y1.npy'),
                                  ('X2.npy', 'y2.npy'),
                                  ('X3.npy', 'y3.npy')],
                      batch_size=100, train_valid_test_ratio=[3, 2, 0],
                      allow_preload=False)

    model = Sequential(input_var=T.matrix(), output_var=T.matrix())
    model.add(Linear(prev_dim=1000, this_dim=200))
    model.add(RELU())
    model.add(Linear(prev_dim=200, this_dim=100))
    model.add(RELU())
    model.add(Dropout(0.5))
    model.add(Linear(prev_dim=100, this_dim=10))
    model.add(Softmax())

    # build learning method
    learning_method = SGD(learning_rate=0.01, momentum=0.9,
                          lr_decay_factor=0.9, decay_batch=5000)

    # put everything into the train object
    train_object = TrainObject(model=model,
                               log=None,
                               dataset=data,
                               train_cost=entropy,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 10,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01})

    # finally run the code
    train_object.setup()
    train_object.run()

    # evaluate the model on each saved data block
    for X_path, y_path in [('X1.npy', 'y1.npy'), ('X2.npy', 'y2.npy')]:
        with open(X_path, 'rb') as Xin, open(y_path, 'rb') as yin:
            ypred = model.fprop(np.load(Xin))
            ypred = np.argmax(ypred, axis=1)
            y = np.argmax(np.load(yin), axis=1)
            accuracy = np.equal(ypred, y).astype('f4').sum() / len(y)
            print('combined accuracy for blk %s:' % X_path, accuracy)
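
# The fake blocks above are written to the working directory. A minimal
# stdlib-only cleanup sketch once training and evaluation are done:
import os
for path in ['X1.npy', 'y1.npy', 'X2.npy', 'y2.npy', 'X3.npy', 'y3.npy']:
    if os.path.exists(path):
        os.remove(path)
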
def train():
    max_features = 20000
    maxseqlen = 100  # cut texts after this number of words (among top max_features most common words)
    batch_size = 16
    word_vec_len = 256
    iter_class = 'SequentialRecurrentIterator'
    seq_len = 10

    data = IMDB(pad_zero=True, maxlen=maxseqlen, nb_words=max_features,
                batch_size=batch_size, train_valid_test_ratio=[8, 2, 0],
                iter_class=iter_class, seq_len=seq_len)

    print('Build model...')
    model = Sequential(input_var=T.matrix(), output_var=T.matrix())
    model.add(Embedding(max_features, word_vec_len))

    # MLP layers
    model.add(Transform((word_vec_len,)))  # transform from 3d dimensional input to 2d input for mlp
    model.add(Linear(word_vec_len, 100))
    model.add(RELU())
    model.add(BatchNormalization(dim=100, layer_type='fc'))
    model.add(Linear(100, 100))
    model.add(RELU())
    model.add(BatchNormalization(dim=100, layer_type='fc'))
    model.add(Linear(100, word_vec_len))
    model.add(RELU())
    model.add(Transform((maxseqlen, word_vec_len)))  # transform back from 2d to 3d for recurrent input

    # stacked BiLSTM layers
    model.add(BiLSTM(word_vec_len, 50, output_mode='concat', return_sequences=True))
    model.add(BiLSTM(100, 24, output_mode='sum', return_sequences=True))
    model.add(LSTM(24, 24, return_sequences=True))

    # MLP layers
    model.add(Reshape((24 * maxseqlen,)))
    model.add(BatchNormalization(dim=24 * maxseqlen, layer_type='fc'))
    model.add(Linear(24 * maxseqlen, 50))
    model.add(RELU())
    model.add(Dropout(0.2))
    model.add(Linear(50, 1))
    model.add(Sigmoid())

    # build learning method
    decay_batch = int(data.train.X.shape[0] * 5 / batch_size)
    learning_method = SGD(learning_rate=0.1, momentum=0.9,
                          lr_decay_factor=1.0, decay_batch=decay_batch)

    # build logger
    log = Log(experiment_name='MLP',
              description='This is a tutorial',
              save_outputs=True,      # log all the outputs from the screen
              save_model=True,        # save the best model
              save_epoch_error=True,  # log error at every epoch
              save_to_database={'name': 'Example.sqlite3',
                                'records': {'Batch_Size': batch_size,
                                            'Learning_Rate': learning_method.learning_rate,
                                            'Momentum': learning_method.momentum}})

    # put everything into the train object
    train_object = TrainObject(model=model,
                               log=log,
                               dataset=data,
                               train_cost=mse,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 100,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01})

    # finally run the code
    train_object.setup()
    train_object.run()
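
# A minimal numpy sketch of what the two Transform layers above are assumed to
# do: collapse (batch, seq, dim) into (batch*seq, dim) so the MLP sees one word
# vector per row, then restore the 3d shape for the recurrent layers. The batch
# size of 4 is illustrative; seq and dim match maxseqlen and word_vec_len.
import numpy as np
batch, seq, dim = 4, 100, 256
x3d = np.random.rand(batch, seq, dim)
x2d = x3d.reshape(-1, dim)            # (400, 256), like Transform((word_vec_len,))
back = x2d.reshape(batch, seq, dim)   # like Transform((maxseqlen, word_vec_len))
assert np.array_equal(x3d, back)
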
def train():
    data = Cifar10(batch_size=32, train_valid_test_ratio=[4, 1, 1])

    model = Sequential(input_var=T.tensor4(), output_var=T.matrix())
    model.add(Convolution2D(input_channels=3, filters=8, kernel_size=(3, 3),
                            stride=(1, 1), border_mode='full'))
    model.add(RELU())
    model.add(Convolution2D(input_channels=8, filters=16, kernel_size=(3, 3),
                            stride=(1, 1)))
    model.add(RELU())
    model.add(Pooling2D(poolsize=(4, 4), stride=(4, 4), mode='max'))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Linear(16 * 8 * 8, 512))
    model.add(RELU())
    model.add(Dropout(0.5))
    model.add(Linear(512, 10))
    model.add(Softmax())

    # build learning method
    learning_method = SGD(learning_rate=0.01, momentum=0.9,
                          lr_decay_factor=0.9, decay_batch=5000)

    # build logger
    log = Log(experiment_name='cifar10_cnn',
              description='This is a tutorial',
              save_outputs=True,      # log all the outputs from the screen
              save_model=True,        # save the best model
              save_epoch_error=True,  # log error at every epoch
              save_to_database={'name': 'hyperparam.sqlite3',
                                'records': {'Batch_Size': data.batch_size,
                                            'Learning_Rate': learning_method.learning_rate,
                                            'Momentum': learning_method.momentum}})

    # put everything into the train object
    train_object = TrainObject(model=model,
                               log=log,
                               dataset=data,
                               train_cost=entropy,
                               valid_cost=error,
                               learning_method=learning_method,
                               stop_criteria={'max_epoch': 30,
                                              'epoch_look_back': 5,
                                              'percent_decrease': 0.01})

    # finally run the code
    train_object.setup()
    train_object.run()

    # test the model on the test set
    ypred = model.fprop(data.get_test().X)
    ypred = np.argmax(ypred, axis=1)
    y = np.argmax(data.get_test().y, axis=1)
    accuracy = np.equal(ypred, y).astype('f4').sum() / len(y)
    print('test accuracy:', accuracy)
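
# A minimal sketch for inspecting the hyperparameters saved by the Log above,
# using only the stdlib sqlite3 module. The table name the logger creates is
# an assumption, so list the tables first and query the one you find:
import sqlite3
conn = sqlite3.connect('hyperparam.sqlite3')
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
print(cur.fetchall())  # locate the experiment table, then e.g.:
# cur.execute('SELECT * FROM cifar10_cnn')
# print(cur.fetchall())
conn.close()
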