def _right_model(img_input_dim, merged_dim):
    c, h, w = img_input_dim

    # output spatial dims for 'valid' / 'full' border modes; floor division keeps them integral
    valid = lambda x, y, kernel, stride: ((x - kernel) // stride + 1, (y - kernel) // stride + 1)
    full = lambda x, y, kernel, stride: ((x + kernel) // stride - 1, (y + kernel) // stride - 1)

    right_model = Sequential(input_var=T.tensor4(), output_var=T.matrix())
    right_model.add(Convolution2D(input_channels=c, filters=8, kernel_size=(3,3), stride=(1,1), border_mode='full'))
    h, w = full(h, w, 3, 1)
    right_model.add(RELU())
    right_model.add(Convolution2D(input_channels=8, filters=8, kernel_size=(3,3), stride=(1,1), border_mode='valid'))
    h, w = valid(h, w, 3, 1)
    right_model.add(RELU())
    right_model.add(Pooling2D(poolsize=(2, 2), stride=(1,1), mode='max'))
    h, w = valid(h, w, 2, 1)
    right_model.add(Dropout(0.25))

    right_model.add(Convolution2D(input_channels=8, filters=8, kernel_size=(3,3), stride=(1,1), border_mode='full'))
    h, w = full(h, w, 3, 1)
    right_model.add(RELU())
    right_model.add(Convolution2D(input_channels=8, filters=8, kernel_size=(3,3), stride=(1,1), border_mode='valid'))
    h, w = valid(h, w, 3, 1)
    right_model.add(RELU())
    right_model.add(Pooling2D(poolsize=(2, 2), stride=(1,1), mode='max'))
    h, w = valid(h, w, 2, 1)
    right_model.add(Dropout(0.25))

    right_model.add(Flatten())
    right_model.add(Linear(8*h*w, 512))
    right_model.add(Linear(512, 512))
    right_model.add(RELU())
    right_model.add(Dropout(0.5))

    right_model.add(Linear(512, merged_dim))
    return right_model
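As a quick sanity check of the two shape lambdas above, the standalone trace below follows an assumed 3x32x32 input (img_input_dim is not fixed in the source) through the same convolution, pooling and flatten stages.

# Standalone shape trace for _right_model, assuming img_input_dim = (3, 32, 32).
valid = lambda x, y, k, s: ((x - k) // s + 1, (y - k) // s + 1)
full = lambda x, y, k, s: ((x + k) // s - 1, (y + k) // s - 1)

h, w = 32, 32
h, w = full(h, w, 3, 1)   # conv 3x3 'full'        -> 34 x 34
h, w = valid(h, w, 3, 1)  # conv 3x3 'valid'       -> 32 x 32
h, w = valid(h, w, 2, 1)  # max pool 2x2, stride 1 -> 31 x 31
h, w = full(h, w, 3, 1)   # conv 3x3 'full'        -> 33 x 33
h, w = valid(h, w, 3, 1)  # conv 3x3 'valid'       -> 31 x 31
h, w = valid(h, w, 2, 1)  # max pool 2x2, stride 1 -> 30 x 30
print(8 * h * w)          # 7200 input units for the first Linear layer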
Example #2
def __init__(self, input_dim, output_dim):
    # small MLP: note that the first RELU is applied directly to the raw input
    self.layers = []
    self.layers.append(RELU())
    self.layers.append(Linear(input_dim, 200))
    self.layers.append(RELU())
    self.layers.append(Linear(200, output_dim))
    self.layers.append(Softmax())

    # collect every trainable parameter so the optimizer can update them
    self.params = []
    for layer in self.layers:
        self.params += layer.params
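The collected self.params list is what a Theano-based trainer would differentiate against; the sketch below is a minimal illustration of that pattern with plain SGD updates, assuming a scalar cost expression (neither the cost nor the update rule comes from this snippet).

import theano.tensor as T

def sgd_updates(cost, params, lr=0.01):
    # one (param, new_value) pair per entry of the collected params list
    grads = T.grad(cost, params)
    return [(p, p - lr * g) for p, g in zip(params, grads)]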
Example #3
    def __init__(self, input_shape, output_dim):
        '''
        FIELDS:
            self.params: any parameters from the layers that need to be updated
                         by backpropagation are collected in self.params
        PARAMS:
            input_shape: tuple
                         shape of the input image with format (channel, height, width)
            output_dim: int
                        the output dimension of the model
        '''
        assert len(input_shape) == 3, 'input_shape must be a tuple or list of dim (channel, height, width)'
        c, h, w = input_shape

        # output spatial dims for 'valid' / 'full' border modes; floor division keeps them integral
        valid = lambda x, y, kernel, stride: ((x - kernel) // stride + 1, (y - kernel) // stride + 1)
        full = lambda x, y, kernel, stride: ((x + kernel) // stride - 1, (y + kernel) // stride - 1)

        self.layers = []
        self.layers.append(Convolution2D(input_channels=c, filters=96, kernel_size=(11,11),
                                         stride=(4,4), border_mode='valid'))
        nh, nw = valid(h, w, 11, 4)
        self.layers.append(RELU())
        self.layers.append(LRN())
        self.layers.append(Pooling2D(poolsize=(3,3), stride=(2,2), mode='max'))
        nh, nw = valid(nh, nw, 3, 2)
        self.layers.append(Convolution2D(input_channels=96, filters=256, kernel_size=(5,5),
                                         stride=(1,1), border_mode='full'))
        nh, nw = full(nh, nw, 5, 1)
        self.layers.append(RELU())
        self.layers.append(LRN())
        self.layers.append(Pooling2D(poolsize=(3,3), stride=(2,2), mode='max'))
        nh, nw = valid(nh, nw, 3, 2)
        self.layers.append(Convolution2D(input_channels=256, filters=384, kernel_size=(3,3),
                                         stride=(1,1), border_mode='full'))
        nh, nw = full(nh, nw, 3, 1)
        self.layers.append(RELU())
        self.layers.append(Convolution2D(input_channels=384, filters=384, kernel_size=(3,3),
                                         stride=(1,1), border_mode='full'))
        nh, nw = full(nh, nw, 3, 1)
        self.layers.append(RELU())
        self.layers.append(Convolution2D(input_channels=384, filters=256, kernel_size=(3,3),
                                         stride=(1,1), border_mode='full'))
        nh, nw = full(nh, nw, 3, 1)
        self.layers.append(RELU())
        self.layers.append(Pooling2D(poolsize=(3,3), stride=(2,2), mode='max'))
        nh, nw = valid(nh, nw, 3, 2)

        self.layers.append(Flatten())
        self.layers.append(Linear(256*nh*nw,4096))
        self.layers.append(RELU())
        self.layers.append(Dropout(0.5))
        self.layers.append(Linear(4096,4096))
        self.layers.append(RELU())
        self.layers.append(Dropout(0.5))
        self.layers.append(Linear(4096,output_dim))
        self.layers.append(Softmax())

        self.params = []
        for layer in self.layers:
            self.params += layer.params
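To make the shape bookkeeping concrete, the standalone trace below assumes the AlexNet-style input_shape = (3, 227, 227), which the source does not fix, and reproduces the nh, nw updates above.

# Standalone shape trace for the model above, assuming input_shape = (3, 227, 227).
valid = lambda x, y, k, s: ((x - k) // s + 1, (y - k) // s + 1)
full = lambda x, y, k, s: ((x + k) // s - 1, (y + k) // s - 1)

nh, nw = valid(227, 227, 11, 4)  # conv 11x11 stride 4     -> 55 x 55
nh, nw = valid(nh, nw, 3, 2)     # max pool 3x3 stride 2   -> 27 x 27
nh, nw = full(nh, nw, 5, 1)      # conv 5x5 'full'         -> 31 x 31
nh, nw = valid(nh, nw, 3, 2)     # max pool 3x3 stride 2   -> 15 x 15
nh, nw = full(nh, nw, 3, 1)      # conv 3x3 'full'         -> 17 x 17
nh, nw = full(nh, nw, 3, 1)      # conv 3x3 'full'         -> 19 x 19
nh, nw = full(nh, nw, 3, 1)      # conv 3x3 'full'         -> 21 x 21
nh, nw = valid(nh, nw, 3, 2)     # max pool 3x3 stride 2   -> 10 x 10
print(256 * nh * nw)             # 25600 input units for the first Linear layer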
Example #4
def train():
    max_features = 20000
    maxseqlen = 100 # cut texts after this number of words (among the top max_features most common words)
    batch_size = 16
    word_vec_len = 256
    iter_class = 'SequentialRecurrentIterator'
    seq_len = 10

    data = IMDB(pad_zero=True, maxlen=maxseqlen, nb_words=max_features, batch_size=batch_size,
                train_valid_test_ratio=[8,2,0], iter_class=iter_class, seq_len=seq_len)

    print('Build model...')
    model = Sequential(input_var=T.matrix(), output_var=T.matrix())
    model.add(Embedding(max_features, word_vec_len))

    # MLP layers
    model.add(Transform((word_vec_len,))) # transform the 3d recurrent input into 2d input for the mlp layers
    model.add(Linear(word_vec_len, 100))
    model.add(RELU())
    model.add(BatchNormalization(dim=100, layer_type='fc'))
    model.add(Linear(100,100))
    model.add(RELU())
    model.add(BatchNormalization(dim=100, layer_type='fc'))
    model.add(Linear(100, word_vec_len))
    model.add(RELU())
    model.add(Transform((maxseqlen, word_vec_len))) # transform back from 2d to 3d for recurrent input

    # Stacked up BiLSTM layers
    model.add(BiLSTM(word_vec_len, 50, output_mode='concat', return_sequences=True))
    model.add(BiLSTM(100, 24, output_mode='sum', return_sequences=True))
    model.add(LSTM(24, 24, return_sequences=True))

    # MLP layers
    model.add(Reshape((24 * maxseqlen,)))
    model.add(BatchNormalization(dim=24 * maxseqlen, layer_type='fc'))
    model.add(Linear(24 * maxseqlen, 50))
    model.add(RELU())
    model.add(Dropout(0.2))
    model.add(Linear(50, 1))
    model.add(Sigmoid())

    # build learning method
    decay_batch = int(data.train.X.shape[0] * 5 / batch_size)
    learning_method = SGD(learning_rate=0.1, momentum=0.9,
                          lr_decay_factor=1.0, decay_batch=decay_batch)

    # Build Logger
    log = Log(experiment_name = 'MLP',
              description = 'This is a tutorial',
              save_outputs = True, # log all the outputs from the screen
              save_model = True, # save the best model
              save_epoch_error = True, # log error at every epoch
              save_to_database = {'name': 'Example.sqlite3',
                                  'records': {'Batch_Size': batch_size,
                                              'Learning_Rate': learning_method.learning_rate,
                                              'Momentum': learning_method.momentum}}
             ) # end log

    # put everything into the train object
    train_object = TrainObject(model = model,
                               log = log,
                               dataset = data,
                               train_cost = mse,
                               valid_cost = error,
                               learning_method = learning_method,
                               stop_criteria = {'max_epoch' : 100,
                                                'epoch_look_back' : 5,
                                                'percent_decrease' : 0.01}
                               )
    # finally run the code
    train_object.setup()
    train_object.run()
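The Transform and Reshape comments above describe flattening the batch and time axes for the MLP blocks and flattening the whole sequence before the classifier; the numpy sketch below only illustrates those reshapes under that reading (it is not Mozi code).

# Numpy sketch (assumption, not Mozi code) of the reshapes the inline comments describe.
import numpy as np

batch_size, maxseqlen, word_vec_len = 16, 100, 256
x = np.zeros((batch_size, maxseqlen, word_vec_len))      # embedded sequences
x2d = x.reshape(-1, word_vec_len)                        # (1600, 256) for the MLP layers
x3d = x2d.reshape(batch_size, maxseqlen, word_vec_len)   # back to (16, 100, 256) for the BiLSTMs
flat = np.zeros((batch_size, maxseqlen, 24)).reshape(batch_size, -1)  # (16, 2400) before the final MLP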
def _left_model(text_input_dim, merged_dim):
    left_model = Sequential(input_var=T.matrix(), output_var=T.matrix())
    left_model.add(Linear(text_input_dim, 100))
    left_model.add(RELU())
    left_model.add(Linear(100, merged_dim))
    return left_model
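_left_model and _right_model both end in a merged_dim output, which suggests a two-branch text/image network; the numpy sketch below only illustrates the kinds of fusion that equal output dimensions allow, since the actual merge step is not shown in the source and merged_dim = 64 is a hypothetical value.

# Hypothetical fusion of the two branch outputs (not from the source).
import numpy as np

merged_dim = 64
text_out = np.zeros((16, merged_dim))                 # _left_model output for a batch of 16
img_out = np.zeros((16, merged_dim))                  # _right_model output for the same batch
fused_sum = text_out + img_out                        # element-wise sum: (16, 64)
fused_cat = np.concatenate([text_out, img_out], 1)    # concatenation: (16, 128)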