Example #1
def create_model_bn_5_dropout():
    #Max Accuracy: 0.78884
    #Max Validation Accuracy: 0.8482999801635742

    model = Sequential()
    # Convolutional layers
    model.add(
        Conv2D(filters=20,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=x_train.shape[1:],
               padding="same"))
    model.add(BatchNormalization())
    model.add(
        Conv2D(filters=20,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=x_train.shape[1:],
               padding="same"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D())
    model.add(SpatialDropout2D(0.25))

    model.add(
        Conv2D(filters=40,
               kernel_size=(3, 3),
               activation='relu',
               padding="same"))
    model.add(
        Conv2D(filters=40,
               kernel_size=(3, 3),
               activation='relu',
               padding="same"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D())
    model.add(SpatialDropout2D(0.2))

    model.add(
        Conv2D(filters=64,
               kernel_size=(3, 3),
               activation='relu',
               padding="same"))
    model.add(
        Conv2D(filters=64,
               kernel_size=(3, 3),
               activation='relu',
               padding="same"))
    model.add(BatchNormalization())
    model.add(MaxPooling2D())
    model.add(Dropout(0.15))

    # Fully Connected layers
    model.add(Flatten())
    model.add(Dense(units=384, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(units=192, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(units=num_classes, activation='softmax'))

    return model
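
A minimal usage sketch (not part of the original example), assuming x_train, y_train, x_test, y_test, and num_classes are already defined by the surrounding script (e.g. CIFAR-10 loaded elsewhere):

from keras.utils import to_categorical

# Hypothetical driver code: the data arrays and num_classes come from the caller's pipeline.
model = create_model_bn_5_dropout()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, to_categorical(y_train, num_classes),
          validation_data=(x_test, to_categorical(y_test, num_classes)),
          batch_size=64,
          epochs=50)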
Example #2
def build_model():
    """
    NVIDIA model
    http://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
    """
    model = Sequential()

    model.add(Lambda(lambda x: x / 127.5 - 1., input_shape=INPUT_SHAPE))
    model.add(Cropping2D(cropping=((60, 20), (0, 0))))
    model.add(Convolution2D(24, 5, 5, border_mode="same", subsample=(2,2), activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(36, 5, 5, border_mode="same", subsample=(2,2), activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(48, 5, 5, border_mode="valid", subsample=(2,2), activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="elu"))
    model.add(SpatialDropout2D(0.2))

    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(100, activation="elu"))
    model.add(Dense(50, activation="elu"))
    model.add(Dense(10, activation="elu"))
    model.add(Dropout(0.5))
    model.add(Dense(1))

    return model
def main():
    samples = []
    # with open(FLAGS.csv_path) as csvfile:
    #     reader = csv.reader(csvfile)
    #     for line in reader:
    #         samples.append(line)
    #     samples.pop(0)

    with open(FLAGS.csv_path_augmented) as csvfile:
        reader = csv.reader(csvfile)
        for line in reader:
            samples.append(line)

    from sklearn.model_selection import train_test_split
    train_samples, validation_samples = train_test_split(samples,
                                                         test_size=0.2)

    # compile and train the model using the generator function
    train_generator = generator(train_samples, batch_size=FLAGS.batch_size)
    validation_generator = generator(validation_samples,
                                     batch_size=FLAGS.batch_size)

    def resize(img):
        import tensorflow as tf
        img = tf.image.resize_images(img, (66, 200))
        return img

    model = Sequential([
        Cropping2D(cropping=((22, 22), (0, 0)), input_shape=(160, 320, 3)),
        Lambda(lambda x: (x / 255.0) - 0.5),
        Lambda(resize),
        Conv2D(24, (5, 5), padding='same', strides=(2, 2), activation='relu'),
        SpatialDropout2D(0.2),
        Conv2D(36, (5, 5), padding='same', strides=(2, 2), activation='elu'),
        SpatialDropout2D(0.2),
        Conv2D(48, (5, 5), padding='same', strides=(2, 2), activation='elu'),
        SpatialDropout2D(0.2),
        Conv2D(64, (3, 3), padding='valid', activation='elu'),
        SpatialDropout2D(0.2),
        Conv2D(64, (3, 3), padding='same', activation='elu'),
        SpatialDropout2D(0.2),
        Flatten(),
        Dropout(0.5),
        Dense(100, activation='elu'),
        Dense(50, activation='elu'),
        Dense(10, activation='elu'),
        Dropout(0.5),
        Dense(1)
    ])

    model.compile(loss='mse', optimizer=Adam(lr=FLAGS.lrate))
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=len(train_samples) // FLAGS.batch_size,
                        epochs=FLAGS.num_epochs,
                        validation_data=validation_generator,
                        validation_steps=len(validation_samples) // FLAGS.batch_size)

    model.summary()
    model.save('model.h5')
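
The generator function used by fit_generator above is not part of this excerpt. A minimal sketch of such a batching generator, assuming each CSV row stores an image path in column 0 and a steering angle in column 3 (as in the Udacity behavioral-cloning data):

import numpy as np
import cv2
from sklearn.utils import shuffle

def generator(samples, batch_size=32):
    """Yield (images, steering_angles) batches indefinitely, as expected by fit_generator."""
    num_samples = len(samples)
    while True:
        samples = shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch = samples[offset:offset + batch_size]
            images, angles = [], []
            for line in batch:
                image = cv2.cvtColor(cv2.imread(line[0]), cv2.COLOR_BGR2RGB)
                images.append(image)
                angles.append(float(line[3]))
            yield np.array(images), np.array(angles)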
Example #4
    def def_model_(self):
        model = Sequential()
        model.add(Conv2D(self.n_channels, 3, padding='same',input_shape=self.input_size,
                         kernel_regularizer=regularizers.l2(self.weight_decay)))
        model.add(Activation('relu'))
        model.add(BatchNormalization(momentum=0.9))
        model.add(MaxPooling2D(pool_size=3, strides=2))
        model.add(SpatialDropout2D(rate=self.drop))
        #  model.add(Dropout(drop))

        for _ in range(self.blocks - 1):
            self.n_channels *= 2
            model.add(Conv2D(self.n_channels, (3, 3), padding='same',
                             kernel_regularizer=regularizers.l2(self.weight_decay)))
            model.add(Activation('relu'))
            model.add(BatchNormalization(momentum=0.9))
            model.add(MaxPooling2D(pool_size=3, strides=2))
            model.add(SpatialDropout2D(rate=self.drop))
            #  model.add(Dropout(drop))

        model.add(Flatten())
        model.add(Dense(self.embedding_dim, kernel_regularizer=regularizers.l2(self.weight_decay)))
        model.add(Lambda(lambda x: K.l2_normalize(x, axis=-1)))
        model.summary()
        return model
def lenet_model(input_shape=(160, 320, 3), drop_out=0.5, drop_out_sp=0.2):
    """
    LeNet Architecture
    """
    print('\n\n ')
    print('>> Building the model (LeNet Architecture)...')

    # Pre-processing layer
    model = pre_processing_model(input_shape=input_shape)

    # Other layers
    model.add(
        Convolution2D(6, (5, 5),
                      strides=(1, 1),
                      padding='valid',
                      activation='relu'))
    model.add(SpatialDropout2D(drop_out_sp))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='valid'))
    model.add(
        Convolution2D(6, (5, 5),
                      strides=(1, 1),
                      padding='valid',
                      activation='relu'))
    model.add(SpatialDropout2D(drop_out_sp))
    model.add(MaxPooling2D(pool_size=(2, 2), padding='valid'))
    model.add(Flatten())
    model.add(Dropout(drop_out))
    model.add(Dense(120))
    model.add(Dropout(drop_out))
    model.add(Dense(84))
    model.add(Dropout(drop_out))
    model.add(Dense(1))

    model.summary()
    return model
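
pre_processing_model is defined elsewhere in the original project. A plausible sketch of what it might do, assuming it returns a Sequential model that normalizes and crops the camera frames before the LeNet layers are appended:

from keras.models import Sequential
from keras.layers import Lambda, Cropping2D

def pre_processing_model(input_shape=(160, 320, 3)):
    """Hypothetical pre-processing stack: zero-center the pixels and crop sky/hood rows."""
    model = Sequential()
    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=input_shape))
    model.add(Cropping2D(cropping=((70, 25), (0, 0))))
    return model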
Example #6
def model_generator(latent_dim,
                    nch=512,
                    dropout=0.5,
                    reg=lambda: l1l2(l1=1e-7, l2=1e-7)):
    model = Sequential(name="decoder")
    h = 5
    model.add(
        Dense(input_dim=latent_dim,
              output_dim=nch * 4 * 4,
              W_regularizer=reg()))
    model.add(Reshape(dim_ordering_shape((nch, 4, 4))))
    model.add(SpatialDropout2D(dropout))
    model.add(LeakyReLU(0.2))
    model.add(
        Convolution2D(nch / 2, h, h, border_mode='same', W_regularizer=reg()))
    model.add(SpatialDropout2D(dropout))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(
        Convolution2D(nch / 2, h, h, border_mode='same', W_regularizer=reg()))
    model.add(SpatialDropout2D(dropout))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(
        Convolution2D(nch / 4, h, h, border_mode='same', W_regularizer=reg()))
    model.add(SpatialDropout2D(dropout))
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Convolution2D(3, h, h, border_mode='same', W_regularizer=reg()))
    model.add(Activation('sigmoid'))
    return model
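
This example uses the Keras 1 API (output_dim, W_regularizer, border_mode, positional kernel sizes) plus the dim_ordering_shape helper from the keras-adversarial package; note also that nch / 2 is a float under Python 3. A rough sketch of how the first two layers would be written against the Keras 2 API (an assumed translation, not from the original source):

from keras.layers import Dense, Conv2D
from keras.regularizers import l1_l2

latent_dim, nch, h = 100, 512, 5  # hypothetical values matching the defaults above

# Keras 1: Dense(input_dim=latent_dim, output_dim=nch * 4 * 4, W_regularizer=reg())
dense = Dense(nch * 4 * 4, input_dim=latent_dim,
              kernel_regularizer=l1_l2(l1=1e-7, l2=1e-7))

# Keras 1: Convolution2D(nch / 2, h, h, border_mode='same', W_regularizer=reg())
conv = Conv2D(nch // 2, (h, h), padding='same',
              kernel_regularizer=l1_l2(l1=1e-7, l2=1e-7))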
    def make_model(activation='relu',
                   loss='categorical_crossentropy',
                   optimizer='sgd',
                   sp_dropout=0.1,
                   dropout=0.1):
        """
            This creates a new CNN model every time it is called.
        """

        conv_model_gaus_dr = Sequential()

        conv_model_gaus_dr.add(
            Conv2D(32, (3, 3), input_shape=(64, 64, 1), activation=activation))
        conv_model_gaus_dr.add(BatchNormalization())
        conv_model_gaus_dr.add(Conv2D(64, (3, 3), activation=activation))
        conv_model_gaus_dr.add(MaxPool2D((2, 2)))
        conv_model_gaus_dr.add(SpatialDropout2D(sp_dropout))
        conv_model_gaus_dr.add(Conv2D(128, (3, 3), activation=activation))
        conv_model_gaus_dr.add(MaxPool2D((2, 2)))
        conv_model_gaus_dr.add(SpatialDropout2D(sp_dropout))
        conv_model_gaus_dr.add(Conv2D(256, (3, 3), activation=activation))
        conv_model_gaus_dr.add(MaxPool2D((2, 2)))
        conv_model_gaus_dr.add(Flatten())
        conv_model_gaus_dr.add(Dropout(dropout))
        conv_model_gaus_dr.add(Dense(512, activation='relu'))
        conv_model_gaus_dr.add(Dense(4, activation='softmax'))

        conv_model_gaus_dr.compile(loss=loss,
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        return conv_model_gaus_dr
Example #8
def model_encoder(latent_dim,
                  input_shape,
                  nch=512,
                  reg=lambda: l1l2(l1=1e-7, l2=1e-7),
                  dropout=0.5):
    k = 5
    x = Input(input_shape)
    h = Convolution2D(nch / 4, k, k, border_mode='same',
                      W_regularizer=reg())(x)
    h = SpatialDropout2D(dropout)(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = LeakyReLU(0.2)(h)
    h = Convolution2D(nch / 2, k, k, border_mode='same',
                      W_regularizer=reg())(h)
    h = SpatialDropout2D(dropout)(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = LeakyReLU(0.2)(h)
    h = Convolution2D(nch / 2, k, k, border_mode='same',
                      W_regularizer=reg())(h)
    h = SpatialDropout2D(dropout)(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = LeakyReLU(0.2)(h)
    h = Convolution2D(nch, k, k, border_mode='same', W_regularizer=reg())(h)
    h = SpatialDropout2D(dropout)(h)
    h = LeakyReLU(0.2)(h)
    h = Flatten()(h)
    mu = Dense(latent_dim, name="encoder_mu", W_regularizer=reg())(h)
    log_sigma_sq = Dense(latent_dim,
                         name="encoder_log_sigma_sq",
                         W_regularizer=reg())(h)
    z = merge(
        [mu, log_sigma_sq],
        mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
        output_shape=lambda p: p[0])
    return Model(x, z, name="encoder")
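
The merge call implements the VAE reparameterization trick, z = mu + eps * exp(log_sigma_sq / 2). merge was removed in Keras 2; a sketch of the equivalent using a Lambda layer over the two tensors (an assumed translation, not from the original source):

from keras.layers import Lambda
from keras import backend as K

def sampling(args):
    """Reparameterization trick: z = mu + eps * exp(log_sigma_sq / 2)."""
    mu, log_sigma_sq = args
    eps = K.random_normal(K.shape(mu))
    return mu + eps * K.exp(log_sigma_sq / 2)

# Inside model_encoder, the merge(...) call above would become:
# z = Lambda(sampling, output_shape=lambda shapes: shapes[0])([mu, log_sigma_sq])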
Example #9
def model_nvidia(img_shape):
    """Model based on the following paper by Nvidia
    http://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
    """
    model = Sequential()
    model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=img_shape, output_shape=img_shape))
    # model.add(Cropping2D(cropping=((70, 25),(0, 0))))
    model.add(Convolution2D(24, 5, 5, border_mode="same", subsample=(2,2), activation="relu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(36, 5, 5, border_mode="same", subsample=(2,2), activation="relu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(48, 5, 5, border_mode="valid", subsample=(2,2), activation="relu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="relu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="relu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(100, activation="relu"))
    model.add(Dense(50, activation="relu"))
    model.add(Dense(10, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(1))    
    return model
Example #10
def make_model_tinycnn():
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
    model.add(Conv2D(32, kernel_size=(3, 3),
                    activation='relu'))
    model.add(SpatialDropout2D(0.3))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(SpatialDropout2D(0.4))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(32, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    
    model.add(Flatten())
    
    model.add(Dropout(0.5))
    model.add(Dense(256, activation='relu'))
    
    
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))    
    model.add(Dense(num_classes, activation='softmax'))
    optim = Adam(lr=0.0001)
    
    model.compile(loss=keras.losses.categorical_crossentropy,
                optimizer=optim,
                metrics=['accuracy'])
    return model
def create_multi_res_model():
    '''Create a PPT text detector model using multi-resolution responses
    '''
    base_model = create_single_res_model()
    # create a multi-resolution model
    inputs = Input(shape=(3, None, None))
    a2 = AveragePooling2D((2, 2))(inputs)
    a3 = AveragePooling2D((3, 3))(inputs)
    a4 = AveragePooling2D((4, 4))(inputs)
    # decode at each resolution
    p1 = base_model(inputs)
    p2 = base_model(a2)
    p3 = base_model(a3)
    p4 = base_model(a4)
    # dropout
    d1 = SpatialDropout2D(0.25)(p1)
    d2 = SpatialDropout2D(0.25)(p2)
    d3 = SpatialDropout2D(0.25)(p3)
    d4 = SpatialDropout2D(0.25)(p4)
    # map to original resolution
    o2 = UpSampling2D((2, 2))(d2)
    o3 = UpSampling2D((3, 3))(d3)
    o4 = UpSampling2D((4, 4))(d4)
    # merge all responses
    f = merge([d1, o2, o3, o4], mode='concat', concat_axis=1)
    f_pad = ZeroPadding2D((5, 5))(f)
    bottle = Convolution2D(8, 11, 11, activation='relu', name='bottle')(f_pad)
    output = Convolution2D(3, 1, 1, activation=softmax4)(bottle)
    model = Model(input=inputs, output=output)
    return model
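
softmax4 and create_single_res_model are defined elsewhere in the original project. With the channels-first input (3, None, None), softmax4 presumably applies a per-pixel softmax over the channel axis of the 4D feature map; a plausible sketch:

from keras import backend as K

def softmax4(x):
    """Hypothetical per-pixel softmax across the channel axis (axis=1) of a 4D tensor."""
    e = K.exp(x - K.max(x, axis=1, keepdims=True))
    return e / K.sum(e, axis=1, keepdims=True)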
def regression_model(input_shape=(image_height, image_width, 3), use_adadelta=True, learning_rate=0.01, W_l2=0.0001):
        """
        """
        model = Sequential()
        model.add(Conv2D(16, (5, 5), input_shape=input_shape, kernel_initializer="he_normal", activation='relu', padding='same'))
        model.add(SpatialDropout2D(0.1))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(20, (5, 5), kernel_initializer="he_normal", activation='relu', padding='same'))
        model.add(SpatialDropout2D(0.1))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(40, (3, 3), kernel_initializer="he_normal", activation='relu', padding='same'))
        model.add(SpatialDropout2D(0.1))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(60, (3, 3), kernel_initializer="he_normal", activation='relu', padding='same'))
        model.add(SpatialDropout2D(0.1))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(80, (2, 2), kernel_initializer="he_normal", activation='relu', padding='same'))
        model.add(SpatialDropout2D(0.1))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(128, (2, 2), kernel_initializer="he_normal", activation='relu', padding='same'))
        model.add(Flatten())
        model.add(Dropout(0.5))
        model.add(Dense(1, kernel_initializer='he_normal', kernel_regularizer=l2(W_l2)))

        optimizer = SGD(lr=learning_rate, momentum=0.9)

        model.compile(loss=rmse, optimizer=optimizer)

        return model
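
The rmse loss passed to model.compile is not defined in this excerpt. A minimal sketch of a root-mean-squared-error loss written with the Keras backend:

from keras import backend as K

def rmse(y_true, y_pred):
    """Root mean squared error, usable as a Keras loss function."""
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))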
Example #13
def keras_model(input_image):
    model = Sequential()

    model.add(Lambda(resize_images, input_shape=input_image))  # assuming input_image is the input shape tuple
    model.add(Lambda(lambda x: x / 255. - 0.5))
    model.add(Convolution2D(24, 5, 5, border_mode="same", subsample=(2, 2), activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(36, 5, 5, border_mode="same", subsample=(2, 2), activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(48, 5, 5, border_mode="valid", subsample=(2, 2), activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="elu"))
    model.add(SpatialDropout2D(0.2))

    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(100, activation="elu"))
    model.add(Dense(50, activation="elu"))
    model.add(Dense(10, activation="elu"))
    model.add(Dropout(0.5))
    model.add(Dense(1))

    model.compile(optimizer=Adam(lr=0.001), loss='mse')
    return model
Example #14
    def decoder_a(self):
        """ Decoder for side A """
        kwargs = dict(kernel_size=5, kernel_initializer=self.kernel_initializer)
        decoder_complexity = 320 if self.low_mem else self.config["complexity_decoder_a"]
        dense_dim = 384 if self.low_mem else 512
        decoder_shape = self.input_shape[0] // 16
        input_ = Input(shape=(decoder_shape, decoder_shape, dense_dim))

        var_x = input_

        var_x = UpscaleBlock(decoder_complexity, activation="leakyrelu", **kwargs)(var_x)
        var_x = SpatialDropout2D(0.25)(var_x)
        var_x = UpscaleBlock(decoder_complexity, activation="leakyrelu", **kwargs)(var_x)
        if self.low_mem:
            var_x = SpatialDropout2D(0.15)(var_x)
        else:
            var_x = SpatialDropout2D(0.25)(var_x)
        var_x = UpscaleBlock(decoder_complexity // 2, activation="leakyrelu", **kwargs)(var_x)
        var_x = UpscaleBlock(decoder_complexity // 4, activation="leakyrelu", **kwargs)(var_x)
        var_x = Conv2DOutput(3, 5, name="face_out_a")(var_x)
        outputs = [var_x]

        if self.config.get("learn_mask", False):
            var_y = input_
            var_y = UpscaleBlock(decoder_complexity, activation="leakyrelu")(var_y)
            var_y = UpscaleBlock(decoder_complexity, activation="leakyrelu")(var_y)
            var_y = UpscaleBlock(decoder_complexity // 2, activation="leakyrelu")(var_y)
            var_y = UpscaleBlock(decoder_complexity // 4, activation="leakyrelu")(var_y)
            var_y = Conv2DOutput(1, 5, name="mask_out_a")(var_y)
            outputs.append(var_y)
        return KerasModel(input_, outputs=outputs, name="decoder_a")
Example #15
def nvidia_model():
   row, col, depth = 66,200,3 
   model = Sequential()
   model.add(Lambda(process,input_shape = (160,320,3) ))
   model.add(Lambda(lambda x: x/255.-0.5))
   model.add(Convolution2D(24, 5, 5, border_mode="same", subsample=(2,2), activation="relu"))
   model.add(SpatialDropout2D(0.2))
   model.add(Convolution2D(36, 5, 5, border_mode="same", subsample=(2,2), activation="relu"))
   model.add(SpatialDropout2D(0.2))
   model.add(Convolution2D(48, 5, 5, border_mode="valid", subsample=(2,2), activation="relu"))
   model.add(SpatialDropout2D(0.2))
   model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="relu"))
   model.add(SpatialDropout2D(0.2))
   model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="relu"))
   model.add(SpatialDropout2D(0.2))
   model.add(Flatten())
   model.add(Dropout(0.5))
   model.add(Dense(100, activation="relu"))
   model.add(Dense(50, activation="relu"))
   model.add(Dense(10, activation="relu"))
   model.add(Dropout(0.5))
   model.add(Dense(1))  
   model.summary()
   # Adam optimizer with mean squared error loss
   model.compile(loss = 'mse' , optimizer = 'adam')
   return model
def layer1(inputmodel):
    #model = Model()(inputmodel)
    model = Conv2D(50, kernel_size=(5, 5), dilation_rate=2,
                   activation='relu')(inputmodel)
    model = SpatialDropout2D(0.3)(model)
    model = Conv2D(50, kernel_size=(5, 5), dilation_rate=2,
                   activation='relu')(model)
    model = MaxPooling2D(pool_size=(2, 2))(model)
    # model = SpatialDropout2D(0.3)(model)
    model = Conv2D(50, kernel_size=(5, 5), dilation_rate=2,
                   activation='relu')(model)
    model = SpatialDropout2D(0.3)(model)
    model = Conv2D(50, kernel_size=(5, 5), dilation_rate=2,
                   activation='relu')(model)
    model = MaxPooling2D(pool_size=(2, 2))(model)
    # model = SpatialDropout2D(0.3)(model)
    model = Conv2D(50, kernel_size=(5, 5), dilation_rate=2,
                   activation='relu')(model)
    model = SpatialDropout2D(0.3)(model)
    model = Conv2D(50, kernel_size=(5, 5), dilation_rate=2,
                   activation='relu')(model)
    model = MaxPooling2D(pool_size=(2, 2))(model)

    model = SpatialDropout2D(0.3)(model)
    return model
Example #17
    def def_model(self):
        base_map_num = 32
        inputs = Input(shape=self.input_shape)

        x = Conv2D(base_map_num, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(self.weight_decay),
                   input_shape=self.input_shape)(inputs)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Conv2D(base_map_num, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(self.weight_decay))(x)
        x = Activation('relu')(x)
        x = Dropout(0.25)(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = SpatialDropout2D(0.25)(x)

        x = Conv2D(2 * base_map_num, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(self.weight_decay))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Conv2D(2 * base_map_num, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(self.weight_decay))(x)
        x = Activation('relu')(x)
        x = Dropout(0.35)(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = SpatialDropout2D(0.35)(x)

        x = Conv2D(4 * base_map_num, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(self.weight_decay))(x)
        x = Activation('relu')(x)
        x = BatchNormalization()(x)
        x = Conv2D(4 * base_map_num, (3, 3),
                   padding='same',
                   kernel_regularizer=regularizers.l2(self.weight_decay))(x)
        x = Activation('relu')(x)
        x = Dropout(0.4)(x)
        x = BatchNormalization()(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = SpatialDropout2D(0.4)(x)

        x = Flatten()(x)
        embedding = Dense(self.embedding_dim,
                          kernel_regularizer=regularizers.l2(
                              self.weight_decay))(x)
        embedding = Lambda(lambda x_: K.l2_normalize(x_, axis=-1),
                           name="embedding_output")(embedding)
        x = BatchNormalization()(embedding)
        x = Activation('relu')(x)
        classify = Dense(10, activation='softmax', name="class_output")(x)
        final = Model(inputs=inputs, outputs=[embedding, classify])
        final.summary()
        return final
Example #18
def model_nvidia(input_shape):
    def resize_images(img):
        import tensorflow as tf
        return tf.image.resize_images(img, (66, 200))

    model = Sequential()
    model.add(Lambda(resize_images, input_shape=input_shape))
    model.add(Lambda(lambda x: x / 255. - 0.5))
    model.add(
        Convolution2D(24,
                      5,
                      5,
                      border_mode=SAME_BORDER_MODE,
                      subsample=SUB_SAMPLE,
                      activation=ACTIVATION))
    model.add(SpatialDropout2D(0.2))
    model.add(
        Convolution2D(36,
                      5,
                      5,
                      border_mode=SAME_BORDER_MODE,
                      subsample=SUB_SAMPLE,
                      activation=ACTIVATION))
    model.add(SpatialDropout2D(0.2))
    model.add(
        Convolution2D(48,
                      5,
                      5,
                      border_mode=VALID_BORDER_MODE,
                      subsample=SUB_SAMPLE,
                      activation=ACTIVATION))
    model.add(SpatialDropout2D(0.2))
    model.add(
        Convolution2D(64,
                      3,
                      3,
                      border_mode=VALID_BORDER_MODE,
                      activation=ACTIVATION))
    model.add(SpatialDropout2D(0.2))
    model.add(
        Convolution2D(64,
                      3,
                      3,
                      border_mode=VALID_BORDER_MODE,
                      activation=ACTIVATION))
    model.add(SpatialDropout2D(0.2))

    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(100, activation=ACTIVATION))
    model.add(Dense(50, activation=ACTIVATION))
    model.add(Dense(10, activation=ACTIVATION))
    model.add(Dropout(0.5))
    model.add(Dense(1))

    model.compile(optimizer=Adam(lr=LOSS_RATE), loss=LOSS_FUNCTION)
    return model
Example #19
def conv_block(m, dim, acti, bn, res, do=0):
    n = Conv2D(dim, 3, activation=acti, padding='same')(m)
    n = BatchNormalization()(n) if bn else n
    n = SpatialDropout2D(do/2.0)(n) if do else n
    n = Conv2D(dim, 3, activation=acti, padding='same')(n)
    n = BatchNormalization()(n) if bn else n
    n = Concatenate()([m, n]) if res else n
    n = SpatialDropout2D(do)(n) if do else n
    return n
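
conv_block is typically one level of a U-Net-style encoder. A minimal sketch of how it might be wired up (assumed usage, not from the original source):

from keras.layers import Input, MaxPooling2D, Conv2D
from keras.models import Model

inputs = Input(shape=(128, 128, 1))
c1 = conv_block(inputs, 16, 'relu', bn=True, res=False, do=0.2)  # two 3x3 convs + BN + SpatialDropout2D
p1 = MaxPooling2D()(c1)
c2 = conv_block(p1, 32, 'relu', bn=True, res=False, do=0.2)
outputs = Conv2D(1, 1, activation='sigmoid')(c2)
model = Model(inputs, outputs)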
Example #20
def small_2d_conv_net_dynamic(size_y=32,
                              size_x=32,
                              n_channels=1,
                              n_frames=5,
                              filters=10):

    inp = Input(shape=(size_y, size_x, n_channels, n_frames))
    x1 = Reshape(target_shape=(size_y, size_x, n_channels * n_frames))(inp)
    x1 = DynImage(filters=filters)(x1)
    x1 = GaussianNoise(0.05)(x1)
    x1 = Conv2D(32, (3, 3), padding='same')(x1)
    x1 = SpatialDropout2D(0.3)(x1)
    x1 = LeakyReLU(alpha=0.2)(x1)
    x1 = MaxPooling2D(pool_size=(2, 2))(x1)

    x1 = GaussianNoise(0.02)(x1)
    x1 = Conv2D(64, (3, 3), padding='same')(x1)
    x1 = SpatialDropout2D(0.3)(x1)
    x1 = LeakyReLU(alpha=0.2)(x1)
    x1 = MaxPooling2D(pool_size=(2, 2))(x1)

    x1 = GaussianNoise(0.01)(x1)
    x1 = Conv2D(256, (3, 3), padding='same')(x1)
    x1 = SpatialDropout2D(0.3)(x1)
    x1 = LeakyReLU(alpha=0.2)(x1)
    x1 = MaxPooling2D(pool_size=(2, 2))(x1)

    # x1 = Flatten()(x1)
    # x1 = Dropout(0.3)(x1)
    # x1 = Dense(units=h_units)(x1)
    # x1 = LeakyReLU(alpha=0.2)(x1)
    #
    # x1 = Dense(units=(size_y/4)*(size_x/4)*128)(x1)
    # x1 = LeakyReLU(alpha=0.2)(x1)
    # x1 = Reshape((size_x/4,size_y/4,128))(x1)

    x1 = UpSampling2D(size=(2, 2))(x1)
    x1 = Conv2D(256, (3, 3), padding='same')(x1)
    x1 = LeakyReLU(alpha=0.2)(x1)

    x1 = UpSampling2D(size=(2, 2))(x1)
    x1 = Conv2D(128, (3, 3), padding='same')(x1)
    x1 = LeakyReLU(alpha=0.2)(x1)

    x1 = UpSampling2D(size=(2, 2))(x1)
    x1 = Conv2D(64, (3, 3), padding='same')(x1)
    x1 = LeakyReLU(alpha=0.2)(x1)

    x1 = Conv2D(n_frames, (3, 3), activation='tanh', padding='same')(x1)
    x1 = Reshape(target_shape=(size_y, size_x, n_channels, n_frames))(x1)

    model = Model(inputs=[inp], outputs=[x1])
    rmsprop = RMSprop(lr=0.0001)

    model.compile(optimizer=rmsprop, loss='mse')

    return model
def nvidia_dropout_model(input_shape=(160, 320, 3)):

    ch, row, col = 3, 80, 320  # Trimmed image format
    model = Sequential()
    model.add(Cropping2D(cropping=((50, 30), (0, 0)), input_shape=input_shape))
    # Preprocess incoming data, centered around zero with small standard deviation
    model.add(
        Lambda(lambda x: x / 127.5 - 1.,
               input_shape=(row, col, ch),
               output_shape=(row, col, ch)))
    model.add(
        Convolution2D(24,
                      5,
                      5,
                      subsample=(2, 2),
                      activation="relu",
                      border_mode='same'))
    model.add(SpatialDropout2D(0.2))
    model.add(
        Convolution2D(36,
                      5,
                      5,
                      subsample=(2, 2),
                      activation="relu",
                      border_mode='same'))
    model.add(SpatialDropout2D(0.2))
    model.add(
        Convolution2D(48,
                      5,
                      5,
                      subsample=(2, 2),
                      activation="relu",
                      border_mode='same'))
    model.add(SpatialDropout2D(0.2))
    model.add(
        Convolution2D(64,
                      3,
                      3,
                      subsample=(2, 2),
                      activation="relu",
                      border_mode='same'))
    model.add(SpatialDropout2D(0.2))
    model.add(
        Convolution2D(64,
                      3,
                      3,
                      subsample=(2, 2),
                      activation="relu",
                      border_mode='same'))
    model.add(Flatten())
    model.add(Dense(100, activation="elu"))
    model.add(Dense(50, activation="elu"))
    model.add(Dense(10, activation="elu"))
    model.add(Dropout(0.5))
    model.add(Dense(1))

    return model
Example #22
def nvidia(img):
    """
    Model based on Nvidia paper
    http://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
    """

    shape = (img[0], img[1], 3)

    model = Sequential()

    def process(img):
        import tensorflow as tf
        # img = tf.image.rgb_to_grayscale(img)
        img = tf.image.resize_images(img, (66, 200))
        return img

    model.add(Lambda(process, input_shape=shape))

    model.add(Lambda(lambda x: x / 255. - 0.5))
    model.add(
        Convolution2D(24,
                      5,
                      5,
                      border_mode="same",
                      subsample=(2, 2),
                      activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(
        Convolution2D(36,
                      5,
                      5,
                      border_mode="same",
                      subsample=(2, 2),
                      activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(
        Convolution2D(48,
                      5,
                      5,
                      border_mode="valid",
                      subsample=(2, 2),
                      activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="elu"))
    model.add(SpatialDropout2D(0.2))

    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(100, activation="elu"))
    model.add(Dense(50, activation="elu"))
    model.add(Dense(10, activation="elu"))
    model.add(Dropout(0.5))
    model.add(Dense(1))

    return model
Example #23
def nvidia_model(img):
    """
    Model from Nvidia
    http://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
    """

    shape = img.shape

    model = Sequential()

    model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=shape))

    # crop off 70px top, 24px bottom and 60px off left/right
    model.add(Cropping2D(cropping=((70, 24), (60, 60))))

    model.add(
        Convolution2D(24,
                      5,
                      5,
                      border_mode="same",
                      subsample=(2, 2),
                      activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(
        Convolution2D(36,
                      5,
                      5,
                      border_mode="same",
                      subsample=(2, 2),
                      activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(
        Convolution2D(48,
                      5,
                      5,
                      border_mode="valid",
                      subsample=(2, 2),
                      activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="elu"))
    model.add(SpatialDropout2D(0.2))

    model.add(Flatten())
    # model.add(Dropout(0.25))
    model.add(Dense(100, activation="elu"))
    model.add(Dense(50, activation="elu"))
    model.add(Dense(10, activation="elu"))
    # model.add(Dropout(0.25))
    model.add(Dense(1))

    print("nvidia model returned successfully\n")

    return model
Example #24
def EEGNet_SSVEP(nb_classes, Chans = 64, Samples = 128, regRate = 0.0001,
           dropoutRate = 0.25, kernLength = 64, numFilters = 8):
    """ Keras Implementation of the variant of EEGNet that was used to classify
    signals from an SSVEP task (https://arxiv.org/abs/1803.04566)

       
    Inputs:
        
        nb_classes     : int, number of classes to classify
        Chans, Samples : number of channels and time points in the EEG data
        regRate        : regularization parameter for L1 and L2 penalties
        dropoutRate    : dropout fraction
        kernLength     : length of temporal convolution in first layer
        numFilters     : number of temporal-spatial filter pairs to learn
    
    """

    input1   = Input(shape = (1, Chans, Samples))

    ##################################################################
    layer1       = Conv2D(numFilters, (1, kernLength), padding = 'same',
                          kernel_regularizer = l1_l2(l1=0.0, l2=0.0),
                          input_shape = (1, Chans, Samples),
                          use_bias = False)(input1)
    layer1       = BatchNormalization(axis = 1)(layer1)
    layer1       = DepthwiseConv2D((Chans, 1), 
                              depthwise_regularizer = l1_l2(l1=regRate, l2=regRate),
                              use_bias = False)(layer1)
    layer1       = BatchNormalization(axis = 1)(layer1)
    layer1       = Activation('elu')(layer1)
    layer1       = SpatialDropout2D(dropoutRate)(layer1)
    
    layer2       = SeparableConv2D(numFilters, (1, 8), 
                              depthwise_regularizer=l1_l2(l1=0.0, l2=regRate),
                              use_bias = False, padding = 'same')(layer1)
    layer2       = BatchNormalization(axis=1)(layer2)
    layer2       = Activation('elu')(layer2)
    layer2       = AveragePooling2D((1, 4))(layer2)
    layer2       = SpatialDropout2D(dropoutRate)(layer2)
    
    layer3       = SeparableConv2D(numFilters*2, (1, 8), depth_multiplier = 2,
                              depthwise_regularizer=l1_l2(l1=0.0, l2=regRate), 
                              use_bias = False, padding = 'same')(layer2)
    layer3       = BatchNormalization(axis=1)(layer3)
    layer3       = Activation('elu')(layer3)
    layer3       = AveragePooling2D((1, 4))(layer3)
    layer3       = SpatialDropout2D(dropoutRate)(layer3)
    
    
    flatten      = Flatten(name = 'flatten')(layer3)
    
    dense        = Dense(nb_classes, name = 'dense')(flatten)
    softmax      = Activation('softmax', name = 'softmax')(dense)
    
    return Model(inputs=input1, outputs=softmax)
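
A minimal usage sketch (assumed, not from the original source). The (1, Chans, Samples) input and BatchNormalization(axis=1) imply a channels-first data format, so it is set explicitly here:

from keras import backend as K

K.set_image_data_format('channels_first')

model = EEGNet_SSVEP(nb_classes=12, Chans=8, Samples=256)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()
# Training data would then be shaped (trials, 1, Chans, Samples), e.g. (n, 1, 8, 256),
# with one-hot labels of shape (n, 12).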
def model_88(input_shape=None,
             keep_prob=0.5,
             classes=10,
             r=1e-2,
             name='model88'):
    Inpt = Input(shape=input_shape, name='Input_' + name)
    x = Conv2D(32, (3, 3),
               activation='relu',
               kernel_initializer='he_uniform',
               padding='same')(Inpt)
    #x = BatchNormalization()(x)
    x = Conv2D(32, (3, 3),
               activation='relu',
               kernel_initializer='he_uniform',
               padding='same')(x)
    #x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2))(x)
    # x = Dropout(0.2)(x)
    x = SpatialDropout2D(0.2)(x)
    x = Conv2D(64, (3, 3),
               activation='relu',
               kernel_initializer='he_uniform',
               padding='same')(x)
    #x = BatchNormalization()(x)
    x = Conv2D(64, (3, 3),
               activation='relu',
               kernel_initializer='he_uniform',
               padding='same')(x)
    #x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(0.3)(x)
    # x = SpatialDropout2D(0.3)(x)
    x = Conv2D(128, (3, 3),
               activation='relu',
               kernel_initializer='he_uniform',
               padding='same')(x)
    #x = BatchNormalization()(x)
    x = Conv2D(128, (3, 3),
               activation='relu',
               kernel_initializer='he_uniform',
               padding='same')(x)
    #x = BatchNormalization()(x)
    x = MaxPooling2D((2, 2))(x)
    # x = Dropout(0.4)(x)
    x = SpatialDropout2D(0.2)(x)
    x = GlobalAveragePooling2D()(x)
    # Original network uses FC layers with BN and Dropout
    #x = Flatten()(x)
    #x = Dense(128, activation='relu', kernel_initializer='he_uniform')(x)
    #x = BatchNormalization()(x)
    #x = Dropout(0.5)(x)
    prediction = Dense(classes, activation='softmax')(x)
    model = Model(Inpt, prediction, name=name)
    model.summary()
    return model
Example #26
def nvidia_model(input_shape):
    """
    model referenced from
    http://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
    I modified the model by adding Convolution2D layers and W_regularizer, and changed the activation function to elu.
    """
    _model = Sequential()
    _model.add(Cropping2D(cropping=((50, 20), (0, 0)),
                          input_shape=input_shape))
    _model.add(Lambda(lambda x: x / 255. - 0.5))
    _model.add(
        Convolution2D(24, (5, 5),
                      padding="same",
                      strides=(2, 2),
                      activation="elu"))
    _model.add(SpatialDropout2D(0.2))
    _model.add(
        Convolution2D(36, (5, 5),
                      padding="same",
                      strides=(2, 2),
                      activation="elu"))
    _model.add(SpatialDropout2D(0.2))
    _model.add(
        Convolution2D(48, (5, 5),
                      padding="valid",
                      strides=(2, 2),
                      activation="elu"))
    _model.add(SpatialDropout2D(0.2))
    _model.add(
        Convolution2D(64, (3, 3),
                      padding="valid",
                      strides=(2, 2),
                      activation="elu"))
    _model.add(SpatialDropout2D(0.2))
    _model.add(
        Convolution2D(64, (3, 3),
                      padding="valid",
                      strides=(2, 2),
                      activation="elu"))
    _model.add(SpatialDropout2D(0.2))

    _model.add(Flatten())
    _model.add(Dropout(0.2))
    _model.add(Dense(100, activation="elu",
                     kernel_regularizer=l2(REGULARIZER)))
    _model.add(Dropout(0.2))
    _model.add(Dense(50, activation="elu", kernel_regularizer=l2(REGULARIZER)))
    _model.add(Dropout(0.2))
    _model.add(Dense(10, activation="elu", kernel_regularizer=l2(REGULARIZER)))
    _model.add(Dropout(0.2))
    _model.add(Dense(1))

    _model.compile(optimizer=Adam(lr=.001), loss='mse')
    return _model
Example #27
    def __init__(self):
        self.model = Sequential()

        #CONV1
        self.model.add(
            Convolution2D(N_FILTERS1,
                          3,
                          3,
                          init=INIT,
                          input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
        self.model.add(LeakyReLU(alpha=ReLU))

        #MAX_POOL1
        self.model.add(MaxPooling2D((2, 2), strides=(2, 2)))
        self.model.add(BatchNormalization())

        #CONV2
        self.model.add(Convolution2D(N_FILTERS2, 2, 2, init=INIT))
        self.model.add(LeakyReLU(alpha=ReLU))
        self.model.add(SpatialDropout2D(0.1))

        #MAX_POOL2
        self.model.add(MaxPooling2D((2, 2), strides=(2, 2)))
        self.model.add(BatchNormalization())

        #CONV3
        self.model.add(Convolution2D(N_FILTERS3, 2, 2, init=INIT))
        self.model.add(LeakyReLU(alpha=ReLU))
        self.model.add(SpatialDropout2D(0.2))

        #MAX_POOL3
        self.model.add(MaxPooling2D((2, 2), strides=(2, 2)))
        self.model.add(BatchNormalization())

        #FC1
        self.model.add(Flatten())
        self.model.add(Dense(N_FILTERS4, init=INIT))
        self.model.add(LeakyReLU(alpha=ReLU))
        self.model.add(Dropout(DROP_OUT))

        #FC2
        self.model.add(Dense(N_FILTERS4, init=INIT))
        self.model.add(LeakyReLU(alpha=ReLU))
        self.model.add(Dropout(DROP_OUT))

        #FC3
        self.model.add(Dense(N_FILTERS4, init=INIT))
        self.model.add(LeakyReLU(alpha=ReLU))

        #KEY_POINTS
        self.model.add(Dense(2 * FACEPOINTS_COUNT, init='he_uniform'))
        self.model.add(
            Reshape((FACEPOINTS_COUNT, 2),
                    input_shape=(2 * FACEPOINTS_COUNT, )))
Example #28
def model_nvidia(input_shape):
    """Model based on the following paper by Nvidia
    http://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
    """
    def resize_images(img):
        """Returns resized image
        Cannot be directly used in lambda function
        as tf is not understood by keras
        """
        import tensorflow as tf
        return tf.image.resize_images(img, (66, 200))

    model = Sequential()
    model.add(Lambda(resize_images, input_shape=input_shape))
    model.add(Lambda(lambda x: x / 255. - 0.5))
    model.add(
        Convolution2D(24,
                      5,
                      5,
                      border_mode="same",
                      subsample=(2, 2),
                      activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(
        Convolution2D(36,
                      5,
                      5,
                      border_mode="same",
                      subsample=(2, 2),
                      activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(
        Convolution2D(48,
                      5,
                      5,
                      border_mode="valid",
                      subsample=(2, 2),
                      activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="elu"))
    model.add(SpatialDropout2D(0.2))
    model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="elu"))
    model.add(SpatialDropout2D(0.2))

    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(100, activation="elu"))
    model.add(Dense(50, activation="elu"))
    model.add(Dense(10, activation="elu"))
    model.add(Dropout(0.5))
    model.add(Dense(1))

    model.compile(optimizer=Adam(lr=0.001), loss='mse')
    return model
Example #29
 def setup_model(self):
     drop_out = 0.1
     model = Sequential()
     model.add(Lambda(lambda x: x/127.5 - 1.,
      input_shape=(80, 160, 3),
      output_shape=(80, 160, 3)))
     model.add(Convolution2D(24, 5, 5, border_mode='valid',  subsample=(2,2)))
     model.add(BatchNormalization())
     model.add(Activation('relu'))
     model.add(Convolution2D(36, 5, 5, border_mode='valid',  subsample=(2,2)))
     model.add(BatchNormalization())
     model.add(Activation('relu'))
     model.add(Convolution2D(48, 5, 5, border_mode='valid',  subsample=(2,2)))
     model.add(BatchNormalization())
     model.add(Activation('relu'))
     model.add(SpatialDropout2D(drop_out))
     
      
     model.add(Convolution2D(64, 3, 3, border_mode='valid',  subsample=(1,1)))
     model.add(BatchNormalization())
     model.add(Activation('relu'))
     model.add(Convolution2D(64, 3, 3, border_mode='valid',  subsample=(1,1)))
     model.add(BatchNormalization())
     model.add(Activation('relu'))
     model.add(SpatialDropout2D(drop_out))
     
     
     model.add(Flatten())
     model.add(Dense(1164))
     model.add(BatchNormalization())
     model.add(Activation('relu'))
     model.add(Dropout(drop_out))
     
     model.add(Dense(100))
     model.add(BatchNormalization())
     model.add(Activation('relu'))
     model.add(Dropout(drop_out))
     
     model.add(Dense(50))
     model.add(BatchNormalization())
     model.add(Activation('relu'))
     model.add(Dropout(drop_out))
     
     model.add(Dense(10,))
     model.add(BatchNormalization())
     model.add(Activation('relu'))
     model.add(Dropout(drop_out))
     
     model.add(Dense(1))
     optimizer = Adam(lr=1e-2, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
     model.compile(optimizer=optimizer, loss='mse',metrics=['mean_absolute_error'])
     self.model = model
     return
Example #30
    def build_nvidia(self, input_shape=(HEIGHT, WIDTH, CHANNELS), crop=8):

        model = Sequential()
        #model.add(img_input)
        # Normalize input planes
        model.add(Lambda(lambda x: x / 255 - 0.5, input_shape=input_shape))
        # Resize image
        model.add(
            Lambda(lambda image: ktf.image.resize_images(image, (48, 64))))
        # Crop the image to focus only on the road
        model.add(Cropping2D(cropping=((crop, 0), (0, 0))))
        # Conv 1 layer
        model.add(
            Convolution2D(24, (5, 5),
                          border_mode="same",
                          subsample=(1, 1),
                          activation="relu"))
        # Conv 2 layer
        model.add(
            Convolution2D(36, (5, 5),
                          border_mode="same",
                          subsample=(1, 1),
                          activation="relu"))
        model.add(SpatialDropout2D(0.2))
        # Conv 3 layer
        model.add(
            Convolution2D(48, (5, 5),
                          border_mode="valid",
                          subsample=(1, 1),
                          activation="relu"))
        model.add(SpatialDropout2D(0.2))
        # Conv 4 layer
        model.add(
            Convolution2D(64, (3, 3), border_mode="valid", activation="relu"))
        model.add(SpatialDropout2D(0.2))
        # Conv 5 layer
        model.add(
            Convolution2D(64, (3, 3), border_mode="valid", activation="relu"))
        model.add(SpatialDropout2D(0.2))

        model.add(Flatten())
        # Fully Connect layer 1
        model.add(Dense(100, activation='relu'))
        # Fully Connect layer 2
        model.add(Dense(50, activation='relu'))
        # Fully Connect layer 3
        model.add(Dense(10, activation='relu'))
        model.add(Dropout(.5))
        # Output
        model.add(Dense(1))
        # lr=0.001
        #model.compile(optimizer=Adam(lr=0.0001), loss='mse')
        return model