Example #1
    def fit(self, X, y, X_val, y_val):
        ## scaler
        #        self.scaler = StandardScaler()
        #        X = self.scaler.fit_transform(X)

        #### build model
        self.model = Sequential()
        ## input layer
        self.model.add(Dropout(self.input_dropout, input_shape=(X.shape[1], )))
        ## hidden layers
        hidden_layers = self.hidden_layers
        while hidden_layers > 0:
            self.model.add(Dense(self.hidden_units))
            if self.batch_norm == "before_act":
                self.model.add(BatchNormalization())
            if self.hidden_activation == "prelu":
                self.model.add(PReLU())
            elif self.hidden_activation == "elu":
                self.model.add(ELU())
            else:
                self.model.add(Activation(self.hidden_activation))
            if self.batch_norm == "after_act":
                self.model.add(BatchNormalization())
            self.model.add(Dropout(self.hidden_dropout))
            hidden_layers -= 1

        ## output layer
        output_dim = 1
        output_act = "linear"
        self.model.add(Dense(output_dim))
        self.model.add(Activation(output_act))

        ## loss
        if self.optimizer == "sgd":
            sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
            self.model.compile(loss="mse", optimizer=sgd)
        else:
            self.model.compile(loss="mse", optimizer=self.optimizer)

        self.model.summary(print_fn=logger.info)

        ## callback
        early_stopping = EarlyStopping(monitor='val_loss',
                                       min_delta=1e-2,
                                       patience=10,
                                       verbose=0,
                                       mode='auto')

        cb_my = LossHistory()

        ## fit
        self.model.fit(X,
                       y,
                       epochs=self.epochs,
                       batch_size=self.batch_size,
                       validation_data=[X_val, y_val],
                       callbacks=[early_stopping, cb_my],
                       verbose=1)
        return self
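The LossHistory callback used above is project-specific and not shown in the listing. A minimal sketch of what such a callback plausibly looks like (the name and recorded fields are assumptions, not the original implementation):

from keras.callbacks import Callback

class LossHistory(Callback):
    """Record per-batch and per-epoch training losses."""

    def on_train_begin(self, logs=None):
        self.batch_losses = []
        self.epoch_losses = []

    def on_batch_end(self, batch, logs=None):
        self.batch_losses.append((logs or {}).get("loss"))

    def on_epoch_end(self, epoch, logs=None):
        self.epoch_losses.append((logs or {}).get("loss"))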
Example #2
def create_keras_model(inputShape, nClasses, output_activation='linear'):
    """
    SegNet model
    ----------
    inputShape : tuple
        Tuple with the dimensions of the input data (ny, nx, nBands). 
    nClasses : int
         Number of classes.
    """

    kernel = (3, 3)
    pool_size = (2, 2)

    inputs = Input(shape=inputShape, name='image')

    # Encoder
    x = Conv2D(64, kernel, padding='same')(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)

    x = Conv2D(128, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)

    x = Conv2D(256, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)

    x = Conv2D(512, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Decoder
    x = Conv2D(512, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)

    x = Conv2D(256, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)

    x = Conv2D(128, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)

    x = Conv2D(64, kernel, padding='same')(x)
    x = BatchNormalization()(x)

    x = Conv2D(nClasses, (1, 1), padding='valid')(x)

    outputs = Activation(output_activation, name='output')(x)

    model = Model(inputs=inputs, outputs=outputs, name='segnet')

    return model
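A possible way to instantiate and compile the model above; the tile size, class count, and loss are illustrative assumptions, not values from the source:

# hypothetical usage: 256x256 RGB tiles, 5 output classes
model = create_keras_model((256, 256, 3), nClasses=5, output_activation='softmax')
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()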
Example #3
    def __init__(self):
        ''' The Constructor '''
        self.x_train, self.y_train, self.x_test, self.y_test = load_external_data()
        # # If you wish to use the mnist dataset, uncomment the line below
        # (self.x_train, self.y_train), (self.x_test, self.y_test) = mnist.load_data()
        self.x_train = self.x_train.reshape(-1, 784)
        self.x_test = self.x_test.reshape(-1, 784)
        self.x_train = self.x_train.astype('float32')
        self.x_test = self.x_test.astype('float32')
        self.x_train /= 255
        self.x_test /= 255

        # One-hot encode labels, e.g. 4 becomes [0,0,0,0,1,0,0,0,0,0]
        n_classes = 10
        self.y_train = np_utils.to_categorical(self.y_train, n_classes)
        self.y_test = np_utils.to_categorical(self.y_test, n_classes)

        # building a linear stack of layers with the sequential model
        self.model = Sequential()
        self.model.add(Dense(512, input_shape=(784, )))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.2))

        self.model.add(Dense(512))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.2))

        self.model.add(Dense(10))
        self.model.add(Activation('softmax'))

        # compiling the sequential model
        self.model.compile(loss='categorical_crossentropy',
                           metrics=['accuracy'],
                           optimizer='adam')
Example #4
    def _build_residual_block(self, x, index):
        # mc = self.config.model
        mc = self.config

        in_x = x
        res_name = "res" + str(index)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name=res_name + "_conv1-" + str(mc.cnn_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name=res_name + "_batchnorm1")(x)
        x = Activation("relu", name=res_name + "_relu1")(x)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name=res_name + "_conv2-" + str(mc.cnn_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name=res_name + "_batchnorm2")(x)
        x = Add(name=res_name + "_add")([in_x, x])
        x = Activation("relu", name=res_name + "_relu2")(x)
        return x
Example #5
    def build(self):
        """
        Builds the full Keras model and stores it in self.model.
        """
        mc = self.config
        in_x = x = Input((12, 8, 8))

        # (batch, channels, height, width)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_first_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="input_conv-" + str(mc.cnn_first_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name="input_batchnorm")(x)
        x = Activation("relu", name="input_relu")(x)

        for i in range(mc.res_layer_num):
            x = self._build_residual_block(x, i + 1)

        res_out = x

        # for policy output
        x = Conv2D(filters=2,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="policy_conv-1-2")(res_out)
        x = BatchNormalization(axis=1, name="policy_batchnorm")(x)
        x = Activation("relu", name="policy_relu")(x)
        x = Flatten(name="policy_flatten")(x)

        policy_out = Dense(self.config.n_labels,
                           kernel_regularizer=l2(mc.l2_reg),
                           activation="softmax",
                           name="policy_out")(x)

        # for value output
        x = Conv2D(filters=4,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="value_conv-1-4")(res_out)
        x = BatchNormalization(axis=1, name="value_batchnorm")(x)
        x = Activation("relu", name="value_relu")(x)
        x = Flatten(name="value_flatten")(x)
        x = Dense(mc.value_fc_size,
                  kernel_regularizer=l2(mc.l2_reg),
                  activation="relu",
                  name="value_dense")(x)
        value_out = Dense(1,
                          kernel_regularizer=l2(mc.l2_reg),
                          activation="tanh",
                          name="value_out")(x)

        self.model = Model(in_x, [policy_out, value_out], name="chess_model")
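Because the model has two output heads (policy and value), compiling it needs one loss per head; the snippet stops before that step. A plausible sketch, assuming the usual AlphaZero-style pairing of losses (the optimizer and loss weights are assumptions):

# hypothetical compile step for the two-headed model
self.model.compile(optimizer="adam",
                   loss=["categorical_crossentropy", "mse"],
                   loss_weights=[1.0, 1.0])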
Example #6
def initialize_model():

    model = Sequential()
    model.add(
        Conv2D(40, 11, strides=1, padding='same', input_shape=(1, 1024, 4)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(Conv2D(40, 11, strides=1, padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(MaxPooling2D(pool_size=(1, 64)))

    model.add(Flatten())

    model.add(Dense(units=500))

    model.add(Dense(units=640))

    model.add(Reshape((1, 16, 40)))

    model.add(Conv2DTranspose(40, 11, strides=(1, 64), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(Conv2DTranspose(40, 11, strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(Conv2D(4, 11, strides=1, padding='same', activation='sigmoid'))
    model.summary()
    model.compile(optimizer='adam', loss='mse')

    return model
Example #7
    def get_model(self, embedding_matrix, vocab_size, question_len=15, img_feat=2048, embed_dim=300):
        number_of_hidden_units_LSTM = 512
        number_of_dense_layers      = 3
        number_of_hidden_units      = 1024
        activation_function         = 'tanh'
        dropout_pct                 = 0.5

        # Image model - loading image features and reshaping
        model_image = Sequential()
        model_image.add(Reshape((img_feat,), input_shape=(img_feat,)))

        # Language Model - 3 LSTMs
        model_language = Sequential()
        # model_language.add(Embedding(vocab_size, embedding_matrix.shape[1], input_length=question_len,
                                        # weights=[embedding_matrix], trainable=False))
        model_language.add(LSTM(number_of_hidden_units_LSTM, return_sequences=True, input_shape=(question_len, embed_dim)))
        model_language.add(LSTM(number_of_hidden_units_LSTM, return_sequences=True))
        model_language.add(LSTM(number_of_hidden_units_LSTM, return_sequences=False))

        # combined model: Sequential models cannot be concatenated directly,
        # so merge the two branch outputs with the functional API
        # (assumes Model is imported from keras.models)
        merged = concatenate([model_language.output, model_image.output])

        x = merged
        for _ in range(number_of_dense_layers):
            x = Dense(number_of_hidden_units, kernel_initializer='uniform')(x)
            x = Activation(activation_function)(x)
            x = Dropout(dropout_pct)(x)

        x = Dense(1000)(x)
        x = Activation('softmax')(x)

        model = Model(inputs=[model_language.input, model_image.input],
                      outputs=x)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        return model
Example #8
def initialize_model():

    one_filter_keras_model = Sequential()
    one_filter_keras_model.add(
        Conv2D(filters=40,
               kernel_size=(1, 11),
               padding="same",
               input_shape=(1, 1500, 5),
               kernel_constraint=NonNeg()))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))

    one_filter_keras_model.add(MaxPooling2D(pool_size=(1, 30)))

    one_filter_keras_model.add(Flatten())
    one_filter_keras_model.add(Dense(40))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))
    one_filter_keras_model.add(Dropout(0.5))

    one_filter_keras_model.add(Dense(1))
    one_filter_keras_model.add(Activation("sigmoid"))

    one_filter_keras_model.summary()
    one_filter_keras_model.compile(optimizer='adam',
                                   loss='binary_crossentropy',
                                   metrics=[precision, recall, specificity])

    return one_filter_keras_model
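precision, recall, and specificity are custom metric functions not shown in the listing. A sketch of the common backend-based definitions they likely follow (assumed, not the original code):

from keras import backend as K

def precision(y_true, y_pred):
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_pos / (pred_pos + K.epsilon())

def recall(y_true, y_pred):
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_pos / (actual_pos + K.epsilon())

def specificity(y_true, y_pred):
    true_neg = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    actual_neg = K.sum(K.round(K.clip(1 - y_true, 0, 1)))
    return true_neg / (actual_neg + K.epsilon())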
Example #9
    def build(width, height, depth, classes):
        # initialize the model along with the input shape to be
        # "channels last"

        model = Sequential()
        #the image input
        inputShape = (height, width, depth)

        # if we are using "channels first", update the input shape
        if image_data_format() == "channels_first":
            inputShape = (depth, height, width)

        #every CNN you implement will have a build method: it accepts a number
        #of parameters, constructs the network architecture, and returns it to
        #the calling function

        #define the first (and only) CONV => RELU layer
        #this layer has 32 filters, each 3x3; "same" padding keeps the output
        #of the convolution the same size as the input (not strictly
        #necessary for this example, but a good habit to form now)
        model.add(Conv2D(32, (3, 3), padding="same", input_shape=inputShape))
        model.add(Activation("relu"))

        #softmax classifier
        model.add(Flatten())
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        #return the constructed network architechture
        return model
Example #10
    def generator(self):

        gen_input = Input(shape=self.noise_shape)

        model = Conv2D(filters=64, kernel_size=9, strides=1,
                       padding="same")(gen_input)
        model = PReLU(alpha_initializer='zeros',
                      alpha_regularizer=None,
                      alpha_constraint=None,
                      shared_axes=[1, 2])(model)

        gen_model = model

        # Using 16 Residual Blocks
        for _ in range(16):
            model = res_block_gen(model, 3, 64, 1)

        model = Conv2D(filters=64, kernel_size=3, strides=1,
                       padding="same")(model)
        model = BatchNormalization(momentum=0.5)(model)
        model = add([gen_model, model])

        # Using 2 UpSampling Blocks
        for _ in range(2):
            model = up_sampling_block(model, 3, 256, 1)

        model = Conv2D(filters=3, kernel_size=9, strides=1,
                       padding="same")(model)
        model = Activation('tanh')(model)

        generator_model = Model(inputs=gen_input, outputs=model)

        return generator_model
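res_block_gen and up_sampling_block come from elsewhere in that project. Sketches of the standard SRGAN-style blocks they presumably implement (assumptions, not the original helpers):

from keras.layers import Conv2D, BatchNormalization, PReLU, LeakyReLU, UpSampling2D, add

def res_block_gen(model, kernel_size, filters, strides):
    """Residual block: Conv-BN-PReLU-Conv-BN plus a skip connection."""
    skip = model
    model = Conv2D(filters=filters, kernel_size=kernel_size,
                   strides=strides, padding="same")(model)
    model = BatchNormalization(momentum=0.5)(model)
    model = PReLU(alpha_initializer='zeros', shared_axes=[1, 2])(model)
    model = Conv2D(filters=filters, kernel_size=kernel_size,
                   strides=strides, padding="same")(model)
    model = BatchNormalization(momentum=0.5)(model)
    return add([skip, model])

def up_sampling_block(model, kernel_size, filters, strides):
    """Conv followed by 2x nearest-neighbour upsampling and LeakyReLU."""
    model = Conv2D(filters=filters, kernel_size=kernel_size,
                   strides=strides, padding="same")(model)
    model = UpSampling2D(size=2)(model)
    model = LeakyReLU(alpha=0.2)(model)
    return model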
Example #11
File: rnn.py Project: ss-koishi/RNN
def main():

    sin = md.make_noised_sin()
    size = 60
    (x_train, y_train), (x_test, y_test) = train_test_split(sin, n_prev=size)

    model = Sequential([
        LSTM(hidden, batch_input_shape=(None, size, io), return_sequences=False),
        Dense(io),
        Activation('linear')
    ])

    model.compile(loss='mean_squared_error', optimizer='adam')
    stopping = EarlyStopping(monitor='val_loss', mode='auto', patience=10)
    model.fit(x_train, y_train, batch_size=256, epochs=1000, validation_split=0.1, callbacks=[stopping])

    result = []
    future_steps = 1000
    # seed prediction with the last training window, shape (1, size, 1)
    future_data = x_train[-1:]
    for i in range(future_steps):
        print(i)
        pred = model.predict(future_data)
        future_data = np.delete(future_data, 0)
        future_data = np.append(future_data, pred[-1:]).reshape(1, size, 1)
        result = np.append(result, pred[-1:])

    plt.figure()
    #plt.plot(y_test.flatten())
    output = y_train[-100:]
    plt.plot(range(0, len(output)), output, label='input')
    plt.plot(range(len(output), len(output) + future_steps), result, label='future')
    plt.title('Sin Curve prediction')
    plt.legend(loc='upper right')
    plt.savefig('result.png')
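md.make_noised_sin and train_test_split are the project's own helpers. Hypothetical stand-ins consistent with how they are called above (the constants are assumptions):

import numpy as np

def make_noised_sin(cycles=100, steps_per_cycle=50, noise=0.05):
    """A sine wave with small uniform noise, returned as a 1-D array."""
    t = np.arange(cycles * steps_per_cycle + 1)
    return np.sin(2.0 * np.pi * t / steps_per_cycle) \
        + np.random.uniform(-noise, noise, len(t))

def train_test_split(data, n_prev=60, test_ratio=0.1):
    """Slice a series into (window, next value) pairs and split them."""
    x = np.array([data[i:i + n_prev] for i in range(len(data) - n_prev)])
    y = np.array([data[i + n_prev] for i in range(len(data) - n_prev)])
    x = x.reshape(-1, n_prev, 1)
    y = y.reshape(-1, 1)
    split = int(len(x) * (1 - test_ratio))
    return (x[:split], y[:split]), (x[split:], y[split:])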
Example #12
def discriminator_model(input_img_shape, classifier_model):
    img_input = keras.layers.Input(shape=(input_img_shape[0], input_img_shape[1], 1))
    x = Conv2D(64, (5, 5), padding="same")(img_input)
    x = Activation('tanh')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(128, (5, 5))(x)
    x = Activation('tanh')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Flatten()(x)
    x = Dense(1024)(x)
    x = Activation('tanh')(x)

    fake = Dense(1, activation='sigmoid')(x)
    classifier_model.trainable = False
    aux = classifier_model(inputs=[img_input])
    return Model(inputs=[img_input], outputs=[fake, aux])
Example #13
 def conv_func(x):
     x = Conv2D(nb_filter, (nb_row, nb_col), strides=stride,
                padding='same')(x)
     x = BatchNormalization()(x)
     #x = LeakyReLU(0.2)(x)
     x = Activation("relu")(x)
     return x
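conv_func closes over nb_filter, nb_row, nb_col, and stride, so it is presumably returned from a factory function. A hypothetical enclosing definition showing the pattern:

def conv(nb_filter, nb_row, nb_col, stride=(1, 1)):
    """Return a Conv-BN-ReLU block parameterized by the closure."""
    def conv_func(x):
        x = Conv2D(nb_filter, (nb_row, nb_col), strides=stride,
                   padding='same')(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)
        return x
    return conv_func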
Example #14
 def testKerasModel(self):
   model = Sequential(
       [Dense(10, input_shape=(100,)),
        Activation('relu', name='my_relu')])
   summary = self.keras_model(name='my_name', data=model, step=1)
   first_val = summary.value[0]
   self.assertEqual(model.to_json(), first_val.tensor.string_val[0])
Example #15
def transition_block(input,
                     nb_filter,
                     use_pool=True,
                     dropout_rate=None,
                     pooltype=1,
                     weight_decay=1e-4):
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1),
               kernel_initializer='he_normal',
               padding='same',
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    if use_pool:
        if pooltype == 2:
            x = AveragePooling2D((2, 2), strides=(2, 2))(x)
        elif pooltype == 1:
            x = ZeroPadding2D(padding=(0, 1))(x)
            x = AveragePooling2D((2, 2), strides=(2, 1))(x)
        elif pooltype == 3:
            x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    return x, nb_filter
Example #16
    def fit(self, X, y):
        ## scaler
        self.scaler = StandardScaler()
        X = self.scaler.fit_transform(X)

        #### build model
        self.model = Sequential()
        ## input layer
        self.model.add(Dropout(self.input_dropout, input_shape=(X.shape[1],)))
        ## hidden layers
        hidden_layers = self.hidden_layers
        while hidden_layers > 0:
            self.model.add(Dense(self.hidden_units))
            if self.batch_norm == "before_act":
                self.model.add(BatchNormalization())
            if self.hidden_activation == "prelu":
                self.model.add(PReLU())
            elif self.hidden_activation == "elu":
                self.model.add(ELU())
            else:
                self.model.add(Activation(self.hidden_activation))
            if self.batch_norm == "after_act":
                self.model.add(BatchNormalization())
            self.model.add(Dropout(self.hidden_dropout))
            hidden_layers -= 1

        ## output layer
        output_dim = 1
        output_act = "linear"
        self.model.add(Dense(output_dim))
        self.model.add(Activation(output_act))
        
        ## loss
        if self.optimizer == "sgd":
            sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
            self.model.compile(loss="mse", optimizer=sgd)
        else:
            self.model.compile(loss="mse", optimizer=self.optimizer)

        ## fit
        self.model.fit(X, y,
                       epochs=self.nb_epoch,
                       batch_size=self.batch_size,
                       validation_split=0, verbose=1)
        return self
Example #17
def __conv1_block(input):
    x = Conv2D(16, (3, 3), padding='same', kernel_initializer=initia)(input)

    # batchnorm axis depends on the image data format
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    return x
Example #18
def conv_block(input, growth_rate, dropout_rate=None, weight_decay=1e-4):
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(growth_rate, (3, 3),
               kernel_initializer='he_normal',
               padding='same')(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x
Example #19
    def build(input_shape_width, input_shape_height, classes,
              weight_path='', input_shape_depth=3):
        '''
        weight_path: a .hdf5 file. If given, pretrained weights are loaded from it.
        '''
        
        # initialize the model
        model = Sequential()
        
        input_shape = (input_shape_height, input_shape_width,
                       input_shape_depth)
        # if we are using "channels first", update the input shape
        if K.image_data_format() == 'channels_first':
            input_shape = (input_shape_depth, input_shape_height,
                           input_shape_width)

        # first convolution + relu + pooling layer
        model.add(Conv2D(filters=20, kernel_size=(5, 5),
                         padding='same', input_shape=input_shape))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))

        # second convolutional layer
        model.add(Conv2D(filters=50, kernel_size=(5, 5),
                         padding='same'))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        
        # Flattening
        model.add(Flatten())

        # Full connection
        model.add(Dense(units=500))
        model.add(Activation('relu'))

        # output layer
        model.add(Dense(units=classes))
        model.add(Activation('softmax'))

        if weight_path:
            model.load_weights(weight_path)

        # return the constructed network architecture
        return model
Example #20
 def _dconv_bn(x):
     #TODO: Deconvolution2D
     #x = Deconvolution2D(nb_filter,nb_row, nb_col, output_shape=output_shape, subsample=stride, border_mode='same')(x)
     #x = UpSampling2D(size=stride)(x)
     x = UnPooling2D(size=stride)(x)
     x = ReflectionPadding2D(padding=stride)(x)
     x = Conv2D(nb_filter, (nb_row, nb_col), padding='valid')(x)
     x = BatchNormalization()(x)
     x = Activation(activation)(x)
     return x
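UnPooling2D and ReflectionPadding2D are custom layers from that project. As one example, reflection padding can be written as a small custom Keras layer; a sketch assuming channels-last tensors and a TensorFlow backend:

import tensorflow as tf
from keras.layers import Layer

class ReflectionPadding2D(Layer):
    """Pad height and width by reflecting the tensor's own border values."""

    def __init__(self, padding=(1, 1), **kwargs):
        if isinstance(padding, int):
            padding = (padding, padding)
        self.padding = tuple(padding)
        super(ReflectionPadding2D, self).__init__(**kwargs)

    def compute_output_shape(self, input_shape):
        return (input_shape[0],
                input_shape[1] + 2 * self.padding[0],
                input_shape[2] + 2 * self.padding[1],
                input_shape[3])

    def call(self, x):
        h_pad, w_pad = self.padding
        return tf.pad(x, [[0, 0], [h_pad, h_pad], [w_pad, w_pad], [0, 0]],
                      mode='REFLECT')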
Example #21
def make_model(dimData):
    model = Sequential()

    model.add(Dense(1024, input_shape=(dimData, )))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))

    model.add(Dense(1024))
    model.add(Activation("relu"))
    model.add(Dropout(0.5))

    model.add(Dense(CLASSES))
    model.add(Activation("softmax"))

    model.compile(loss="categorical_crossentropy",
                  optimizer='adam',
                  metrics=["accuracy"])

    return model
Example #22
    def build(height, width, depth, classes):
        # initialize the model
        model = Sequential()
        #the shape of our image inputs
        inputShape = (height, width, depth)
        #if we are using "channels first" update the input shape
        if K.image_data_format() == "channels_first":
            inputShape = (depth, height, width)

        #first set of 5x5 CONV => RELU layers
        model.add(
            Conv2D(24, (5, 5),
                   strides=(2, 2),
                   padding="valid",
                   input_shape=inputShape))
        model.add(Activation('relu'))

        #second set of 5x5 CONV => RELU layers
        model.add(Conv2D(36, (5, 5), strides=(2, 2), padding="valid"))
        model.add(Activation('relu'))

        #third set of 5x5 CONV => RELU layers
        model.add(Conv2D(48, (5, 5), strides=(2, 2), padding="valid"))
        model.add(Activation('relu'))

        #first set of 3x3 CONV => RELU layers
        model.add(Conv2D(64, (3, 3), padding="valid"))
        model.add(Activation('relu'))

        #second set of 3x3 CONV => RELU layers
        model.add(Conv2D(64, (3, 3), padding="valid"))
        model.add(Activation('relu'))

        #set of fully connected layers
        model.add(Flatten())
        model.add(Dense(1164))
        model.add(Activation('relu'))
        model.add(Dense(100))
        model.add(Activation('relu'))
        model.add(Dense(10))
        model.add(Activation('relu'))

        #output
        model.add(Dense(classes))
        model.add(Activation('tanh'))

        return model
Example #23
 def testKerasModel_usesDefaultStep(self):
   model = Sequential(
       [Dense(10, input_shape=(100,)),
        Activation('relu', name='my_relu')])
   try:
     summary_ops.set_step(42)
     event = self.keras_model(name='my_name', data=model)
     self.assertEqual(42, event.step)
   finally:
     # Reset to default state for other tests.
     summary_ops.set_step(None)
Example #24
def conv_gan(inp, filters, use_norm=False, strides=2, norm="none"):
    """ GAN Conv Block """
    var_x = Conv2D(filters,
                   kernel_size=3,
                   strides=strides,
                   kernel_regularizer=regularizers.l2(GAN22_REGULARIZER),
                   kernel_initializer=GAN22_CONV_INIT,
                   use_bias=False,
                   padding="same")(inp)
    var_x = Activation("relu")(var_x)
    var_x = normalization(var_x, norm, filters) if use_norm else var_x
    return var_x
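normalization is a project helper not shown here. A plausible sketch that dispatches on the norm string (the supported names are assumptions):

from keras.layers import BatchNormalization

def normalization(inp, norm="none", filters=None):
    """Apply the requested normalization, or pass the tensor through."""
    if norm == "batchnorm":
        return BatchNormalization()(inp)
    # other norm types (e.g. instance norm) would dispatch here
    return inp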
Example #25
def residual(inputs, filter_size, kernel):
    x = Conv2D(filter_size,
               kernel,
               padding='same',
               kernel_initializer='he_normal')(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(filter_size,
               kernel,
               padding='same',
               kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x)
    x = Add()([x, inputs])
    return x
Example #26
def ___conv4_block(input, k=1, dropout=0.0, se_net=False):
    init = input

    # batchnorm axis depends on the image data format
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    # If the input number of filters differs from 64 * k, project the
    # input with a 1x1 convolution so the residual add below matches
    if input.get_shape().as_list()[channel_axis] != 64 * k:
        init = Conv2D(64 * k, (1, 1),
                      activation='linear',
                      padding='same',
                      kernel_initializer=initia)(init)

    x = Conv2D(64 * k, (3, 3), padding='same',
               kernel_initializer=initia)(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = Conv2D(64 * k, (3, 3), padding='same', kernel_initializer=initia)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)

    if se_net:
        x = squeeze_and_excitation_layer(x)

    m = add([init, x])
    return m
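squeeze_and_excitation_layer is defined elsewhere in that project. A sketch of the standard squeeze-and-excitation block it presumably implements (channels-last layout and the reduction ratio are assumptions):

from keras.layers import GlobalAveragePooling2D, Dense, Reshape, multiply

def squeeze_and_excitation_layer(inputs, ratio=16):
    """Reweight channels: global pool, bottleneck MLP, sigmoid gate."""
    filters = int(inputs.shape[-1])
    se = GlobalAveragePooling2D()(inputs)
    se = Dense(filters // ratio, activation='relu')(se)
    se = Dense(filters, activation='sigmoid')(se)
    se = Reshape((1, 1, filters))(se)
    return multiply([inputs, se])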
Example #27
 def conv_sep(self, inp, filters, kernel_size=5, strides=2, **kwargs):
     """ Seperable Convolution Layer """
     logger.debug(
         "inp: %s, filters: %s, kernel_size: %s, strides: %s, kwargs: %s)",
         inp, filters, kernel_size, strides, kwargs)
     name = self.get_name("separableconv2d_{}".format(inp.shape[1]))
     kwargs = self.set_default_initializer(kwargs)
     var_x = SeparableConv2D(filters,
                             kernel_size=kernel_size,
                             strides=strides,
                             padding="same",
                             name="{}_seperableconv2d".format(name),
                             **kwargs)(inp)
     var_x = Activation("relu", name="{}_relu".format(name))(var_x)
     return var_x
Example #28
def get_model(data, params):
    inputs = Input(shape=data.x_train.shape[1:])

    x = inputs
    for element in params["network"]:
        if element[0] == "C2D":
            x = Conv2D(filters=element[1],
                       kernel_size=element[2],
                       padding='same')(x)
            if element[3]:
                x = BatchNormalization()(x)
        elif element[0] == "Dense":
            x = Dense(units=element[1])(x)
        elif element[0] == "A":
            x = Activation(element[1])(x)
        elif element[0] == "MaxPool2D":
            x = MaxPool2D()(x)
        elif element[0] == "Flatten":
            x = Flatten()(x)
        else:
            print("Invalid element: " + element[0])

    # There has to be a Dense layer at the end
    x = Dense(units=data.num_classes)(x)

    y_pred = Activation("softmax")(x)

    # Build the model
    model = Model(inputs=[inputs], outputs=[y_pred])

    model.compile(
        loss="categorical_crossentropy",
        optimizer=params["optimizer"](learning_rate=params["learning_rate"]),
        metrics=["accuracy"])

    return model
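A sample params dictionary matching the element tuples the loop above expects; every name and value here is illustrative only, and data is assumed to expose x_train and num_classes as in the function:

from tensorflow.keras.optimizers import Adam  # assumed optimizer class

params = {
    "network": [
        ("C2D", 32, (3, 3), True),   # Conv2D: filters, kernel, batch norm?
        ("A", "relu"),
        ("MaxPool2D",),
        ("Flatten",),
        ("Dense", 128),
        ("A", "relu"),
    ],
    "optimizer": Adam,
    "learning_rate": 1e-3,
}
model = get_model(data, params)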
Example #29
    def _res_func(x):
        identity = Cropping2D(cropping=((2, 2), (2, 2)))(x)

        a = Conv2D(nb_filter, (nb_row, nb_col),
                   strides=stride,
                   padding='valid')(x)
        a = BatchNormalization()(a)
        #a = LeakyReLU(0.2)(a)
        a = Activation("relu")(a)
        a = Conv2D(nb_filter, (nb_row, nb_col),
                   strides=stride,
                   padding='valid')(a)
        y = BatchNormalization()(a)

        return add([identity, y])
Example #30
def modelDecode(cae, filterSize, poolSize, gpus):
    if gpus > 1:
        cae = cae.layers[-2]

    # initialize decoder
    decode = Sequential()
    decode.add(
        Dense(128 * 4 * 4,
              input_dim=(1024),
              weights=cae.layers[18].get_weights()))
    decode.add(Activation('relu'))
    decode.add(Reshape((128, 4, 4)))
    decode.add(Activation('relu'))
    decode.add(UpSampling2D(size=(poolSize, poolSize)))
    decode.add(
        Convolution2D(64, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[23].get_weights()))
    decode.add(Activation('relu'))
    decode.add(UpSampling2D(size=(poolSize, poolSize)))
    decode.add(
        Convolution2D(32, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[26].get_weights()))
    decode.add(Activation('relu'))
    decode.add(UpSampling2D(size=(poolSize, poolSize)))
    decode.add(
        Convolution2D(16, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[29].get_weights()))
    decode.add(Activation('relu'))
    decode.add(UpSampling2D(size=(poolSize, poolSize)))
    decode.add(
        Convolution2D(8, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[32].get_weights()))
    decode.add(Activation('relu'))
    decode.add(UpSampling2D(size=(poolSize, poolSize)))
    decode.add(
        Convolution2D(3, (filterSize, filterSize),
                      padding='same',
                      weights=cae.layers[35].get_weights()))
    decode.add(Activation('sigmoid'))

    if gpus > 1:
        decode = multi_gpu_model(decode, gpus=gpus)

    decode.compile(loss='mse', optimizer='adam')

    return decode