Example #1
    def conv_block(input_tensor,
                   kernel_size,
                   filters,
                   stage,
                   block,
                   strides=(2, 2)):
        def name_fn(type, name):
            return name_builder(type, stage, block, name)

        F1, F2, F3 = filters

        x = Conv2D(F1, (1, 1), strides=strides,
                   name=name_fn("res", "2a"))(input_tensor)
        x = BatchNormalization(name=name_fn("bn", "2a"))(x)
        x = PReLU()(x)

        x = Conv2D(F2, kernel_size, padding='same', name=name_fn("res",
                                                                 "2b"))(x)
        x = BatchNormalization(name=name_fn("bn", "2b"))(x)
        x = PReLU()(x)

        x = Conv2D(F3, (1, 1), name=name_fn("res", "2c"))(x)
        x = BatchNormalization(name=name_fn("bn", "2c"))(x)

        sc = Conv2D(F3, (1, 1), strides=strides,
                    name=name_fn("res", "1"))(input_tensor)
        sc = BatchNormalization(name=name_fn("bn", "1"))(sc)

        x = add([x, sc])
        x = PReLU()(x)

        return x
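
The name_fn closure above wraps a name_builder helper that is not part of this snippet; Example #8 below shows the full context and defines it as a simple layer-name formatter:

    def name_builder(type, stage, block, name):
        return "{}{}{}_branch{}".format(type, stage, block, name)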
Example #2
    def fit(self, X, y, X_val, y_val):
        ## scaler
        #        self.scaler = StandardScaler()
        #        X = self.scaler.fit_transform(X)

        #### build model
        self.model = Sequential()
        ## input layer
        self.model.add(Dropout(self.input_dropout, input_shape=(X.shape[1], )))
        ## hidden layers
        first = True
        hidden_layers = self.hidden_layers
        while hidden_layers > 0:
            self.model.add(Dense(self.hidden_units))
            if self.batch_norm == "before_act":
                self.model.add(BatchNormalization())
            if self.hidden_activation == "prelu":
                self.model.add(PReLU())
            elif self.hidden_activation == "elu":
                self.model.add(ELU())
            else:
                self.model.add(Activation(self.hidden_activation))
            if self.batch_norm == "after_act":
                self.model.add(BatchNormalization())
            self.model.add(Dropout(self.hidden_dropout))
            hidden_layers -= 1

        ## output layer
        output_dim = 1
        output_act = "linear"
        self.model.add(Dense(output_dim))
        self.model.add(Activation(output_act))

        ## loss
        if self.optimizer == "sgd":
            sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
            self.model.compile(loss="mse", optimizer=sgd)
        else:
            self.model.compile(loss="mse", optimizer=self.optimizer)

        # model.summary() prints to stdout and returns None, so route the text through the logger
        self.model.summary(print_fn=logger.info)

        ## callback
        early_stopping = EarlyStopping(monitor='val_loss',
                                       min_delta=1e-2,
                                       patience=10,
                                       verbose=0,
                                       mode='auto')

        cb_my = LossHistory()

        ## fit
        self.model.fit(X,
                       y,
                       epochs=self.epochs,
                       batch_size=self.batch_size,
                       validation_data=[X_val, y_val],
                       callbacks=[early_stopping, cb_my],
                       verbose=1)
        return self
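
LossHistory is used above but not defined in this snippet. A minimal sketch of such a callback, assuming it only records the per-batch training loss (the real implementation may differ), could look like this with classic Keras:

    from keras.callbacks import Callback

    class LossHistory(Callback):
        # Hypothetical sketch: collect the loss reported after each training batch.
        def on_train_begin(self, logs=None):
            self.losses = []

        def on_batch_end(self, batch, logs=None):
            logs = logs or {}
            self.losses.append(logs.get('loss'))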
Example #3
def generator(input_shape, upscale_times=2):

    gen_input = Input(shape=input_shape)

    model = Conv2D(filters=64, kernel_size=9, strides=1,
                   padding="same")(gen_input)
    model = PReLU(alpha_initializer='zeros',
                  alpha_regularizer=None,
                  alpha_constraint=None,
                  shared_axes=[1, 2])(model)

    gen_model = model

    # Using 8 Residual Blocks
    for index in range(8):
        model = res_block_gen(model, 3, 64, 1)

    model = Conv2D(filters=64, kernel_size=3, strides=1, padding="same")(model)
    model = BatchNormalization(momentum=0.5)(model)
    model = add([gen_model, model])

    # Using upscale_times UpSampling Blocks (2 by default)
    for index in range(upscale_times):
        model = up_sampling_block(model, 3, 256, 1)

    model = Conv2D(filters=3, kernel_size=9, strides=1, padding="same")(model)
    model = Activation('tanh')(model)

    generator_model = Model(inputs=gen_input, outputs=model)

    return generator_model
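
A hypothetical call building this SRGAN-style generator for 64x64 RGB inputs (the shape is an assumption; res_block_gen from Example #7 and an up_sampling_block helper must already be in scope, along with the Keras layers used above):

    srgan_generator = generator(input_shape=(64, 64, 3), upscale_times=2)
    srgan_generator.summary()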
Example #4
    def identity_block(input_tensor, kernel_size, filters, stage, block):
        F1, F2, F3 = filters

        def name_fn(type, name):
            return name_builder(type, stage, block, name)

        x = Conv2D(F1, (1, 1), name=name_fn('res', '2a'))(input_tensor)
        x = BatchNormalization(name=name_fn('bn', '2a'))(x)
        x = PReLU()(x)

        x = Conv2D(F2, kernel_size, padding='same', name=name_fn('res',
                                                                 '2b'))(x)
        x = BatchNormalization(name=name_fn('bn', '2b'))(x)
        x = PReLU()(x)

        x = Conv2D(F3, (1, 1), name=name_fn('res', '2c'))(x)
        x = BatchNormalization(name=name_fn('bn', '2c'))(x)
        x = PReLU()(x)

        x = add([x, input_tensor])
        x = PReLU()(x)

        return x
Example #5
    def fit(self, X, y):
        ## scaler
        self.scaler = StandardScaler()
        X = self.scaler.fit_transform(X)

        #### build model
        self.model = Sequential()
        ## input layer
        self.model.add(Dropout(self.input_dropout, input_shape=(X.shape[1],)))
        ## hidden layers
        first = True
        hidden_layers = self.hidden_layers
        while hidden_layers > 0:
            self.model.add(Dense(self.hidden_units))
            if self.batch_norm == "before_act":
                self.model.add(BatchNormalization())
            if self.hidden_activation == "prelu":
                self.model.add(PReLU())
            elif self.hidden_activation == "elu":
                self.model.add(ELU())
            else:
                self.model.add(Activation(self.hidden_activation))
            if self.batch_norm == "after_act":
                self.model.add(BatchNormalization())
            self.model.add(Dropout(self.hidden_dropout))
            hidden_layers -= 1

        ## output layer
        output_dim = 1
        output_act = "linear"
        self.model.add(Dense(output_dim))
        self.model.add(Activation(output_act))
        
        ## loss
        if self.optimizer == "sgd":
            sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
            self.model.compile(loss="mse", optimizer=sgd)
        else:
            self.model.compile(loss="mse", optimizer=self.optimizer)

        ## fit
        self.model.fit(X, y,
                       epochs=self.nb_epoch,
                       batch_size=self.batch_size,
                       validation_split=0, verbose=1)
        return self
Example #6
def get_cnn1d_prelu(output_size, img_height, img_width, show=True):
    model_input = Input(shape=(img_height * img_width, ), name='Main_input')
    x = Reshape((img_height * img_width, 1))(model_input)
    x = Conv1D(filters=16,
               kernel_size=48,
               strides=2,
               padding='same',
               name='Conv1D_1')(x)
    x = Conv1D(filters=8,
               kernel_size=48,
               strides=2,
               padding='same',
               name='Conv1D_2')(x)
    x = Conv1D(filters=4,
               kernel_size=48,
               strides=2,
               padding='same',
               name='Conv1D_3')(x)
    x = Conv1D(filters=2,
               kernel_size=48,
               strides=2,
               padding='same',
               name='Conv1D_4')(x)
    x = Conv1D(filters=1,
               kernel_size=48,
               strides=2,
               padding='same',
               name='Conv1D_5')(x)
    x = Flatten(name='Flatten')(x)
    x = PReLU(name='PReLU_1')(x)
    x = BatchNormalization(name='BN_1')(x)
    x = Dense(512, activation='tanh', name='Dense_tanh_1')(x)
    x = BatchNormalization(name='BN_2')(x)
    x = Dense(512, activation='tanh', name='Dense_tanh_2')(x)
    cnn1d_output = Dense(output_size,
                         activation='linear',
                         name='Output_Dense_linear')(x)
    cnn1d = Model(inputs=model_input, outputs=cnn1d_output, name='CNN1D_PReLU')

    if show:
        print('CNN1D_PReLU summary:')
        cnn1d.summary()
        print()

    return cnn1d
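
A minimal usage sketch, assuming 28x28 inputs flattened to 784 values and 10 regression targets (both sizes are illustrative) and that the Keras layers used above are imported:

    model = get_cnn1d_prelu(output_size=10, img_height=28, img_width=28, show=False)
    model.compile(optimizer='adam', loss='mse')
    # model.fit(X_train, y_train, ...) would then train on the flattened images.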
Example #7
def res_block_gen(model, kernal_size, filters, strides):

    model = Conv2D(filters=filters,
                   kernel_size=kernal_size,
                   strides=strides,
                   padding="same")(model)
    model = BatchNormalization(momentum=0.5)(model)
    # Using Parametric ReLU
    model = PReLU(alpha_initializer='zeros',
                  alpha_regularizer=None,
                  alpha_constraint=None,
                  shared_axes=[1, 2])(model)
    model = Conv2D(filters=filters,
                   kernel_size=kernal_size,
                   strides=strides,
                   padding="same")(model)
    model = BatchNormalization(momentum=0.5)(model)

    return model
Example #8
def resnet(input_tensor):
    """Inference function for ResNet

    y = resnet(X)

    Parameters
    ----------
    input_tensor : keras.layers.Input

    Returns
    ----------
    y : softmax output
    """
    def name_builder(type, stage, block, name):
        return "{}{}{}_branch{}".format(type, stage, block, name)

    def identity_block(input_tensor, kernel_size, filters, stage, block):
        F1, F2, F3 = filters

        def name_fn(type, name):
            return name_builder(type, stage, block, name)

        x = Conv2D(F1, (1, 1), name=name_fn('res', '2a'))(input_tensor)
        x = BatchNormalization(name=name_fn('bn', '2a'))(x)
        x = PReLU()(x)

        x = Conv2D(F2, kernel_size, padding='same', name=name_fn('res',
                                                                 '2b'))(x)
        x = BatchNormalization(name=name_fn('bn', '2b'))(x)
        x = PReLU()(x)

        x = Conv2D(F3, (1, 1), name=name_fn('res', '2c'))(x)
        x = BatchNormalization(name=name_fn('bn', '2c'))(x)
        x = PReLU()(x)

        x = add([x, input_tensor])
        x = PReLU()(x)

        return x

    def conv_block(input_tensor,
                   kernel_size,
                   filters,
                   stage,
                   block,
                   strides=(2, 2)):
        def name_fn(type, name):
            return name_builder(type, stage, block, name)

        F1, F2, F3 = filters

        x = Conv2D(F1, (1, 1), strides=strides,
                   name=name_fn("res", "2a"))(input_tensor)
        x = BatchNormalization(name=name_fn("bn", "2a"))(x)
        x = PReLU()(x)

        x = Conv2D(F2, kernel_size, padding='same', name=name_fn("res",
                                                                 "2b"))(x)
        x = BatchNormalization(name=name_fn("bn", "2b"))(x)
        x = PReLU()(x)

        x = Conv2D(F3, (1, 1), name=name_fn("res", "2c"))(x)
        x = BatchNormalization(name=name_fn("bn", "2c"))(x)

        sc = Conv2D(F3, (1, 1), strides=strides,
                    name=name_fn("res", "1"))(input_tensor)
        sc = BatchNormalization(name=name_fn("bn", "1"))(sc)

        x = add([x, sc])
        x = PReLU()(x)

        return x

    net = ZeroPadding2D((3, 3))(input_tensor)
    net = Conv2D(64, (7, 7), strides=(2, 2), name="conv1")(net)
    net = BatchNormalization(name="bn_conv1")(net)
    net = PReLU()(net)
    net = MaxPooling2D((3, 3), strides=(2, 2))(net)

    net = conv_block(net, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    net = identity_block(net, 3, [64, 64, 256], stage=2, block='b')
    net = identity_block(net, 3, [64, 64, 256], stage=2, block='c')

    net = conv_block(net, 3, [128, 128, 512], stage=3, block='a')
    net = identity_block(net, 3, [128, 128, 512], stage=3, block='b')
    net = identity_block(net, 3, [128, 128, 512], stage=3, block='c')
    net = identity_block(net, 3, [128, 128, 512], stage=3, block='d')

    net = conv_block(net, 3, [256, 256, 1024], stage=4, block='a')
    net = identity_block(net, 3, [256, 256, 1024], stage=4, block='b')
    net = identity_block(net, 3, [256, 256, 1024], stage=4, block='c')
    net = identity_block(net, 3, [256, 256, 1024], stage=4, block='d')
    net = identity_block(net, 3, [256, 256, 1024], stage=4, block='e')
    net = identity_block(net, 3, [256, 256, 1024], stage=4, block='f')
    net = AveragePooling2D((2, 2))(net)

    net = Flatten()(net)
    net = Dense(10, activation="softmax", name="softmax")(net)

    return net
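
resnet only wires layers onto an input tensor, so a model still has to be built around it. A sketch assuming 32x32 RGB images (the shape is an assumption) and that the Keras layers used above (Conv2D, BatchNormalization, PReLU, ZeroPadding2D, MaxPooling2D, AveragePooling2D, Flatten, Dense, add) are already imported:

    from keras.layers import Input
    from keras.models import Model

    inputs = Input(shape=(32, 32, 3))  # assumed image size
    outputs = resnet(inputs)           # 10-way softmax, per the final Dense layer
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])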
Example #9
    def conv_prelu(input, filters, kernel_size):
        with tf.name_scope("Conv2D_PRelu"):
            conv2d = Conv2D(filters, kernel_size, padding='same')(input)
            prelu = PReLU()(conv2d)

        return prelu
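
This helper assumes tf is TensorFlow (for the name scope) and that Keras' Conv2D and PReLU are imported. A hypothetical call on a 4-D image tensor x (the filter count and kernel size are illustrative):

    x = conv_prelu(x, filters=32, kernel_size=(3, 3))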
Example #10
def get_cnn_prelu(output_size, img_height, img_width, show=True):
    """"""
    img_channels = 1
    model_input = Input(shape=(img_height * img_width,), name='Main_input')
    x = Reshape((img_height, img_width, img_channels))(model_input)
    x = Conv2D(filters=6, kernel_size=(16, 12),
               # activation='relu',
               strides=(2, 2), padding='same',
               kernel_initializer="he_normal",
               # kernel_regularizer=l2(0.00001),
               data_format="channels_last",
               name='Conv2D_1')(x)
    x = PReLU(name='PReLU_1')(x)
    c1_output = Dropout(0.4, noise_shape=None, seed=None, name='Dropout_1')(x)
    # c1_output = MaxPooling2D(pool_size=(2, 2)) (x)
    x = BatchNormalization(name='BN_1')(c1_output)
    x = Conv2D(filters=3, kernel_size=(16, 12),
               # activation='relu',
               strides=(2, 2), padding='same',
               kernel_initializer="he_normal",
               # kernel_regularizer=l2(0.00001),
               data_format="channels_last",
               name='Conv2D_2')(x)
    x = PReLU(name='PReLU_2')(x)
    c2_output = Dropout(0.4, noise_shape=None, seed=None)(x)
    # c2_output = MaxPooling2D(pool_size=(2, 2)) (x)
    x = BatchNormalization(name='BN_2')(c2_output)
    x = Conv2D(filters=1, kernel_size=(8, 12),
               # activation='relu',
               strides=(2, 2), padding='same',
               kernel_initializer="he_normal",
               # kernel_regularizer=l2(0.00001),
               data_format="channels_last",
               name='Conv2D_3')(x)
    x = PReLU(name='PReLU_3')(x)
    c3_output = Dropout(0.4, noise_shape=None, seed=None)(x)
    # c3_output = MaxPooling2D(pool_size=(2, 2)) (x)
    # x = BatchNormalization()(c3_output)
    # x = Conv2D(filters=1, kernel_size=(2, 3),
    # activation='relu',
    # strides=(2, 3), padding='same',
    # kernel_initializer="he_normal",
    # kernel_regularizer=l2(0.00001),
    # data_format="channels_last") (x)
    # C4_output = Dropout(0.4, noise_shape=None, seed=None)(x)
    # C4_output = MaxPooling2D(pool_size=(2, 2)) (x)
    x = Flatten(name='Flatten')(c3_output)
    # x = BatchNormalization()(x)
    x = Dense(128, activation='selu', name='Dense_selu_1')(x)
    x = Dropout(0.4, noise_shape=None, seed=None)(x)
    x = BatchNormalization(name='BN_3')(x)
    x = Dense(512, activation='selu', name='Dense_tanh_1')(x)
    x = Dropout(0.4, noise_shape=None, seed=None)(x)
    # x = BatchNormalization()(x)
    x = Dense(512, activation='selu', name='Dense_selu_2')(x)
    x = Dropout(0.4, noise_shape=None, seed=None)(x)
    x = BatchNormalization(name='BN_4')(x)
    x = Dense(512, activation='selu', name='Dense_tanh_2')(x)
    x = Dropout(0.4, noise_shape=None, seed=None)(x)
    x = BatchNormalization(name='BN_5')(x)
    # x = Dense(512, activation='tanh')(x)
    # x = Dropout(0.4, noise_shape=None, seed=None)(x)
    # x = BatchNormalization()(x)
    cnn_output = Dense(output_size, activation='linear',
                       name='Output_Dense_linear')(x)
    cnn = Model(inputs=model_input, outputs=cnn_output, name='CNN_PReLU')

    if show:
        print('CNN_PReLU summary:')
        cnn.summary()
        print()

    return cnn