Example #1
def conv2d_bn(x,
              layer=None,
              cv1_out=None,
              cv1_filter=(1, 1),
              cv1_strides=(1, 1),
              cv2_out=None,
              cv2_filter=(3, 3),
              cv2_strides=(1, 1),
              padding=None):
    num = '' if cv2_out is None else '1'
    tensor = Conv2D(cv1_out,
                    cv1_filter,
                    strides=cv1_strides,
                    data_format='channels_first',
                    name=layer + '_conv' + num)(x)
    tensor = BatchNormalization(axis=1,
                                epsilon=0.00001,
                                name=layer + '_bn' + num)(tensor)
    tensor = Activation('relu')(tensor)
    if padding is None:
        return tensor
    tensor = ZeroPadding2D(padding=padding,
                           data_format='channels_first')(tensor)
    if cv2_out is None:
        return tensor
    tensor = Conv2D(cv2_out,
                    cv2_filter,
                    strides=cv2_strides,
                    data_format='channels_first',
                    name=layer + '_conv' + '2')(tensor)
    tensor = BatchNormalization(axis=1,
                                epsilon=0.00001,
                                name=layer + '_bn' + '2')(tensor)
    tensor = Activation('relu')(tensor)
    return tensor
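A minimal usage sketch for conv2d_bn (the input shape, layer name, and filter sizes below are illustrative assumptions, not taken from the original code):

from keras.layers import Input
from keras.models import Model

# hypothetical channels_first RGB input
inp = Input(shape=(3, 96, 96))
out = conv2d_bn(inp,
                layer='inception_3a_3x3',
                cv1_out=96, cv1_filter=(1, 1),
                cv2_out=128, cv2_filter=(3, 3),
                padding=(1, 1))
model = Model(inp, out)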
Example #2
def discriminator_model_rgb(input_img_shape, classifier_model):
    img_input = keras.layers.Input(shape=(input_img_shape[0], input_img_shape[1], 3))
    x = Conv2D(16, (3, 3), strides=2)(img_input)
    x = LeakyReLU(0.2)(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    x = Conv2D(32, (3, 3), strides=1)(x)
    x = LeakyReLU(0.2)(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    x = Conv2D(64, (3, 3), strides=2)(x)
    x = LeakyReLU(0.2)(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    x = Conv2D(128, (3, 3), strides=1)(x)
    x = LeakyReLU(0.2)(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    x = Conv2D(256, (3, 3), strides=2)(x)
    x = LeakyReLU(0.2)(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    x = Conv2D(512, (3, 3), strides=1)(x)
    x = LeakyReLU(0.2)(x)
    x = Dropout(0.5)(x)
    x = Flatten()(x)
    
    fake = Dense(1, activation='sigmoid')(x)
    classifier_model.trainable = False
    aux = classifier_model(inputs=[img_input])
    return Model(inputs=[img_input], outputs=[fake, aux])
Example #3
def resnet(img_width, img_height, color_type=3):
    # create the base pre-trained model
    base_model = applications.resnet50.ResNet50(weights='imagenet', include_top=False, input_shape=(img_width, img_height, color_type))
    for layer in base_model.layers:
        layer.trainable = False

    # flatten the output of the conv block
    x = Flatten()(base_model.output)

    # add a fully connected layer with batch norm and dropout
    x = Dense(2048, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.8)(x)

    # add a fully connected layer with batch norm and dropout
    x = Dense(4096, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.8)(x)

    # add a fully connected layer with batch norm and dropout
    x = Dense(4096, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.8)(x)


    # add logistic layer over all car classes
    predictions = Dense(10, activation='softmax', kernel_initializer='random_uniform', bias_initializer='random_uniform', bias_regularizer=regularizers.l2(0.01), name='predictions')(x)

    # this is the model we will train
    model = Model(inputs=base_model.input, outputs=predictions)

    return model
Example #4
    def res_block(self, input_tensor, kernel_size, filters, stage, block):
        filters1, filters2, filters3 = filters
        if K.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1
        conv_name_base = 'res' + stage + block + '_branch'
        bn_name_base = 'bn' + stage + block + '_branch'

        x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
        x = Activation('relu')(x)

        x = Conv2D(filters2,
                   kernel_size,
                   padding='same',
                   name=conv_name_base + '2b')(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
        x = Activation('relu')(x)

        x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

        x = layers.add([x, input_tensor])
        x = Activation('relu')(x)
        return x
Example #5
def initialize_model():

    one_filter_keras_model = Sequential()
    one_filter_keras_model.add(
        Conv2D(filters=40,
               kernel_size=(1, 11),
               padding="same",
               input_shape=(1, 1500, 5),
               kernel_constraint=NonNeg()))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))

    one_filter_keras_model.add(MaxPooling2D(pool_size=(1, 30)))

    one_filter_keras_model.add(Flatten())
    one_filter_keras_model.add(Dense(40))
    one_filter_keras_model.add(BatchNormalization(axis=-1))
    one_filter_keras_model.add(Activation('relu'))
    one_filter_keras_model.add(Dropout(0.5))

    one_filter_keras_model.add(Dense(1))
    one_filter_keras_model.add(Activation("sigmoid"))

    one_filter_keras_model.summary()
    one_filter_keras_model.compile(optimizer='adam',
                                   loss='binary_crossentropy',
                                   metrics=[precision, recall, specificity])

    return one_filter_keras_model
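The compile() call above passes metric functions (precision, recall, specificity) that must be defined or imported elsewhere; they are not shown in this snippet. A minimal sketch of one of them, assuming binary labels and the Keras backend (the originals may differ):

import keras.backend as K

def precision(y_true, y_pred):
    # fraction of predicted positives that are actual positives
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_positives / (predicted_positives + K.epsilon())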
Example #6
def conv_block(x, stage, branch, nb_filter, dropout_rate=None):
    conv_name_base = 'conv' + str(stage) + '_' + str(branch)
    relu_name_base = 'relu' + str(stage) + '_' + str(branch)

    # 1x1 Convolution (Bottleneck layer)
    inter_channel = nb_filter * 4
    x = BatchNormalization(name=conv_name_base + '_x1_bn')(x)
    x = Activation('relu', name=relu_name_base + '_x1')(x)
    x = Convolution2D(inter_channel, (1, 1),
                      name=conv_name_base + '_x1',
                      use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    # 3x3 Convolution
    x = BatchNormalization(name=conv_name_base + '_x2_bn')(x)
    x = Activation('relu', name=relu_name_base + '_x2')(x)
    x = ZeroPadding2D((1, 1), name=conv_name_base + '_x2_zeropadding')(x)
    x = Convolution2D(nb_filter, (3, 3),
                      name=conv_name_base + '_x2',
                      use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    return x
Example #7
    def build(self):
        """
        Builds the full Keras model and stores it in self.model.
        """
        mc = self.config
        in_x = x = Input((12, 8, 8))

        # (batch, channels, height, width)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_first_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="input_conv-" + str(mc.cnn_first_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name="input_batchnorm")(x)
        x = Activation("relu", name="input_relu")(x)

        for i in range(mc.res_layer_num):
            x = self._build_residual_block(x, i + 1)

        res_out = x

        # for policy output
        x = Conv2D(filters=2,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="policy_conv-1-2")(res_out)
        x = BatchNormalization(axis=1, name="policy_batchnorm")(x)
        x = Activation("relu", name="policy_relu")(x)
        x = Flatten(name="policy_flatten")(x)

        policy_out = Dense(self.config.n_labels,
                           kernel_regularizer=l2(mc.l2_reg),
                           activation="softmax",
                           name="policy_out")(x)

        # for value output
        x = Conv2D(filters=4,
                   kernel_size=1,
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name="value_conv-1-4")(res_out)
        x = BatchNormalization(axis=1, name="value_batchnorm")(x)
        x = Activation("relu", name="value_relu")(x)
        x = Flatten(name="value_flatten")(x)
        x = Dense(mc.value_fc_size,
                  kernel_regularizer=l2(mc.l2_reg),
                  activation="relu",
                  name="value_dense")(x)
        value_out = Dense(1,
                          kernel_regularizer=l2(mc.l2_reg),
                          activation="tanh",
                          name="value_out")(x)

        self.model = Model(in_x, [policy_out, value_out], name="chess_model")
Example #8
    def _build_residual_block(self, x, index):
        # mc = self.config.model
        mc = self.config

        in_x = x
        res_name = "res" + str(index)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name=res_name + "_conv1-" + str(mc.cnn_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name=res_name + "_batchnorm1")(x)
        x = Activation("relu", name=res_name + "_relu1")(x)
        x = Conv2D(filters=mc.cnn_filter_num,
                   kernel_size=mc.cnn_filter_size,
                   padding="same",
                   data_format="channels_first",
                   use_bias=False,
                   kernel_regularizer=l2(mc.l2_reg),
                   name=res_name + "_conv2-" + str(mc.cnn_filter_size) + "-" +
                   str(mc.cnn_filter_num))(x)
        x = BatchNormalization(axis=1, name=res_name + "_batchnorm2")(x)
        x = Add(name=res_name + "_add")([in_x, x])
        x = Activation("relu", name=res_name + "_relu2")(x)
        return x
Example #9
    def fit(self, X, y, X_val, y_val):
        ## scaler
        #        self.scaler = StandardScaler()
        #        X = self.scaler.fit_transform(X)

        #### build model
        self.model = Sequential()
        ## input layer
        self.model.add(Dropout(self.input_dropout, input_shape=(X.shape[1], )))
        ## hidden layers
        first = True
        hidden_layers = self.hidden_layers
        while hidden_layers > 0:
            self.model.add(Dense(self.hidden_units))
            if self.batch_norm == "before_act":
                self.model.add(BatchNormalization())
            if self.hidden_activation == "prelu":
                self.model.add(PReLU())
            elif self.hidden_activation == "elu":
                self.model.add(ELU())
            else:
                self.model.add(Activation(self.hidden_activation))
            if self.batch_norm == "after_act":
                self.model.add(BatchNormalization())
            self.model.add(Dropout(self.hidden_dropout))
            hidden_layers -= 1

        ## output layer
        output_dim = 1
        output_act = "linear"
        self.model.add(Dense(output_dim))
        self.model.add(Activation(output_act))

        ## loss
        if self.optimizer == "sgd":
            sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
            self.model.compile(loss="mse", optimizer=sgd)
        else:
            self.model.compile(loss="mse", optimizer=self.optimizer)

        self.model.summary(print_fn=logger.info)

        ## callback
        early_stopping = EarlyStopping(monitor='val_loss',
                                       min_delta=1e-2,
                                       patience=10,
                                       verbose=0,
                                       mode='auto')

        cb_my = LossHistory()

        ## fit
        self.model.fit(X,
                       y,
                       epochs=self.epochs,
                       batch_size=self.batch_size,
                       validation_data=(X_val, y_val),
                       callbacks=[early_stopping, cb_my],
                       verbose=1)
        return self
Example #10
def initialize_model():

    model = Sequential()
    model.add(
        Conv2D(40, 11, strides=1, padding='same', input_shape=(1, 1024, 4)))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(Conv2D(40, 11, strides=1, padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(MaxPooling2D(pool_size=(1, 64)))

    model.add(Flatten())

    model.add(Dense(units=500))

    model.add(Dense(units=640))

    model.add(Reshape((1, 16, 40)))

    model.add(Conv2DTranspose(40, 11, strides=(1, 64), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(Conv2DTranspose(40, 11, strides=(1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(Activation('relu'))

    model.add(Conv2D(4, 11, strides=1, padding='same', activation='sigmoid'))
    model.summary()
    model.compile(optimizer='adam', loss='mse')

    return model
Example #11
def discriminator_model():
    """Build discriminator architecture."""
    n_layers, use_sigmoid = 3, False
    inputs = Input(shape=input_shape_discriminator)

    x = Conv2D(filters=ndf, kernel_size=(4, 4), strides=2, padding='same')(inputs)
    x = LeakyReLU(0.2)(x)

    nf_mult, nf_mult_prev = 1, 1
    for n in range(n_layers):
        nf_mult_prev, nf_mult = nf_mult, min(2**n, 8)
        x = Conv2D(filters=ndf*nf_mult, kernel_size=(4, 4), strides=2, padding='same')(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.2)(x)

    nf_mult_prev, nf_mult = nf_mult, min(2**n_layers, 8)
    x = Conv2D(filters=ndf*nf_mult, kernel_size=(4, 4), strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)

    x = Conv2D(filters=1, kernel_size=(4, 4), strides=1, padding='same')(x)
    if use_sigmoid:
        x = Activation('sigmoid')(x)

    x = Flatten()(x)
    x = Dense(1024, activation='tanh')(x)
    x = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=inputs, outputs=x, name='Discriminator')
    return model
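discriminator_model() reads ndf and input_shape_discriminator from module scope, and neither is defined in this snippet. A minimal sketch with plausible placeholder values (the specific numbers are assumptions):

ndf = 64                                   # base number of discriminator filters (assumed)
input_shape_discriminator = (256, 256, 3)  # input resolution and channels (assumed)

model = discriminator_model()
model.summary()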
Example #12
def build_model():
    input_img = Input(shape=(224, 224, 3), name='ImageInput')
    x = Conv2D(64, (3, 3), activation='relu', padding='same',
               name='Conv1_1')(input_img)
    x = Conv2D(64, (3, 3), activation='relu', padding='same',
               name='Conv1_2')(x)
    x = MaxPooling2D((2, 2), name='pool1')(x)

    x = SeparableConv2D(128, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv2_1')(x)
    x = SeparableConv2D(128, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv2_2')(x)
    x = MaxPooling2D((2, 2), name='pool2')(x)

    x = SeparableConv2D(256, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv3_1')(x)
    x = BatchNormalization(name='bn1')(x)
    x = SeparableConv2D(256, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv3_2')(x)
    x = BatchNormalization(name='bn2')(x)
    x = SeparableConv2D(256, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv3_3')(x)
    x = MaxPooling2D((2, 2), name='pool3')(x)

    x = SeparableConv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv4_1')(x)
    x = BatchNormalization(name='bn3')(x)
    x = SeparableConv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv4_2')(x)
    x = BatchNormalization(name='bn4')(x)
    x = SeparableConv2D(512, (3, 3),
                        activation='relu',
                        padding='same',
                        name='Conv4_3')(x)
    x = MaxPooling2D((2, 2), name='pool4')(x)

    x = Flatten(name='flatten')(x)
    x = Dense(1024, activation='relu', name='fc1')(x)
    x = Dropout(0.7, name='dropout1')(x)
    x = Dense(512, activation='relu', name='fc2')(x)
    x = Dropout(0.5, name='dropout2')(x)
    x = Dense(2, activation='softmax', name='fc3')(x)

    model = Model(inputs=input_img, outputs=x)
    return model
Example #13
    def __init__(self, latent_dim, original_dim):
        super(Decoder, self).__init__()
        # decoder sub layers
        self.hidden_layer1 = Dense(
            units=latent_dim, activation='relu', kernel_initializer='he_uniform')
        self.bn1 = BatchNormalization()
        self.leakyr1 = LeakyReLU()
        self.hidden_layer2 = Dense(units=32, activation='relu')
        self.bn2 = BatchNormalization()
        self.leakyr2 = LeakyReLU()
        self.output_layer = Dense(units=original_dim, activation='sigmoid')
Example #14
    def __init__(self, input_dim):
        super(Encoder, self).__init__()
        self.output_dim = 16  # bottleneck size

        # encoder sub layers
        self.hidden_layer1 = Dense(
            units=64, activation='relu', kernel_initializer='he_uniform', input_dim=input_dim)
        self.bn1 = BatchNormalization()
        self.leakyr1 = LeakyReLU()
        self.hidden_layer2 = Dense(units=32, activation='relu')
        self.bn2 = BatchNormalization()
        self.leakyr2 = LeakyReLU()
        self.output_layer = Dense(units=self.output_dim, activation='sigmoid')
Example #15
    def _create_model(self):
        inputs = Input((self.nBits_, ))
        x = Dense(units=self.mid_units,
                  activation=self.activation, name='dense')(inputs)
        x = BatchNormalization(name='bn')(x)
        x = Activation(self.activation, name='activation')(x)
        x = Dropout(0.2, name='dropout')(x)
        for i in range(self.num_layer):
            x = Dense(units=self.mid_units, name=f'dense{i}')(x)
            x = BatchNormalization(name=f'bn{i}')(x)
            x = Activation(self.activation, name=f'activation{i}')(x)
            x = Dropout(0.2, name=f'dropout{i}')(x)
        outputs = Dense(units=1, activation='linear', name='output')(x)
        return Model(inputs=inputs, outputs=outputs)
Example #16
def create_keras_model(inputShape, nClasses, output_activation='linear'):
    """
    SegNet model
    ----------
    inputShape : tuple
        Tuple with the dimensions of the input data (ny, nx, nBands). 
    nClasses : int
         Number of classes.
    """

    filter_size = 64
    kernel = (3, 3)
    pad = (1, 1)
    pool_size = (2, 2)

    inputs = Input(shape=inputShape, name='image')

    # Encoder
    x = Conv2D(64, kernel, padding='same')(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)

    x = Conv2D(128, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)

    x = Conv2D(256, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=pool_size)(x)

    x = Conv2D(512, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # Decoder
    x = Conv2D(512, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)

    x = Conv2D(256, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)

    x = Conv2D(128, kernel, padding='same')(x)
    x = BatchNormalization()(x)
    x = UpSampling2D(size=pool_size)(x)

    x = Conv2D(64, kernel, padding='same')(x)
    x = BatchNormalization()(x)

    x = Conv2D(nClasses, (1, 1), padding='valid')(x)

    outputs = Activation(output_activation, name='output')(x)

    model = Model(inputs=inputs, outputs=outputs, name='segnet')

    return model
Example #17
def residual(inputs, filter_size, kernel):
    x = Conv2D(filter_size,
               kernel,
               padding='same',
               kernel_initializer='he_normal')(inputs)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(filter_size,
               kernel,
               padding='same',
               kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x)
    x = Add()([x, inputs])
    return x
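A usage sketch for residual(); the Add() at the end requires the input tensor to already have filter_size channels, so the input shape below is chosen accordingly (and is otherwise an assumption):

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(32, 32, 64))   # channel count must equal filter_size
out = residual(inp, filter_size=64, kernel=(3, 3))
model = Model(inp, out)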
Example #18
    def buildModel(self, model_path=None):
        try:
            if model_path is None:
                model_path = './model_tensorboard_2.h5'
            mymodel = load_model(model_path)
            print('retrain model...........')
            history = mymodel.fit(self.x_train, self.y_train, batch_size=50, epochs=500, verbose=0, validation_split=0.2, callbacks=[TensorBoard('./logs2')])
            self.history = history.history
            mymodel.save('./model_tensorboard_2.h5')
            self.model = mymodel
            self._write_val_loss_to_csv()
        except Exception:
            print('train new model.........')
            start = datetime.datetime.now()
            mymodel = Sequential()
            mymodel.add(CuDNNLSTM(50, input_shape=(20, 1), return_sequences=True))
            mymodel.add(Activation('sigmoid'))
            mymodel.add(BatchNormalization())
            mymodel.add(Dropout(0.2))

            mymodel.add(CuDNNLSTM(100, return_sequences=True))
            mymodel.add(Activation('sigmoid'))
            mymodel.add(BatchNormalization())
            mymodel.add(Dropout(0.2))

            mymodel.add(CuDNNLSTM(100))
            mymodel.add(Activation('tanh'))
            mymodel.add(BatchNormalization())
            mymodel.add(Dropout(0.2))

            mymodel.add(Dense(50, activation='sigmoid'))
            mymodel.add(BatchNormalization())
            mymodel.add(Dropout(0.2))

            mymodel.add(Dense(20, activation='sigmoid'))
            mymodel.add(BatchNormalization())
            mymodel.add(Dropout(0.2))

            mymodel.add(Dense(22, activation='relu'))

            mymodel.compile('adam', 'mae', metrics=['mae'])
            print(mymodel.summary())
            self.model = mymodel
            history = mymodel.fit(self.x_train, self.y_train, batch_size=50, epochs=3000, verbose=2, validation_split=0.2, callbacks=[TensorBoard()])
            self.history = history.history
            mymodel.save('./model_tensorboard_2.h5')
            end = datetime.datetime.now()
            print('elapsed time:', end - start)
            self._write_val_loss_to_csv()
Example #19
    def _res_func(x):
        identity = Cropping2D(cropping=((2, 2), (2, 2)))(x)

        a = Conv2D(nb_filter, (nb_row, nb_col),
                   strides=stride,
                   padding='valid')(x)
        a = BatchNormalization()(a)
        #a = LeakyReLU(0.2)(a)
        a = Activation("relu")(a)
        a = Conv2D(nb_filter, (nb_row, nb_col),
                   strides=stride,
                   padding='valid')(a)
        y = BatchNormalization()(a)

        return add([identity, y])
Example #20
    def conv_func(x):
        x = Conv2D(nb_filter, (nb_row, nb_col), strides=stride,
                   padding='same')(x)
        x = BatchNormalization()(x)
        # x = LeakyReLU(0.2)(x)
        x = Activation("relu")(x)
        return x
Example #21
    def create_model(self):
        """ DEFINE NEURAL NETWORK """
        # define model as a linear stack of dense layers
        self.model = Sequential()

        # iteratively add hidden layers
        for layer_n in range(1, self.n_layers+1):
            print(layer_n, "hidden layer")
            if layer_n == 1:  # input_shape needs to be specified for the first layer
                self.model.add(Dense(units=self.n_hidden[layer_n], input_shape=(self.n_features,),
                                     kernel_initializer=self.weights_init, bias_initializer=self.bias_init))
            else:
                self.model.add(Dense(units=self.n_hidden[layer_n], kernel_initializer=self.weights_init,
                                     bias_initializer=self.bias_init))

            if self.batch_norm:
                self.model.add(BatchNormalization())  # add batch normalization before activation

            # add the activation layer explicitly
            if self.activation == 'LeakyReLU':
                self.model.add(LeakyReLU(alpha=self.alpha))  # for x < 0, y = alpha*x -> non-zero slope in the negative region

            elif self.activation == 'ReLU':
                self.model.add(ReLU())

            elif self.activation == 'eLU':
                self.model.add(ELU())

        # add output layer; no activation for the output layer
        self.model.add(Dense(units=self.n_outputs, kernel_initializer=self.weights_init,
                             bias_initializer=self.bias_init))
Example #22
    def create_posla_net(self, rows=120, cols=320, channel=1):
        # model setting

        inputShape = (rows, cols, channel)

        activation = 'relu'
        keep_prob_conv = 0.25
        keep_prob_dense = 0.5

        # init = 'glorot_normal'
        # init = 'he_normal'
        init = 'he_uniform'
        chanDim = -1
        classes = 3

        model = Sequential()

        # CONV => RELU => POOL
        model.add(
            Conv2D(3, (3, 3),
                   padding="valid",
                   input_shape=inputShape,
                   kernel_initializer=init,
                   activation=activation))
        model.add(BatchNormalization(axis=chanDim))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(9, (3, 3),
                   padding="valid",
                   kernel_initializer=init,
                   activation=activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(18, (3, 3),
                   padding="valid",
                   kernel_initializer=init,
                   activation=activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(32, (3, 3),
                   padding="valid",
                   kernel_initializer=init,
                   activation=activation))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())

        model.add(Dense(80, kernel_initializer=init, activation=activation))
        model.add(Dropout(keep_prob_dense))

        model.add(Dense(15, kernel_initializer=init, activation=activation))
        model.add(Dropout(keep_prob_dense))

        # softmax classifier
        model.add(Dense(classes, activation='softmax'))

        self.model = model
Example #23
    def build_model(self, f_sizes):
        """
        :param f_size: sparse feature nunique
        :return:
        """
        dim_input = len(f_sizes)  # +1

        input_x = [Input(shape=(1, ))
                   for i in range(dim_input)]  # one Input per sparse-feature column
        biases = [
            self.get_embed(x, size, 1) for (x, size) in zip(input_x, f_sizes)
        ]

        factors = [
            self.get_embed(x, size) for (x, size) in zip(input_x, f_sizes)
        ]

        s = Add()(factors)
        diffs = [Subtract()([s, x]) for x in factors]
        dots = [Dot(axes=1)([d, x]) for d, x in zip(diffs, factors)]

        x = Concatenate()(biases + dots)
        x = BatchNormalization()(x)
        output = Dense(1,
                       activation='relu',
                       kernel_regularizer=l2(self.kernel_l2))(x)
        model = Model(inputs=input_x, outputs=[output])
        model.compile(optimizer=Adam(clipnorm=0.5),
                      loss='mean_squared_error')  # TODO: radam

        output_f = factors + biases
        model_features = Model(inputs=input_x, outputs=output_f)
        return model, model_features
Example #24
def generator(input_shape, upscale_times=2):

    gen_input = Input(shape=input_shape)

    model = Conv2D(filters=64, kernel_size=9, strides=1,
                   padding="same")(gen_input)
    model = PReLU(alpha_initializer='zeros',
                  alpha_regularizer=None,
                  alpha_constraint=None,
                  shared_axes=[1, 2])(model)

    gen_model = model

    # Using 8 residual blocks
    for index in range(8):
        model = res_block_gen(model, 3, 64, 1)

    model = Conv2D(filters=64, kernel_size=3, strides=1, padding="same")(model)
    model = BatchNormalization(momentum=0.5)(model)
    model = add([gen_model, model])

    # Using upscale_times upsampling blocks
    for index in range(upscale_times):
        model = up_sampling_block(model, 3, 256, 1)

    model = Conv2D(filters=3, kernel_size=9, strides=1, padding="same")(model)
    model = Activation('tanh')(model)

    generator_model = Model(inputs=gen_input, outputs=model)

    return generator_model
Example #25
def transition_block(input,
                     nb_filter,
                     use_pool=True,
                     dropout_rate=None,
                     pooltype=1,
                     weight_decay=1e-4):
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1),
               kernel_initializer='he_normal',
               padding='same',
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    if use_pool:
        if pooltype == 2:
            x = AveragePooling2D((2, 2), strides=(2, 2))(x)
        elif pooltype == 1:
            x = ZeroPadding2D(padding=(0, 1))(x)
            x = AveragePooling2D((2, 2), strides=(2, 1))(x)
        elif pooltype == 3:
            x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    return x, nb_filter
Example #26
def _shortcut(input_feature, residual, conv_name_base=None, bn_name_base=None):
    """Adds a shortcut between input and residual block and merges them with "sum"
    """
    # Expand channels of shortcut to match residual.
    # Stride appropriately to match residual (width, height)
    # Should be int if network architecture is correctly configured.
    input_shape = K.int_shape(input_feature)
    residual_shape = K.int_shape(residual)
    stride_width = int(round(input_shape[ROW_AXIS] / residual_shape[ROW_AXIS]))
    stride_height = int(round(input_shape[COL_AXIS] /
                              residual_shape[COL_AXIS]))
    equal_channels = input_shape[CHANNEL_AXIS] == residual_shape[CHANNEL_AXIS]

    shortcut = input_feature
    # 1 X 1 conv if shape is different. Else identity.
    if stride_width > 1 or stride_height > 1 or not equal_channels:
        print('reshaping via a convolution...')
        if conv_name_base is not None:
            conv_name_base = conv_name_base + '1'
        shortcut = Conv2D(filters=residual_shape[CHANNEL_AXIS],
                          kernel_size=(1, 1),
                          strides=(stride_width, stride_height),
                          padding="valid",
                          kernel_initializer="he_normal",
                          kernel_regularizer=l2(0.0001),
                          name=conv_name_base)(input_feature)
        if bn_name_base is not None:
            bn_name_base = bn_name_base + '1'
        shortcut = BatchNormalization(axis=CHANNEL_AXIS,
                                      name=bn_name_base)(shortcut)

    return add([shortcut, residual])
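_shortcut() relies on the module-level constants ROW_AXIS, COL_AXIS, and CHANNEL_AXIS, which are not shown. A minimal sketch for a channels_last backend (channels_first would use 2, 3, 1 instead):

# axis indices into K.int_shape(tensor) == (batch, rows, cols, channels)
ROW_AXIS = 1
COL_AXIS = 2
CHANNEL_AXIS = 3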
Example #27
def DarknetConv2D_BN_Leaky(*args, **kwargs):
    # When batch normalization is used, the convolution layer needs no bias term.
    no_bias_kwargs = {'use_bias': False}
    # Create num_blocks residual blocks.
    no_bias_kwargs.update(kwargs)
    return compose(DarknetConv2D(*args, **no_bias_kwargs),
                   BatchNormalization(), LeakyReLU(alpha=0.1))
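compose() and DarknetConv2D() come from the surrounding YOLO codebase and are not shown here. A minimal sketch of a left-to-right composition helper consistent with how it is used above (an assumption about the original implementation):

from functools import reduce

def compose(*funcs):
    # compose(f, g, h)(x) == h(g(f(x)))
    if not funcs:
        raise ValueError('compose requires at least one function.')
    return reduce(lambda f, g: lambda *args, **kwargs: g(f(*args, **kwargs)), funcs)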
Example #28
def inception_resnet_v2_C(input, scale_residual=True):
    channel_axis = -1

    # Input is relu activation
    init = input

    ir1 = Conv2D(192, (1, 1), activation='relu', padding='same')(input)

    ir2 = Conv2D(192, (1, 1), activation='relu', padding='same')(input)
    ir2 = Conv2D(224, (1, 3), activation='relu', padding='same')(ir2)
    ir2 = Conv2D(256, (3, 1), activation='relu', padding='same')(ir2)

    ir_merge = merge.concatenate([ir1, ir2], axis=channel_axis)

    ir_conv = Conv2D(backend.int_shape(input)[channel_axis], (1, 1),
                     activation='relu')(ir_merge)
    out = Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale,
                 output_shape=backend.int_shape(input)[1:],
                 arguments={'scale': 0.1})([input, ir_conv])

    # ir_conv = Conv2D(2144, (1, 1), activation='linear', padding='same')(ir_merge)
    # if scale_residual: ir_conv = Lambda(lambda x: x * 0.1)(ir_conv)
    # out = merge.concatenate([init, ir_conv], axis=channel_axis)

    out = BatchNormalization(axis=channel_axis)(out)
    out = Activation("relu")(out)
    return out
Example #29
def AutoEncoder_MLP_enc_dec_output(MLPStructure, TrainParams, LayerArgList,
                                   img_rows):
    count = 0
    print("MLPStructure: " + str(MLPStructure))
    print("LayerArgList :" + str(LayerArgList))
    NLayers = len(LayerArgList)
    input_img = Input(shape=(img_rows, ))
    OutLayer = Dense(MLPStructure[0],
                     activation=LayerArgList[0]['Dense_activation'])(input_img)
    OutLayer = BatchNormalization()(OutLayer)
    OutLayer = Dropout(rate=float(LayerArgList[0]['Dropout_rate']))(OutLayer)
    if NLayers >= 3:
        for count in range(1, int(NLayers - 1)):
            #print("count: "+str(count))
            #print("LayerArgList[count]: "+str(LayerArgList[count]))
            #print("activation=LayerArgList[count]['Dense_activation']: "+str(LayerArgList[count]['Dense_activation']))
            OutLayer = Dense(
                MLPStructure[count],
                activation=LayerArgList[count]['Dense_activation'])(OutLayer)
            OutLayer = BatchNormalization()(OutLayer)
            OutLayer = Dropout(
                rate=float(LayerArgList[count]['Dropout_rate']))(OutLayer)

    if NLayers >= 2:
        OutLayer = Dense(
            MLPStructure[-1],
            activation=LayerArgList[-1]['Dense_activation'])(OutLayer)
        encoded = BatchNormalization(name="encoded_vals")(OutLayer)
        OutLayer = encoded
        OutLayer = Dropout(
            rate=float(LayerArgList[-1]['Dropout_rate']))(OutLayer)

    if NLayers >= 2:
        for count in range(int(NLayers) - 2, -1, -1):
            OutLayer = Dense(
                MLPStructure[count],
                activation=LayerArgList[count]['Dense_activation'])(OutLayer)
            OutLayer = BatchNormalization()(OutLayer)
            OutLayer = Dropout(
                rate=float(LayerArgList[count]['Dropout_rate']))(OutLayer)

    decoded = Dense(
        img_rows,
        activation=TrainParams['Autoeoncoder_lastlayer_activation'],
        name="decoded_vals")(OutLayer)
    autoencoder_model = Model(inputs=input_img, outputs=[encoded, decoded])
    return (autoencoder_model, input_img, encoded, decoded)
Example #30
	def build(width, height, depth, classes):
		# initialize the model along with the input shape to be
		# "channels last" and the channels dimension itself
		model = Sequential()
		inputShape = (height, width, depth)
		chanDim = -1

		# if we are using "channels first", update the input shape
		# and channels dimension
		if K.image_data_format() == "channels_first":
			inputShape = (depth, height, width)
			chanDim = 1

		# first CONV => RELU => CONV => RELU => POOL layer set
		model.add(Conv2D(32, (3, 3), padding="same",
			input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(Conv2D(32, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(0.25))

		# second CONV => RELU => CONV => RELU => POOL layer set
		model.add(Conv2D(64, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(Conv2D(64, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(0.25))

		# first (and only) set of FC => RELU layers
		model.add(Flatten())
		model.add(Dense(512))
		model.add(Activation("relu"))
		model.add(BatchNormalization())
		model.add(Dropout(0.5))

		# softmax classifier
		model.add(Dense(classes))
		model.add(Activation("softmax"))

		# return the constructed network architecture
		return model