def resunet_lstm(input_shape=(128, 128, 16), n_class=1, n_filter=64, one_hot=False, input_split=4):
    def resunet_encoder():
        img_input = layers.Input(shape=(input_shape[0], input_shape[1], input_shape[-1] // input_split))
        # 128 -> 64
        conv1 = layers.Conv2D(n_filter * 1, (3, 3), activation=None, padding="same",
                              kernel_initializer=KERNEL_INITIALIZER,
                              kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                              )(img_input)
        conv1 = residual_block(conv1, n_filter * 1)
        conv1 = residual_block(conv1, n_filter * 1, True)
        pool1 = layers.MaxPooling2D((2, 2))(conv1)
        pool1 = layers.Dropout(DROPOUT)(pool1)

        # 64 -> 32
        conv2 = layers.Conv2D(n_filter * 2, (3, 3), activation=None, padding="same",
                              kernel_initializer=KERNEL_INITIALIZER,
                              kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                              )(pool1)
        conv2 = residual_block(conv2, n_filter * 2)
        conv2 = residual_block(conv2, n_filter * 2, True)
        pool2 = layers.MaxPooling2D((2, 2))(conv2)
        pool2 = layers.Dropout(DROPOUT)(pool2)

        # 32 -> 16
        conv3 = layers.Conv2D(n_filter * 4, (3, 3), activation=None, padding="same",
                              kernel_initializer=KERNEL_INITIALIZER,
                              kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                              )(pool2)
        conv3 = residual_block(conv3, n_filter * 4)
        conv3 = residual_block(conv3, n_filter * 4, True)
        pool3 = layers.MaxPooling2D((2, 2))(conv3)
        pool3 = layers.Dropout(DROPOUT)(pool3)

        # 16 -> 8
        conv4 = layers.Conv2D(n_filter * 8, (3, 3), activation=None, padding="same",
                              kernel_initializer=KERNEL_INITIALIZER,
                              kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                              )(pool3)
        conv4 = residual_block(conv4, n_filter * 8)
        conv4 = residual_block(conv4, n_filter * 8, True)
        pool4 = layers.MaxPooling2D((2, 2))(conv4)
        pool4 = layers.Dropout(DROPOUT)(pool4)

        # Middle
        convm = layers.Conv2D(n_filter * 16, (3, 3), activation=None, padding="same",
                              kernel_initializer=KERNEL_INITIALIZER,
                              kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                              )(pool4)
        convm = residual_block(convm, n_filter * 16)
        convm = residual_block(convm, n_filter * 16, True)
        # convm = convolutional_block_attention_module(convm, hidden_size=n_filter, conv_size=7)

        return models.Model(img_input, [conv1, conv2, conv3, conv4, convm])

    assert input_shape[-1] % input_split == 0
    img_input = layers.Input(shape=input_shape)
    img_inputs = []
    channels_per_split = input_shape[-1] // input_split
    for i in range(input_split):
        # bind i as a default argument so each Lambda keeps its own slice index
        _x = layers.Lambda(lambda xx, i=i: xx[:, :, :, i * channels_per_split: (i + 1) * channels_per_split])(img_input)
        img_inputs.append(_x)

    feature_extractor = resunet_encoder()
    F0s = []
    F1s = []
    F2s = []
    F3s = []
    F4s = []
    for i in range(input_split):
        F01234 = feature_extractor(img_inputs[i])
        F0s.append(F01234[0])
        F1s.append(F01234[1])
        F2s.append(F01234[2])
        F3s.append(F01234[3])
        F4s.append(F01234[4])

    F0s = layers.Lambda(lambda xx: K.stack(xx, axis=1))(F0s)
    F1s = layers.Lambda(lambda xx: K.stack(xx, axis=1))(F1s)
    F2s = layers.Lambda(lambda xx: K.stack(xx, axis=1))(F2s)
    F3s = layers.Lambda(lambda xx: K.stack(xx, axis=1))(F3s)
    F4s = layers.Lambda(lambda xx: K.stack(xx, axis=1))(F4s)

    F0 = layers.ConvLSTM2D(filters=n_filter, kernel_size=3, padding='same', activation='relu',
                           return_sequences=False)(F0s)
    F1 = layers.ConvLSTM2D(filters=n_filter, kernel_size=3, padding='same', activation='relu',
                           return_sequences=False)(F1s)
    F2 = layers.ConvLSTM2D(filters=n_filter, kernel_size=3, padding='same', activation='relu',
                           return_sequences=False)(F2s)
    F3 = layers.ConvLSTM2D(filters=n_filter, kernel_size=3, padding='same', activation='relu',
                           return_sequences=False)(F3s)
    F4 = layers.ConvLSTM2D(filters=n_filter, kernel_size=3, padding='same', activation='relu',
                           return_sequences=False)(F4s)

    # 8 -> 16
    deconv4 = layers.UpSampling2D((2, 2))(F4)
    uconv4 = layers.concatenate([deconv4, F3])
    uconv4 = layers.Dropout(DROPOUT)(uconv4)

    uconv4 = layers.Conv2D(n_filter * 8, (3, 3), activation=None, padding="same",
                           kernel_initializer=KERNEL_INITIALIZER,
                           kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(uconv4)
    uconv4 = residual_block(uconv4, n_filter * 8)
    uconv4 = residual_block(uconv4, n_filter * 8, True)

    # 16 -> 32
    deconv3 = layers.UpSampling2D((2, 2))(uconv4)
    uconv3 = layers.concatenate([deconv3, F2])
    uconv3 = layers.Dropout(DROPOUT)(uconv3)

    uconv3 = layers.Conv2D(n_filter * 4, (3, 3), activation=None, padding="same",
                           kernel_initializer=KERNEL_INITIALIZER,
                           kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                           )(uconv3)
    uconv3 = residual_block(uconv3, n_filter * 4)
    uconv3 = residual_block(uconv3, n_filter * 4, True)

    # 32 -> 64
    deconv2 = layers.UpSampling2D((2, 2))(uconv3)
    uconv2 = layers.concatenate([deconv2, F1])
    uconv2 = layers.Dropout(DROPOUT)(uconv2)

    uconv2 = layers.Conv2D(n_filter * 2, (3, 3), activation=None, padding="same",
                           kernel_initializer=KERNEL_INITIALIZER,
                           kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(uconv2)
    uconv2 = residual_block(uconv2, n_filter * 2)
    uconv2 = residual_block(uconv2, n_filter * 2, True)

    # 64 -> 128
    deconv1 = layers.UpSampling2D((2, 2))(uconv2)
    uconv1 = layers.concatenate([deconv1, F0])
    uconv1 = layers.Dropout(DROPOUT)(uconv1)

    uconv1 = layers.Conv2D(n_filter * 1, (3, 3), activation=None, padding="same",
                           kernel_initializer=KERNEL_INITIALIZER,
                           kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(uconv1)
    uconv1 = residual_block(uconv1, n_filter * 1)
    uconv1 = residual_block(uconv1, n_filter * 1, True)

    x = layers.Conv2D(n_class, (1, 1), padding="same", activation=None)(uconv1)
    if n_class == 1 or (n_class == 2 and one_hot is False):
        x = layers.Activation('sigmoid')(x)
    else:
        x = layers.Activation('softmax')(x)
    return models.Model(img_input, x)
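
The function above assumes module-level constants and a residual_block helper defined elsewhere in its source file; a minimal sketch of those assumptions plus a smoke test follows (placeholder values, not the original project's settings):

# Assumed imports and constants (the scrape drops them):
from keras import layers, models, regularizers
from keras import backend as K

KERNEL_INITIALIZER = 'he_normal'   # placeholder
WEIGHT_DECAY = 1e-4                # placeholder
DROPOUT = 0.5                      # placeholder

def residual_block(x, filters, batch_activate=False):
    # Placeholder residual block: two BN-ReLU-Conv stages plus an identity shortcut.
    y = layers.BatchNormalization()(x)
    y = layers.Activation('relu')(y)
    y = layers.Conv2D(filters, (3, 3), padding='same')(y)
    y = layers.BatchNormalization()(y)
    y = layers.Activation('relu')(y)
    y = layers.Conv2D(filters, (3, 3), padding='same')(y)
    y = layers.add([x, y])
    if batch_activate:
        y = layers.Activation('relu')(y)
    return y

model = resunet_lstm(input_shape=(128, 128, 16), n_class=1, input_split=4)
model.summary()  # 16 channels split into 4 temporal slices of 4 channels each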
Example #2
    modelA.add(layers.Flatten())
    modelA.add(layers.Dense(batch))
    modelA.add(layers.Reshape((1, batch)))
    return modelA

modelu = modelCreator()


modelu.summary()
X1 = layers.Input((img_width, img_height, 1))
X2 = layers.Input((img_width, img_height, 1))
my1 = modelu(X1)
my2 = modelu(X2)
myo = layers.Subtract()([my1, my2])
m = layers.Dense(1, activation="sigmoid")(myo)
model = models.Model(inputs=[X1, X2], outputs=m)


model.compile(loss="binary_crossentropy", optimizer='adam', metrics=['accuracy'])
model.summary()

path_to_dataset = os.getcwd() + "\\dataset"
#For training
anchor_main = []
real_forged_main = []
y_list = []
for x in range(1, 5):
    if x != 2:
        forge = []
        forge.extend(os.listdir(path_to_dataset + str(x) + "\\forge"))
        forge = np.array(forge)
Example #3
def Cresnet(image_size, num_labels):
    num_channels = 1
    inputs = Input(shape=(image_size, image_size, image_size, num_channels))
    m = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(inputs)
    m = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2))(m)

    # Four identical residual-style blocks: a 3x3x3 "shortcut" branch is
    # concatenated with a 1-3-1 bottleneck branch, then passed through ReLU.
    for _ in range(4):
        shortcut = Conv3D(16, (3, 3, 3), activation='relu', padding='same')(m)

        # Bottleneck
        mNew = Conv3D(16, (1, 1, 1), activation='relu', padding='same')(m)
        mNew = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(mNew)
        mNew = Conv3D(16, (1, 1, 1), padding='same')(mNew)
        mNew = Dropout(0.7)(mNew)

        mMerge = concatenate([shortcut, mNew], axis=-1)
        m = Activation('relu')(mMerge)

    m = Flatten(name='flatten')(m)
    m = Dense(1024, activation='relu', name='fc1')(m)
    m = Dropout(0.5)(m)

    m = Dense(1024, activation='relu', name='fc2')(m)
    m = Dropout(0.5)(m)
    m = Dense(num_labels, activation='softmax')(m)

    mod = KM.Model(inputs=inputs, outputs=m)

    return mod
Example #4
 def create_model(self, input_shape):
     """Return keras model of the FC-Densenet"""
     inputs = layers.Input(shape=input_shape)
     outputs = self._model(inputs)
     model = models.Model(inputs, outputs)
     return model
Example #5
    def build_model(self):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        # Define input layers
        states = layers.Input(shape=(self.state_size, ), name='states')
        actions = layers.Input(shape=(self.action_size, ), name='actions')

        # Add hidden layer(s) for state pathway
        net_states = layers.Dense(units=32, activation='relu')(states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Dropout(0.5)(net_states)

        net_states = layers.Dense(units=64, activation='relu')(net_states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Dropout(0.5)(net_states)

        net_states = layers.Dense(units=128, activation='relu')(net_states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Dropout(0.5)(net_states)

        net_states = layers.Dense(units=32, activation='relu')(net_states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Dropout(0.5)(net_states)

        # Add hidden layer(s) for action pathway
        net_actions = layers.Dense(units=32, activation='relu')(actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Dropout(0.5)(net_actions)

        net_actions = layers.Dense(units=64, activation='relu')(net_actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Dropout(0.5)(net_actions)

        net_actions = layers.Dense(units=128, activation='relu')(net_actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Dropout(0.7)(net_actions)

        net_actions = layers.Dense(units=32, activation='relu')(net_actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Dropout(0.5)(net_actions)

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.

        # Combine state and action pathways
        net = layers.Add()([net_states, net_actions])
        net = layers.Activation('relu')(net)

        # Add more layers to the combined network if needed

        # Add final output layer to produce action values (Q values)
        Q_values = layers.Dense(units=1, name='q_values')(net)

        # Create Keras model
        self.model = models.Model(inputs=[states, actions], outputs=Q_values)

        # Define optimizer and compile model for training with built-in loss function
        optimizer = optimizers.Adam()
        self.model.compile(optimizer=optimizer, loss='mse')

        # Compute action gradients (derivative of Q values w.r.t. actions)
        action_gradients = K.gradients(Q_values, actions)

        # Define an additional function to fetch action gradients (to be used by actor model)
        self.get_action_gradients = K.function(
            inputs=[*self.model.input, K.learning_phase()],
            outputs=action_gradients)
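
A hedged sketch of how the two pieces built here are used from a DDPG agent's learn step (array shapes and surrounding names are assumptions, not part of this example):

import numpy as np

# Hypothetical call site, assuming critic = Critic(state_size=8, action_size=2):
# states = np.random.rand(64, 8)
# actions = np.random.rand(64, 2)
# grads = critic.get_action_gradients([states, actions, 0])[0]  # 0 = test phase
# grads has shape (64, 2) and is fed to the actor's training function as dQ/da.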
Example #6
def CapsNet(input_shape, n_class, routings):
    """
    A Capsule Network on MNIST.
    :param input_shape: data shape, 3d, [width, height, channels]
    :param n_class: number of classes
    :param routings: number of routing iterations
    :return: Three Keras Models: the first one used for training, the second one
            for evaluation, and the third one for manipulation.
            `eval_model` can also be used for training.
    """
    x = layers.Input(shape=input_shape)

    # Layer 1: Conv2D layer
    conv1 = layers.Conv2D(filters=64,
                          kernel_size=5,
                          strides=1,
                          padding='valid',
                          activation='relu',
                          name='conv1')(x)
    dp1 = layers.Dropout(0.1)(conv1)

    # Layer 2: Primary Capsule layer
    primarycaps = PrimaryCap(dp1,
                             dim_capsule=8,
                             n_channels=32,
                             kernel_size=5,
                             strides=2,
                             padding='valid')

    # Layer 3: Capsule layer
    digitcaps = CapsuleLayer(num_capsule=n_class,
                             dim_capsule=16,
                             routings=routings,
                             name='digitcaps')(primarycaps)

    # Layer 4: Conversion layer
    # outputs = layers.Reshape(target_shape=[-1, 64], name='primarycap_reshape')(conv5)
    out_caps = Length(name='capsnet')(digitcaps)

    # out = layers.Dense(2, activation='sigmoid')(out_caps)

    # Reconstruct the initial input from the encoded capsule representation
    y = layers.Input(shape=(n_class, ))
    masked_by_y = Mask()([digitcaps, y])  # select one capsule by using GT
    masked = Mask()(digitcaps)

    # Shared Decoder model in training and prediction
    decoder = models.Sequential(name='decoder')
    decoder.add(layers.Dense(512, activation='relu', input_dim=16 * n_class))

    decoder.add(layers.Dense(1024, activation='relu'))

    decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
    decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))

    train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])
    eval_model = models.Model(x, [out_caps, decoder(masked)])

    # capsule representation noise model
    noise = layers.Input(shape=(n_class, 16))
    noised_digitcaps = layers.Add()([digitcaps, noise])
    masked_noised_y = Mask()([noised_digitcaps, y])
    manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))
    return train_model, eval_model, manipulate_model
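
A hedged usage sketch for the three returned models (loss choice and hyperparameters are assumptions; the usual CapsNet recipe pairs a margin loss with a down-weighted reconstruction loss):

train_model, eval_model, manipulate_model = CapsNet(input_shape=(28, 28, 1),
                                                    n_class=10, routings=3)
train_model.compile(optimizer='adam',
                    loss=['mse', 'mse'],        # placeholders for margin + reconstruction losses
                    loss_weights=[1.0, 0.392])  # reconstruction down-weighted
# train_model.fit([x_train, y_train], [y_train, x_train], batch_size=100, epochs=10)
# y_pred, x_recon = eval_model.predict(x_test)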
Example #7
img_tensor /= 255

import matplotlib.pyplot as plt

plt.imshow(img_tensor[0])
plt.show()
"""
Part 3. 构建每一层输入输出对应关系
"""

from keras import models

# Extract the outputs of the first 8 layers
layer_outputs = [layer.output for layer in model.layers[:8]]
# Build a model that, given the original model's input, returns those outputs
activation_model = models.Model(inputs=model.input, outputs=layer_outputs)
# Returns a list of 8 NumPy arrays, one activation array per layer
activations = activation_model.predict(img_tensor)

# Plot one channel of the first layer's activation; the first layer is
# conv2d_5 (Conv2D) with output shape (None, 148, 148, 32), i.e. 32 output channels
first_layer_activation = activations[0]
print(first_layer_activation.shape)
plt.matshow(first_layer_activation[0, :, :, 0], cmap='viridis')
plt.show()
"""
Part 4. 将所有通道可视化
"""

import keras

# Store the name of each layer
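
The example breaks off here; a sketch of the channel-grid loop that these comments lead up to (grid width and display scaling are assumptions in the spirit of the surrounding code):

import numpy as np

layer_names = [layer.name for layer in model.layers[:8]]

images_per_row = 16
for layer_name, layer_activation in zip(layer_names, activations):
    n_features = layer_activation.shape[-1]  # number of channels in this layer
    size = layer_activation.shape[1]         # feature maps are (1, size, size, n_features)
    n_rows = n_features // images_per_row
    display_grid = np.zeros((size * n_rows, images_per_row * size))
    for row in range(n_rows):
        for col in range(images_per_row):
            channel_image = layer_activation[0, :, :, row * images_per_row + col].copy()
            # Normalize each channel for display
            channel_image -= channel_image.mean()
            channel_image /= (channel_image.std() + 1e-5)
            channel_image = np.clip(channel_image * 64 + 128, 0, 255).astype('uint8')
            display_grid[row * size:(row + 1) * size,
                         col * size:(col + 1) * size] = channel_image
    plt.figure(figsize=(display_grid.shape[1] / size, display_grid.shape[0] / size))
    plt.title(layer_name)
    plt.grid(False)
    plt.imshow(display_grid, aspect='auto', cmap='viridis')
plt.show()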
Example #8
 def Encoder(self):
     return models.Model(self.x, self.z)
Example #9
 def Decoder(self):
     z_shape = (self.z_dim, )
     z = layers.Input(shape=z_shape)
     y_layer = self.layers[-1]
     y = y_layer(z)
     return models.Model(z, y)
Example #10
    def init_model(self, train, std=0.01):

        #current_item = kl.Input( ( 1, ), name="current_item" )

        item = kl.Input((1, ),
                        dtype=self.intX)  #, batch_shape=(self.,self.steps) )
        user = kl.Input((1, ),
                        dtype=self.intX)  #, batch_shape=(self.batch,1) )

        if self.include_artist:
            artist = kl.Input((1, ),
                              dtype=self.intX)  #, batch_shape=(self.batch,1) )

        trainable = True
        if self.embeddings == 'fixed':
            trainable = False

        emb_user = Embedding(embeddings_initializer='random_normal',
                             output_dim=self.factors,
                             input_dim=self.num_users,
                             embeddings_regularizer=l2(self.emb_reg),
                             trainable=trainable)
        emb_item = Embedding(embeddings_initializer='random_normal',
                             output_dim=self.factors,
                             input_dim=self.num_items,
                             embeddings_regularizer=l2(self.emb_reg),
                             trainable=trainable)

        if self.embeddings is not None:
            userw = self.get_latent(self.usermap.index,
                                    size=self.factors,
                                    col='playlist_id')
            itemw = self.get_latent(self.itemmap.index, size=self.factors)
            emb_user.build((None, ))
            emb_item.build((None, ))
            emb_user.set_weights([userw])
            emb_item.set_weights([itemw])

        if self.include_artist:
            emb_artist = Embedding(output_dim=self.factors,
                                   input_dim=self.num_artists,
                                   embeddings_regularizer=l2(self.emb_reg))

        #MLP PART

        uemb = kl.Flatten()(emb_user(user))
        iemb = kl.Flatten()(emb_item(item))

        mlp_vector = kl.Concatenate()([uemb, iemb])
        if self.include_artist:
            emba = kl.Flatten()(emb_artist(artist))
            mlp_vector = kl.Concatenate()([mlp_vector, emba])

        for i in range(len(self.layers)):
            layer = kl.Dense(self.layers[i],
                             activation=self.hidden_act,
                             name="layer%d" % i,
                             kernel_regularizer=l2(self.layer_reg))
            #bn = kl.BatchNormalization()
            #act = kl.Activation('relu')
            #mlp_vector = act( bn( layer(mlp_vector) ) )
            mlp_vector = layer(mlp_vector)

        #PRED PART

        fff = kl.Dense(1,
                       activation=self.final_act,
                       kernel_initializer='lecun_uniform',
                       kernel_regularizer=l2(self.final_reg))
        res = fff(mlp_vector)

        inputs = [user, item]  #+ [artist]
        if self.include_artist:
            inputs += [artist]
        outputs = [res]

        model = km.Model(inputs, outputs)

        if self.optimizer == 'adam':
            opt = keras.optimizers.Adam(lr=self.learning_rate)
        elif self.optimizer == 'nadam':
            opt = keras.optimizers.Nadam(lr=self.learning_rate)
        elif self.optimizer == 'adamax':
            opt = keras.optimizers.Adamax(lr=self.learning_rate)
        elif self.optimizer == 'adagrad':
            opt = keras.optimizers.Adagrad(lr=self.learning_rate)
        elif self.optimizer == 'adadelta':
            opt = keras.optimizers.Adadelta(lr=self.learning_rate)

        model.compile(optimizer=opt, loss='binary_crossentropy')
        plot_model(model, to_file='mlp.png')

        return model
Example #11
def Conv2DClassifierIn1(x_train, y_train, x_test, y_test):
    summary = True
    verbose = 1

    # setHyperParams------------------------------------------------------------------------------------------------
    batch_size = {{choice([32, 64, 128, 256])}}
    epoch = {{choice([25, 50, 75, 100, 125, 150, 175, 200])}}

    conv_block = {{choice(['two', 'three', 'four'])}}

    conv1_num = {{choice([8, 16, 32, 64])}}
    conv2_num = {{choice([16, 32, 64, 128])}}
    conv3_num = {{choice([32, 64, 128])}}
    conv4_num = {{choice([32, 64, 128, 256])}}

    dense1_num = {{choice([128, 256, 512])}}
    dense2_num = {{choice([64, 128, 256])}}

    l1_regular_rate = {{uniform(0.00001, 1)}}
    l2_regular_rate = {{uniform(0.000001, 1)}}
    drop1_num = {{uniform(0.1, 1)}}
    drop2_num = {{uniform(0.0001, 1)}}

    activator = {{choice(['elu', 'relu', 'tanh'])}}
    optimizer = {{choice(['adam', 'rmsprop', 'SGD'])}}

    #---------------------------------------------------------------------------------------------------------------
    kernel_size = (3, 3)
    pool_size = (2, 2)
    initializer = 'random_uniform'
    padding_style = 'same'
    loss_type = 'binary_crossentropy'
    metrics = ['accuracy']
    my_callback = None
    # early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    # checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
    #                                verbose=1,
    #                                save_best_only=True)
    # my_callback = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2,
    #                                           patience=5, min_lr=0.0001)

    # build --------------------------------------------------------------------------------------------------------
    input_layer = Input(shape=x_train.shape[1:])
    conv = layers.Conv2D(conv1_num,
                         kernel_size,
                         padding=padding_style,
                         kernel_initializer=initializer,
                         activation=activator)(input_layer)
    conv = layers.Conv2D(conv1_num,
                         kernel_size,
                         padding=padding_style,
                         kernel_initializer=initializer,
                         activation=activator)(conv)
    pool = layers.MaxPooling2D(pool_size, padding=padding_style)(conv)
    if conv_block == 'two':
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)
    elif conv_block == 'three':
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)

        conv = layers.Conv2D(conv3_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv3_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)
    elif conv_block == 'four':
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)

        conv = layers.Conv2D(conv3_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv3_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)

        conv = layers.Conv2D(conv4_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv4_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)

    flat = layers.Flatten()(pool)
    drop = layers.Dropout(drop1_num)(flat)

    dense = layers.Dense(dense1_num,
                         activation=activator,
                         kernel_regularizer=regularizers.l1_l2(
                             l1=l1_regular_rate, l2=l2_regular_rate))(drop)
    BatchNorm = layers.BatchNormalization(axis=-1)(dense)
    drop = layers.Dropout(drop2_num)(BatchNorm)

    dense = layers.Dense(dense2_num,
                         activation=activator,
                         kernel_regularizer=regularizers.l1_l2(
                             l1=l1_regular_rate, l2=l2_regular_rate))(drop)

    output_layer = layers.Dense(len(np.unique(y_train)),
                                activation='softmax')(dense)

    model = models.Model(inputs=input_layer, outputs=output_layer)

    if summary:
        model.summary()

# train(self):
    class_weights = class_weight.compute_class_weight(class_weight='balanced',
                                                      classes=np.unique(y_train),
                                                      y=y_train.reshape(-1))
    class_weights_dict = dict(enumerate(class_weights))
    model.compile(
        optimizer=optimizer,
        loss=loss_type,
        metrics=metrics  # accuracy
    )

    result = model.fit(x=x_train,
                       y=y_train,
                       batch_size=batch_size,
                       epochs=epoch,
                       verbose=verbose,
                       callbacks=my_callback,
                       validation_data=(x_test, y_test),
                       shuffle=True,
                       class_weight=class_weights_dict)

    validation_acc = np.amax(result.history['val_acc'])
    print('Best validation acc of epoch:', validation_acc)
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
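
The double-brace expressions above are hyperas search-space templates rather than plain Python; a hedged sketch of the driver that would consume this function (the data() provider is an assumption):

# Hypothetical driver; hyperas rewrites the {{choice(...)}} / {{uniform(...)}}
# templates into hyperopt search spaces. Assumes a data() function returning
# (x_train, y_train, x_test, y_test) and the usual
# `from hyperas.distributions import choice, uniform` in the model file.
from hyperas import optim
from hyperopt import Trials, tpe

best_run, best_model = optim.minimize(model=Conv2DClassifierIn1,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=20,
                                      trials=Trials())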
Example #12
    def build_default_model(self):
        ''' default Aachen-DNN model as used in the analysis '''
        K.set_learning_phase(True)

        number_of_input_neurons = self.data.n_input_neurons

        number_of_neurons_per_layer = self.architecture["prenet_layer"]
        dropout = self.architecture["Dropout"]
        batchNorm = self.architecture["batchNorm"]
        activation_function = self.architecture["activation_function"]
        l2_regularization_beta = self.architecture["L2_Norm"]

        # build pre net ===========================================================================
        Inputs = keras.layers.Input(shape=(self.data.n_input_neurons, ),
                                    name="input")

        X = Inputs
        self.layer_list = [X]

        # loop over dense layers
        for i, nNeurons in enumerate(number_of_neurons_per_layer):
            Dense = keras.layers.Dense(
                nNeurons,
                activation=activation_function,
                kernel_regularizer=keras.regularizers.l2(
                    l2_regularization_beta),
                name="Dense_" + str(i))(X)
            self.layer_list.append(Dense)

            X = Dense
            if dropout != 1:
                X = keras.layers.Dropout(dropout)(X)

            if batchNorm:
                X = keras.layers.BatchNormalization()(X)

        # generate output layer
        X = keras.layers.Dense(self.data.n_prenet_output_neurons,
                               activation="sigmoid",
                               kernel_regularizer=keras.regularizers.l2(
                                   l2_regularization_beta))(X)
        self.layer_list.append(X)

        # define model
        pre_net = models.Model(inputs=[Inputs], outputs=[X])
        pre_net.summary()

        # Make Parameters of first model untrainable
        for layer in pre_net.layers:
            layer.trainable = False

        # build main net ==========================================================================
        number_of_neurons_per_layer = self.architecture["prenet_layer"]

        # Create Input/conc layer for second NN
        conc_layer = keras.layers.concatenate(self.layer_list, axis=-1)
        Y = conc_layer

        # loop over dense layers
        for i, nNeurons in enumerate(number_of_neurons_per_layer):
            Y = keras.layers.Dense(nNeurons,
                                   activation=activation_function,
                                   kernel_regularizer=keras.regularizers.l2(
                                       l2_regularization_beta),
                                   name="Dense_main_" + str(i))(Y)

            if dropout != 1:
                Y = keras.layers.Dropout(dropout)(Y)

            if batchNorm:
                Y = keras.layers.BatchNormalization()(Y)

        # generate output layer
        Y = keras.layers.Dense(
            self.data.n_output_neurons,
            activation="softmax",
            kernel_regularizer=keras.regularizers.l2(l2_regularization_beta),
            name="output")(Y)

        # define model
        main_net = models.Model(inputs=[Inputs], outputs=[Y])
        main_net.summary()

        return pre_net, main_net
def unet_mini(input_shape, n_class, n_filter=32, one_hot=False):
    img_input = layers.Input(shape=input_shape)
    conv1 = layers.Conv2D(n_filter, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(img_input)
    conv1 = layers.Dropout(DROPOUT)(conv1)
    conv1 = layers.Conv2D(n_filter, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(conv1)
    pool1 = layers.MaxPooling2D((2, 2))(conv1)

    conv2 = layers.Conv2D(n_filter * 2, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(pool1)
    conv2 = layers.Dropout(DROPOUT)(conv2)
    conv2 = layers.Conv2D(n_filter * 2, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(conv2)
    pool2 = layers.MaxPooling2D((2, 2))(conv2)

    conv3 = layers.Conv2D(n_filter * 4, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(pool2)
    conv3 = layers.Dropout(DROPOUT)(conv3)
    conv3 = layers.Conv2D(n_filter * 4, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(conv3)

    up1 = layers.concatenate([layers.UpSampling2D((2, 2))(conv3), conv2], axis=-1)
    conv4 = layers.Conv2D(n_filter * 2, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(up1)
    conv4 = layers.Dropout(DROPOUT)(conv4)
    conv4 = layers.Conv2D(n_filter * 2, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(conv4)

    up2 = layers.concatenate([layers.UpSampling2D((2, 2))(conv4), conv1], axis=-1)
    conv5 = layers.Conv2D(n_filter, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(up2)
    conv5 = layers.Dropout(DROPOUT)(conv5)
    conv5 = layers.Conv2D(n_filter, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(conv5)

    x = layers.Conv2D(n_class, (1, 1), padding='same',
                      kernel_initializer=KERNEL_INITIALIZER,
                      kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                     )(conv5)
    if n_class == 1 or (n_class == 2 and one_hot is False):
        x = layers.Activation('sigmoid')(x)
    else:
        x = layers.Activation('softmax')(x)
    return models.Model(img_input, x)
def unet(input_shape=(128, 128, 4), n_class=1, num_filters=64, one_hot=False):
    img_input = layers.Input(shape=input_shape)
    conv1 = layers.Conv2D(num_filters * 1, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(img_input)
    conv1 = layers.Dropout(DROPOUT)(conv1)
    conv1 = layers.Conv2D(num_filters * 1, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(conv1)
    pool1 = layers.MaxPooling2D((2, 2))(conv1)

    conv2 = layers.Conv2D(num_filters * 2, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(pool1)
    conv2 = layers.Dropout(DROPOUT)(conv2)
    conv2 = layers.Conv2D(num_filters * 2, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(conv2)
    pool2 = layers.MaxPooling2D((2, 2))(conv2)

    conv3 = layers.Conv2D(num_filters * 4, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(pool2)
    conv3 = layers.Dropout(DROPOUT)(conv3)
    conv3 = layers.Conv2D(num_filters * 4, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(conv3)
    pool3 = layers.MaxPooling2D((2, 2))(conv3)

    conv4 = layers.Conv2D(num_filters * 8, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(pool3)
    conv4 = layers.Dropout(DROPOUT)(conv4)
    conv4 = layers.Conv2D(num_filters * 8, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(conv4)
    pool4 = layers.MaxPooling2D((2, 2))(conv4)

    conv5 = layers.Conv2D(num_filters * 16, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(pool4)
    conv5 = layers.Dropout(DROPOUT)(conv5)
    conv5 = layers.Conv2D(num_filters * 16, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(conv5)

    up1 = layers.concatenate([layers.UpSampling2D((2, 2))(conv5), conv4], axis=-1)
    conv6 = layers.Conv2D(num_filters * 8, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(up1)
    conv6 = layers.Dropout(DROPOUT)(conv6)
    conv6 = layers.Conv2D(num_filters * 8, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(conv6)

    up2 = layers.concatenate([layers.UpSampling2D((2, 2))(conv6), conv3], axis=-1)
    conv7 = layers.Conv2D(num_filters * 4, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(up2)
    conv7 = layers.Dropout(DROPOUT)(conv7)
    conv7 = layers.Conv2D(num_filters * 4, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(conv7)

    up3 = layers.concatenate([layers.UpSampling2D((2, 2))(conv7), conv2], axis=-1)
    conv8 = layers.Conv2D(num_filters * 2, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(up3)
    conv8 = layers.Dropout(DROPOUT)(conv8)
    conv8 = layers.Conv2D(num_filters * 2, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(conv8)

    up4 = layers.concatenate([layers.UpSampling2D((2, 2))(conv8), conv1], axis=-1)
    conv9 = layers.Conv2D(num_filters * 1, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(up4)
    conv9 = layers.Dropout(DROPOUT)(conv9)
    conv9 = layers.Conv2D(num_filters * 1, (3, 3), activation='relu', padding='same',
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(conv9)

    x = layers.Conv2D(n_class, (1, 1), padding='same', use_bias=False, activation=None,
                      kernel_initializer=KERNEL_INITIALIZER,
                      kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                     )(conv9)
    if n_class == 1 or (n_class == 2 and one_hot is False):
        x = layers.Activation('sigmoid')(x)
    else:
        x = layers.Activation('softmax')(x)
    return models.Model(img_input, x)
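
A quick smoke test for the plain U-Net above (reusing the placeholder constants sketched after resunet_lstm earlier):

model = unet(input_shape=(128, 128, 4), n_class=3, one_hot=True)
model.summary()  # ends in a 128 x 128 x 3 softmax map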
def main():
    np.set_printoptions(threshold=sys.maxsize)  # print arrays in full (assumes import sys)

    number_of_classes = 3
    input_shape = (64, 64, 1)

    x = layers.Input(shape=input_shape)
    '''
    Inputs to the model are MRI images which are down-sampled
    to 64 × 64 from 512 × 512, in order to reduce the number of
    parameters in the model and decrease the training time.
    The first layer is a convolutional layer with 64 × 9 × 9 filters
    and a stride of 1, which leads to 64 feature maps of size 56 × 56.
    '''
    conv1 = layers.Conv2D(64, (9, 9), activation='relu', name="FirstLayer")(x)
    '''
    The second layer is a Primary Capsule layer resulting from
    256×9×9 convolutions with strides of 2.
    '''
    primaryCaps = PrimaryCap(inputs=conv1,
                             dim_capsule=8,
                             n_channels=32,
                             kernel_size=9,
                             strides=2,
                             padding='valid')
    '''
    The final capsule layer includes 3 capsules, referred to as "Class
    Capsules," one for each type of candidate brain tumor. The
    dimension of these capsules is 16.
    '''
    capLayer2 = CapsuleLayer(num_capsule=3,
                             dim_capsule=16,
                             routings=2,
                             name="ThirdLayer")(primaryCaps)

    # Layer 4: This is an auxiliary layer to replace each capsule with its
    # length. Just to match the true label's shape.
    # If using tensorflow, this will not be necessary. :)
    out_caps = Length(name='capsnet')(capLayer2)

    # Decoder network.
    y = layers.Input(shape=(number_of_classes, ))
    # The true label is used to mask the output of capsule layer. For training
    masked_by_y = Mask()([capLayer2, y])
    # Mask using the capsule with maximal length. For prediction
    masked = Mask()(capLayer2)

    # Shared Decoder model in training and prediction
    decoder = models.Sequential(name='decoder')
    decoder.add(
        layers.Dense(512, activation='relu', input_dim=16 * number_of_classes))
    decoder.add(layers.Dense(1024, activation='relu'))
    decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
    decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))

    # Models for training and evaluation (prediction)
    train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])

    train_data_directory = 'train/'
    validation_data_directory = 'test/'
    bsize = 32

    train_generator = create_generator(train_data_directory, batch_size=bsize)

    validation_generator = create_generator(validation_data_directory,
                                            batch_size=bsize)

    train_model.summary()  # summary() prints itself and returns None

    train_model.compile(optimizer="rmsprop", loss='mse', metrics=['accuracy'])

    hst = train_model.fit_generator(train_generator,
                                    steps_per_epoch=72,
                                    epochs=8,
                                    validation_data=validation_generator,
                                    validation_steps=24,
                                    verbose=1).history

    train_model.save('Test.h5')
Example #16
input2 = layers.Input(shape=(3, ))

l = layers.Dense(1, kernel_initializer=initializers.RandomUniform(0, 1))
rel = layers.Lambda(lambda x: K.reverse(x, axes=0))
# rel2 = layers.Lambda(lambda x: K.map_fn(...))  # incomplete in the source and unused below
x1 = l(input1)
x2 = l(input2)
x3 = rel(x2)

#model0 = models.Model(inputs = input1, outputs = x1)
#model0.compile(loss = losses.binary_crossentropy , optimizer = optimizers.Adam())
#res = model0.predict(x)

merged = layers.Subtract()([x2, x1])

model1 = models.Model(inputs=[input1, input2], outputs=[x1, x2])
model1.compile(loss=losses.binary_crossentropy, optimizer=optimizers.Adam())
res1 = model1.predict([x, xx])

model2 = models.Model(inputs=[input1, input2], outputs=merged)
model2.compile(loss=losses.binary_crossentropy, optimizer=optimizers.Adam())
res2 = model2.predict([x, xx])

model3 = models.Model(inputs=[input1, input2], outputs=[merged, x3])
model3.compile(loss=losses.binary_crossentropy, optimizer=optimizers.Adam())
res3 = model3.predict([x, xx])
print(res3[0])
print()
print(res3[1])
print()
print("-" * 30)
Example #17
inputLayer = layers.Input(shape=(41,41,1))

# add the layers
n_layers = num_layers
for i in range(n_layers):
    if i == 0:
        x = layers.Conv2D(64,(3,3),activation='relu',padding='same',kernel_initializer='he_normal')(inputLayer)
    elif i == n_layers-1:
        x = layers.Conv2D(1,(3,3),activation='linear',padding='same',kernel_initializer='he_normal')(x)
    else:
        x = layers.Conv2D(64,(3,3),activation='relu',padding='same',kernel_initializer='he_normal')(x)

# sum the final and input layers for residual learning
# layer_out = layers.add([x, inputLayer])
layer_out = x
model = models.Model(inputs=inputLayer,outputs=layer_out)
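
A hedged training setup for this DnCNN-style network (optimizer, loss, and the patch arrays are assumptions; with the residual layers.add line commented out, the targets are the clean patches themselves rather than the noise):

model.compile(optimizer='adam', loss='mse')
# model.fit(noisy_patches, clean_patches, batch_size=128, epochs=50)  # hypothetical data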

'''
##model = models.Sequential()
### Define a dummy layer to get access to the input
##model.add(layers.Lambda(lambda x: x,name='input',input_shape=(3840,2160,3)))
### Define the input layer
##model.add(layers.Conv2D(64,(3,3),activation='relu',padding='same',kernel_regularizer=l2(0.0001),use_bias=False))
### Add the other layers
##n_layers = 5
##for i in range(n_layers):
##    model.add(layers.Conv2D(64,(3,3),activation='relu',padding='same',kernel_regularizer=l2(0.0001),use_bias=False))
### Output layer with a single filter
##model.add(layers.Conv2D(3,(3,3),activation='relu',padding='same',kernel_regularizer=l2(0.0001),use_bias=False,name='output'))
##the_input = model.get_layer(index = 0)
##the_output = model.get_layer(index = len(model.layers) -1)
Example #18
def run_segmentation(X_train,
                     y_train,
                     X_val,
                     y_val,
                     X_test,
                     y_test,
                     network_func,
                     n_classes=30,
                     img_shape=(128, 128, 3),
                     optimizer='adam',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'],
                     callbacks=None,
                     batch_size=32,
                     steps_per_epoch=500,
                     num_epochs=30,
                     validation_steps=65,
                     path_save_weights=None,
                     path_save_pred_images=None,
                     num_save_pred_image=None,
                     dropout=False):

    #     import tensorflow as tf
    #     from keras import backend as K

    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.90

    train_iter = batch_generator.Batch_generator(X_train,
                                                 target=y_train,
                                                 image_shape=img_shape,
                                                 num_classes=n_classes,
                                                 batch_size=batch_size,
                                                 segment=True)
    val_iter = batch_generator.Batch_generator(X_val,
                                               target=y_val,
                                               image_shape=img_shape,
                                               num_classes=n_classes,
                                               batch_size=batch_size,
                                               segment=True)

    old_session = KTF.get_session()

    # with tf.Graph().as_default():
    session = tf.Session(config=config)
    KTF.set_session(session)

    # if using dropout (a situation when networks would be different between training and testing phase)
    if dropout:
        KTF.set_learning_phase(1)

    inputs = layers.Input(img_shape)
    output_layer = network_func(inputs, n_classes)
    model = models.Model(inputs, output_layer)
    model.summary()
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    model.fit_generator(train_iter,
                        steps_per_epoch=steps_per_epoch,
                        epochs=num_epochs,
                        verbose=1,
                        validation_data=val_iter,
                        validation_steps=validation_steps,
                        callbacks=callbacks,
                        workers=2)

    # save model
    # model.save('./test_log/models/fcn_model.hdf5')
    if path_save_weights is not None:
        try:
            model.save_weights(path_save_weights)
        except Exception:
            print("Path to save weights data is not valid.")

    # predicting and calculate IoU
    print("\n")
    print("=====" * 15)
    print("\n")
    print("Testing model...")
    start = time()
    # extract data
    test = np.vstack([
        np.expand_dims(misc.imresize(misc.imread(t), img_shape), axis=0)
        for t in X_test
    ])

    pred = model_helper.prediction(model, test)  # predicted data
    target = pre.pixelwise_class_array(y_test)  # ground truths

    iou = model_helper.ave_iou_score(pred, target)
    end = time()
    print("\n")
    print("IoU score    : {:.6f}".format(iou))
    print("Calcuration time : {:.6f} sec.".format(end - start))

    # Save predicted image
    if path_save_pred_images is not None:
        print("\n")
        print("=====" * 15)
        print("\n")
        print("Saving predict image...")

        path_save_pred_images = os.path.join(path_save_pred_images,
                                             'predictions')

        # create directory
        pre.makedirs_if_none(path_save_pred_images)

        # reduce save data if need
        if num_save_pred_image is not None:
            pred = pred[:num_save_pred_image]
            X_test = X_test[:num_save_pred_image]

        # convert from class array to image(rgb) array
        pred = pre.pixelwise_array_to_img(pred)

        # Save data
        for img, file_path in zip(pred, X_test):
            misc.imsave(
                os.path.join(path_save_pred_images,
                             os.path.basename(file_path)), img)

        print("Done.")

    # close current session
    KTF.set_session(old_session)
Example #19
File: cpg.py  Project: vreuter/deepcpg
    def _replicate_model(self, input):
        w_reg = kr.l1_l2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Dense(256, kernel_initializer=self.init, kernel_regularizer=w_reg)(input)
        x = kl.Activation(self.act_replicate)(x)

        return km.Model(input, x)
Example #20
def run_cnn(X_train,
            X_val,
            X_test,
            network_func,
            n_classes=30,
            img_shape=(128, 128, 3),
            optimizer='adam',
            loss='categorical_crossentropy',
            metrics=['accuracy'],
            callbacks=None,
            batch_size=32,
            steps_per_epoch=64,
            num_epochs=300,
            validation_steps=50,
            path_save_weights=None,
            path_plot_model=None,
            dropout=False):

    train_iter = batch_generator.Batch_generator(X_train,
                                                 image_shape=img_shape,
                                                 num_classes=n_classes,
                                                 batch_size=batch_size,
                                                 segment=False)
    val_iter = batch_generator.Batch_generator(X_val,
                                               image_shape=img_shape,
                                               num_classes=n_classes,
                                               batch_size=batch_size,
                                               segment=False)

    old_session = KTF.get_session()

    # with tf.Graph().as_default():
    session = tf.Session()
    KTF.set_session(session)

    # if using dropout (a situation when networks would be different between training and testing phase)
    if dropout:
        KTF.set_learning_phase(1)

    inputs = layers.Input(img_shape)
    output_layer = network_func(inputs, n_classes)
    model = models.Model(inputs, output_layer)
    model.summary()
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    history = model.fit_generator(train_iter,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=num_epochs,
                                  verbose=1,
                                  validation_data=val_iter,
                                  validation_steps=validation_steps,
                                  callbacks=callbacks,
                                  workers=2)

    # save model
    # model.save('./test_log/models/fcn_model.hdf5')
    if path_save_weights is not None:
        try:
            model.save_weights(path_save_weights)
        except Exception:
            print("Could not save weights data.")

    if path_plot_model is not None:
        try:
            plot_model(model, to_file=path_plot_model)
        except Exception:
            print("Could not plot model properly.")

    # test the prediction
    print("\n")
    print("Testing model...")
    score = model_helper.predict_from_path(model, X_test, img_shape, n_classes)
    print("\n")
    print("=====" * 13)
    print("\n")
    print("Test score    : {:.6f}".format(score[0]))
    print("Test accuracy : {:.6f}\n".format(score[1]))

    KTF.set_session(old_session)
Example #21
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# Check the correctness of the shapes
print("train size: ", train_images.shape, " train label size: ", train_labels.shape)
print("test size: ", test_images.shape, " test label size: ", test_labels.shape)

# Define the network architecture
from keras import models
from keras import layers

# The Forward Pass
input_tensor = layers.Input(shape=(784,))
x = layers.Dense(32, activation='relu')(input_tensor)
out_tensor = layers.Dense(10, activation='softmax')(x)

net = models.Model(inputs=input_tensor, outputs=out_tensor)

# The Backward pass
net.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])  # Add stuff for backpropagation

# Prepare the train data
train_images = train_images.reshape(60000, 28*28)
train_images = train_images.astype('float32')/255

# Prepare the labels
from keras.utils import to_categorical

train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)

# The Train command in Keras: this runs the forward and backward passes
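# The listing is truncated here; a minimal completion (hyper-parameters are
# assumed, not from the source):
test_images = test_images.reshape(10000, 28*28).astype('float32')/255
net.fit(train_images, train_labels, epochs=5, batch_size=128)
test_loss, test_acc = net.evaluate(test_images, test_labels)
print("test accuracy:", test_acc)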
Example #22
    def build_model(self):
        """Build an actor (policy) network that maps states -> actions."""
        # Define input layer (states)
        states = layers.Input(shape=(self.state_size, ), name='states')

        # Add hidden layers
        #         original
        #         net = layers.Dense(units=32, activation='relu')(states)
        #         net = layers.Dense(units=64, activation='relu')(net)
        #         net = layers.Dense(units=32, activation='relu')(net)

        net = layers.Dense(units=64,
                           kernel_regularizer=regularizers.l2(0.01),
                           activity_regularizer=regularizers.l2(0.01))(states)
        net = layers.BatchNormalization()(net)
        net = layers.Activation(activation='relu')(net)
        net = layers.Dropout(0.3)(net)

        net = layers.Dense(units=128,
                           kernel_regularizer=regularizers.l2(0.01),
                           activity_regularizer=regularizers.l2(0.01))(net)
        net = layers.BatchNormalization()(net)
        net = layers.Activation(activation='relu')(net)
        net = layers.Dropout(0.3)(net)

        net = layers.Dense(units=256,
                           kernel_regularizer=regularizers.l2(0.01),
                           activity_regularizer=regularizers.l2(0.01))(net)
        net = layers.BatchNormalization()(net)
        net = layers.Activation(activation='relu')(net)
        net = layers.Dropout(0.3)(net)

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.

        # Add final output layer with sigmoid activation
        #         original
        #         raw_actions = layers.Dense(units=self.action_size, activation='sigmoid',
        #             name='raw_actions')(net)
        raw_actions = layers.Dense(units=self.action_size,
                                   activity_regularizer=regularizers.l2(0.01),
                                   activation='sigmoid',
                                   name='raw_actions')(net)

        # Scale [0, 1] output for each action dimension to proper range
        actions = layers.Lambda(lambda x:
                                (x * self.action_range) + self.action_low,
                                name='actions')(raw_actions)

        # Create Keras model
        self.model = models.Model(inputs=states, outputs=actions)

        # Define loss function using action value (Q value) gradients
        action_gradients = layers.Input(shape=(self.action_size, ))
        loss = K.mean(-action_gradients * actions)

        # Incorporate any additional losses here (e.g. from regularizers)

        # Define optimizer and training function
        optimizer = optimizers.Adam()
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[],
            updates=updates_op)
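
# A hedged usage note: the custom training function above is called with a batch
# of states, the Q-value gradients w.r.t. the actions, and the Keras learning
# phase (the names below are illustrative, not from the source):
#
#   self.train_fn([states_batch, action_grads_batch, 1])  # 1 = training phase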
Example #23
def CapsNet(input_shape, n_class, routings, if_eval_manipulate):
    """
    A Capsule Network on MNIST.
    :param input_shape: data shape, 3d, [width, height, channels]
    :param n_class: number of classes
    :param routings: number of routing iterations
    :return: Two Keras Models, the first one used for training, and the second one for evaluation.
            `eval_model` can also be used for training.
    """
    x = layers.Input(shape=input_shape)

    # Layer 1: Just a conventional Conv2D layer
    conv1 = layers.Conv2D(filters=256,
                          kernel_size=9,
                          strides=1,
                          padding='valid',
                          activation='relu',
                          name='conv1')(x)

    # Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]
    primarycaps = PrimaryCap(conv1,
                             dim_capsule=8,
                             n_channels=32,
                             kernel_size=9,
                             strides=2,
                             padding='valid')

    # Layer 3: Capsule layer. Routing algorithm works here.
    digitcaps = CapsuleLayer(num_capsule=n_class,
                             dim_capsule=16,
                             routings=routings,
                             name='digitcaps')(primarycaps)

    # Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
    # If using tensorflow, this will not be necessary. :)
    out_caps = Length(name='capsnet')(digitcaps)

    # Decoder network.
    y = layers.Input(shape=(n_class, ))
    masked_by_y = Mask()(
        [digitcaps, y]
    )  # The true label is used to mask the output of capsule layer. For training
    masked = Mask(
    )(digitcaps)  # Mask using the capsule with maximal length. For prediction

    # Shared Decoder model in training and prediction
    decoder = models.Sequential(name='decoder')
    decoder.add(layers.Dense(512, activation='relu', input_dim=16 * n_class))
    decoder.add(layers.Dense(1024, activation='relu'))
    decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))
    decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))

    # Models for training and evaluation (prediction)
    if if_eval_manipulate:
        train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])
        eval_model = models.Model(x, [out_caps, decoder(masked)])

        # manipulate model
        noise = layers.Input(shape=(n_class, 16))
        noised_digitcaps = layers.Add()([digitcaps, noise])
        masked_noised_y = Mask()([noised_digitcaps, y])
        manipulate_model = models.Model([x, y, noise],
                                        decoder(masked_noised_y))
        return train_model, eval_model, manipulate_model
    else:
        train_model = models.Model([x], [out_caps])
        return train_model
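
# A hedged usage sketch (hyper-parameters are illustrative; the margin loss from
# the CapsNet paper is not part of this snippet, so placeholder losses are shown):
#
#   train_model, eval_model, manipulate_model = CapsNet(
#       input_shape=(28, 28, 1), n_class=10, routings=3, if_eval_manipulate=True)
#   train_model.compile(optimizer='adam', loss=['mse', 'mse'],
#                       loss_weights=[1.0, 0.392])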
Example #24
        decoder.add(layers.Dense(128, activation='relu', input_dim=4*n_class))
        decoder.add(layers.Dense(np.prod(input_shape)//96, activation='relu'))
        decoder.add(layers.Reshape(target_shape=(52//RzFaktor, Rx//12, Ry//8, 3), name='out_recon_unupsized1'))
        decoder.add(layers.BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99))
        decoder.add(layers.UpSampling3D(size=(1, 2, 2)))
        decoder.add(layers.Conv3D(filters=16, kernel_size=(3, 3, 3), strides=1, padding='same', activation='relu', name='conv3Dout0'))
        decoder.add(layers.BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99))
        decoder.add(layers.UpSampling3D(size=(1, 2, 3)))
        decoder.add(layers.Conv3D(filters=8, kernel_size=(3, 3, 3), strides=1, padding='same', activation='relu', name='conv3Dout1'))
        decoder.add(layers.BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99))
        decoder.add(layers.UpSampling3D(size=(1, 2, 2)))
        decoder.add(layers.Conv3D(filters=3, kernel_size=(3, 3, 3), strides=1, padding='same', activation='sigmoid', name='conv3Dout2'))


        # Models for training and evaluation (prediction)
        model = models.Model([x, y], [out_caps, decoder(masked_by_y)])
        eval_model = models.Model(x, [out_caps, decoder(masked)])

        # manipulate model
        noise = layers.Input(shape=(n_class, 4))
        noised_digitcaps = layers.Add()([digitcaps, noise])
        masked_noised_y = Mask()([noised_digitcaps, y])
        manipulate_model = models.Model([x, y, noise], decoder(masked_noised_y))



        model.summary()


        namafilenya = './result/' + '_' + scenario_name  + '_validasiK_' + str(k_val) + '_' + 'weights.h5'
        tb = callbacks.TensorBoard(log_dir='./sken4', batch_size=2, histogram_freq=0, write_graph=True,
Example #25
def VGGnet(input_shape, n_class):
    """
    
    :param input_shape: data shape, 3d, [width, height, channels]
    :param n_class: number of classes
    
    :return:  Keras Model used for training
    """
#    x = layers.Input(shape=input_shape)

    # Backbone: a pre-trained VGG16 (with batch norm) feature extractor
    inputs_depth = layers.Input(shape=input_shape)
    pre_trained_model = VGG16_BN(input_tensor=None, input_shape=input_shape, conv_dropout=0.1, activation='relu')
    conv_model_depth = pre_trained_model(inputs_depth)
    
    flat_model = layers.Flatten(name='flatten')(conv_model_depth)
    fc6 = layers.Dense(1024, activation='relu', name='fc6')(flat_model)
    bn_1 = layers.BatchNormalization(name='1_bn')(fc6)
    dropout_1 = layers.Dropout(0.2)(bn_1)

#    flatten_concat = layers.Flatten(name='flatten_concat')(merge_rgb_depth)
#    fc6 = layers.Dense(2048, activation='relu', name='fc6')(merge_rgb_depth)
    fc7 = layers.Dense(512, activation='relu', name='fc7')(dropout_1)
    bn_2 = layers.BatchNormalization(name='2_bn')(fc7)
    dropout_2 = layers.Dropout(0.2)(bn_2)
    
    # vectorize the output
    output = layers.Dense(20, activation='softmax', name='output')(dropout_2)

    # Model: [inputs, outputs]
    train_model = models.Model(inputs_depth, output)
    train_model.summary()
    weights_path = 'D:/tutorial/rgb+depth+thermal/allData/vgg_1layerblk_pandora_100-100/weights-37.h5'
    train_model.load_weights(weights_path)
    
    final_layer = train_model.layers[-2].output
    
    
    
#    fc6_final = layers.Dense(512, activation='relu', name='fc6_d')(final_layer)
#    dropout_1_final = layers.Dropout(0.2)(fc6_final)
#    
#    fc7_final = layers.Dense(512, activation='relu', name='fc7_final')(dropout_1_final)
#    dropout_2_final = layers.Dropout(0.2)(fc7_final)

#    
    output_final = layers.Dense(n_class, activation='softmax', name='output_final')(final_layer)

    new_train_model = models.Model(inputs_depth, output_final)
#    for layer in new_train_model.layers[:-8]:
#        layer.trainable = False
#    for layer in new_train_model.layers[1].layers[:-10]:
#        layer.trainable = False



    return new_train_model
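
# A hedged usage sketch (input shape and class count are illustrative;
# VGG16_BN and the pre-trained weights path come from elsewhere in the source):
#
#   model = VGGnet(input_shape=(224, 224, 3), n_class=5)
#   model.compile(optimizer='adam', loss='categorical_crossentropy',
#                 metrics=['accuracy'])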
Example #26
    args.add_argument('--strmaxlen', type=int, default=150)
    args.add_argument('--embedding', type=int, default=256)
    config = args.parse_args()

    inputs = layers.Input((config.strmaxlen,))
    layer = layers.Embedding(251, config.embedding, input_length=config.strmaxlen)(inputs)
    layer = layers.Bidirectional(layers.CuDNNGRU(512, return_sequences=True))(layer)
    layer = layers.Bidirectional(layers.CuDNNGRU(512, return_sequences=False))(layer)

    layer1 = layers.Dense(3)(layer)
    outputs1 = layers.Activation('softmax')(layer1)

    layer2 = layers.Dense(1)(layer1)
    outputs2 = layers.Activation('sigmoid')(layer2)
    outputs2 = layers.Lambda(lambda layer: layer * 9 + 1)(outputs2)  # map the sigmoid output from [0, 1] to a 1-10 score
    model = models.Model(inputs=inputs, outputs=[outputs1, outputs2])
    model.summary()
    model.compile(optimizer=optimizers.Adam(lr=0.001, amsgrad=True, clipvalue=1.0), loss=['categorical_crossentropy', 'mse'], metrics=['accuracy'])
    
    # DONOTCHANGE: Reserved for nsml use
    bind_model(model)

    # DONOTCHANGE: They are reserved for nsml
    if config.pause:
        nsml.paused(scope=locals())


    # Used in training mode (the default).
    if config.mode == 'train':
        # Load the dataset.
        dataset = MovieReviewDataset(DATASET_PATH, config.strmaxlen)
Example #27
def ResNet(model_input, learningRate):
    """
    ResNeXt by default. For ResNet set `cardinality` = 1 above.
    
    """
    def add_common_layers(y):
        y = layers.BatchNormalization()(y)
        y = layers.LeakyReLU()(y)

        return y

    def grouped_convolution(y, nb_channels, _strides):
        # when `cardinality` == 1 this is just a standard convolution
        if cardinality == 1:
            return layers.Conv2D(nb_channels,
                                 kernel_size=(3, 3),
                                 strides=_strides,
                                 padding='same')(y)

        assert not nb_channels % cardinality
        _d = nb_channels // cardinality

        # in a grouped convolution layer, input and output channels are divided into `cardinality` groups,
        # and convolutions are separately performed within each group
        groups = []
        for j in range(cardinality):
            group = layers.Lambda(lambda z: z[:, :, :, j * _d:j * _d + _d])(y)
            groups.append(
                layers.Conv2D(_d,
                              kernel_size=(3, 3),
                              strides=_strides,
                              padding='same')(group))

        # the grouped convolutional layer concatenates them as the outputs of the layer
        y = layers.concatenate(groups)

        return y

    def residual_block(y,
                       nb_channels_in,
                       nb_channels_out,
                       _strides=(1, 1),
                       _project_shortcut=False):
        """
        Our network consists of a stack of residual blocks. These blocks have the same topology,
        and are subject to two simple rules:

        - If producing spatial maps of the same size, the blocks share the same hyper-parameters (width and filter sizes).
        - Each time the spatial map is down-sampled by a factor of 2, the width of the blocks is multiplied by a factor of 2.
        """
        shortcut = y

        # we modify the residual building block as a bottleneck design to make the network more economical
        y = layers.Conv2D(nb_channels_in,
                          kernel_size=(1, 1),
                          strides=(1, 1),
                          padding='same')(y)
        y = add_common_layers(y)

        # ResNeXt (identical to ResNet when `cardinality` == 1)
        y = grouped_convolution(y, nb_channels_in, _strides=_strides)
        y = add_common_layers(y)

        y = layers.Conv2D(nb_channels_out,
                          kernel_size=(1, 1),
                          strides=(1, 1),
                          padding='same')(y)
        # batch normalization is employed after aggregating the transformations and before adding to the shortcut
        y = layers.BatchNormalization()(y)

        # identity shortcuts used directly when the input and output are of the same dimensions
        if _project_shortcut or _strides != (1, 1):
            # when the dimensions increase projection shortcut is used to match dimensions (done by 1×1 convolutions)
            # when the shortcuts go across feature maps of two sizes, they are performed with a stride of 2
            shortcut = layers.Conv2D(nb_channels_out,
                                     kernel_size=(1, 1),
                                     strides=_strides,
                                     padding='same')(shortcut)
            shortcut = layers.BatchNormalization()(shortcut)

        y = layers.add([shortcut, y])

        # relu is performed right after each batch normalization,
        # except for the output of the block, where relu is performed after adding to the shortcut
        y = layers.LeakyReLU()(y)

        return y

    # conv1
    x = layers.Conv2D(64, kernel_size=(7, 7), strides=(2, 2),
                      padding='same')(model_input)
    x = add_common_layers(x)

    # conv2
    x = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
    for i in range(3):
        project_shortcut = (i == 0)
        x = residual_block(x, 128, 256, _project_shortcut=project_shortcut)

    # conv3
    for i in range(4):
        # down-sampling is performed by conv3_1, conv4_1, and conv5_1 with a stride of 2
        strides = (2, 2) if i == 0 else (1, 1)
        x = residual_block(x, 256, 512, _strides=strides)

    # conv4
    for i in range(6):
        strides = (2, 2) if i == 0 else (1, 1)
        x = residual_block(x, 512, 1024, _strides=strides)

    # conv5
    for i in range(3):
        strides = (2, 2) if i == 0 else (1, 1)
        x = residual_block(x, 1024, 2048, _strides=strides)

    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(1)(x)

    model = models.Model(model_input, x, name='ResNet')

    model.summary()
    # the single linear output unit above calls for a regression objective;
    # categorical_crossentropy would require a softmax over the classes
    model.compile(optimizer=optimizers.RMSprop(lr=learningRate),
                  loss=losses.mean_squared_error,
                  metrics=[metrics.mae])

    return model


# image_tensor = layers.Input(shape=(img_height, img_width, img_channels))
# model = ResNet(image_tensor, learningRate=1e-4)  # ResNet() already builds and compiles the Model
# print(model.summary())
Example #28
# (the listing starts mid-model here; in the reference Xception, block14_sepconv1
#  is a SeparableConv2D with 1536 filters)
x = layers.SeparableConv2D(1536, (3, 3),
                           padding='same',
                           use_bias=False,
                           name='block14_sepconv1')(x)
x = layers.BatchNormalization(name='block14_sepconv1_bn')(x)
x = layers.Activation('relu', name='block14_sepconv1_act')(x)

x = layers.SeparableConv2D(2048, (3, 3),
                           padding='same',
                           use_bias=False,
                           name='block14_sepconv2')(x)
x = layers.BatchNormalization(name='block14_sepconv2_bn')(x)
x = layers.Activation('relu', name='block14_sepconv2_act')(x)

x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
x = layers.Dense(classes, activation='softmax', name='predictions')(x)
model = models.Model(img_input, x, name='xception')

model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])
#tensorboard = TensorBoard(log_dir=r'E:\sansu\python\tensorboard')
#es=EarlyStopping(monitor='val_acc', patience=2, verbose=0,min_delta=0.002,mode='max')
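# The fit call below assumes `train_data`/`test_data` generators defined earlier
# in the source; a minimal sketch of how they might be built (paths and target
# size are illustrative):
#
#   from keras.preprocessing.image import ImageDataGenerator
#   gen = ImageDataGenerator(rescale=1. / 255)
#   train_data = gen.flow_from_directory('data/train', target_size=(299, 299),
#                                        batch_size=train_batch_size)
#   test_data = gen.flow_from_directory('data/test', target_size=(299, 299),
#                                       batch_size=test_batch_size)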
model.fit_generator(train_data,
                    steps_per_epoch=(train_data.samples // train_batch_size),
                    epochs=40,
                    validation_data=test_data,
                    validation_steps=(test_data.samples // test_batch_size))

#base_model=Xception(weights='imagenet',include_top=False,input_shape=(224, 224,3))
# model = base_model.output
# model = Flatten(name='flatten')(model)
Example #29
features = features.astype('float64')
scale = lambda e: emotionString.index(e)
emotions = np.array([scale(e) for e in emotions], dtype='uint8')

numberClasses = 7
emotionsCategorical = np_utils.to_categorical(emotions, numberClasses)

hiddenSize = 1024
inputShapeSize = 67
inputLayer = layers.Input(shape=(inputShapeSize, ))
hidden1 = layers.Dense(hiddenSize, activation='relu')(inputLayer)
hidden2 = layers.Dense(hiddenSize, activation='relu')(hidden1)
hidden3 = layers.Dense(hiddenSize, activation='relu')(hidden2)
outLayer = layers.Dense(numberClasses, activation='softmax')(hidden3)
model = models.Model(inputs=inputLayer, outputs=outLayer)

# An equivalent Sequential version (this replaces the functional model above)
model = models.Sequential()
model.add(
    layers.Dense(hiddenSize, activation='relu',
                 input_shape=(inputShapeSize, )))  # hidden1
model.add(layers.Dense(hiddenSize, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(hiddenSize, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(numberClasses, activation='softmax'))  # out layer

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
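
# A hedged training call using the arrays prepared above (epoch and batch
# values are illustrative, not from the source):
model.fit(features, emotionsCategorical, epochs=50, batch_size=128, validation_split=0.1)
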
def resunet(input_shape=(128, 128, 4), n_class=1, num_filters=64, one_hot=False):
    img_input = layers.Input(shape=input_shape)
    # 1/2
    conv1 = layers.Conv2D(num_filters * 1, (3, 3), activation=None, padding="same",
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(img_input)
    conv1 = residual_block(conv1, num_filters * 1)
    conv1 = residual_block(conv1, num_filters * 1, True)
    pool1 = layers.MaxPooling2D((2, 2))(conv1)
    pool1 = layers.Dropout(DROPOUT)(pool1)

    # 1/4
    conv2 = layers.Conv2D(num_filters * 2, (3, 3), activation=None, padding="same",
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(pool1)
    conv2 = residual_block(conv2, num_filters * 2)
    conv2 = residual_block(conv2, num_filters * 2, True)
    pool2 = layers.MaxPooling2D((2, 2))(conv2)
    pool2 = layers.Dropout(DROPOUT)(pool2)

    # 1/8
    conv3 = layers.Conv2D(num_filters * 4, (3, 3), activation=None, padding="same",
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(pool2)
    conv3 = residual_block(conv3, num_filters * 4)
    conv3 = residual_block(conv3, num_filters * 4, True)
    pool3 = layers.MaxPooling2D((2, 2))(conv3)
    pool3 = layers.Dropout(DROPOUT)(pool3)

    # 1/16
    conv4 = layers.Conv2D(num_filters * 8, (3, 3), activation=None, padding="same",
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(pool3)
    conv4 = residual_block(conv4, num_filters * 8)
    conv4 = residual_block(conv4, num_filters * 8, True)
    pool4 = layers.MaxPooling2D((2, 2))(conv4)
    pool4 = layers.Dropout(DROPOUT)(pool4)

    # Middle
    convm = layers.Conv2D(num_filters * 16, (3, 3), activation=None, padding="same",
                          kernel_initializer=KERNEL_INITIALIZER,
                          kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                          )(pool4)
    convm = residual_block(convm, num_filters * 16)
    convm = residual_block(convm, num_filters * 16, True)
    # convm = convolutional_block_attention_module(convm, hidden_size=num_filters, conv_size=7)

    # 1/8
    deconv4 = layers.UpSampling2D((2, 2), interpolation='bilinear')(convm)
    # uconv4 = my_global_attention_upsample(deconv4, conv4)
    uconv4 = layers.concatenate([deconv4, conv4])
    uconv4 = layers.Dropout(DROPOUT)(uconv4)

    uconv4 = layers.Conv2D(num_filters * 8, (3, 3), activation=None, padding="same",
                           kernel_initializer=KERNEL_INITIALIZER,
                           kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                           )(uconv4)
    uconv4 = residual_block(uconv4, num_filters * 8)
    uconv4 = residual_block(uconv4, num_filters * 8, True)

    # 1/4
    deconv3 = layers.UpSampling2D((2, 2), interpolation='bilinear')(uconv4)
    # uconv3 = my_global_attention_upsample(deconv3, conv3)
    uconv3 = layers.concatenate([deconv3, conv3])
    uconv3 = layers.Dropout(DROPOUT)(uconv3)

    uconv3 = layers.Conv2D(num_filters * 4, (3, 3), activation=None, padding="same",
                           kernel_initializer=KERNEL_INITIALIZER,
                           kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                           )(uconv3)
    uconv3 = residual_block(uconv3, num_filters * 4)
    uconv3 = residual_block(uconv3, num_filters * 4, True)

    # 1/2
    deconv2 = layers.UpSampling2D((2, 2), interpolation='bilinear')(uconv3)
    # uconv2 = my_global_attention_upsample(deconv2, conv2)
    uconv2 = layers.concatenate([deconv2, conv2])
    uconv2 = layers.Dropout(DROPOUT)(uconv2)

    uconv2 = layers.Conv2D(num_filters * 2, (3, 3), activation=None, padding="same",
                           kernel_initializer=KERNEL_INITIALIZER,
                           kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                           )(uconv2)
    uconv2 = residual_block(uconv2, num_filters * 2)
    uconv2 = residual_block(uconv2, num_filters * 2, True)

    # 1/1
    deconv1 = layers.UpSampling2D((2, 2), interpolation='bilinear')(uconv2)
    uconv1 = layers.concatenate([deconv1, conv1])
    # uconv1 = my_global_attention_upsample(deconv1, conv1)
    uconv1 = layers.Dropout(DROPOUT)(uconv1)

    uconv1 = layers.Conv2D(num_filters * 1, (3, 3), activation=None, padding="same",
                           kernel_initializer=KERNEL_INITIALIZER,
                           kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                           )(uconv1)
    uconv1 = residual_block(uconv1, num_filters * 1)
    uconv1 = residual_block(uconv1, num_filters * 1, True)

    x = layers.Conv2D(n_class, (1, 1), padding="same", activation=None, use_bias=False,
                      kernel_initializer=KERNEL_INITIALIZER,
                      kernel_regularizer=regularizers.l2(WEIGHT_DECAY)
                      )(uconv1)
    if n_class == 1 or (n_class == 2 and one_hot is False):
        x = layers.Activation('sigmoid')(x)
    else:
        x = layers.Activation('softmax')(x)
    return models.Model(img_input, x)
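
# A hedged usage sketch (KERNEL_INITIALIZER, WEIGHT_DECAY, DROPOUT and
# residual_block are module-level definitions elsewhere in this source;
# optimizer and loss below are illustrative, not from the source):
#
#   model = resunet(input_shape=(128, 128, 4), n_class=1)
#   model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])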