Example #1
def discriminator_model():
    # PatchGAN
    inputs = Input(shape=patch_shape)
    x = Convolution2D(filters=channel_rate,
                      kernel_size=(3, 3),
                      strides=(2, 2),
                      padding="same")(inputs)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)

    x = Convolution2D(filters=2 * channel_rate,
                      kernel_size=(3, 3),
                      strides=(2, 2),
                      padding="same")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)

    x = Convolution2D(filters=4 * channel_rate,
                      kernel_size=(3, 3),
                      strides=(2, 2),
                      padding="same")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)

    x = Convolution2D(filters=4 * channel_rate,
                      kernel_size=(3, 3),
                      strides=(2, 2),
                      padding="same")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)

    x = non_local_block(x, mode='embedded', compression=2)

    x = Flatten()(x)
    outputs = Dense(units=1, activation='sigmoid')(x)
    model = Model(inputs=inputs, outputs=outputs, name='PatchGAN')
    # model.summary()

    # discriminator
    inputs = Input(shape=image_shape)

    list_row_idx = [(i * channel_rate, (i + 1) * channel_rate)
                    for i in range(int(image_shape[0] / patch_shape[0]))]
    list_col_idx = [(i * channel_rate, (i + 1) * channel_rate)
                    for i in range(int(image_shape[1] / patch_shape[1]))]

    list_patch = []
    for row_idx in list_row_idx:
        for col_idx in list_col_idx:
            x_patch = Lambda(lambda z: z[:, row_idx[0]:row_idx[1], col_idx[0]:
                                         col_idx[1], :])(inputs)
            list_patch.append(x_patch)

    x = [model(patch) for patch in list_patch]
    outputs = Average()(x)
    model = Model(inputs=inputs, outputs=outputs, name='Discriminator')
    #model.summary()
    return model
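A usage sketch for the discriminator above. `channel_rate`, `patch_shape`, and `image_shape` are module-level settings the function expects; the values below are illustrative assumptions (chosen so the patch-grid arithmetic in the function works out), not the original project's configuration, and the snippet's imports plus `non_local_block` are assumed to be in scope.
# Illustrative settings only; the patch side length must equal channel_rate for the
# row/column index arithmetic inside discriminator_model() to be consistent.
channel_rate = 64
patch_shape = (channel_rate, channel_rate, 3)          # one PatchGAN patch
image_shape = (4 * channel_rate, 4 * channel_rate, 3)  # full image, split into a 4x4 patch grid

discriminator = discriminator_model()
discriminator.compile(optimizer='adam', loss='binary_crossentropy')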
Example #2
def Encoder(enc_input, num_feedback_bits):
    num_quan_bits = 4
    x = tf.transpose(enc_input, perm=[0, 3, 1, 2])
    x = tf.keras.layers.Conv2D(2, (3, 3),
                               padding='same',
                               data_format="channels_first")(x)
    x = add_common_layers(x)
    x = non_local_block(x, compression=compression_rate, mode=non_local_mode)
    x = tf.keras.layers.Reshape((img_total, ))(x)
    x = layers.Dense(units=int(num_feedback_bits / num_quan_bits),
                     activation='sigmoid')(x)
    enc_output = QuantizationLayer(num_quan_bits)(x)
    return enc_output
Example #3
def NON_LOCAL_MIXTURE():
    '''NONLOCAL MODEL'''
    '''LIDAR branch'''
    input_lid = Input(shape=(20, 200, 1))
    layer = Conv2D(5,
                   kernel_size=(5, 5),
                   activation='relu',
                   padding="SAME",
                   kernel_initializer=initializers.HeUniform)(input_lid)
    layer = Conv2D(5,
                   kernel_size=(5, 5),
                   activation='relu',
                   padding="SAME",
                   kernel_initializer=initializers.HeUniform)(layer)
    layer = Conv2D(5,
                   kernel_size=(5, 5),
                   strides=2,
                   activation='relu',
                   padding="SAME",
                   kernel_initializer=initializers.HeUniform)(layer)
    layer = Conv2D(5,
                   kernel_size=(5, 5),
                   activation='relu',
                   padding="SAME",
                   kernel_initializer=initializers.HeUniform)(layer)
    layer = Conv2D(5,
                   kernel_size=(5, 5),
                   strides=2,
                   activation='relu',
                   padding="SAME",
                   kernel_initializer=initializers.HeUniform)(layer)
    NLA = non_local_block(layer, intermediate_dim=2, mode='embedded')
    layer = Conv2D(1,
                   kernel_size=(3, 3),
                   strides=(1, 2),
                   activation='relu',
                   padding="SAME",
                   kernel_initializer=initializers.HeUniform)(NLA)
    layer = Flatten()(layer)
    out_lid = Dense(16, activation='relu')(layer)
    '''GPS branch'''
    input_coord = Input(shape=(2,))
    '''Concatenation'''
    concatenated = concatenate([out_lid, input_coord])
    layer = Dense(64, activation='relu')(concatenated)
    layer = Dense(64, activation='relu')(layer)
    layer = Dense(64, activation='relu')(layer)
    predictions = Dense(256, activation='softmax')(layer)
    architecture = Model(inputs=[input_lid, input_coord], outputs=predictions)
    return architecture
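A hedged usage sketch for the model above; it assumes the snippet's imports are in scope, and the optimizer/loss are illustrative choices rather than values taken from the original training script.
model = NON_LOCAL_MIXTURE()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
# Each sample needs two inputs: a (20, 200, 1) LIDAR map and a length-2 coordinate vector;
# the output is a 256-way softmax.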
Example #4
def build_model():
    """Function returning keras model instance.
    
    Model can be
     - Trained here
     - Loaded with load_model
     - Loaded from keras.applications
    """
    input_tensor = Input(shape=(224, 224, 3))

    # backbone
    base_model = VGG16(input_tensor=input_tensor,
                       weights='imagenet',
                       include_top=False)
    base_output = base_model.output

    # self-attention
    x = non_local.non_local_block(base_output,
                                  intermediate_dim=None,
                                  compression=2,
                                  mode='embedded',
                                  add_residual=False)
    x = BatchNormalization()(x)

    # channel-attention
    y = channel_attention.squeeze_excitation_layer(base_output,
                                                   512,
                                                   ratio=4,
                                                   concate=False)
    y = BatchNormalization()(y)

    # concat
    x = concatenate([base_output, x], axis=3)
    x = concatenate([x, y], axis=3)
    # spp
    gap = GlobalAveragePooling2D()(x)
    x = Flatten()(x)
    x = concatenate([gap, x])
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    predict = Dense(4, activation='softmax')(x)
    model = Model(inputs=input_tensor, outputs=predict)

    model.load_weights('model.h5')
    model.summary()
    return model
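Assuming the snippet's imports are available and a local 'model.h5' checkpoint exists, the returned model can be exercised as follows; the all-zeros input is only there to illustrate the expected shape.
import numpy as np

model = build_model()                          # loads weights from 'model.h5'
dummy = np.zeros((1, 224, 224, 3), dtype='float32')
probs = model.predict(dummy)                   # shape (1, 4): 4-class softmax scores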
Example #5
    def f(x):
        for i in range(blocks):
            x = block_function(filters=filters,
                               stage=stage,
                               block=i,
                               transition_strides=transition_strides[i],
                               dilation_rate=transition_dilation_rates[i],
                               is_first_block_of_first_layer=(is_first_layer
                                                              and i == 0),
                               dropout=dropout,
                               residual_unit=residual_unit)(x)

            # Non Local Block
            if filters >= 256:
                print("Filters : ", filters, "Adding Non Local Blocks")
                x = non_local_block(x, mode='embedded', compression=2)

        return x
Example #6
def Decoder(dec_input, num_feedback_bits):
    num_quan_bits = 4
    x = DeuantizationLayer(num_quan_bits)(dec_input)
    x = tf.keras.layers.Reshape((int(num_feedback_bits / num_quan_bits), ))(x)
    x = layers.Dense(img_total, activation='sigmoid')(x)
    x = layers.Reshape((2, 24, 16))(x)
    x = non_local_block(x, compression=compression_rate, mode=non_local_mode)
    for i in range(dense_num):
        x = dense_residual_block(x)
        x = add_common_layers(x)
        x = tf.keras.layers.Conv2D(2, (1, 1),
                                   padding='same',
                                   data_format="channels_first")(x)
    x = tf.keras.layers.Conv2D(2, (3, 3),
                               activation='sigmoid',
                               padding='same',
                               data_format="channels_first")(x)
    dec_output = tf.transpose(x, perm=[0, 2, 3, 1])
    return dec_output
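The Encoder/Decoder pair can be tied into a single feedback autoencoder. The sketch below is an assumption based only on the shapes visible in the code; `img_total`, `compression_rate`, `non_local_mode`, `dense_num`, and the custom quantization/residual layers are project-specific and must already be defined.
num_feedback_bits = 128  # illustrative value; must be divisible by the 4 quantization bits

# The Encoder transposes channels-last input to channels-first, and the Decoder reshapes
# back to (2, 24, 16) before transposing, so a (24, 16, 2) input is consistent.
enc_input = tf.keras.Input(shape=(24, 16, 2))
codeword = Encoder(enc_input, num_feedback_bits)
reconstruction = Decoder(codeword, num_feedback_bits)
autoencoder = tf.keras.Model(inputs=enc_input, outputs=reconstruction)
autoencoder.compile(optimizer='adam', loss='mse')  # MSE reconstruction loss is an assumption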
Example #7
def NonLocal_SeNet_Block(x_input, out_dims, mode=None, compression=2, reduction_ratio=4):
    residual_abs = Lambda(abs_backend, name="abs_non" + str(out_dims))(x_input)

    x = Conv3D(out_dims, (1, 1, 1), padding='same', use_bias=False,
               kernel_initializer='he_normal', kernel_regularizer=l2(5e-4))(x_input)

    # NonLocal
    x_non_local = non_local_block(x, mode='embedded', compression=2)

    # SeNet
    abs_mean = GlobalAveragePooling3D()(x_non_local)

    # scales = Dense(units=out_dims // reduction_ratio, activation=None, kernel_initializer='he_normal',
    #                kernel_regularizer=l2(1e-4))(abs_mean)
    # scales = Activation('relu')(scales)
    # scales = Dense(units=out_dims)(scales)
    # scales = Activation('sigmoid')(scales)
    # scales = Reshape((1, 1, 1, out_dims))(scales)

    scales = Reshape((1, 1, 1, out_dims))(abs_mean)
    scales = Conv3D(filters=out_dims // reduction_ratio, kernel_size=1,
                    use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(5e-4))(scales)
    scales = Activation('relu')(scales)
    scales = Conv3D(filters=out_dims, kernel_size=1,
                    use_bias=False, kernel_initializer='he_normal', kernel_regularizer=l2(5e-4))(scales)
    scales = Activation('sigmoid')(scales)

    thres = multiply([x, scales])

    # Soft thresholding
    sub = keras.layers.subtract([residual_abs, thres])
    zeros = keras.layers.subtract([sub, sub])
    n_sub = keras.layers.maximum([sub, zeros])
    residual = keras.layers.multiply([Lambda(sign_backend, name="sign_non" + str(out_dims))(x_input), n_sub])

    return residual
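`abs_backend` and `sign_backend` are project helpers wrapped in Lambda layers; given how the soft-thresholding step uses them, they presumably reduce to the backend absolute-value and sign ops, as in this minimal sketch.
from keras import backend as K

def abs_backend(x):
    # element-wise absolute value of the residual branch
    return K.abs(x)

def sign_backend(x):
    # element-wise sign, used to restore polarity after soft thresholding
    return K.sign(x)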
Example #8
def resnet_MRF():
    # create CNN model
    """ResNet Version 1 model builder (1-D variant).
    Stacks Conv1D-ReLU residual blocks interleaved with embedded non-local blocks.
    The architecture is hard-coded below: input sequences of shape (1000, 1) and
    two outputs for the tissue parameters T1 and T2.
    # Returns
        model (Model): Keras model instance
    """

    seq_length = 1000
    inputs = Input(shape=(seq_length, 1))

    x = Conv1D(16,
               21,
               strides=1,
               padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(1e-4),
               name='block1_conv1')(inputs)
    x = Activation('relu')(x)

    x = Conv1D(16,
               21,
               strides=1,
               padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=regularizers.l2(1e-4),
               name='block1_conv2')(x)
    #    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = non_local_block(
        x, compression=1, mode='embedded'
    )  # mode: `embedded`, `gaussian`, `dot` or `concatenate`.

    x = MaxPooling1D(2)(x)
    x = resnet_layer(inputs=x,
                     num_filters=32,
                     kernel_size=1,
                     batch_normalization=False,
                     name='block2_conv1')
    y = resnet_layer(inputs=x,
                     num_filters=32,
                     kernel_size=21,
                     batch_normalization=False,
                     name='block2_conv2')
    x = add([x, y])
    x = non_local_block(
        x, compression=1, mode='embedded'
    )  # mode: `embedded`, `gaussian`, `dot` or `concatenate`.

    x = MaxPooling1D(2)(x)
    x = resnet_layer(inputs=x,
                     num_filters=64,
                     kernel_size=1,
                     batch_normalization=False,
                     name='block3_conv1')
    y = resnet_layer(inputs=x,
                     num_filters=64,
                     kernel_size=21,
                     batch_normalization=False,
                     name='block3_conv2')
    x = add([x, y])
    x = non_local_block(
        x, compression=1, mode='embedded'
    )  # mode: `embedded`, `gaussian`, `dot` or `concatenate`.

    x = MaxPooling1D(2)(x)
    x = resnet_layer(inputs=x,
                     num_filters=128,
                     kernel_size=1,
                     batch_normalization=False,
                     name='block4_conv1')
    y = resnet_layer(inputs=x,
                     num_filters=128,
                     kernel_size=21,
                     batch_normalization=False,
                     name='block4_conv2')
    x = add([x, y])
    x = non_local_block(
        x, compression=2, mode='embedded'
    )  # mode: `embedded`, `gaussian`, `dot` or `concatenate`.

    x = GlobalAveragePooling1D()(x)

    outputs = Dense(2,
                    kernel_initializer='he_normal',
                    kernel_constraint=maxnorm(3))(x)

    # Instantiate model.
    model = Model(inputs=inputs, outputs=outputs)

    return model
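A plausible training setup for the network above: the two linear outputs correspond to the T1/T2 tissue parameters mentioned in the docstring, so a regression loss is assumed here.
model = resnet_MRF()
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
model.summary()
# Input: sequences of shape (1000, 1); output: two regressed tissue parameters (T1, T2).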
Example #9
def Network_config(class_num=4,
                   epoch=200,
                   initial_epoch=0,
                   batch_size=32,
                   train_data=None,
                   train_label=None,
                   test_data=None,
                   test_label=None,
                   fold=0):
    adam = Adam(lr=0.005,
                beta_1=0.9,
                beta_2=0.999,
                epsilon=1e-08,
                decay=0.0009)
    sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False)

    input_tensor = Input(shape=(224, 224, 3))

    #backbone
    base_model = VGG16(input_tensor=input_tensor,
                       weights='imagenet',
                       include_top=False)
    base_output = base_model.output

    #self-attention
    x = non_local.non_local_block(base_output,
                                  intermediate_dim=None,
                                  compression=2,
                                  mode='embedded',
                                  add_residual=False)
    x = BatchNormalization()(x)

    #channel-attention
    y = channel_attention.squeeze_excitation_layer(base_output,
                                                   512,
                                                   ratio=4,
                                                   concate=False)
    y = BatchNormalization()(y)

    #concat
    x = concatenate([base_output, x], axis=3)
    x = concatenate([x, y], axis=3)

    # spp
    gap = GlobalAveragePooling2D()(x)
    x = Flatten()(x)
    x = concatenate([gap, x])
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(512, activation='relu')(x)
    x = BatchNormalization()(x)
    predict = Dense(class_num, activation='softmax')(x)
    model = Model(inputs=input_tensor, outputs=predict)

    for layer in (base_model.layers):
        layer.trainable = False

    for l in model.layers:
        print(l.name)

    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=[keras.metrics.categorical_accuracy])
    model.summary()

    tools.create_directory('./final/')
    weights_file = './final/' + str(
        fold
    ) + '-weights.{epoch:02d}-{categorical_accuracy:.4f}-{val_loss:.4f}-{val_categorical_accuracy:.4f}.h5'
    csv_file = './final/record.csv'
    lr_reducer = ReduceLROnPlateau(monitor='categorical_accuracy',
                                   factor=0.2,
                                   cooldown=0,
                                   patience=2,
                                   min_lr=0.5e-6)
    early_stopper = EarlyStopping(monitor='val_categorical_accuracy',
                                  min_delta=1e-4,
                                  patience=30)

    model_checkpoint = ModelCheckpoint(weights_file,
                                       monitor='val_categorical_accuracy',
                                       save_best_only=True,
                                       verbose=1,
                                       save_weights_only=True,
                                       mode='max')
    tensorboard = TensorBoard(log_dir='./logs/',
                              histogram_freq=0,
                              batch_size=8,
                              write_graph=True,
                              write_grads=True,
                              write_images=True,
                              embeddings_freq=0,
                              embeddings_layer_names=None,
                              embeddings_metadata=None)
    CSV_record = CSVLogger(csv_file, separator=',', append=True)

    callbacks = [
        lr_reducer, early_stopper, model_checkpoint, tensorboard, CSV_record
    ]
    gc.disable()
    model.fit_generator(
        generator=tools.batch_generator(np.array(train_data),
                                        np.array(train_label), batch_size,
                                        True, class_num, True),
        steps_per_epoch=int(len(train_label) / batch_size) - 1,
        max_q_size=20,
        initial_epoch=initial_epoch,
        epochs=epoch,
        verbose=1,
        callbacks=callbacks,
        validation_data=tools.batch_generator(np.array(test_data),
                                              np.array(test_label), batch_size,
                                              True, class_num, False),
        validation_steps=int(len(test_label) / batch_size) - 1,
        class_weight='auto')

    #confusion matrix
    all_y_pred = []
    all_y_true = []
    for test_data_batch, test_label_batch in tools.batch_generator_confusion_matrix(
            np.array(test_data), np.array(test_label), batch_size, True,
            class_num):
        y_pred = model.predict(test_data_batch, batch_size)
        y_true = test_label_batch
        for y_p in y_pred:
            all_y_pred.append(np.where(y_p == max(y_p))[0][0])
        for y_t in y_true:
            all_y_true.append(np.where(y_t == max(y_t))[0][0])
    confusion = confusion_matrix(y_true=all_y_true, y_pred=all_y_pred)
    print(confusion)
    f = open('confusion_matrix.txt', 'a+')
    f.write(str(all_y_true) + "\n")
    f.write(str(all_y_pred) + "\n")
    f.write(str(confusion) + '\n')
    f.close()
    gc.enable()
Example #10
def get_cnn_model(params):
    """
    Load base CNN model and add metadata fusion layers if 'use_metadata' is set in params.py
    :param params: global parameters, used to find location of the dataset and json file
    :return model: CNN model with or without metadata fusion, depending on params
    """

    input_tensor = Input(shape=(params.target_img_size[0],
                                params.target_img_size[1],
                                params.num_channels))
    baseModel = densenet.DenseNetImageNet161(
        input_shape=(params.target_img_size[0], params.target_img_size[1],
                     params.num_channels),
        include_top=False,
        input_tensor=input_tensor)

    modelStruct = baseModel.layers[-1].output

    if params.use_nlm:
        modelStruct = baseModel.layers[-2].output
        modelStruct = non_local_block(modelStruct,
                                      computation_compression=1,
                                      mode='embedded')
        modelStruct = Conv2D(params.cnn_lstm_layer_length, [3, 3],
                             name='conv_nlm')(modelStruct)
        modelStruct = Flatten()(modelStruct)
        modelStruct = Dense(params.cnn_lstm_layer_length,
                            activation='relu',
                            name='fc_nlm')(modelStruct)
        modelStruct = Dropout(0.5)(modelStruct)

    if params.use_spp:
        modelStruct = baseModel.layers[-2].output
        modelStruct = SpatialPyramidPooling([1, 2, 4], name='spp')(modelStruct)
        modelStruct = Dense(params.cnn_lstm_layer_length,
                            activation='relu',
                            name='fc_spp')(modelStruct)
        modelStruct = Dropout(0.5)(modelStruct)

    if params.use_deform:
        modelStruct = baseModel.layers[-2].output
        modelStruct = ConvOffset2D(params.cnn_lstm_layer_length,
                                   name='deform')(modelStruct)
        modelStruct = Conv2D(params.cnn_lstm_layer_length, [3, 3],
                             name='conv_deform')(modelStruct)
        modelStruct = Flatten()(modelStruct)
        modelStruct = Dense(params.cnn_lstm_layer_length,
                            activation='relu',
                            name='fc_deform')(modelStruct)
        modelStruct = Dropout(0.5)(modelStruct)

    if params.use_metadata:
        auxiliary_input = Input(shape=(params.metadata_length, ),
                                name='aux_input')
        modelStruct = merge([modelStruct, auxiliary_input], 'concat')

    modelStruct = Dense(params.cnn_last_layer_length,
                        activation='relu',
                        name='fc1')(modelStruct)
    modelStruct = Dropout(0.5)(modelStruct)
    modelStruct = Dense(params.cnn_last_layer_length,
                        activation='relu',
                        name='fc2')(modelStruct)
    modelStruct = Dropout(0.5)(modelStruct)
    predictions = Dense(params.num_labels, activation='softmax')(modelStruct)

    if not params.use_metadata:
        model = Model(input=[baseModel.input], output=predictions)
    else:
        model = Model(input=[baseModel.input, auxiliary_input],
                      output=predictions)

    for i, layer in enumerate(model.layers):
        layer.trainable = True

    return model
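`params` is the project's configuration module; the sketch below only illustrates the attributes `get_cnn_model()` reads, with placeholder values that are assumptions rather than the real settings, and it assumes the snippet's imports (densenet, non_local_block, etc.) are in scope.
from types import SimpleNamespace

params = SimpleNamespace(
    target_img_size=(224, 224),   # placeholder
    num_channels=3,
    num_labels=10,                # placeholder
    cnn_last_layer_length=4096,   # placeholder
    cnn_lstm_layer_length=2048,   # placeholder
    metadata_length=20,           # placeholder, only read when use_metadata is True
    use_nlm=True,
    use_spp=False,
    use_deform=False,
    use_metadata=False,
)
model = get_cnn_model(params)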
Example #11
def NON_LOCAL_MIXTURE_OLD(FLATTENED, LIDAR_TYPE):
    if (LIDAR_TYPE == 'CENTERED'):
        if (FLATTENED):
            input_lid = Input(shape=(67, 67, 1))
        else:
            input_lid = Input(shape=(67, 67, 10))
    elif (LIDAR_TYPE == 'ABSOLUTE'):
        if (FLATTENED):
            input_lid = Input(shape=(20, 200, 1))
        else:
            input_lid = Input(shape=(20, 200, 10))
    elif (LIDAR_TYPE == 'ABSOLUTE_LARGE'):
        if (FLATTENED):
            input_lid = Input(shape=(60, 330, 1))
        else:
            input_lid = Input(shape=(60, 330, 10))
    noisy_input_lid = GaussianNoise(0.01)(input_lid)
    layer = Conv2D(5,
                   kernel_size=(5, 5),
                   activation='relu',
                   padding="SAME",
                   kernel_initializer=initializers.HeUniform)(noisy_input_lid)
    layer = Conv2D(5,
                   kernel_size=(5, 5),
                   activation='relu',
                   padding="SAME",
                   kernel_initializer=initializers.HeUniform)(layer)
    layer = Conv2D(5,
                   kernel_size=(5, 5),
                   strides=2,
                   activation='relu',
                   padding="SAME",
                   kernel_initializer=initializers.HeUniform)(layer)
    layer = Conv2D(5,
                   kernel_size=(5, 5),
                   activation='relu',
                   padding="SAME",
                   kernel_initializer=initializers.HeUniform)(layer)
    layer_input = Conv2D(5,
                         kernel_size=(5, 5),
                         strides=2,
                         activation='relu',
                         padding="SAME",
                         kernel_initializer=initializers.HeUniform)(layer)
    non_local_layer = non_local_block(layer_input, intermediate_dim=2)
    layer = Conv2D(1,
                   kernel_size=(3, 3),
                   strides=(1, 2),
                   activation='relu',
                   padding="SAME",
                   kernel_initializer=initializers.HeUniform)(non_local_layer)
    layer = Flatten()(layer)
    out_lid = Dense(16, activation='relu')(layer)
    '''GPS branch'''
    input_coord = Input(shape=(3,))
    noisy_input_coord = GaussianNoise(0.002)(input_coord)
    '''Concatenation'''
    concatenated = concatenate([out_lid, noisy_input_coord])
    reg_val = 0
    layer = Dense(64, activation='relu')(concatenated)
    layer = Dense(64, activation='relu')(layer)
    layer = Dense(64, activation='relu')(layer)
    predictions = Dense(256, activation='softmax')(layer)
    architecture = Model(inputs=[input_lid, input_coord], outputs=predictions)
    return architecture
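Usage sketch for the variant above; the flag values pick just one of the input configurations the function accepts, and the compile settings are illustrative.
model = NON_LOCAL_MIXTURE_OLD(FLATTENED=True, LIDAR_TYPE='ABSOLUTE')
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Inputs: a (20, 200, 1) LIDAR map and a length-3 coordinate vector; output: 256-way softmax.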
Example #12
class CustomModelCheckpoint(Callback):
    # Base class and constructor signature are assumed from the usage below:
    # the callback saves the given (possibly multi-GPU) model after every epoch.
    def __init__(self, model_parallel, path):
        super(CustomModelCheckpoint, self).__init__()

        self.save_model = model_parallel
        self.path = path
        self.nb_epoch = 0

    def on_epoch_end(self, epoch, logs=None):
        self.nb_epoch += 1
        self.save_model.save(self.path + str(self.nb_epoch) + '.hdf5')


i3d = i3d_modified(weights = 'rgb_imagenet_and_kinetics')
model_branch = i3d.i3d_flattened(num_classes = num_classes)
model_branch.load_weights('/data/stars/user/sdas/PhD_work/ICCV_2019/models/epoch_full_body_NTU_CS.hdf5')
model_i3d = Model(inputs = model_branch.input, outputs = model_branch.get_layer('Mixed_5c').output)
x = non_local_block(model_i3d.output, compression=2, mode='embedded')
#x = non_local_block(x, compression=2, mode='embedded')
#x = non_local_block(x, compression=2, mode='embedded')
#x = non_local_block(x, compression=2, mode='embedded')
#x = non_local_block(x, compression=2, mode='embedded')
x = AveragePooling3D((2, 7, 7), strides=(1, 1, 1), padding='valid', name='global_avg_pool'+'second')(x)
x = Dropout(0.0)(x)
x = conv3d_bn(x, num_classes, 1, 1, 1, padding='same', use_bias=True, use_activation_fn=False, use_bn=False, name='Conv3d_6a_1x1'+'second')
x = Flatten(name='flatten'+'second')(x)
predictions = Dense(num_classes, activation='softmax', name='softmax'+'second')(x)
model = Model(inputs=model_branch.input, outputs=predictions, name = 'i3d_nonlocal')
optim = SGD(lr = 0.01, momentum = 0.9)
model.compile(loss = 'categorical_crossentropy', optimizer = optim, metrics = ['accuracy'])

#model = load_model("../weights3/epoch11.hdf5")
# Callbacks
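A hypothetical continuation that wires CustomModelCheckpoint into training; the checkpoint path and the commented fit call are placeholders, not the original script's values.
callbacks = [CustomModelCheckpoint(model_parallel=model, path='./weights/epoch_')]
# model.fit(train_generator, epochs=num_epochs, callbacks=callbacks,
#           validation_data=val_generator)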