Example #1
def get_red30_model():
    """
    Define the model architecture

    Return the model
    """

    inputs = Input(shape=(256, 256, 2))

    enc_conv0 = Conv2D(32, (3, 3),
                       padding="same",
                       activation='relu',
                       kernel_initializer="he_normal")(inputs)
    enc_conv1 = Conv2D(32, (3, 3), padding="SAME",
                       activation='relu')(enc_conv0)
    max_pool1 = MaxPooling2D((2, 2))(enc_conv1)

    enc_conv2 = Conv2D(32, (3, 3), padding="SAME",
                       activation='relu')(max_pool1)
    max_pool2 = MaxPooling2D((2, 2))(enc_conv2)

    enc_conv3 = Conv2D(32, (3, 3), padding="SAME",
                       activation='relu')(max_pool2)
    max_pool3 = MaxPooling2D((2, 2))(enc_conv3)

    enc_conv4 = Conv2D(32, (3, 3), padding="SAME",
                       activation='relu')(max_pool3)
    max_pool4 = MaxPooling2D((2, 2))(enc_conv4)

    enc_conv5 = Conv2D(32, (3, 3), padding="SAME",
                       activation='relu')(max_pool4)
    max_pool5 = MaxPooling2D((2, 2))(enc_conv5)

    enc_conv6 = Conv2D(32, (3, 3), padding="SAME",
                       activation='relu')(max_pool5)

    up_samp5 = UpSampling2D((2, 2))(enc_conv6)
    concat_5 = concatenate([up_samp5, max_pool4])

    dec_conv5a = Conv2D(64, (3, 3), padding="SAME",
                        activation='relu')(concat_5)
    dec_conv5b = Conv2D(64, (3, 3), padding='SAME',
                        activation='relu')(dec_conv5a)

    up_samp4 = UpSampling2D((2, 2))(dec_conv5b)
    concat_4 = concatenate([up_samp4, max_pool3])

    dec_conv4a = Conv2D(64, (3, 3), padding="SAME",
                        activation='relu')(concat_4)
    dec_conv4b = Conv2D(64, (3, 3), padding='SAME',
                        activation='relu')(dec_conv4a)

    up_samp3 = UpSampling2D((2, 2))(dec_conv4b)
    concat_3 = concatenate([up_samp3, max_pool2])

    dec_conv3a = Conv2D(64, (3, 3), padding="SAME",
                        activation='relu')(concat_3)
    dec_conv3b = Conv2D(64, (3, 3), padding='SAME',
                        activation='relu')(dec_conv3a)

    up_samp2 = UpSampling2D((2, 2))(dec_conv3b)
    concat_2 = concatenate([up_samp2, max_pool1])

    dec_conv2a = Conv2D(64, (3, 3), padding="SAME",
                        activation='relu')(concat_2)
    dec_conv2b = Conv2D(64, (3, 3), padding='SAME',
                        activation='relu')(dec_conv2a)

    up_samp1 = UpSampling2D((2, 2))(dec_conv2b)
    concat_1 = concatenate([up_samp1, inputs])

    dec_conv1a = Conv2D(64, (3, 3), padding="SAME",
                        activation='relu')(concat_1)
    dec_conv1b = Conv2D(32, (3, 3), padding='SAME',
                        activation='relu')(dec_conv1a)
    dec_conv1c = Conv2D(2, (3, 3), padding='SAME',
                        activation='linear')(dec_conv1b)

    model = Model(inputs=inputs, outputs=dec_conv1c)
    return model
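A minimal usage sketch, not part of the original listing: compiling and inspecting the returned model. The optimizer and loss are assumptions chosen to suit the 2-channel linear output.

model = get_red30_model()
model.compile(optimizer='adam', loss='mse')  # assumed settings, not from the source
model.summary()  # prints the encoder/decoder stack and parameter counts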
Example #2
# Suppress warnings about how TensorFlow was compiled.
tf.logging.set_verbosity(tf.logging.ERROR)

latent_size = 100

## Generator (G)

z = Input(shape=[latent_size], name='noise')
# 100-dim latent vector -> 7 * 7 * 256 units
G = Dense(7 * 7 * 256)(z)
G = BatchNormalization(momentum=0.9)(G)
G = LeakyReLU(alpha=0.2)(G)
G = Reshape((7, 7, 256))(G)
# 7 x 7 x 256
G = UpSampling2D()(G)
G = Conv2D(128, (5, 5), padding='same')(G)
G = BatchNormalization(momentum=0.9)(G)
G = LeakyReLU(alpha=0.2)(G)
# 14 x 14 x 128
G = UpSampling2D()(G)
G = Conv2D(64, (5, 5), padding='same')(G)
G = BatchNormalization(momentum=0.9)(G)
G = LeakyReLU(alpha=0.2)(G)
# 28 x 28 x 64
G = Conv2D(32, (5, 5), padding='same')(G)
G = BatchNormalization(momentum=0.9)(G)
G = LeakyReLU(alpha=0.2)(G)
# 28 x 28 x 32
G = Conv2D(1, (5, 5), padding='same', activation='tanh')(G)
# 28 x 28 x 1
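A hedged follow-up, assuming Model and numpy are imported alongside the layers above: wrapping the generator graph into a Model and sampling a batch of fake images from noise. Neither step appears in the original snippet.

import numpy as np

generator = Model(z, G, name='generator')
generator.summary()

noise = np.random.normal(0, 1, size=(16, latent_size))
fake_images = generator.predict(noise)  # shape (16, 28, 28, 1), values in [-1, 1] from the tanh output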
Example #3
plt.savefig('saves/cdA_gblur_noisy_vis.png')

################# Constructing the Model ######################
input_img = Input(
    shape=(32, 32,
           3))  # adapt this if using `channels_first` image data format

x = Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)

# at this point the representation is (8, 8, 32)

x = Conv2D(32, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x)

autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

########################## Training the model ##########################
from keras.callbacks import TensorBoard
import sys

#sys.stdout = open('cdA_gblur_output.txt', 'w')
autoencoder.fit(x_train_noisy,
                x_train,
                epochs=100)
Example #4
def create_yolov3_model(nb_class, anchors, max_box_per_image, max_grid,
                        batch_size, warmup_batches, ignore_thresh, grid_scales,
                        obj_scale, noobj_scale, xywh_scale, class_scale):
    input_image = Input(shape=(None, None, 3))  # net_h, net_w, 3
    true_boxes = Input(shape=(1, 1, 1, max_box_per_image, 4))
    true_yolo_1 = Input(
        shape=(None, None, len(anchors) // 4,
               4 + 1 + nb_class))  # grid_h, grid_w, nb_anchor, 5+nb_class
    true_yolo_2 = Input(
        shape=(None, None, len(anchors) // 4,
               4 + 1 + nb_class))  # grid_h, grid_w, nb_anchor, 5+nb_class

    # Layer  0 => 4
    x = _conv_block(input_image, [{
        'filter': 16,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 0
    }],
                    do_skip=False)
    x = MaxPooling2D(pool_size=(2, 2),
                     strides=2,
                     padding='same',
                     data_format=None)(x)
    # Layer  5 => 8
    x = _conv_block(x, [{
        'filter': 32,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 2
    }],
                    do_skip=False)
    x = MaxPooling2D(pool_size=(2, 2),
                     strides=2,
                     padding='same',
                     data_format=None)(x)
    #   Layer  9 => 11
    x = _conv_block(x, [{
        'filter': 64,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 4
    }],
                    do_skip=False)
    x = MaxPooling2D(pool_size=(2, 2),
                     strides=2,
                     padding='same',
                     data_format=None)(x)
    #Layer 12 => 15
    x = _conv_block(x, [{
        'filter': 128,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 6
    }],
                    do_skip=False)
    x = MaxPooling2D(pool_size=(2, 2),
                     strides=2,
                     padding='same',
                     data_format=None)(x)

    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 8
    }],
                    do_skip=False)
    skip_8 = x
    x = MaxPooling2D(pool_size=(2, 2),
                     strides=2,
                     padding='same',
                     data_format=None)(x)

    x = _conv_block(x, [{
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 10
    }],
                    do_skip=False)
    x = MaxPooling2D(pool_size=(2, 2),
                     strides=1,
                     padding='same',
                     data_format=None)(x)

    x = _conv_block(x, [{
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 12
    }],
                    do_skip=False)

    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 13
    }],
                    do_skip=False)
    pred_yolo_1 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 14}, \
                        {'filter': (3*(5+nb_class)), 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 15}], do_skip=False)

    loss_yolo_1 = YoloLayer(
        anchors[6:12], [1 * num
                        for num in max_grid], batch_size, warmup_batches,
        ignore_thresh, grid_scales[0], obj_scale, noobj_scale, xywh_scale,
        class_scale)([input_image, pred_yolo_1, true_yolo_1, true_boxes])

    # Layer 83 => 86
    x = _conv_block(x, [{
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 18
    }],
                    do_skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_8])

    # Layer 87 => 91
    pred_yolo_2 = _conv_block(x, [{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 21},\
                        {'filter': (3*(5+nb_class)), 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 22}], do_skip=False)

    # Layer 92 => 94

    loss_yolo_2 = YoloLayer(
        anchors[:6], [2 * num for num in max_grid], batch_size, warmup_batches,
        ignore_thresh, grid_scales[1], obj_scale, noobj_scale, xywh_scale,
        class_scale)([input_image, pred_yolo_2, true_yolo_2, true_boxes])

    # Layer 95 => 98

    train_model = Model([input_image, true_boxes, true_yolo_1, true_yolo_2],
                        [loss_yolo_1, loss_yolo_2])
    infer_model = Model(input_image, [pred_yolo_1, pred_yolo_2])

    return [train_model, infer_model]
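A hypothetical way to compile the returned pair of models. The anchors and hyperparameters below are placeholder tiny-YOLOv3 values, and the pass-through loss is an assumption that follows from YoloLayer emitting its loss tensor as the model output.

from keras.optimizers import Adam

anchors = [10,14, 23,27, 37,58, 81,82, 135,169, 344,319]  # placeholder anchor boxes
train_model, infer_model = create_yolov3_model(
    nb_class=80, anchors=anchors, max_box_per_image=30, max_grid=[448, 448],
    batch_size=16, warmup_batches=3, ignore_thresh=0.5, grid_scales=[1, 1],
    obj_scale=5, noobj_scale=1, xywh_scale=1, class_scale=1)

# Each output of train_model is already a loss value, so a pass-through "loss" suffices.
train_model.compile(loss=lambda y_true, y_pred: y_pred, optimizer=Adam(lr=1e-4))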
Example #5
def build_model():
    input_tensor = Input((None, None, 1))

    x = Conv2D(24, 5, strides=2, padding='same')(input_tensor)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(64, 3, strides=2, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(128, 3, strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(256, 3, strides=2, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(512, 3, strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(512, 3, strides=2, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(
        256,
        3,
        strides=1,
        padding='same',
    )(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = UpSampling2D(2)(x)
    x = Conv2D(128, 3, strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(64, 3, strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = UpSampling2D(2)(x)
    x = Conv2D(32, 3, strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(16, 3, strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = UpSampling2D(2)(x)
    x = Conv2D(8, 3, strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(4, 3, strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = UpSampling2D(2)(x)
    x = Conv2D(2, 3, strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(1, 3, strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    output_tensor = Conv2D(1, 3, padding='same', activation='sigmoid')(x)

    return Model(input_tensor, output_tensor)
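A minimal sketch of using build_model(); the compile settings below are assumptions suggested by the final 1-channel sigmoid output (e.g. a binary mask) and are not part of the original snippet.

model = build_model()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()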
Example #6
def build_retinanet(img_shape=(3, 416, 416),
                    n_classes=80,
                    n_priors=5,
                    load_pretrained=False,
                    freeze_layers_from='base_model'):

    # RetinaNet model is only implemented for TF backend
    assert (K.backend() == 'tensorflow')

    inputs = Input(shape=img_shape)

    K.set_image_dim_ordering('th')

    backbone_net = keras_resnet.models.ResNet50(inputs,
                                                include_top=False,
                                                freeze_bn=True)
    _, C3, C4, C5 = backbone_net.outputs  # we ignore C2

    K.set_image_dim_ordering('tf')

    C3 = keras.layers.Permute((3, 2, 1))(C3)
    C4 = keras.layers.Permute((3, 2, 1))(C4)
    C5 = keras.layers.Permute((3, 2, 1))(C5)

    # compute pyramid features as per https://arxiv.org/abs/1708.02002
    features = create_pyramid_features(C3, C4, C5)

    # create regression and classification submodels
    bbox_subnet_model = regression_submodel(n_priors)
    class_subnet_model = classification_submodel(n_classes, n_priors)

    #Concatenation of pyramid levels
    # class_subnet
    class_subnet = [class_subnet_model(f) for f in features]

    class_subnet0 = Conv2D(filters=n_priors * n_classes,
                           kernel_size=(1, 1),
                           strides=(4, 4))(class_subnet[0])
    class_subnet1 = Conv2D(filters=n_priors * n_classes,
                           kernel_size=(1, 1),
                           strides=(2, 2))(class_subnet[1])
    class_subnet2 = class_subnet[2]
    class_subnet3 = UpSampling2D(size=(2, 2),
                                 name='class_upsample')(class_subnet[3])

    class_subnet = [class_subnet0, class_subnet1, class_subnet2, class_subnet3]
    class_subnet = Concatenate(axis=3,
                               name='class_subnet_outputs')(class_subnet)
    class_subnet = Conv2D(filters=n_priors * n_classes,
                          kernel_size=(1, 1),
                          strides=(1, 1))(class_subnet)

    # bbox_subnet
    bbox_subnet = [bbox_subnet_model(f) for f in features]

    bbox_subnet0 = Conv2D(filters=n_priors * (4 + 1),
                          kernel_size=(1, 1),
                          strides=(4, 4))(bbox_subnet[0])
    bbox_subnet1 = Conv2D(filters=n_priors * (4 + 1),
                          kernel_size=(1, 1),
                          strides=(2, 2))(bbox_subnet[1])
    bbox_subnet2 = bbox_subnet[2]
    bbox_subnet3 = UpSampling2D(size=(2, 2),
                                name='bbox_upsample')(bbox_subnet[3])

    bbox_subnet = [bbox_subnet0, bbox_subnet1, bbox_subnet2, bbox_subnet3]
    bbox_subnet = Concatenate(axis=3, name='bbox_subnet_outputs')(bbox_subnet)
    bbox_subnet = Conv2D(filters=n_priors * (4 + 1),
                         kernel_size=(1, 1),
                         strides=(1, 1))(bbox_subnet)

    # Output
    #  - n_priors: anchors
    #  - 4: offsets
    #  - n_classes: probability for each class
    #  - 1: confidence of having an object
    output = Concatenate(axis=3)([class_subnet, bbox_subnet])

    K.set_image_dim_ordering('th')

    output = keras.layers.Permute((3, 2, 1))(output)

    model = Model(inputs=inputs, outputs=output)

    return model
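A hypothetical call, assuming keras_resnet and the pyramid/submodel helpers referenced above are importable; the arguments simply restate the function defaults and no pretrained weights are loaded.

retinanet = build_retinanet(img_shape=(3, 416, 416), n_classes=80, n_priors=5)
retinanet.summary()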
Example #7
def create_yolov3_model(nb_class, anchors, max_box_per_image, max_grid,
                        batch_size, warmup_batches, ignore_thresh, grid_scales,
                        obj_scale, noobj_scale, xywh_scale, class_scale):
    input_image = Input(shape=(None, None, 3))  # net_h, net_w, 3
    true_boxes = Input(shape=(1, 1, 1, max_box_per_image, 4))
    true_yolo_1 = Input(
        shape=(None, None, len(anchors) // 6,
               4 + 1 + nb_class))  # grid_h, grid_w, nb_anchor, 5+nb_class
    true_yolo_2 = Input(
        shape=(None, None, len(anchors) // 6,
               4 + 1 + nb_class))  # grid_h, grid_w, nb_anchor, 5+nb_class
    true_yolo_3 = Input(
        shape=(None, None, len(anchors) // 6,
               4 + 1 + nb_class))  # grid_h, grid_w, nb_anchor, 5+nb_class

    # Layer  0 => 4
    x = _conv_block(input_image, [{
        'filter': 32,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 0
    }, {
        'filter': 64,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 1
    }, {
        'filter': 32,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 2
    }, {
        'filter': 64,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 3
    }])

    # Layer  5 => 8
    x = _conv_block(x, [{
        'filter': 128,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 5
    }, {
        'filter': 64,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 6
    }, {
        'filter': 128,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 7
    }])

    # Layer  9 => 11
    x = _conv_block(x, [{
        'filter': 64,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 9
    }, {
        'filter': 128,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 10
    }])

    # Layer 12 => 15
    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 12
    }, {
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 13
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 14
    }])

    # Layer 16 => 36
    for i in range(7):
        x = _conv_block(x, [{
            'filter': 128,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 16 + i * 3
        }, {
            'filter': 256,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 17 + i * 3
        }])

    skip_36 = x

    # Layer 37 => 40
    x = _conv_block(x, [{
        'filter': 512,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 37
    }, {
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 38
    }, {
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 39
    }])

    # Layer 41 => 61
    for i in range(7):
        x = _conv_block(x, [{
            'filter': 256,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 41 + i * 3
        }, {
            'filter': 512,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 42 + i * 3
        }])

    skip_61 = x

    # Layer 62 => 65
    x = _conv_block(x, [{
        'filter': 1024,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 62
    }, {
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 63
    }, {
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 64
    }])

    # Layer 66 => 74
    for i in range(3):
        x = _conv_block(x, [{
            'filter': 512,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 66 + i * 3
        }, {
            'filter': 1024,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 67 + i * 3
        }])

    # Layer 75 => 79
    x = _conv_block(x, [{
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 75
    }, {
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 76
    }, {
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 77
    }, {
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 78
    }, {
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 79
    }],
                    do_skip=False)

    # Layer 80 => 82
    pred_yolo_1 = _conv_block(x, [{
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 80
    }, {
        'filter': (3 * (5 + nb_class)),
        'kernel': 1,
        'stride': 1,
        'bnorm': False,
        'leaky': False,
        'layer_idx': 81
    }],
                              do_skip=False)
    loss_yolo_1 = YoloLayer(
        anchors[12:], [1 * num
                       for num in max_grid], batch_size, warmup_batches,
        ignore_thresh, grid_scales[0], obj_scale, noobj_scale, xywh_scale,
        class_scale)([input_image, pred_yolo_1, true_yolo_1, true_boxes])

    # Layer 83 => 86
    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 84
    }],
                    do_skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_61])

    # Layer 87 => 91
    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 87
    }, {
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 88
    }, {
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 89
    }, {
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 90
    }, {
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 91
    }],
                    do_skip=False)

    # Layer 92 => 94
    pred_yolo_2 = _conv_block(x, [{
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 92
    }, {
        'filter': (3 * (5 + nb_class)),
        'kernel': 1,
        'stride': 1,
        'bnorm': False,
        'leaky': False,
        'layer_idx': 93
    }],
                              do_skip=False)
    loss_yolo_2 = YoloLayer(
        anchors[6:12], [2 * num
                        for num in max_grid], batch_size, warmup_batches,
        ignore_thresh, grid_scales[1], obj_scale, noobj_scale, xywh_scale,
        class_scale)([input_image, pred_yolo_2, true_yolo_2, true_boxes])

    # Layer 95 => 98
    x = _conv_block(x, [{
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 96
    }],
                    do_skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_36])

    # Layer 99 => 106
    pred_yolo_3 = _conv_block(x, [{
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 99
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 100
    }, {
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 101
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 102
    }, {
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 103
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 104
    }, {
        'filter': (3 * (5 + nb_class)),
        'kernel': 1,
        'stride': 1,
        'bnorm': False,
        'leaky': False,
        'layer_idx': 105
    }],
                              do_skip=False)
    loss_yolo_3 = YoloLayer(
        anchors[:6], [4 * num for num in max_grid], batch_size, warmup_batches,
        ignore_thresh, grid_scales[2], obj_scale, noobj_scale, xywh_scale,
        class_scale)([input_image, pred_yolo_3, true_yolo_3, true_boxes])

    train_model = Model(
        [input_image, true_boxes, true_yolo_1, true_yolo_2, true_yolo_3],
        [loss_yolo_1, loss_yolo_2, loss_yolo_3])
    infer_model = Model(input_image, [pred_yolo_1, pred_yolo_2, pred_yolo_3])

    return [train_model, infer_model]
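A hedged note on the returned pair: both models are built on the same layer graph, so weights trained through train_model are the ones infer_model predicts with. The anchors and hyperparameters below are placeholders, not values from the original snippet.

anchors = [10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326]  # placeholder
train_model, infer_model = create_yolov3_model(
    nb_class=80, anchors=anchors, max_box_per_image=30, max_grid=[416, 416],
    batch_size=16, warmup_batches=3, ignore_thresh=0.5, grid_scales=[1, 1, 1],
    obj_scale=5, noobj_scale=1, xywh_scale=1, class_scale=1)

# Saving through either model captures the same shared backbone and detection heads.
infer_model.save_weights('yolov3_weights.h5')  # hypothetical output path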
Example #8
def init_models(autoencoder_weights, classifier_weights):

    input_shape = Input(shape=(90, 80, 1))

    # Encoder Layers
    conv_1 = Conv2D(16, (2, 2), activation='relu', padding='same')
    max_pool_1 = MaxPooling2D((3, 2), padding='same')
    conv_2 = Conv2D(32, (2, 2), activation='relu', padding='same')
    max_pool_2 = MaxPooling2D((3, 2), padding='same')
    conv_3 = Conv2D(64, (2, 2), activation='relu', padding='same')
    max_pool_3 = MaxPooling2D((2, 2), padding='same')
    conv_4 = Conv2D(128, (2, 2), activation='relu', padding='same')

    encoded = Flatten(name='encoder')

    # Bottleneck
    dense_1 = Dense(256, name='bottleneck')
    dense_2 = Dense(6400)
    reshape = Reshape((5, 10, 128))

    # Decoder Layers
    conv_5 = Conv2D(128, (2, 2), activation='relu', padding='same')
    up_samp_1 = UpSampling2D((2, 2))
    conv_6 = Conv2D(64, (2, 2), activation='relu', padding='same')
    up_samp_2 = UpSampling2D((3, 2))
    conv_7 = Conv2D(32, (2, 2), activation='relu', padding='same')
    up_samp_3 = UpSampling2D((3, 2))

    decoded = Conv2D(1, (3, 3),
                     activation='sigmoid',
                     padding='same',
                     name='decoder')

    ####################################################################################################
    #-----------------------------------------Full Autoencoder-----------------------------------------#
    ####################################################################################################

    autoencoder = conv_1(input_shape)
    autoencoder = max_pool_1(autoencoder)
    autoencoder = conv_2(autoencoder)
    autoencoder = max_pool_2(autoencoder)
    autoencoder = conv_3(autoencoder)
    autoencoder = max_pool_3(autoencoder)
    autoencoder = conv_4(autoencoder)
    autoencoder = encoded(autoencoder)
    autoencoder = dense_1(autoencoder)
    autoencoder = dense_2(autoencoder)
    autoencoder = reshape(autoencoder)
    autoencoder = conv_5(autoencoder)
    autoencoder = up_samp_1(autoencoder)
    autoencoder = conv_6(autoencoder)
    autoencoder = up_samp_2(autoencoder)
    autoencoder = conv_7(autoencoder)
    autoencoder = up_samp_3(autoencoder)
    autoencoder = decoded(autoencoder)

    autoencoder = Model(inputs=input_shape, outputs=autoencoder)

    ####################################################################################################
    #------------------------------------------Encoder-------------------------------------------------#
    ####################################################################################################

    encoder = conv_1(input_shape)
    encoder = max_pool_1(encoder)
    encoder = conv_2(encoder)
    encoder = max_pool_2(encoder)
    encoder = conv_3(encoder)
    encoder = max_pool_3(encoder)
    encoder = conv_4(encoder)
    encoder = encoded(encoder)
    encoder = dense_1(encoder)

    encoder_model = Model(inputs=input_shape, outputs=encoder)

    ####################################################################################################
    #------------------------------------------Decoder------------------------------------------------#
    ####################################################################################################

    bottleneck_input_shape = Input(shape=(256, ))
    decoder_model = dense_2(bottleneck_input_shape)
    decoder_model = reshape(decoder_model)
    decoder_model = conv_5(decoder_model)
    decoder_model = up_samp_1(decoder_model)
    decoder_model = conv_6(decoder_model)
    decoder_model = up_samp_2(decoder_model)
    decoder_model = conv_7(decoder_model)
    decoder_model = up_samp_3(decoder_model)
    decoder_model = decoded(decoder_model)

    decoder_model = Model(inputs=bottleneck_input_shape, outputs=decoder_model)

    # Initializes the layers with weights
    autoencoder.load_weights(autoencoder_weights)

    ####################################################################################################
    #------------------------------------------Classifier----------------------------------------------#
    ####################################################################################################

    # Layers
    c_conv_1 = Conv2D(8, kernel_size=(2, 2), activation='relu')
    c_max_pool_1 = MaxPooling2D(pool_size=(2, 2))
    c_drop_1 = Dropout(0.2)
    c_conv_2 = Conv2D(16, kernel_size=(2, 2), activation='relu')
    c_max_pool_2 = MaxPooling2D(pool_size=(2, 2))
    c_drop_2 = Dropout(0.2)
    c_conv_3 = Conv2D(32, kernel_size=(2, 2), activation='relu')
    c_max_pool_3 = MaxPooling2D(pool_size=(2, 2))
    c_drop_3 = Dropout(0.2)
    c_flatten = Flatten()
    c_dense_1 = Dense(512, activation='relu')
    c_drop_4 = Dropout(0.2)
    c_dense_2 = Dense(256, activation='relu')
    c_drop_5 = Dropout(0.2)
    c_dense_3 = Dense(128, activation='relu')
    c_drop_6 = Dropout(0.2)
    c_dense_output = Dense(35, activation='softmax')

    # Model
    classifier = c_conv_1(input_shape)
    classifier = c_max_pool_1(classifier)
    classifier = c_drop_1(classifier)
    classifier = c_conv_2(classifier)
    classifier = c_max_pool_2(classifier)
    classifier = c_drop_2(classifier)
    classifier = c_conv_3(classifier)
    classifier = c_max_pool_3(classifier)
    classifier = c_drop_3(classifier)
    classifier = c_flatten(classifier)

    classifier = c_dense_1(classifier)
    classifier = c_drop_4(classifier)
    classifier = c_dense_2(classifier)
    classifier = c_drop_5(classifier)
    classifier = c_dense_3(classifier)
    classifier = c_drop_6(classifier)
    classifier = c_dense_output(classifier)

    classifier = Model(inputs=input_shape, outputs=classifier)

    # Initializes the classifier's layers with weights
    classifier.load_weights(classifier_weights)

    combined_model = dense_2(bottleneck_input_shape)
    combined_model = reshape(combined_model)
    combined_model = conv_5(combined_model)
    combined_model = up_samp_1(combined_model)
    combined_model = conv_6(combined_model)
    combined_model = up_samp_2(combined_model)
    combined_model = conv_7(combined_model)
    combined_model = up_samp_3(combined_model)
    combined_model = decoded(combined_model)

    combined_model = c_conv_1(combined_model)
    combined_model = c_max_pool_1(combined_model)
    combined_model = c_drop_1(combined_model)
    combined_model = c_conv_2(combined_model)
    combined_model = c_max_pool_2(combined_model)
    combined_model = c_drop_2(combined_model)
    combined_model = c_conv_3(combined_model)
    combined_model = c_max_pool_3(combined_model)
    combined_model = c_drop_3(combined_model)
    combined_model = c_flatten(combined_model)

    combined_model = c_dense_1(combined_model)
    combined_model = c_drop_4(combined_model)
    combined_model = c_dense_2(combined_model)
    combined_model = c_drop_5(combined_model)
    combined_model = c_dense_3(combined_model)
    combined_model = c_drop_6(combined_model)
    combined_model = c_dense_output(combined_model)

    full_model = Model(inputs=bottleneck_input_shape, outputs=combined_model)

    return full_model, decoder_model
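A hypothetical call; the .h5 filenames are placeholders for whatever weight files the project produced, and the random bottleneck vector is only there to show the expected input and output shapes.

import numpy as np

full_model, decoder_model = init_models('autoencoder_weights.h5', 'classifier_weights.h5')

code = np.random.normal(size=(1, 256))
class_probs = full_model.predict(code)        # (1, 35) softmax over classes
reconstruction = decoder_model.predict(code)  # (1, 90, 80, 1) decoded image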
Example #9
    def __init__(self, im_shape):
        img = Input(shape=(im_shape[0], im_shape[1], 1))
        [pr6, pr5, pr4, pr3, pr2, pr1] = DispNet(img)
        depth = UpSampling2D(name='y_pred')(pr1)
        self.model = Model(input=img, output=depth)
Example #10
def repeat_output(input):
    # Tile the flat VGG16 feature vector over a 4 x 4 spatial grid (4 x 4 x 256).
    shape = K.shape(input)
    return K.reshape(K.repeat(input, 4 * 4), (shape[0], 4, 4, 256))


vgg_output = Lambda(repeat_output)(vgg_output)

# freeze vgg16
for layer in vgg16.layers:
    layer.trainable = False

# concatenated net
merged = concatenate([vgg_output, main_output], axis=3)

last = Conv2D(128, (3, 3), padding="same")(merged)

last = UpSampling2D(size=(2, 2))(last)
last = Conv2D(64, (3, 3),
              padding="same",
              activation="relu",
              kernel_regularizer=regularizers.l2(0.01))(last)
last = Conv2D(64, (3, 3),
              padding="same",
              activation="relu",
              kernel_regularizer=regularizers.l2(0.01))(last)

last = UpSampling2D(size=(2, 2))(last)
last = Conv2D(32, (3, 3),
              padding="same",
              activation="relu",
              kernel_regularizer=regularizers.l2(0.01))(last)
last = Conv2D(2, (3, 3), padding="same")(last)
Example #11
    def build_conv_max_pool_architecture(self):
        # Encoder
        e1 = Convolution2D(48,
                           4,
                           4,
                           activation='relu',
                           border_mode='same',
                           name='e1')(self.autoencoder_input)
        e2 = MaxPooling2D((2, 2), border_mode='same', name='e2')(e1)
        e3 = Convolution2D(64,
                           2,
                           2,
                           activation='relu',
                           border_mode='same',
                           name='e3')(e2)
        e4 = MaxPooling2D((3, 3), border_mode='same', name='e4')(e3)
        e5 = Dense(self.latent_shape, activation='relu',
                   name='e5')(flatten(e4))
        self.encoder_output = e5

        # Decoder layers
        d1 = Dense(3136, activation='relu', name='d1')
        d2 = Reshape((14, 14, 16), name='d2')
        d3 = Convolution2D(64,
                           2,
                           2,
                           activation='relu',
                           border_mode='same',
                           name='d3')
        d4 = UpSampling2D((3, 3), name='d4')
        d5 = Convolution2D(48,
                           4,
                           4,
                           activation='relu',
                           border_mode='same',
                           name='d5')
        d6 = UpSampling2D((2, 2), name='d6')
        d7 = Convolution2D(1,
                           4,
                           4,
                           activation='sigmoid',
                           border_mode='same',
                           name='d7')

        # Full autoencoder
        d1_full = d1(self.encoder_output)
        d2_full = d2(d1_full)
        d3_full = d3(d2_full)
        d4_full = d4(d3_full)
        d5_full = d5(d4_full)
        d6_full = d6(d5_full)
        d7_full = d7(d6_full)

        # Only decoding
        d1_decoder = d1(self.decoder_input)
        d2_decoder = d2(d1_decoder)
        d3_decoder = d3(d2_decoder)
        d4_decoder = d4(d3_decoder)
        d5_decoder = d5(d4_decoder)
        d6_decoder = d6(d5_decoder)
        d7_decoder = d7(d6_decoder)

        self.decoder_output = d7_decoder
        self.autoencoder_output = d7_full

        # MSE autoencoder reconstruction loss (with attention)
        reconstruction_loss = tf.pow(
            tf.multiply(
                tf.subtract(self.autoencoder_input, self.autoencoder_output),
                self.autoencoder_movement_focus_input), 2)
        mean_reconstruction_loss = tf.reduce_mean(reconstruction_loss)
        self.emulator_reconstruction_loss = tf.reduce_mean(tf.reshape(
            reconstruction_loss, (self.emulator_counts, 84 * 84)),
                                                           axis=-1)
        self.autoencoder_loss = mean_reconstruction_loss
Example #12
x_test_small = np.array(x_test_small)

print("Successfuly generated appropriate data")

# add a trailing channel axis so the arrays are (N, H, W, 1), as TensorFlow expects
x_train = np.expand_dims(x_train, -1)
x_train_small = np.expand_dims(x_train_small, -1)
x_test = np.expand_dims(x_test, -1)
x_test_small = np.expand_dims(x_test_small, -1)

# Build model
input_img = Input(
    shape=(x_train_small.shape[1], x_train_small.shape[2],
           1))  # adapt this if using `channels_first` image data format
x = UpSampling2D((2, 2), interpolation='bilinear')(input_img)
x = Conv2D(64, (9, 9), activation='relu', padding='same')(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = Conv2D(1, (3, 3), activation='relu', padding='same')(x)

upsample = Model(input_img, x)
upsample.compile(optimizer='adadelta',
                 loss='mean_squared_error',
                 metrics=[PSNR])

#Train the model
upsample.fit(x_train_small,
             x_train,
             epochs=epochs,
             batch_size=batch_size,
             shuffle=True)
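A hedged follow-up, not in the original listing: evaluating the trained upsampler on the held-out small images.

score = upsample.evaluate(x_test_small, x_test, batch_size=batch_size)
print('test loss / PSNR:', score)
preds = upsample.predict(x_test_small[:4])  # a few upscaled samples for visual inspection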
Example #13
    def get_unet(self):

        # inputs = Input((self.img_rows, self.img_cols,1))
        inputs = Input((self.img_rows, self.img_cols, 3))
        '''
		unet with crop(because padding = valid) 

		conv1 = Conv2D(64, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(inputs)
		print "conv1 shape:",conv1.shape
		conv1 = Conv2D(64, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv1)
		print "conv1 shape:",conv1.shape
		crop1 = Cropping2D(cropping=((90,90),(90,90)))(conv1)
		print "crop1 shape:",crop1.shape
		pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
		print "pool1 shape:",pool1.shape

		conv2 = Conv2D(128, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(pool1)
		print "conv2 shape:",conv2.shape
		conv2 = Conv2D(128, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv2)
		print "conv2 shape:",conv2.shape
		crop2 = Cropping2D(cropping=((41,41),(41,41)))(conv2)
		print "crop2 shape:",crop2.shape
		pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
		print "pool2 shape:",pool2.shape

		conv3 = Conv2D(256, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(pool2)
		print "conv3 shape:",conv3.shape
		conv3 = Conv2D(256, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv3)
		print "conv3 shape:",conv3.shape
		crop3 = Cropping2D(cropping=((16,17),(16,17)))(conv3)
		print "crop3 shape:",crop3.shape
		pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
		print "pool3 shape:",pool3.shape

		conv4 = Conv2D(512, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(pool3)
		conv4 = Conv2D(512, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv4)
		drop4 = Dropout(0.5)(conv4)
		crop4 = Cropping2D(cropping=((4,4),(4,4)))(drop4)
		pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

		conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(pool4)
		conv5 = Conv2D(1024, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv5)
		drop5 = Dropout(0.5)(conv5)

		up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
		merge6 = merge([crop4,up6], mode = 'concat', concat_axis = 3)
		conv6 = Conv2D(512, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(merge6)
		conv6 = Conv2D(512, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv6)

		up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
		merge7 = merge([crop3,up7], mode = 'concat', concat_axis = 3)
		conv7 = Conv2D(256, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(merge7)
		conv7 = Conv2D(256, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv7)

		up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
		merge8 = merge([crop2,up8], mode = 'concat', concat_axis = 3)
		conv8 = Conv2D(128, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(merge8)
		conv8 = Conv2D(128, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv8)

		up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
		merge9 = merge([crop1,up9], mode = 'concat', concat_axis = 3)
		conv9 = Conv2D(64, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(merge9)
		conv9 = Conv2D(64, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv9)
		conv9 = Conv2D(2, 3, activation = 'relu', padding = 'valid', kernel_initializer = 'he_normal')(conv9)
		'''

        conv1 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(inputs)
        print("conv1 shape:", conv1.shape)
        conv1 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv1)
        print("conv1 shape:", conv1.shape)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
        print("pool1 shape:", pool1.shape)

        conv2 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool1)
        print("conv2 shape:", conv2.shape)
        conv2 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv2)
        print("conv2 shape:", conv2.shape)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
        print("pool2 shape:", pool2.shape)

        conv3 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool2)
        print("conv3 shape:", conv3.shape)
        conv3 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv3)
        print("conv3 shape:", conv3.shape)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
        print("pool3 shape:", pool3.shape)

        conv4 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool3)
        conv4 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv4)
        drop4 = Dropout(0.5)(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

        conv5 = Conv2D(1024,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool4)
        conv5 = Conv2D(1024,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv5)
        drop5 = Dropout(0.5)(conv5)

        up6 = Conv2D(512,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(drop5))
        merge6 = merge([drop4, up6], mode='concat', concat_axis=3)
        conv6 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge6)
        conv6 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv6)

        up7 = Conv2D(256,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(conv6))
        merge7 = merge([conv3, up7], mode='concat', concat_axis=3)
        conv7 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge7)
        conv7 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv7)

        up8 = Conv2D(128,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(conv7))
        merge8 = merge([conv2, up8], mode='concat', concat_axis=3)
        conv8 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge8)
        conv8 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv8)

        up9 = Conv2D(64,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(conv8))
        merge9 = merge([conv1, up9], mode='concat', concat_axis=3)
        conv9 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge9)
        conv9 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv9)
        conv9 = Conv2D(2,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv9)
        conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

        model = Model(input=inputs, output=conv10)

        model.compile(optimizer=Adam(lr=1e-4),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

        return model
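A hedged smoke-test sketch, run from inside the class that defines img_rows and img_cols; the zero-filled arrays are placeholders standing in for real images and binary masks.

import numpy as np

model = self.get_unet()
dummy_images = np.zeros((4, self.img_rows, self.img_cols, 3), dtype='float32')
dummy_masks = np.zeros((4, self.img_rows, self.img_cols, 1), dtype='float32')
model.fit(dummy_images, dummy_masks, batch_size=2, epochs=1)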
Example #14
    def Network(self):
        inputs = Input(shape=(self.img_size[0], self.img_size[1], 1),
                       batch_shape=None)
        conv1 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(inputs)
        conv1 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        conv2 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool1)
        conv2 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool2)
        conv3 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv3)
        pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool3)
        conv4 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv4)
        drop4 = Dropout(0.5)(conv4)
        pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

        conv5 = Conv2D(1024,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(pool4)
        conv5 = Conv2D(1024,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv5)
        drop5 = Dropout(0.5)(conv5)

        up6 = Conv2D(512,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(drop5))
        merge6 = merge([drop4, up6], mode='concat', concat_axis=3)
        conv6 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge6)
        conv6 = Conv2D(512,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv6)

        up7 = Conv2D(256,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(conv6))
        merge7 = merge([conv3, up7], mode='concat', concat_axis=3)
        conv7 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge7)
        conv7 = Conv2D(256,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv7)

        up8 = Conv2D(128,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(conv7))
        merge8 = merge([conv2, up8], mode='concat', concat_axis=3)
        conv8 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge8)
        conv8 = Conv2D(128,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv8)

        up9 = Conv2D(64,
                     2,
                     activation='relu',
                     padding='same',
                     kernel_initializer='he_normal')(
                         UpSampling2D(size=(2, 2))(conv8))
        merge9 = merge([conv1, up9], mode='concat', concat_axis=3)
        conv9 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(merge9)
        conv9 = Conv2D(64,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv9)
        conv9 = Conv2D(2,
                       3,
                       activation='relu',
                       padding='same',
                       kernel_initializer='he_normal')(conv9)
        conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

        model = Model(input=inputs, output=conv10)

        return model
Example #15
def gan_2(input_img,
          input_channel=3,
          z_num=64,
          no_of_pairs=5,
          hidden_num=128,
          activation_fn=tf.nn.elu,
          noise_dim=64):
    # Encoder
    encoder_layer_list = []
    x = Conv2D(hidden_num,
               kernel_size=3,
               strides=1,
               activation=activation_fn,
               padding='same')(input_img)
    prev_channel_num = hidden_num

    for idx in range(no_of_pairs):
        # to increase number of filter by (filters)*(index+1) ex: 16, 32, 48 ...
        channel_num = hidden_num * (idx + 1)

        res = x
        x = Conv2D(channel_num,
                   kernel_size=3,
                   strides=1,
                   activation=activation_fn,
                   padding='same')(x)
        x = Conv2D(channel_num,
                   kernel_size=3,
                   strides=1,
                   activation=activation_fn,
                   padding='same')(x)

        if idx > 0:
            encoder_layer_list.append(x)
        if idx < no_of_pairs - 1:
            x = Conv2D(channel_num,
                       kernel_size=3,
                       strides=2,
                       activation=activation_fn,
                       padding='same')(x)

    if noise_dim > 0:
        x = Lambda(get_noise)(x)

    for idx in range(no_of_pairs):
        if idx < no_of_pairs - 1:
            x = Concatenate(axis=-1)(
                [x, encoder_layer_list[no_of_pairs - 2 - idx]])

        channel_num = x.get_shape().as_list()[-1]
        x = Conv2D(hidden_num,
                   kernel_size=3,
                   strides=1,
                   activation=activation_fn,
                   padding='same')(x)
        x = Conv2D(hidden_num,
                   kernel_size=3,
                   strides=1,
                   activation=activation_fn,
                   padding='same')(x)

        if idx < no_of_pairs - 1:
            x = UpSampling2D(2)(x)

    out = Conv2D(input_channel,
                 kernel_size=3,
                 strides=1,
                 activation=None,
                 padding='same')(x)

    return out
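A hypothetical wrapper, assuming Input and Model are imported alongside the layers gan_2 already uses: the function returns an output tensor, so a Model has to be built around an explicit input. noise_dim=0 skips the get_noise Lambda (defined elsewhere in the original project), and the 64 x 64 x 3 shape is an assumption that divides evenly through the four stride-2 stages.

inp = Input(shape=(64, 64, 3))
out = gan_2(inp, noise_dim=0)
generator = Model(inp, out)
generator.summary()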
Example #16
def _main(args):
    config_path = os.path.expanduser(args.config_path)
    weights_path = os.path.expanduser(args.weights_path)
    assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(
        config_path)
    assert weights_path.endswith(
        '.weights'), '{} is not a .weights file'.format(weights_path)

    output_path = os.path.expanduser(args.output_path)
    assert output_path.endswith(
        '.h5'), 'output path {} is not a .h5 file'.format(output_path)
    output_root = os.path.splitext(output_path)[0]

    # Load weights and config.
    print('Loading weights.')
    weights_file = open(weights_path, 'rb')
    major, minor, revision = np.ndarray(shape=(3, ),
                                        dtype='int32',
                                        buffer=weights_file.read(12))
    if (major * 10 + minor) >= 2 and major < 1000 and minor < 1000:
        seen = np.ndarray(shape=(1, ),
                          dtype='int64',
                          buffer=weights_file.read(8))
    else:
        seen = np.ndarray(shape=(1, ),
                          dtype='int32',
                          buffer=weights_file.read(4))
    print('Weights Header: ', major, minor, revision, seen)

    print('Parsing Darknet config.')
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)

    print('Creating Keras model.')
    input_layer = Input(shape=(None, None, 3))
    prev_layer = input_layer
    all_layers = []

    weight_decay = float(cfg_parser['net_0']['decay']
                         ) if 'net_0' in cfg_parser.sections() else 5e-4
    count = 0
    out_index = []
    for section in cfg_parser.sections():
        print('Parsing section {}'.format(section))
        if section.startswith('convolutional'):
            filters = int(cfg_parser[section]['filters'])
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            pad = int(cfg_parser[section]['pad'])
            activation = cfg_parser[section]['activation']
            batch_normalize = 'batch_normalize' in cfg_parser[section]

            padding = 'same' if pad == 1 and stride == 1 else 'valid'

            # Setting weights.
            # Darknet serializes convolutional weights as:
            # [bias/beta, [gamma, mean, variance], conv_weights]
            prev_layer_shape = K.int_shape(prev_layer)

            weights_shape = (size, size, prev_layer_shape[-1], filters)
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.prod(weights_shape)

            print('conv2d', 'bn' if batch_normalize else '  ', activation,
                  weights_shape)

            conv_bias = np.ndarray(shape=(filters, ),
                                   dtype='float32',
                                   buffer=weights_file.read(filters * 4))
            count += filters

            if batch_normalize:
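                # gamma, running mean and running variance: 3 float32 values per filter -> filters * 12 bytes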
                bn_weights = np.ndarray(shape=(3, filters),
                                        dtype='float32',
                                        buffer=weights_file.read(filters * 12))
                count += 3 * filters

                bn_weight_list = [
                    bn_weights[0],  # scale gamma
                    conv_bias,  # shift beta
                    bn_weights[1],  # running mean
                    bn_weights[2]  # running var
                ]

            conv_weights = np.ndarray(shape=darknet_w_shape,
                                      dtype='float32',
                                      buffer=weights_file.read(weights_size *
                                                               4))
            count += weights_size

            # DarkNet conv_weights are serialized Caffe-style:
            # (out_dim, in_dim, height, width)
            # We would like to set these to Tensorflow order:
            # (height, width, in_dim, out_dim)
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            conv_weights = [conv_weights] if batch_normalize else [
                conv_weights, conv_bias
            ]

            # Handle activation.
            act_fn = None
            if activation == 'leaky':
                pass  # Add advanced activation later.
            elif activation != 'linear':
                raise ValueError(
                    'Unknown activation function `{}` in section {}'.format(
                        activation, section))

            # Create Conv2D layer
            if stride > 1:
                # Darknet uses left and top padding instead of 'same' mode
                prev_layer = ZeroPadding2D(((1, 0), (1, 0)))(prev_layer)
            conv_layer = (Conv2D(filters, (size, size),
                                 strides=(stride, stride),
                                 kernel_regularizer=l2(weight_decay),
                                 use_bias=not batch_normalize,
                                 weights=conv_weights,
                                 activation=act_fn,
                                 padding=padding))(prev_layer)

            if batch_normalize:
                conv_layer = (BatchNormalization(
                    weights=bn_weight_list))(conv_layer)
            prev_layer = conv_layer

            if activation == 'linear':
                all_layers.append(prev_layer)
            elif activation == 'leaky':
                act_layer = LeakyReLU(alpha=0.1)(prev_layer)
                prev_layer = act_layer
                all_layers.append(act_layer)

        elif section.startswith('route'):
            ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
            layers = [all_layers[i] for i in ids]
            if len(layers) > 1:
                print('Concatenating route layers:', layers)
                concatenate_layer = Concatenate()(layers)
                all_layers.append(concatenate_layer)
                prev_layer = concatenate_layer
            else:
                skip_layer = layers[0]  # only one layer to route
                all_layers.append(skip_layer)
                prev_layer = skip_layer

        elif section.startswith('maxpool'):
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            all_layers.append(
                MaxPooling2D(pool_size=(size, size),
                             strides=(stride, stride),
                             padding='same')(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('shortcut'):
            index = int(cfg_parser[section]['from'])
            activation = cfg_parser[section]['activation']
            assert activation == 'linear', 'Only linear activation supported.'
            all_layers.append(Add()([all_layers[index], prev_layer]))
            prev_layer = all_layers[-1]

        elif section.startswith('upsample'):
            stride = int(cfg_parser[section]['stride'])
            assert stride == 2, 'Only stride=2 supported.'
            all_layers.append(UpSampling2D(stride)(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('yolo'):
            out_index.append(len(all_layers) - 1)
            all_layers.append(None)
            prev_layer = all_layers[-1]

        elif section.startswith('net'):
            pass

        else:
            raise ValueError(
                'Unsupported section header type: {}'.format(section))

    # Create and save model.
    if len(out_index) == 0: out_index.append(len(all_layers) - 1)
    model = Model(inputs=input_layer,
                  outputs=[all_layers[i] for i in out_index])
    print(model.summary())
    if args.weights_only:
        model.save_weights('{}'.format(output_path))
        print('Saved Keras weights to {}'.format(output_path))
    else:
        model.save('{}'.format(output_path))
        print('Saved Keras model to {}'.format(output_path))

    # Check to see if all weights have been read.
    remaining_weights = len(weights_file.read()) / 4
    weights_file.close()
    print('Read {} of {} from Darknet weights.'.format(
        count, count + remaining_weights))
    if remaining_weights > 0:
        print('Warning: {} unused weights'.format(remaining_weights))

    if args.plot_model:
        plot(model, to_file='{}.png'.format(output_root), show_shapes=True)
        print('Saved model plot to {}.png'.format(output_root))
Example #17
0
def build_ae(vgg16_encoder, x_train_shape, class_or_regr):
    """Build your autoencoder in here."""

    encoder_input = Input(shape=x_train_shape, name='encoder_input')

    # ---------- block1

    x = vgg16_encoder.get_layer('block1_conv1')(encoder_input)
    x = BatchNormalization()(x)
    x = vgg16_encoder.get_layer('block1_conv2')(x)
    x = BatchNormalization()(x)
    x = vgg16_encoder.get_layer('block1_pool')(x)

    # ---------- block2

    x = vgg16_encoder.get_layer('block2_conv1')(x)
    x = BatchNormalization()(x)
    x = vgg16_encoder.get_layer('block2_conv2')(x)
    x = BatchNormalization()(x)
    x = vgg16_encoder.get_layer('block2_pool')(x)

    # ---------- block3

    x = vgg16_encoder.get_layer('block3_conv1')(x)
    x = BatchNormalization()(x)
    x = vgg16_encoder.get_layer('block3_conv2')(x)
    x = BatchNormalization()(x)
    x = vgg16_encoder.get_layer('block3_conv3')(x)
    x = BatchNormalization()(x)
    x = vgg16_encoder.get_layer('block3_pool')(x)

    # ---------- block4

    x = vgg16_encoder.get_layer('block4_conv1')(x)
    x = BatchNormalization()(x)
    x = vgg16_encoder.get_layer('block4_conv2')(x)
    x = BatchNormalization()(x)
    x = vgg16_encoder.get_layer('block4_conv3')(x)
    x = BatchNormalization()(x)
    x = vgg16_encoder.get_layer('block4_pool')(x)

    # ---------- block5

    x = vgg16_encoder.get_layer('block5_conv1')(x)
    x = BatchNormalization()(x)
    x = vgg16_encoder.get_layer('block5_conv2')(x)
    x = BatchNormalization()(x)
    encoded = vgg16_encoder.get_layer('block5_conv3')(x)

    # ---------- block5
    x = Conv2DTranspose(512, (3, 3), activation='relu',
                        padding='same')(encoded)
    x = BatchNormalization()(x)
    x = Conv2DTranspose(512, (3, 3), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)
    x = Conv2DTranspose(512, (3, 3), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)

    # ---------- block4

    x = UpSampling2D((2, 2))(x)
    x = Conv2DTranspose(512, (3, 3), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)
    x = Conv2DTranspose(512, (3, 3), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)
    x = Conv2DTranspose(512, (3, 3), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)

    # ---------- block3

    x = UpSampling2D((2, 2))(x)
    x = Conv2DTranspose(256, (3, 3), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)
    x = Conv2DTranspose(256, (3, 3), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)
    x = Conv2DTranspose(256, (3, 3), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)

    # ---------- block2

    x = UpSampling2D((2, 2))(x)
    x = Conv2DTranspose(128, (3, 3), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)
    x = Conv2DTranspose(128, (3, 3), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)

    # ---------- block1

    x = UpSampling2D((2, 2))(x)
    x = Conv2DTranspose(64, (3, 3), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)
    x = Conv2DTranspose(64, (3, 3), activation='relu', padding='same')(x)
    x = BatchNormalization()(x)

    if class_or_regr == 0:
        final_filters = 3
    elif class_or_regr == 1:
        final_filters = 29
    else:
        raise ValueError('class_or_regr must be 0 or 1')

    decoder_output = Conv2DTranspose(final_filters, (3, 3),
                                     activation='softmax',
                                     padding='same')(x)
    autoencoder = Model(encoder_input, decoder_output)

    return autoencoder
Example #18
0
                        padding='same')(encoder_output)
encoder_output = Conv2D(256, (3, 3), activation='relu',
                        padding='same')(encoder_output)

#Fusion
embed_input = Input(shape=(1000, ))
fusion_output = RepeatVector(32 * 32)(embed_input)
fusion_output = Reshape(([32, 32, 1000]))(fusion_output)
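# RepeatVector + Reshape tile the 1000-d embedding over the 32x32 spatial grid so it can be concatenated with the encoder features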
fusion_output = concatenate([encoder_output, fusion_output], axis=3)
fusion_output = Conv2D(256, (1, 1), activation='relu',
                       padding='same')(fusion_output)

#Decoder
decoder_output = Conv2D(128, (3, 3), activation='relu',
                        padding='same')(fusion_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)
decoder_output = Conv2D(64, (3, 3), activation='relu',
                        padding='same')(decoder_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)
decoder_output = Conv2D(32, (3, 3), activation='relu',
                        padding='same')(decoder_output)
decoder_output = Conv2D(16, (3, 3), activation='relu',
                        padding='same')(decoder_output)
decoder_output = Conv2D(2, (3, 3), activation='tanh',
                        padding='same')(decoder_output)
decoder_output = UpSampling2D((2, 2))(decoder_output)

model = Model(inputs=[encoder_input, embed_input], outputs=decoder_output)


def create_inception_embedding(grayscaled_rgb):
Example #19
0
auto_encoder = Sequential()
auto_encoder.add(
    Conv2D(10, (8, 8),
           activation='relu',
           padding='same',
           input_shape=input_shape))
auto_encoder.add(
    Conv2D(10, (8, 8),
           activation='relu',
           padding='same',
           input_shape=input_shape))
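# note: Keras only uses input_shape on the first layer of a Sequential model;
# the repeated input_shape arguments below have no effect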
auto_encoder.add(MaxPooling2D((2, 2), padding='same'))
auto_encoder.add(
    Conv2D(10, (8, 8),
           activation='relu',
           padding='same',
           input_shape=input_shape))
auto_encoder.add(UpSampling2D((2, 2)))
auto_encoder.add(
    Conv2D(10, (8, 8),
           activation='relu',
           padding='same',
           input_shape=input_shape))
auto_encoder.add(Conv2D(10, (8, 8), activation='relu', padding='same'))
auto_encoder.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same'))

#**********************************model compile*********************************************

auto_encoder.compile(optimizer='sgd', loss='mean_squared_error')
auto_encoder.summary()

#****************************************training*******************************************
for q in range(5):
Example #20
0
def get_resnet(f=16, bn_axis=3, classes=1):
    input = Input((img_rows, img_cols, 1))
    x = ZeroPadding2D((4, 4))(input)
    x = Conv2D(f, (7, 7), strides=(2, 2), name='conv1')(x)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [f, f, f * 2], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [f, f, f * 2], stage=2, block='b')
    x2 = identity_block(x, 3, [f, f, f * 2], stage=2, block='c')

    x = conv_block(x2, 3, [f * 2, f * 2, f * 4], stage=3, block='a')
    x = identity_block(x, 3, [f * 2, f * 2, f * 4], stage=3, block='b')
    x3 = identity_block(x, 3, [f * 2, f * 2, f * 4], stage=3, block='d')

    x = conv_block(x3, 3, [f * 4, f * 4, f * 8], stage=4, block='a')
    x = identity_block(x, 3, [f * 4, f * 4, f * 8], stage=4, block='b')
    x4 = identity_block(x, 3, [f * 4, f * 4, f * 8], stage=4, block='f')

    x = conv_block(x4, 3, [f * 8, f * 8, f * 16], stage=5, block='a')
    x = identity_block(x, 3, [f * 8, f * 8, f * 16], stage=5, block='b')
    x = identity_block(x, 3, [f * 8, f * 8, f * 16], stage=5, block='c')

    x = up_conv_block(x, 3, [f * 16, f * 8, f * 8], stage=6, block='a')
    x = identity_block(x, 3, [f * 16, f * 8, f * 8], stage=6, block='b')
    x = identity_block(x, 3, [f * 16, f * 8, f * 8], stage=6, block='c')

    x = concatenate([x, x4], axis=bn_axis)

    x = up_conv_block(x, 3, [f * 16, f * 4, f * 4], stage=7, block='a')
    x = identity_block(x, 3, [f * 16, f * 4, f * 4], stage=7, block='b')

    x = identity_block(x, 3, [f * 16, f * 4, f * 4], stage=7, block='f')

    x = concatenate([x, x3], axis=bn_axis)

    x = up_conv_block(x, 3, [f * 8, f * 2, f * 2], stage=8, block='a')
    x = identity_block(x, 3, [f * 8, f * 2, f * 2], stage=8, block='b')
    x = identity_block(x, 3, [f * 8, f * 2, f * 2], stage=8, block='d')

    x = concatenate([x, x2], axis=bn_axis)

    x = up_conv_block(x, 3, [f * 4, f, f], stage=10, block='a', strides=(1, 1))
    x = identity_block(x, 3, [f * 4, f, f], stage=10, block='b')
    x = identity_block(x, 3, [f * 4, f, f], stage=10, block='c')

    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(classes, (3, 3),
               padding='same',
               activation='sigmoid',
               name='convLast')(x)

    model = Model(input, x, name='resnetUnet')
    model.compile(
        optimizer=Adam(lr=3e-4),
        loss=dice_coef_loss,
        metrics=[dice_coef, defect_accuracy, precision, recall, f1score])

    model.summary()

    return model
Example #21
0
def model(input_shape, kernel_size, optimizer='adam'):
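    # Note: despite its name, `kernel_size` is used below as the base number of filters;
    # every convolution uses a fixed (3, 3) spatial kernel.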

    model = Sequential()

    model.add(
        Conv2D(kernel_size, (3, 3),
               input_shape=input_shape,
               activation='relu',
               padding='same',
               strides=2))
    model.add(
        Conv2D(kernel_size * 2, (3, 3),
               input_shape=input_shape,
               activation='relu',
               padding='same'))

    model.add(
        Conv2D(kernel_size * 4, (3, 3),
               input_shape=input_shape,
               activation='relu',
               padding='same',
               strides=2))
    model.add(
        Conv2D(kernel_size * 4, (3, 3),
               input_shape=input_shape,
               activation='relu',
               padding='same'))

    model.add(
        Conv2D(kernel_size * 8, (3, 3),
               input_shape=input_shape,
               activation='relu',
               padding='same',
               strides=2))
    model.add(
        Conv2D(kernel_size * 8, (3, 3),
               input_shape=input_shape,
               activation='relu',
               padding='same'))

    model.add(
        Conv2D(kernel_size * 16, (3, 3),
               input_shape=input_shape,
               activation='relu',
               padding='same'))

    model.add(UpSampling2D((2, 2)))
    model.add(
        Conv2D(kernel_size * 16, (3, 3),
               input_shape=input_shape,
               activation='relu',
               padding='same'))

    model.add(UpSampling2D((2, 2)))

    model.add(
        Conv2D(kernel_size * 8, (3, 3),
               input_shape=input_shape,
               activation='relu',
               padding='same'))
    model.add(
        Conv2D(kernel_size * 4, (3, 3),
               input_shape=input_shape,
               activation='relu',
               padding='same'))
    model.add(
        Conv2D(kernel_size * 2, (3, 3),
               input_shape=input_shape,
               activation='relu',
               padding='same'))

    model.add(Conv2D(2, (3, 3), activation='tanh', padding='same'))
    model.add(UpSampling2D((2, 2)))

    model.compile(optimizer=optimizer, loss='mse')

    print(model.summary())

    return model
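
A hypothetical call for the function above (the input size and base filter count are assumptions; the three stride-2 convolutions and three UpSampling2D layers require a spatial size divisible by 8):

colorization_net = model(input_shape=(256, 256, 1), kernel_size=16)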
Example #22
0
    k_size = 3
    model = models.Sequential()
    model.add(Conv2D(64, kernel_size=(k_size, k_size), strides=(1, 1),
                     activation='relu',
                     input_shape=input_shape, padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(32, (k_size, k_size), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Conv2D(8, (k_size, k_size), activation='relu', padding='same'))
    # model.add(MaxPooling2D(pool_size=(2, 2)))
    # ### Encoded
    # model.add(Conv2D(8, (k_size, k_size), activation='relu', padding='same'))
    # model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(16, (k_size, k_size), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(32, (k_size, k_size), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(1, (k_size, k_size), activation='sigmoid', padding='same'))
    model.summary()

    model.compile(optimizer='adam', loss='binary_crossentropy')
    # model.compile(optimizer='adam', loss='mean_squared_error')

    # N_total = N_copies * N_samples
    # N_train = N_total * 80 // 100
    # train_noisy, train_clean = PSF_img_photon[:N_train], PSF_img_copy[:N_train]
    # test_noisy, test_clean = PSF_img_photon[N_train:], PSF_img_copy[N_train:]

    N_copies = 15
    N_loops = 15
Example #23
0
    def build_model(self):
        inputs = Input((self.patch_height, self.patch_width, 1))
        conv1 = Conv2D(32, (3, 3), padding='same')(inputs)  # 'valid'
        conv1 = LeakyReLU(alpha=0.3)(conv1)
        conv1 = Dropout(0.2)(conv1)
        conv1 = normalization.BatchNormalization(
            epsilon=2e-05,
            axis=3,
            momentum=0.9,
            weights=None,
            beta_initializer='RandomNormal',
            gamma_initializer='one')(conv1)
        conv1 = Conv2D(32, (3, 3), dilation_rate=2, padding='same')(conv1)
        conv1 = LeakyReLU(alpha=0.3)(conv1)
        conv1 = Conv2D(32, (3, 3), dilation_rate=4, padding='same')(conv1)
        conv1 = LeakyReLU(alpha=0.3)(conv1)
        pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

        # pool1 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(pool1)
        conv2 = Conv2D(64, (3, 3), padding='same')(
            pool1)  # ,activation='relu', padding='same')(pool1)
        conv2 = normalization.BatchNormalization(
            epsilon=2e-05,
            axis=3,
            momentum=0.9,
            weights=None,
            beta_initializer='RandomNormal',
            gamma_initializer='one')(conv2)
        conv2 = LeakyReLU(alpha=0.3)(conv2)
        conv2 = Dropout(0.2)(conv2)
        conv2 = Conv2D(64, (3, 3), dilation_rate=2, padding='same')(conv2)
        conv2 = LeakyReLU(alpha=0.3)(conv2)
        conv2 = Conv2D(64, (3, 3), dilation_rate=4, padding='same')(
            conv2)  # ,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv2)
        conv2 = LeakyReLU(alpha=0.3)(conv2)
        pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

        # crop = Cropping2D(cropping=((int(3 * patch_height / 8), int(3 * patch_height / 8)), (int(3 * patch_width / 8), int(3 * patch_width / 8))))(conv1)
        # conv3 = concatenate([crop,pool2], axis=1)
        conv3 = Conv2D(128, (3, 3), padding='same')(
            pool2)  # , activation='relu', padding='same')(conv3)
        conv3 = normalization.BatchNormalization(
            epsilon=2e-05,
            axis=3,
            momentum=0.9,
            weights=None,
            beta_initializer='RandomNormal',
            gamma_initializer='one')(conv3)
        conv3 = LeakyReLU(alpha=0.3)(conv3)
        conv3 = Dropout(0.2)(conv3)
        conv3 = Conv2D(128, (3, 3), dilation_rate=2, padding='same')(
            conv3)  # ,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv3)
        conv3 = normalization.BatchNormalization(
            epsilon=2e-05,
            axis=3,
            momentum=0.9,
            weights=None,
            beta_initializer='RandomNormal',
            gamma_initializer='one')(conv3)
        conv3 = LeakyReLU(alpha=0.3)(conv3)

        conv3 = Conv2D(128, (3, 3), dilation_rate=4, padding='same')(conv3)
        conv3 = normalization.BatchNormalization(
            epsilon=2e-05,
            axis=3,
            momentum=0.9,
            weights=None,
            beta_initializer='RandomNormal',
            gamma_initializer='one')(conv3)
        conv3 = LeakyReLU(alpha=0.3)(conv3)

        # up1 = UpSampling2D(size=(2, 2))(conv3)
        up1 = concatenate([UpSampling2D(size=(2, 2))(conv3), conv2], axis=3)
        conv4 = Conv2D(64, (3, 3), padding='same')(up1)
        conv4 = LeakyReLU(alpha=0.3)(conv4)
        conv4 = Dropout(0.2)(conv4)
        conv4 = Conv2D(64, (3, 3), padding='same')(conv4)
        conv4 = LeakyReLU(alpha=0.3)(conv4)
        # conv4 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv4)
        #
        # up2 = UpSampling2D(size=(2, 2))(conv4)
        up2 = concatenate([UpSampling2D(size=(2, 2))(conv4), conv1], axis=3)
        conv5 = Conv2D(32, (3, 3), padding='same')(up2)
        conv5 = LeakyReLU(alpha=0.3)(conv5)
        conv5 = Dropout(0.2)(conv5)
        conv5 = Conv2D(32, (3, 3), padding='same')(conv5)
        conv5 = LeakyReLU(alpha=0.3)(conv5)

        conv6 = Conv2D(self.num_seg_class + 1, (1, 1), padding='same')(conv5)
        conv6 = LeakyReLU(alpha=0.3)(conv6)
        # conv6 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv6)

        # for tensorflow
        # conv6 = core.Reshape((patch_height*patch_width,num_lesion_class+1))(conv6)
        # for theano
        conv6 = core.Reshape((self.patch_height * self.patch_width,
                              self.num_seg_class + 1))(conv6)
        # conv6 = core.Permute((2, 1))(conv6)
        ############
        act = Activation('softmax')(conv6)

        model = Model(inputs=inputs, outputs=act)
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['categorical_accuracy'])
        plot_model(model,
                   to_file=os.path.join(self.config.checkpoint, "model.png"),
                   show_shapes=True)
        self.model = model
Example #24
0
def make_yolov3_model():
    input_image = Input(shape=(None, None, 3))

    # Layer  0 => 4
    x = _conv_block(input_image, [{
        'filter': 32,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 0
    }, {
        'filter': 64,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 1
    }, {
        'filter': 32,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 2
    }, {
        'filter': 64,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 3
    }])

    # Layer  5 => 8
    x = _conv_block(x, [{
        'filter': 128,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 5
    }, {
        'filter': 64,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 6
    }, {
        'filter': 128,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 7
    }])

    # Layer  9 => 11
    x = _conv_block(x, [{
        'filter': 64,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 9
    }, {
        'filter': 128,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 10
    }])

    # Layer 12 => 15
    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 12
    }, {
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 13
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 14
    }])

    # Layer 16 => 36
    for i in range(7):
        x = _conv_block(x, [{
            'filter': 128,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 16 + i * 3
        }, {
            'filter': 256,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 17 + i * 3
        }])

    skip_36 = x

    # Layer 37 => 40
    x = _conv_block(x, [{
        'filter': 512,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 37
    }, {
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 38
    }, {
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 39
    }])

    # Layer 41 => 61
    for i in range(7):
        x = _conv_block(x, [{
            'filter': 256,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 41 + i * 3
        }, {
            'filter': 512,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 42 + i * 3
        }])

    skip_61 = x

    # Layer 62 => 65
    x = _conv_block(x, [{
        'filter': 1024,
        'kernel': 3,
        'stride': 2,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 62
    }, {
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 63
    }, {
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 64
    }])

    # Layer 66 => 74
    for i in range(3):
        x = _conv_block(x, [{
            'filter': 512,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 66 + i * 3
        }, {
            'filter': 1024,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 67 + i * 3
        }])

    # Layer 75 => 79
    x = _conv_block(x, [{
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 75
    }, {
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 76
    }, {
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 77
    }, {
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 78
    }, {
        'filter': 512,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 79
    }],
                    skip=False)

    # Layer 80 => 82
    yolo_82 = _conv_block(x, [{
        'filter': 1024,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 80
    }, {
        'filter': 255,
        'kernel': 1,
        'stride': 1,
        'bnorm': False,
        'leaky': False,
        'layer_idx': 81
    }],
                          skip=False)

    # Layer 83 => 86
    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 84
    }],
                    skip=False)
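    # upsample the coarse feature map and fuse it with the higher-resolution skip connection (skip_61), FPN-style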
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_61])

    # Layer 87 => 91
    x = _conv_block(x, [{
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 87
    }, {
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 88
    }, {
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 89
    }, {
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 90
    }, {
        'filter': 256,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 91
    }],
                    skip=False)

    # Layer 92 => 94
    yolo_94 = _conv_block(x, [{
        'filter': 512,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 92
    }, {
        'filter': 255,
        'kernel': 1,
        'stride': 1,
        'bnorm': False,
        'leaky': False,
        'layer_idx': 93
    }],
                          skip=False)

    # Layer 95 => 98
    x = _conv_block(x, [{
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 96
    }],
                    skip=False)
    x = UpSampling2D(2)(x)
    x = concatenate([x, skip_36])

    # Layer 99 => 106
    yolo_106 = _conv_block(x, [{
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 99
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 100
    }, {
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 101
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 102
    }, {
        'filter': 128,
        'kernel': 1,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 103
    }, {
        'filter': 256,
        'kernel': 3,
        'stride': 1,
        'bnorm': True,
        'leaky': True,
        'layer_idx': 104
    }, {
        'filter': 255,
        'kernel': 1,
        'stride': 1,
        'bnorm': False,
        'leaky': False,
        'layer_idx': 105
    }],
                           skip=False)

    model = Model(input_image, [yolo_82, yolo_94, yolo_106])
    return model
Example #25
0
def UnetResidual_model(input_shape=(image_rows, image_cols, image_channels),
                       num_classes=1,
                       k=64,
                       kc=512):

    # Conv filter parameters
    k1 = k
    k2 = 2 * k1
    k3 = 2 * k2
    k4 = 2 * k3
    kc = 2 * k4
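    # note: this overwrites the kc argument passed to the function, so kc is always 2 * k4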
    #print('k1 ={}'.format(k1))
    #print('k2 ={}'.format(k2))
    #print('k3 ={}'.format(k3))
    #print('k4 ={}'.format(k4))
    #print('kc ={}'.format(kc))

    #### Build U-Net model
    #### Input
    inputs = Input(input_shape)

    #### DownSampling Block (4)
    down1 = Conv2D(k1, (3, 3), padding='same')(inputs)
    down1 = BatchNormalization()(down1)
    down1 = Activation('relu')(down1)
    down1 = Conv2D(k1, (3, 3), padding='same')(down1)
    down1 = BatchNormalization()(down1)
    down1 = Activation('relu')(down1)
    down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)

    down2 = Conv2D(k2, (3, 3), padding='same')(down1_pool)
    down2 = BatchNormalization()(down2)
    down2 = Activation('relu')(down2)
    down2 = Conv2D(k2, (3, 3), padding='same')(down2)
    down2 = BatchNormalization()(down2)
    shortcut = Conv2D(k2, (1, 1))(down1_pool)
    shortcut = BatchNormalization()(shortcut)
    down2 = add([down2, shortcut])  # skip connection
    down2 = Activation('relu')(down2)
    down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)

    down3 = Conv2D(k3, (3, 3), padding='same')(down2_pool)
    down3 = BatchNormalization()(down3)
    down3 = Activation('relu')(down3)
    down3 = Conv2D(k3, (3, 3), padding='same')(down3)
    down3 = BatchNormalization()(down3)
    shortcut = Conv2D(k3, (1, 1))(down2_pool)
    shortcut = BatchNormalization()(shortcut)
    down3 = add([down3, shortcut])  # skip connection
    down3 = Activation('relu')(down3)
    down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)

    down4 = Conv2D(k4, (3, 3), padding='same')(down3_pool)
    down4 = BatchNormalization()(down4)
    down4 = Activation('relu')(down4)
    down4 = Conv2D(k4, (3, 3), padding='same')(down4)
    down4 = BatchNormalization()(down4)
    shortcut = Conv2D(k4, (1, 1))(down3_pool)
    shortcut = BatchNormalization()(shortcut)
    down4 = add([down4, shortcut])  # skip connection
    down4 = Activation('relu')(down4)
    down4_pool = MaxPooling2D((2, 2), strides=(2, 2))(down4)
    down4_pool = Dropout(0.5)(down4_pool)

    #### Center Block
    center = Conv2D(kc, (3, 3), padding='same')(down4_pool)
    center = BatchNormalization()(center)
    center = Activation('relu')(center)
    center = Conv2D(kc, (3, 3), padding='same')(center)
    center = BatchNormalization()(center)
    shortcut = Conv2D(kc, (1, 1))(down4_pool)
    shortcut = BatchNormalization()(shortcut)
    center = add([center, shortcut])  # skip connection
    center = Activation('relu')(center)
    center = Dropout(0.5)(center)

    #### UpSampling Blocks (4)
    up4 = UpSampling2D((2, 2))(center)
    up4_con = concatenate([down4, up4], axis=3)
    up4 = Conv2D(k4, (3, 3), padding='same')(up4_con)
    up4 = BatchNormalization()(up4)
    up4 = Activation('relu')(up4)
    up4 = Conv2D(k4, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = Activation('relu')(up4)
    up4 = Conv2D(k4, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    shortcut = Conv2D(k4, (1, 1))(up4_con)
    shortcut = BatchNormalization()(shortcut)
    up4 = add([up4, shortcut])  # skip connection
    up4 = Activation('relu')(up4)

    up3 = UpSampling2D((2, 2))(up4)
    up3_con = concatenate([down3, up3], axis=3)
    up3 = Conv2D(k3, (3, 3), padding='same')(up3_con)
    up3 = BatchNormalization()(up3)
    up3 = Activation('relu')(up3)
    up3 = Conv2D(k3, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    up3 = Activation('relu')(up3)
    up3 = Conv2D(k3, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    shortcut = Conv2D(k3, (1, 1))(up3_con)
    shortcut = BatchNormalization()(shortcut)
    up3 = add([up3, shortcut])  # skip connection
    up3 = Activation('relu')(up3)

    up2 = UpSampling2D((2, 2))(up3)
    up2_con = concatenate([down2, up2], axis=3)
    up2 = Conv2D(k2, (3, 3), padding='same')(up2_con)
    up2 = BatchNormalization()(up2)
    up2 = Activation('relu')(up2)
    up2 = Conv2D(k2, (3, 3), padding='same')(up2)
    up2 = BatchNormalization()(up2)
    up2 = Activation('relu')(up2)
    up2 = Conv2D(k2, (3, 3), padding='same')(up2)
    up2 = BatchNormalization()(up2)
    shortcut = Conv2D(k2, (1, 1))(up2_con)
    shortcut = BatchNormalization()(shortcut)
    up2 = add([up2, shortcut])  # skip connection
    up2 = Activation('relu')(up2)

    up1 = UpSampling2D((2, 2))(up2)
    up1_con = concatenate([down1, up1], axis=3)
    up1 = Conv2D(k1, (3, 3), padding='same')(up1_con)
    up1 = BatchNormalization()(up1)
    up1 = Activation('relu')(up1)
    up1 = Conv2D(k1, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = Activation('relu')(up1)
    up1 = Conv2D(k1, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    shortcut = Conv2D(k1, (1, 1))(up1_con)
    shortcut = BatchNormalization()(shortcut)
    up1 = add([up1, shortcut])  # skip connection
    up1 = Activation('relu')(up1)

    # Pixel Classification
    classify = Conv2D(num_classes, (1, 1), activation='sigmoid')(up1)

    # Model
    model = Model(inputs=[inputs], outputs=[classify])  # optimizer=sgd

    ### Optimizer, Loss and Metrics
    model.compile(optimizer=Adam(lr=1e-3),
                  loss=binary_crossentropy,
                  metrics=[dice_coeff])

    # Display the model
    model.summary()

    return model
Example #26
0
    def upscale2x(self,
                  input_tensor,
                  filters,
                  kernel_size=3,
                  padding="same",
                  interpolation="bilinear",
                  res_block_follows=False,
                  sr_ratio=0.5,
                  scale_factor=2,
                  fast=False,
                  **kwargs):
        """ Custom hybrid upscale layer for sub-pixel up-scaling.

        Most of up-scaling is approximating lighting gradients, which can be done accurately
        with linear interpolation. This layer reduces memory consumption by splitting the work
        between a bilinear branch and a convolutional (sub-pixel) branch, so that the sub-pixel
        update captures detail whilst the bilinear filter captures lighting.

        Adds reflection padding if it has been selected by the user, and other post-processing
        if requested by the plugin.

        Parameters
        ----------
        input_tensor: tensor
            The input tensor to the layer
        filters: int
            The dimensionality of the output space (i.e. the number of output filters in the
            convolution)
        kernel_size: int, optional
            An integer or tuple/list of 2 integers, specifying the height and width of the 2D
            convolution window. Can be a single integer to specify the same value for all spatial
            dimensions. Default: 3
        padding: ["valid", "same"], optional
            The padding to use. Default: `"same"`
        interpolation: ["nearest", "bilinear"], optional
            Interpolation to use for up-sampling. Default: `"bilinear"`
        res_block_follows: bool, optional
            If a residual block will follow this layer, then this should be set to `True` to add
            a leaky ReLu after the convolutional layer. Default: ``False``
        scale_factor: int, optional
            The amount to upscale the image. Default: `2`
        sr_ratio: float, optional
            The proportion of super resolution (pixel shuffler) filters to use. Non-fast mode only.
            Default: `0.5`
        fast: bool, optional
            Use a faster up-scaling method that may appear more rugged. Default: ``False``
        kwargs: dict
            Any additional Keras standard layer keyword arguments

        Returns
        -------
        tensor
            The output tensor from the Upscale layer
        """
        name = self._get_name("upscale2x_{}".format("fast" if fast else "hyb"))
        var_x = input_tensor
        if not fast:
            sr_filters = int(filters * sr_ratio)
            filters = filters - sr_filters
            var_x_sr = self.upscale(var_x,
                                    filters,
                                    kernel_size=kernel_size,
                                    padding=padding,
                                    scale_factor=scale_factor,
                                    res_block_follows=res_block_follows,
                                    **kwargs)

        if fast or filters > 0:
            var_x2 = self.conv2d(var_x,
                                 filters,
                                 kernel_size=3,
                                 padding=padding,
                                 name="{}_conv2d".format(name),
                                 **kwargs)
            var_x2 = UpSampling2D(size=(scale_factor, scale_factor),
                                  interpolation=interpolation,
                                  name="{}_upsampling2D".format(name))(var_x2)
            if fast:
                var_x1 = self.upscale(var_x,
                                      filters,
                                      kernel_size=kernel_size,
                                      padding=padding,
                                      scale_factor=scale_factor,
                                      res_block_follows=res_block_follows,
                                      **kwargs)
                var_x = Add()([var_x2, var_x1])
            else:
                var_x = Concatenate(name="{}_concatenate".format(name))(
                    [var_x_sr, var_x2])
        else:
            var_x = var_x_sr
        return var_x
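
The docstring above describes a hybrid strategy: part of the output filters come from a sub-pixel (pixel-shuffle) branch and the rest from a bilinear UpSampling2D plus convolution branch, concatenated at the end. The method body relies on the plugin's own conv2d/upscale helpers, so the following is only a minimal standalone sketch of the same idea; the name hybrid_upscale2x and the use of tf.nn.depth_to_space as the pixel shuffler are assumptions, not the plugin's actual implementation.

import tensorflow as tf
from tensorflow.keras.layers import Concatenate, Conv2D, Lambda, UpSampling2D


def hybrid_upscale2x(input_tensor, filters, sr_ratio=0.5):
    # split the output filters between the two branches
    sr_filters = int(filters * sr_ratio)
    bl_filters = filters - sr_filters

    # sub-pixel branch: convolve to 4x the target filters, then shuffle depth into 2x2 spatial blocks
    var_sr = Conv2D(sr_filters * 4, 3, padding='same')(input_tensor)
    var_sr = Lambda(lambda t: tf.nn.depth_to_space(t, 2))(var_sr)

    # bilinear branch: cheap interpolation for the smooth, low-frequency (lighting) content
    var_bl = Conv2D(bl_filters, 3, padding='same')(input_tensor)
    var_bl = UpSampling2D(size=2, interpolation='bilinear')(var_bl)

    return Concatenate()([var_sr, var_bl])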
Example #27
0
def get_unet_128(input_shape=(128, 128, 3), num_classes=1):
    inputs = Input(shape=input_shape)
    # 128

    down1 = Conv2D(64, (3, 3), padding='same')(inputs)
    down1 = BatchNormalization()(down1)
    down1 = Activation('relu')(down1)
    down1 = Conv2D(64, (3, 3), padding='same')(down1)
    down1 = BatchNormalization()(down1)
    down1 = Activation('relu')(down1)
    down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
    # 64

    down2 = Conv2D(128, (3, 3), padding='same')(down1_pool)
    down2 = BatchNormalization()(down2)
    down2 = Activation('relu')(down2)
    down2 = Conv2D(128, (3, 3), padding='same')(down2)
    down2 = BatchNormalization()(down2)
    down2 = Activation('relu')(down2)
    down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)
    # 32

    down3 = Conv2D(256, (3, 3), padding='same')(down2_pool)
    down3 = BatchNormalization()(down3)
    down3 = Activation('relu')(down3)
    down3 = Conv2D(256, (3, 3), padding='same')(down3)
    down3 = BatchNormalization()(down3)
    down3 = Activation('relu')(down3)
    down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)
    # 16

    down4 = Conv2D(512, (3, 3), padding='same')(down3_pool)
    down4 = BatchNormalization()(down4)
    down4 = Activation('relu')(down4)
    down4 = Conv2D(512, (3, 3), padding='same')(down4)
    down4 = BatchNormalization()(down4)
    down4 = Activation('relu')(down4)
    down4_pool = MaxPooling2D((2, 2), strides=(2, 2))(down4)
    # 8

    center = Conv2D(1024, (3, 3), padding='same')(down4_pool)
    center = BatchNormalization()(center)
    center = Activation('relu')(center)
    center = Conv2D(1024, (3, 3), padding='same')(center)
    center = BatchNormalization()(center)
    center = Activation('relu')(center)

    up4 = UpSampling2D((2, 2))(center)
    up4 = concatenate([down4, up4], axis=3)
    up4 = Conv2D(512, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = Activation('relu')(up4)
    up4 = Conv2D(512, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = Activation('relu')(up4)
    up4 = Conv2D(512, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = Activation('relu')(up4)
    # 16

    up3 = UpSampling2D((2, 2))(up4)
    up3 = concatenate([down3, up3], axis=3)
    up3 = Conv2D(256, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    up3 = Activation('relu')(up3)
    up3 = Conv2D(256, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    up3 = Activation('relu')(up3)
    up3 = Conv2D(256, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    up3 = Activation('relu')(up3)
    # 32

    up2 = UpSampling2D((2, 2))(up3)
    up2 = concatenate([down2, up2], axis=3)
    up2 = Conv2D(128, (3, 3), padding='same')(up2)
    up2 = BatchNormalization()(up2)
    up2 = Activation('relu')(up2)
    up2 = Conv2D(128, (3, 3), padding='same')(up2)
    up2 = BatchNormalization()(up2)
    up2 = Activation('relu')(up2)
    up2 = Conv2D(128, (3, 3), padding='same')(up2)
    up2 = BatchNormalization()(up2)
    up2 = Activation('relu')(up2)
    # 64

    up1 = UpSampling2D((2, 2))(up2)
    up1 = concatenate([down1, up1], axis=3)
    up1 = Conv2D(64, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = Activation('relu')(up1)
    up1 = Conv2D(64, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = Activation('relu')(up1)
    up1 = Conv2D(64, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = Activation('relu')(up1)
    # 128

    classify = Conv2D(num_classes, (1, 1), activation='sigmoid')(up1)

    model = Model(inputs=inputs, outputs=classify)

    model.compile(optimizer=RMSprop(lr=0.0001),
                  loss=bce_dice_loss,
                  metrics=[dice_coeff])

    return model
def gan_1(
    input_img,
    hidden_num=128,
    no_of_pairs=5,
    min_fea_map_H=8,
    activation_fn=tf.nn.elu,
    noise_dim=0,
    z_num=64,
    input_channel=3
):  #x, hidden_num=3, no_of_pairs=4, min_fea_map_H=8, activation_fn=tf.nn.elu, noise_dim=0):

    # Encoder
    encoder_layer_list = []
    x = Conv2D(hidden_num,
               kernel_size=3,
               strides=1,
               activation=activation_fn,
               padding='same')(input_img)

    for idx in range(no_of_pairs):
        # increase the number of filters to hidden_num * (idx + 1), e.g. 16, 32, 48, ...
        channel_num = hidden_num * (idx + 1)

        res = x
        x = Conv2D(channel_num,
                   kernel_size=3,
                   strides=1,
                   activation=activation_fn,
                   padding='same')(x)
        x = Conv2D(channel_num,
                   kernel_size=3,
                   strides=1,
                   activation=activation_fn,
                   padding='same')(x)

        x = add([x, res])

        encoder_layer_list.append(x)
        if idx < no_of_pairs - 1:
            x = Conv2D(hidden_num * (idx + 2),
                       kernel_size=3,
                       strides=2,
                       activation=activation_fn,
                       padding='same')(x)

    # for flattening the layer
    x = Flatten()(x)
    # 20480
    reshape_dim = int(np.prod([min_fea_map_H, min_fea_map_H / 2, channel_num]))
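    # with the defaults this is 8 * 4 * (128 * 5) = 20480, the size of the flattened bottleneck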
    x = Reshape((1, reshape_dim))(x)

    x = Dense(z_num, activation=None)(x)

    # Decoder
    reshape_dim = int(np.prod([min_fea_map_H, min_fea_map_H / 2, hidden_num]))
    x = Dense(reshape_dim, activation=None)(x)
    x = Reshape((min_fea_map_H, min_fea_map_H // 2, hidden_num))(x)

    for idx in range(no_of_pairs):
        x = Concatenate(axis=-1)(
            [x, encoder_layer_list[no_of_pairs - 1 - idx]])
        res = x

        channel_num = x.get_shape().as_list()[-1]
        x = Conv2D(channel_num,
                   kernel_size=3,
                   strides=1,
                   activation=activation_fn,
                   padding='same')(x)
        x = Conv2D(channel_num,
                   kernel_size=3,
                   strides=1,
                   activation=activation_fn,
                   padding='same')(x)
        x = add([x, res])

        if idx < no_of_pairs - 1:
            x = UpSampling2D(2)(x)
            x = Conv2D(hidden_num * (no_of_pairs - idx - 1),
                       kernel_size=1,
                       strides=1,
                       activation=activation_fn,
                       padding='same')(x)

    out = Conv2D(input_channel,
                 name='output_g1',
                 kernel_size=3,
                 strides=1,
                 activation=None,
                 padding='same')(x)
    return out
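
A minimal usage sketch for gan_1 (the 128x64 RGB input size is an assumption; with the defaults no_of_pairs=5 and min_fea_map_H=8, the four stride-2 convolutions require height = 16 * min_fea_map_H and width = 8 * min_fea_map_H so the bottleneck lands exactly on the 8x4 feature map):

input_img = Input(shape=(128, 64, 3))  # assumed: (16 * min_fea_map_H, 8 * min_fea_map_H, 3)
generator = Model(inputs=input_img, outputs=gan_1(input_img), name='generator_1')
generator.summary()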
from keras import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D

input = Input(shape=(28, 28, 1))
# encoder
conv1 = Conv2D(32, (3, 3), padding='same',
               activation='relu')(input)  # 1*28*28 --> 32*28*28
pool1 = MaxPooling2D(pool_size=(2, 2),
                     padding='same')(conv1)  # 32*28*28 -->32*14*14
conv2 = Conv2D(32, (3, 3), padding='same',
               activation='relu')(pool1)  # 32*14*14 --> 32*14*14
pool2 = MaxPooling2D(pool_size=(2, 2),
                     padding='same')(conv2)  # 32*14*14  --> 32*7*7

# decoder
conv3 = Conv2D(32, (3, 3), padding='same',
               activation='relu')(pool2)  # 32*7*7 --> 32*7*7
up1 = UpSampling2D((2, 2))(conv3)  # 32*7*7-->32*14*14
conv4 = Conv2D(32, (3, 3), padding='same',
               activation='relu')(up1)  # 32*14*14 --> 32*14*14
up2 = UpSampling2D((2, 2))(conv4)  # 32*14*14 --> 32*28*28

output = Conv2D(1, (3, 3), padding='same',
                activation='sigmoid')(up2)  # 32*28*28--> 1*28*28

model = Model(inputs=input, outputs=output)

model.compile(optimizer='adadelta', loss='binary_crossentropy')
history = model.fit(x_train_noise,
                    x_train_noise,
                    batch_size=128,
                    epochs=3,
                    verbose=2)

def SegNet():
    model = Sequential()
    #encoder
    # model.add(Conv2D(64,(3,3),strides=(1,1),input_shape=(3,img_w,img_h),padding='same',activation='relu'))
    model.add(Conv2D(64, (3, 3), strides=(1, 1), input_shape=(img_w, img_h, 3), padding='same', activation='relu')) # for channels_last
    model.add(BatchNormalization())
    model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    #(128,128)
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    #(64,64)
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    #(32,32)
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    #(16,16)
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))

    #(8,8)
    #decoder
    model.add(UpSampling2D(size=(2,2)))
    #(16,16)
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))

    #(32,32)
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))

    #(64,64)
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))

    #(128,128)
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(UpSampling2D(size=(2, 2)))

    #(256,256)
    # model.add(Conv2D(64, (3, 3), strides=(1, 1), input_shape=(3,img_w, img_h), padding='same', activation='relu'))
    model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
    model.add(BatchNormalization())
    model.add(Conv2D(n_label, (1, 1), strides=(1, 1), padding='same'))
    # model.add(Reshape((n_label,img_w*img_h)))
    model.add(Reshape((img_w * img_h, n_label)))

    # swap axis=1 and axis=2, equivalent to np.swapaxes(layer, 1, 2)
    # model.add(Permute((2,1)))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',optimizer='sgd',metrics=['accuracy'])
    model.summary()
    return model
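
SegNet() depends on the globals img_w, img_h and n_label; a minimal usage sketch with assumed values (the spatial size must be divisible by 32 because of the five pooling stages):

img_w, img_h, n_label = 256, 256, 2  # assumed values
segnet = SegNet()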