Example #1
    def build_discriminator(self):

        model = Sequential()

        model.add(
            Conv2D(32,
                   kernel_size=3,
                   strides=2,
                   input_shape=self.img_shape,
                   padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)
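A minimal usage sketch (the Adam settings and the `gan` instance are assumptions, not from the source): a discriminator like this is typically compiled with binary cross-entropy before adversarial training.

# Hypothetical wiring; `gan` is an instance of the class owning this method,
# and Adam comes from keras.optimizers.
discriminator = gan.build_discriminator()
discriminator.compile(loss='binary_crossentropy',
                      optimizer=Adam(0.0002, 0.5),  # assumed hyperparameters
                      metrics=['accuracy'])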
Example #2
def generator_unet_upsampling(img_shape,
                              disc_img_shape,
                              model_name="generator_unet_upsampling"):
    filters_num = 64
    axis_num = -1
    channels_num = img_shape[-1]
    min_s = min(img_shape[:-1])

    unet_input = Input(shape=img_shape, name="unet_input")

    conv_num = int(np.floor(np.log(min_s) / np.log(2)))
    list_filters_num = [filters_num * min(8, (2**i)) for i in range(conv_num)]

    # Encoder
    first_conv = Conv2D(list_filters_num[0], (3, 3),
                        strides=(2, 2),
                        name='unet_conv2D_1',
                        padding='same')(unet_input)
    list_encoder = [first_conv]
    for i, f in enumerate(list_filters_num[1:]):
        name = 'unet_conv2D_' + str(i + 2)
        conv = conv_block_unet(list_encoder[-1], f, name, axis_num)
        list_encoder.append(conv)

    # prepare decoder filters
    list_filters_num = list_filters_num[:-2][::-1]  # e.g. [512, 512, 512, 256, 128, 64]
    if len(list_filters_num) < conv_num - 1:
        list_filters_num.append(filters_num)

    # Decoder
    first_up_conv = up_conv_block_unet(list_encoder[-1],
                                       list_encoder[-2],
                                       list_filters_num[0],
                                       "unet_upconv2D_1",
                                       axis_num,
                                       dropout=True)
    list_decoder = [first_up_conv]
    for i, f in enumerate(list_filters_num[1:]):
        name = "unet_upconv2D_" + str(i + 2)
        d = i < 2  # dropout only on the first two of these decoder blocks
        up_conv = up_conv_block_unet(list_decoder[-1],
                                     list_encoder[-(i + 3)],
                                     f,
                                     name,
                                     axis_num,
                                     dropout=d)
        list_decoder.append(up_conv)

    x = Activation('relu')(list_decoder[-1])
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(disc_img_shape[-1], (3, 3), name="last_conv", padding='same')(x)
    x = Activation('tanh')(x)

    generator_unet = Model(inputs=[unet_input], outputs=[x])
    return generator_unet
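For a hypothetical 256x256x3 input, conv_num = floor(log2(256)) = 8, so the encoder filter progression is [64, 128, 256, 512, 512, 512, 512, 512] and eight stride-2 convolutions shrink the feature map to 1x1 before the decoder mirrors it back up. A usage sketch under those assumed shapes (relies on the Keras and NumPy imports of the surrounding file):

generator = generator_unet_upsampling((256, 256, 3), (256, 256, 3))
generator.summary()  # output is 256x256 with disc_img_shape[-1] channels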
Example #3
    def build(self):

        model = Sequential()
        img = Input(shape=self.img_shape)

        model.add(
            Conv2D(32,
                   kernel_size=3,
                   activation="relu",
                   strides=2,
                   padding="same"))

        model.add(
            Conv2D(32,
                   kernel_size=3,
                   activation="relu",
                   strides=2,
                   padding="same"))

        model.add(
            Conv2D(64,
                   kernel_size=3,
                   activation="relu",
                   strides=1,
                   padding="same"))

        model.add(Dropout(0.25))

        model.add(MaxPooling2D((2, 2), strides=None, padding="same"))
        model.add(Flatten())

        # Extract feature representation
        mp = model(img)
        # Determine valence of the image
        v1 = Dense(64, activation="relu", name='valence')(mp)
        valence = Dense(1, activation="sigmoid")(v1)
        valence = Lambda(lambda x: x * 10.)(valence)

        a1 = Dense(64, activation="relu", name='arousal')(mp)
        arousal = Dense(1, activation="sigmoid")(a1)
        arousal = Lambda(lambda x: x * 10.)(arousal)

        d1 = Dense(64, activation="relu", name='dominance')(mp)
        dominance = Dense(1, activation="sigmoid")(d1)
        dominance = Lambda(lambda x: x * 10.)(dominance)

        #model.summary()

        return Model(img, [arousal, valence, dominance])
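Each head passes through a sigmoid and is then scaled by 10 via Lambda, so arousal, valence, and dominance all land in the 0-10 range. A training sketch (the mse loss and the `emo` instance are assumptions, not from the source):

# Hypothetical usage; `emo` is an instance of the class owning this method.
vad_model = emo.build()
vad_model.compile(loss='mse', optimizer='adam')  # assumed regression setup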
Example #4
def up_conv_block_unet(x, x2, f, name, bn_axis, bn=True, dropout=False):
    x = Activation('relu')(x)
    x = UpSampling2D(size=(2, 2))(x)
    x = Conv2D(f, (3, 3), name=name, padding='same')(x)
    if bn:
        x = BatchNormalization(axis=bn_axis)(x)
    if dropout:
        x = Dropout(0.5)(x)
    x = Concatenate(axis=bn_axis)([x, x2])
    return x
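A shape sketch under assumed sizes, showing how the Concatenate skip connection doubles the channel count (relies on the file's Keras layer imports):

from keras.layers import Input
skip = Input(shape=(32, 32, 512))    # encoder feature map
bottom = Input(shape=(16, 16, 512))  # decoder feature map one level deeper
merged = up_conv_block_unet(bottom, skip, 512, 'demo_upconv', -1)
# merged has shape (None, 32, 32, 1024): 512 conv filters + 512 skip channels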
Example #5
def layers(input_shape):
    return [
        ZeroPadding2D(padding=3,
                      input_shape=input_shape,
                      data_format='channels_first'),  # <1>
        Conv2D(48, (7, 7), data_format='channels_first'),
        Activation('relu'),
        ZeroPadding2D(padding=2, data_format='channels_first'),  # <2>
        Conv2D(32, (5, 5), data_format='channels_first'),
        Activation('relu'),
        ZeroPadding2D(padding=2, data_format='channels_first'),
        Conv2D(32, (5, 5), data_format='channels_first'),
        Activation('relu'),
        ZeroPadding2D(padding=2, data_format='channels_first'),
        Conv2D(32, (5, 5), data_format='channels_first'),
        Activation('relu'),
        Flatten(),
        Dense(512),
        Activation('relu'),
    ]
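The ZeroPadding2D layers (callouts <1> and <2>) compensate for the 'valid' convolutions that follow: padding of 3 before a 7x7 kernel, and of 2 before each 5x5 kernel, preserves the spatial size. A sketch stacking these layers into a model (the 7-plane 19x19 channels-first input shape is an assumption):

from keras.models import Sequential
model = Sequential()
for layer in layers((7, 19, 19)):  # assumed board-encoding shape
    model.add(layer)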
Example #6
def define_lenet(input_shape, num_classes):
    model = Sequential()
    model.add(
        Conv2D(
            filters=20,
            input_shape=input_shape,
            kernel_size=5,
            padding='same',
            activation='relu',
        ))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(
            filters=50,
            kernel_size=5,
            padding='same',
            activation='relu',
        ))
    model.add(Flatten())
    model.add(Dense(500, activation='relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.summary()
    return model
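A usage sketch for this LeNet-style model (the MNIST-like shapes and compile settings are assumptions):

model = define_lenet(input_shape=(28, 28, 1), num_classes=10)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])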
Example #7
    def build_generator(self):

        model = Sequential()

        model.add(
            Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        model.add(Reshape((7, 7, 128)))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim, ))
        img = model(noise)

        return Model(noise, img)
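The two UpSampling2D layers take the 7x7 seed to 14x14 and then 28x28, i.e. MNIST-sized images. A sampling sketch (latent_dim = 100, the normal prior, and the `gan` instance are assumptions):

# Hypothetical sampling; assumes numpy imported as np and latent_dim == 100.
generator = gan.build_generator()
noise = np.random.normal(0, 1, (16, 100))
fake_imgs = generator.predict(noise)  # shape (16, 28, 28, channels)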
Example #8
def conv_block_unet(x, f, name, bn_axis, bn=True, strides=(2, 2)):
    x = LeakyReLU(0.2)(x)
    x = Conv2D(f, (3, 3), strides=strides, name=name, padding='same')(x)
    if bn:
        x = BatchNormalization(axis=bn_axis)(x)
    return x
Example #9
def DCGAN_discriminator(img_shape,
                        disc_img_shape,
                        patch_num,
                        model_name='DCGAN_discriminator'):
    disc_raw_img_shape = (disc_img_shape[0], disc_img_shape[1], img_shape[-1])
    list_input = [
        Input(shape=disc_img_shape, name='disc_input_' + str(i))
        for i in range(patch_num)
    ]
    list_raw_input = [
        Input(shape=disc_raw_img_shape, name='disc_raw_input_' + str(i))
        for i in range(patch_num)
    ]

    axis_num = -1
    filters_num = 64
    conv_num = int(np.floor(np.log(disc_img_shape[1]) / np.log(2)))
    list_filters = [filters_num * min(8, (2**i)) for i in range(conv_num)]

    # First Conv
    generated_patch_input = Input(shape=disc_img_shape,
                                  name='discriminator_input')
    xg = Conv2D(list_filters[0], (3, 3),
                strides=(2, 2),
                name='disc_conv2d_1',
                padding='same')(generated_patch_input)
    xg = BatchNormalization(axis=axis_num)(xg)
    xg = LeakyReLU(0.2)(xg)

    # First Raw Conv
    raw_patch_input = Input(shape=disc_raw_img_shape,
                            name='discriminator_raw_input')
    xr = Conv2D(list_filters[0], (3, 3),
                strides=(2, 2),
                name='raw_disc_conv2d_1',
                padding='same')(raw_patch_input)
    xr = BatchNormalization(axis=axis_num)(xr)
    xr = LeakyReLU(0.2)(xr)

    # Next Conv: fuse the two streams once, then chain strided conv blocks.
    # (The original re-concatenated xg and xr inside the loop, so every
    # iteration but the last was discarded; chaining x is the evident intent.)
    x = Concatenate(axis=axis_num)([xg, xr])
    for i, f in enumerate(list_filters[1:]):
        name = 'disc_conv2d_' + str(i + 2)
        x = Conv2D(f, (3, 3), strides=(2, 2), name=name, padding='same')(x)
        x = BatchNormalization(axis=axis_num)(x)
        x = LeakyReLU(0.2)(x)

    x_flat = Flatten()(x)
    x = Dense(2, activation='softmax', name='disc_dense')(x_flat)

    PatchGAN = Model(inputs=[generated_patch_input, raw_patch_input],
                     outputs=[x],
                     name='PatchGAN')

    x = [
        PatchGAN([list_input[i], list_raw_input[i]]) for i in range(patch_num)
    ]

    if len(x) > 1:
        x = Concatenate(axis=axis_num)(x)
    else:
        x = x[0]

    x_out = Dense(2, activation='softmax', name='disc_output')(x)

    discriminator_model = Model(inputs=(list_input + list_raw_input),
                                outputs=[x_out],
                                name=model_name)

    return discriminator_model
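A usage sketch under assumed patch geometry: cutting a 256x256 grayscale image into non-overlapping 64x64 color patches gives patch_num = (256 / 64)**2 = 16 (relies on the file's Keras and NumPy imports):

discriminator = DCGAN_discriminator(img_shape=(256, 256, 1),
                                    disc_img_shape=(64, 64, 3),
                                    patch_num=16)
discriminator.summary()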
Example #10
def _main_(args):

    config_path = args.conf

    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())

    if config['backup']['create_backup']:
        config = create_backup(config)

    keras.backend.tensorflow_backend.set_session(get_session())

    #path for the training and validation dataset
    datasetTrainPath = os.path.join(args.folder, "train")
    datasetValPath = os.path.join(args.folder, "val")

    for folder in [datasetTrainPath, datasetValPath]:
        if not os.path.isdir(folder):
            raise Exception("{} doesn't exist!".format(folder))

    classesTrain = next(os.walk(datasetTrainPath))[1]
    classesVal = next(os.walk(datasetValPath))[1]

    if classesVal != classesTrain:
        raise Exception(
            "The training and validation classes must be the same!")
    else:
        folders = classesTrain

    #training configuration
    epochs = config['train']['nb_epochs']
    batchSize = config['train']['batch_size']
    width = config['model']['input_size_w']
    height = config['model']['input_size_h']
    depth = 1 if config['model']['gray_mode'] else 3

    #config keras generators
    if len(folders) == 2:  # with just 2 classes, the model has a single binary output
        classes = 1
    else:
        classes = len(folders)

    #count all samples
    imagesTrainPaths = []
    imagesValPaths = []
    for folder in folders:
        imagesTrainPaths += list(
            list_images(os.path.join(datasetTrainPath, folder)))
        imagesValPaths += list(
            list_images(os.path.join(datasetValPath, folder)))

    generator_config = {
        'IMAGE_H': height,
        'IMAGE_W': width,
        'IMAGE_C': depth,
        'BATCH_SIZE': batchSize
    }

    #callbacks
    model_name = config['train']['saved_weights_name']
    checkPointSaverBest = ModelCheckpoint(model_name,
                                          monitor='val_acc',
                                          verbose=1,
                                          save_best_only=True,
                                          save_weights_only=False,
                                          mode='auto',
                                          period=1)
    ckp_model_name = os.path.splitext(model_name)[0] + "_ckp.h5"
    checkPointSaver = ModelCheckpoint(ckp_model_name,
                                      verbose=1,
                                      save_best_only=False,
                                      save_weights_only=False,
                                      period=10)

    tb = TensorBoard(log_dir=config['train']['tensorboard_log_dir'],
                     histogram_freq=0,
                     batch_size=batchSize,
                     write_graph=True,
                     write_grads=False,
                     write_images=False,
                     embeddings_freq=0,
                     embeddings_layer_names=None,
                     embeddings_metadata=None)

    #create the classification model
    # make the feature extractor layers
    input_size = (height, width, depth)
    input_image = Input(shape=input_size)

    feature_extractor = import_feature_extractor(config['model']['backend'],
                                                 input_size)

    train_generator = BatchGenerator(imagesTrainPaths,
                                     generator_config,
                                     norm=feature_extractor.normalize,
                                     jitter=True)
    val_generator = BatchGenerator(imagesValPaths,
                                   generator_config,
                                   norm=feature_extractor.normalize,
                                   jitter=False)

    features = feature_extractor.extract(input_image)

    # make the model head
    output = Conv2D(classes, (1, 1), padding="same")(features)
    output = BatchNormalization()(output)
    output = LeakyReLU(alpha=0.1)(output)
    output = GlobalAveragePooling2D()(output)
    output = Activation("sigmoid")(output) if classes == 1 else Activation(
        "softmax")(output)

    if config['train']['pretrained_weights'] != "":
        model = load_model(config['model']['pretrained_weights'])
    else:
        model = Model(input_image, output)
        opt = Adam()
        model.compile(loss="binary_crossentropy"
                      if classes == 1 else "categorical_crossentropy",
                      optimizer=opt,
                      metrics=["accuracy"])
    model.summary()

    model.fit_generator(train_generator,
                        steps_per_epoch=len(imagesTrainPaths) // batchSize,
                        epochs=epochs,
                        validation_data=val_generator,
                        validation_steps=len(imagesValPaths) // batchSize,
                        callbacks=[checkPointSaverBest, checkPointSaver, tb],
                        workers=12,
                        max_queue_size=40)
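_main_ expects a JSON config passed via args.conf. A minimal sketch covering every key the function reads (all values, including the backend name, are placeholder assumptions):

{
    "model": {
        "backend": "MobileNet",
        "input_size_w": 224,
        "input_size_h": 224,
        "gray_mode": false
    },
    "train": {
        "nb_epochs": 50,
        "batch_size": 32,
        "saved_weights_name": "best_model.h5",
        "pretrained_weights": "",
        "tensorboard_log_dir": "logs"
    },
    "backup": {
        "create_backup": false
    }
}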