Example #1
# DenseNet-style transition block: BN -> ReLU -> 1x1 convolution, then optional pooling.
def transition_block(input,
                     nb_filter,
                     use_pool=True,
                     dropout_rate=None,
                     pooltype=1,
                     weight_decay=1e-4):
    x = BatchNormalization(axis=-1, epsilon=1.1e-5)(input)
    x = Activation('relu')(x)
    x = Conv2D(nb_filter, (1, 1),
               kernel_initializer='he_normal',
               padding='same',
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    if use_pool:
        if pooltype == 2:
            x = AveragePooling2D((2, 2), strides=(2, 2))(x)
        elif pooltype == 1:
            x = ZeroPadding2D(padding=(0, 1))(x)
            x = AveragePooling2D((2, 2), strides=(2, 1))(x)
        elif pooltype == 3:
            x = AveragePooling2D((2, 2), strides=(2, 1))(x)
    return x, nb_filter
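A minimal usage sketch (not part of the original example; the import paths and the input shape are assumptions for a typical tf.keras setup):

# Hypothetical invocation of transition_block; imports and shape are assumed.
from tensorflow.keras.layers import (Input, BatchNormalization, Activation,
                                     Conv2D, Dropout, AveragePooling2D,
                                     ZeroPadding2D)
from tensorflow.keras.regularizers import l2

feature_map = Input(shape=(32, 280, 64))
x, nb_filter = transition_block(feature_map, 128,
                                dropout_rate=0.2,
                                pooltype=1)  # pooltype=1 halves the height while (nearly) preserving the width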
Example #2
# Encoder/decoder building block: activation, then a strided conv (down=True)
# or a transposed conv (down=False), with optional normalization and dropout.
def block(out,
          nkernels,
          down=True,
          bn=True,
          dropout=False,
          leaky=True,
          normalization=InstanceNormalization):
    if leaky:
        out = LeakyReLU(0.2)(out)
    else:
        out = Activation('relu')(out)
    if down:
        out = ZeroPadding2D((1, 1))(out)
        out = Conv2D(nkernels,
                     kernel_size=(4, 4),
                     strides=(2, 2),
                     use_bias=False)(out)
    else:
        out = Conv2DTranspose(nkernels,
                              kernel_size=(4, 4),
                              strides=(2, 2),
                              use_bias=False)(out)
        out = Cropping2D((1, 1))(out)
    if bn:
        out = normalization(axis=-1)(out)
    if dropout:
        out = Dropout(0.5)(out)
    return out
# One U-Net encoder step: LeakyReLU -> zero-pad -> strided conv -> batch norm.
def _add_encoding_layer(filter_count, sequence):
    new_sequence = LeakyReLU(0.2)(sequence)
    new_sequence = ZeroPadding2D(CONV_PADDING)(new_sequence)
    new_sequence = Conv2D(filter_count, CONV_FILTER_SIZE,
                          strides=CONV_STRIDE)(new_sequence)
    new_sequence = BatchNormalization()(new_sequence)
    return new_sequence
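The CONV_* constants are not defined in this snippet; one consistent set of assumed values, chosen so that each encoding layer halves the spatial size (matching the 256 -> 128 -> 64 progression annotated in unet_model further below):

# Assumed values, not from the original source:
CONV_FILTER_SIZE = (4, 4)
CONV_STRIDE = (2, 2)
CONV_PADDING = (1, 1)
CONCATENATE_AXIS = -1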
# pix2pix-style PatchGAN discriminator over concatenated input and target images.
def Discriminator():

    initializer = tf.random_normal_initializer(0.0, 0.02)

    inputs = Input(
        shape=[config.IMG_WIDTH, config.IMG_HEIGHT, config.INPUT_CHANNELS],
        name="input_image",
    )

    target = Input(
        shape=[config.IMG_WIDTH, config.IMG_HEIGHT, config.INPUT_CHANNELS],
        name="target_image",
    )

    x = Concatenate()([inputs, target])

    down1 = downsample(64, 4, False)(x)
    down2 = downsample(128, 4)(down1)
    down3 = downsample(256, 4)(down2)

    zero_pad1 = ZeroPadding2D()(down3)

    conv = Conv2D(512,
                  4,
                  strides=1,
                  kernel_initializer=initializer,
                  use_bias=False)(zero_pad1)

    batchnorm1 = BatchNormalization()(conv)

    leaky_relu = LeakyReLU()(batchnorm1)

    zero_pad2 = ZeroPadding2D()(leaky_relu)

    last = Conv2D(1, 4, strides=1, kernel_initializer=initializer)(zero_pad2)

    return Model(inputs=[inputs, target], outputs=last)
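The downsample helper used above is not shown in this snippet. A sketch modeled on the TensorFlow pix2pix tutorial pattern (an assumption, not the original definition):

# Assumed definition of downsample, in the spirit of the pix2pix tutorial:
def downsample(filters, size, apply_batchnorm=True):
    initializer = tf.random_normal_initializer(0.0, 0.02)
    result = tf.keras.Sequential()
    result.add(
        tf.keras.layers.Conv2D(filters, size, strides=2, padding='same',
                               kernel_initializer=initializer, use_bias=False))
    if apply_batchnorm:
        result.add(tf.keras.layers.BatchNormalization())
    result.add(tf.keras.layers.LeakyReLU())
    return result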
Example #5
# U-Net generator with class-conditional instance normalization.
def make_generator(image_size, number_of_classes):
    input_a = Input(image_size + (1, ))
    cls = Input((1, ), dtype='int32')
    # input is 64 x 64 x nc
    conditional_instance_norm = lambda axis: (
        lambda inp: ConditionalInstanceNormalization(
            number_of_classes=number_of_classes, axis=axis)([inp, cls]))

    e1 = ZeroPadding2D((1, 1))(input_a)
    e1 = Conv2D(64, kernel_size=(4, 4), strides=(2, 2))(e1)
    # input is 32 x 32 x 64
    e2 = block(e1, 128, normalization=conditional_instance_norm)
    # input is 16 x 16 x 128
    e3 = block(e2, 256, normalization=conditional_instance_norm)
    # input is 8 x 8 x 256
    e4 = block(e3, 512, normalization=conditional_instance_norm)
    # input is 4 x 4 x 512
    e5 = block(e4, 512, normalization=conditional_instance_norm)
    # input is 2 x 2 x 512
    e6 = block(e5, 512, bn=False)
    # input is 1 x 1 x 512
    out = block(e6,
                512,
                down=False,
                leaky=False,
                dropout=True,
                normalization=conditional_instance_norm)
    # input is 2 x 2 x 512
    out = Concatenate(axis=-1)([out, e5])
    out = block(out,
                512,
                down=False,
                leaky=False,
                dropout=True,
                normalization=conditional_instance_norm)
    # input is 4 x 4 x 512
    out = Concatenate(axis=-1)([out, e4])
    out = block(out,
                512,
                down=False,
                leaky=False,
                dropout=True,
                normalization=conditional_instance_norm)
    # input is 8 x 8 x 512
    out = Concatenate(axis=-1)([out, e3])
    out = block(out,
                512,
                down=False,
                leaky=False,
                normalization=conditional_instance_norm)
    # input is 16 x 16 x 512
    out = Concatenate(axis=-1)([out, e2])
    out = block(out,
                256,
                down=False,
                leaky=False,
                normalization=conditional_instance_norm)
    # input is 32 x 32 x 256
    out = Concatenate(axis=-1)([out, e1])
    out = block(out, 3, down=False, leaky=False, bn=False)
    # input is 64 x 64 x 128

    out = Activation('tanh')(out)

    return Model(inputs=[input_a, cls], outputs=[out])
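A usage sketch (the image size and class count are illustrative assumptions, matching the 64 x 64 comments above):

generator = make_generator(image_size=(64, 64), number_of_classes=10)
generator.summary()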
Example #6
style_image_tensor = K.variable(style_img)
style_image_mask_tensor = K.variable(style_img_mask)
content_image_tensor = K.variable(content_img)

# this will contain our generated image
combination_image = K.placeholder((1, 3, img_width, img_height))

# combine the 4 images into a single Keras tensor
input_tensor = K.concatenate([
    style_image_tensor, style_image_mask_tensor, content_image_tensor,
    combination_image
],
                             axis=0)

# build the VGG16 network with our four tensors as input
# (Keras 1.x API: set_input feeds an existing tensor into the model's first layer)
first_layer = ZeroPadding2D((1, 1))
first_layer.set_input(input_tensor, shape=(4, 3, img_width, img_height))

model = Sequential()
model.add(first_layer)
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(AveragePooling2D((2, 2), strides=(2, 2)))

model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(AveragePooling2D((2, 2), strides=(2, 2)))
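set_input was a Keras 1.x API and no longer exists; with the current functional API the same first block would look roughly like this (a sketch, not the original code; the original tensors are channels-first, so data_format is set explicitly):

# Hypothetical modern rewrite of the first conv block above:
from tensorflow.keras import layers, Model

inp = layers.Input(shape=(3, img_width, img_height))
x = layers.ZeroPadding2D((1, 1), data_format='channels_first')(inp)
x = layers.Conv2D(64, (3, 3), activation='relu', name='conv1_1',
                  data_format='channels_first')(x)
x = layers.ZeroPadding2D((1, 1), data_format='channels_first')(x)
x = layers.Conv2D(64, (3, 3), activation='relu', name='conv1_2',
                  data_format='channels_first')(x)
x = layers.AveragePooling2D((2, 2), strides=(2, 2),
                            data_format='channels_first')(x)
model = Model(inp, x)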
# pix2pix-style U-Net: eight encoder steps down to 1x1, then eight decoder steps with skip connections.
def unet_model(output_channel_count):

    # (256 x 256 x input_channel_count)
    inputs = Input((INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE, input_channel_count))

    # Build the encoder
    # (128 x 128 x N)
    enc1 = ZeroPadding2D(CONV_PADDING)(inputs)
    enc1 = Conv2D(first_layer_filter_count,
                  CONV_FILTER_SIZE,
                  strides=CONV_STRIDE)(enc1)

    # (64 x 64 x 2N)
    filter_count = first_layer_filter_count * 2
    enc2 = _add_encoding_layer(filter_count, enc1)

    # (32 x 32 x 4N)
    filter_count = first_layer_filter_count * 4
    enc3 = _add_encoding_layer(filter_count, enc2)

    # (16 x 16 x 8N)
    filter_count = first_layer_filter_count * 8
    enc4 = _add_encoding_layer(filter_count, enc3)

    # (8 x 8 x 8N)
    enc5 = _add_encoding_layer(filter_count, enc4)

    # (4 x 4 x 8N)
    enc6 = _add_encoding_layer(filter_count, enc5)

    # (2 x 2 x 8N)
    enc7 = _add_encoding_layer(filter_count, enc6)

    # (1 x 1 x 8N)
    enc8 = _add_encoding_layer(filter_count, enc7)

    # Build the decoder
    # (2 x 2 x 8N)
    dec1 = _add_decoding_layer(filter_count, True, enc8)
    dec1 = concatenate([dec1, enc7], axis=CONCATENATE_AXIS)

    # (4 x 4 x 8N)
    dec2 = _add_decoding_layer(filter_count, True, dec1)
    dec2 = concatenate([dec2, enc6], axis=CONCATENATE_AXIS)

    # (8 x 8 x 8N)
    dec3 = _add_decoding_layer(filter_count, True, dec2)
    dec3 = concatenate([dec3, enc5], axis=CONCATENATE_AXIS)

    # (16 x 16 x 8N)
    dec4 = _add_decoding_layer(filter_count, False, dec3)
    dec4 = concatenate([dec4, enc4], axis=CONCATENATE_AXIS)

    # (32 x 32 x 4N)
    filter_count = first_layer_filter_count * 4
    dec5 = _add_decoding_layer(filter_count, False, dec4)
    dec5 = concatenate([dec5, enc3], axis=CONCATENATE_AXIS)

    # (64 x 64 x 2N)
    filter_count = first_layer_filter_count * 2
    dec6 = _add_decoding_layer(filter_count, False, dec5)
    dec6 = concatenate([dec6, enc2], axis=CONCATENATE_AXIS)

    # (128 x 128 x N)
    filter_count = first_layer_filter_count
    dec7 = _add_decoding_layer(filter_count, False, dec6)
    dec7 = concatenate([dec7, enc1], axis=CONCATENATE_AXIS)

    # (256 x 256 x output_channel_count)
    dec8 = Activation(activation='relu')(dec7)
    dec8 = Conv2DTranspose(output_channel_count,
                           DECONV_FILTER_SIZE,
                           strides=DECONV_STRIDE)(dec8)
    dec8 = Activation(activation='sigmoid')(dec8)

    return Model(inputs=inputs, outputs=dec8)
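_add_decoding_layer and the DECONV_* constants are not defined in this snippet; an assumed companion sketch, written to mirror _add_encoding_layer and produce the doubling sizes annotated above:

# Assumed definitions, not from the original source (imports as in the snippets above):
DECONV_FILTER_SIZE = (2, 2)
DECONV_STRIDE = (2, 2)

def _add_decoding_layer(filter_count, add_drop_layer, sequence):
    # One U-Net decoder step: ReLU -> transposed conv (doubles H and W) -> batch norm,
    # with optional dropout on the deepest layers.
    new_sequence = Activation('relu')(sequence)
    new_sequence = Conv2DTranspose(filter_count,
                                   DECONV_FILTER_SIZE,
                                   strides=DECONV_STRIDE)(new_sequence)
    new_sequence = BatchNormalization()(new_sequence)
    if add_drop_layer:
        new_sequence = Dropout(0.5)(new_sequence)
    return new_sequence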
Example #8
# Build the AlexNet model
model = Sequential()

# AlexNet - layer 1: 96 filters of size 11x11, strides = 4, activation = relu;
# classic AlexNet input is 224x224 (here input_shape=(80, 60, 1)), with a 3x3 pooling layer

model.add(
    Conv2D(96, (11, 11), strides=4, input_shape=(80, 60, 1),
           activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3), strides=1))
model.add(BatchNormalization())

# AlexNet - layer 2: 256 filters of size 5x5, strides = 1, activation = relu, 3x3 pooling layer
model.add(ZeroPadding2D(2))
model.add(Conv2D(256, (5, 5), strides=1, activation='relu'))

model.add(MaxPooling2D(pool_size=(3, 3), strides=1))
model.add(BatchNormalization())

# AlexNet - layer 3: 384 filters of size 3x3, strides = 1, activation = relu
model.add(ZeroPadding2D(1))
model.add(Conv2D(384, (3, 3), strides=1, activation='relu'))

# AlexNet - layer 4: 384 filters of size 3x3, strides = 1, activation = relu
model.add(ZeroPadding2D(1))
model.add(Conv2D(384, (3, 3), strides=1, activation='relu'))

# AlexNet - layer 5: 256 filters of size 3x3, strides = 1, activation = relu, 3x3 pooling layer
model.add(ZeroPadding2D(1))