Example #1
    def build_discriminator(self):

        model = Sequential()
        model.add(
            Conv2D(16,
                   kernel_size=4,
                   strides=2,
                   padding='same',
                   input_shape=self.img_shape))
        model.add(LeakyReLU(alpha=0.8))
        model.add(Conv2D(32, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        model.add(Conv2D(64, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        model.add(Conv2D(64, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        model.add(Conv2D(128, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())

        img = Input(shape=self.img_shape)
        features = model(img)

        label = Flatten()(features)
        validity = Dense(1, activation="sigmoid")(label)

        return Model(img, validity)
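The snippets on this page are shown without their surrounding imports. A minimal set that makes Example #1 runnable, assuming standalone Keras 2.x with the keras_contrib package installed (the same import path used in Examples #12 and #23 below), might look like this:

# Imports assumed by Example #1 (and most of the 2-D examples below);
# InstanceNormalization lives in keras_contrib, not in core Keras.
from keras.models import Sequential, Model
from keras.layers import Input, Conv2D, LeakyReLU, Flatten, Dense
from keras_contrib.layers.normalization import InstanceNormalization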
Example #2
    def build_discriminator(self):

        model = Sequential()
        model.add(
            Conv2D(self.df,
                   kernel_size=4,
                   strides=2,
                   padding='same',
                   input_shape=self.img_shape))
        model.add(LeakyReLU(alpha=0.8))
        model.add(Conv2D(self.df * 2, kernel_size=4, strides=2,
                         padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        model.add(Conv2D(self.df * 4, kernel_size=4, strides=2,
                         padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        model.add(Conv2D(self.df * 8, kernel_size=4, strides=2,
                         padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        model.add(Conv2D(1, kernel_size=4, strides=1, padding='same'))

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)
Example #3
    def build_discriminator(self):

        model = Sequential()
        model.add(Conv2D(64, kernel_size=4, strides=2, padding='same', input_shape=self.img_shape))
        model.add(LeakyReLU(alpha=0.8))
        model.add(Conv2D(128, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())
        model.add(Conv2D(256, kernel_size=4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(InstanceNormalization())

        model.summary()

        img = Input(shape=self.img_shape)
        features = model(img)

        validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(features)

        label = Flatten()(features)
        label = Dense(self.num_classes+1, activation="softmax")(label)

        return Model(img, [validity, label])
Example #4
def residual_block_m(y, nb_channels, _strides=(1, 1), _project_shortcut=False):
    shortcut = y

    # down-sampling is performed with a stride of 2
    y = InstanceNormalization(axis=-1)(y)
    y = layers.ELU()(y)
    y = layers.Conv2D(nb_channels,
                      kernel_size=(3, 3),
                      strides=_strides,
                      padding='same')(y)

    y = InstanceNormalization(axis=-1)(y)
    y = layers.ELU()(y)
    y = layers.Conv2D(nb_channels,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      padding='same')(y)

    # identity shortcuts used directly when the input and
    # output are of the same dimensions
    if _project_shortcut or _strides != (1, 1):
        # when the dimensions increase projection shortcut is used to
        # match dimensions (done by 1x1 convolutions)
        # when the shortcuts go across feature maps of two sizes,
        # they are performed with a stride of 2
        shortcut = InstanceNormalization(axis=-1)(shortcut)
        shortcut = layers.Conv2D(nb_channels,
                                 kernel_size=(1, 1),
                                 strides=_strides,
                                 padding='same')(shortcut)

    y = layers.add([shortcut, y])

    return y
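A hedged usage sketch for residual_block_m; the input shape and channel counts are illustrative, not from the source, and the `layers` alias is assumed to be the same keras.layers module used inside the block:

# Illustrative only: chain two blocks on a feature map. The second call
# triggers the 1x1 projection shortcut because of the stride change.
x = layers.Input(shape=(64, 64, 128))
y = residual_block_m(x, nb_channels=128)                   # identity shortcut
y = residual_block_m(y, nb_channels=256, _strides=(2, 2))  # projected shortcut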
Example #5
def generator_resnet(opt):
    img = Input(shape=(
        opt.data_pix_size,
        opt.data_pix_size,
        opt.in_dim,
    ))
    pad_img = Lambda(
        lambda x: tf.pad(x, [[0, 0], [3, 3], [3, 3], [0, 0]], 'REFLECT'))(img)
    c1 = conv2d(pad_img,
                opt.g_fir_dim,
                7,
                1,
                padding='VALID',
                activation='relu')
    c1 = InstanceNormalization()(c1)
    c2 = conv2d(c1, opt.g_fir_dim * 2, 3, 2, activation='relu')
    c2 = InstanceNormalization()(c2)
    c3 = conv2d(c2, opt.g_fir_dim * 4, 3, 2, activation='relu')
    c3 = InstanceNormalization()(c3)
    # residual blocks
    r1 = residule_block(c3, opt.g_fir_dim * 4)
    r2 = residule_block(r1, opt.g_fir_dim * 4)
    r3 = residule_block(r2, opt.g_fir_dim * 4)
    r4 = residule_block(r3, opt.g_fir_dim * 4)
    r5 = residule_block(r4, opt.g_fir_dim * 4)
    r6 = residule_block(r5, opt.g_fir_dim * 4)
    r7 = residule_block(r6, opt.g_fir_dim * 4)
    r8 = residule_block(r7, opt.g_fir_dim * 4)
    r9 = residule_block(r8, opt.g_fir_dim * 4)

    t1 = trans_conv2d(r9,
                      opt.g_fir_dim * 2,
                      3,
                      2,
                      padding='SAME',
                      activation='relu')
    t1 = InstanceNormalization()(t1)
    t2 = trans_conv2d(t1,
                      opt.g_fir_dim,
                      3,
                      2,
                      padding='SAME',
                      activation='relu')
    t2 = InstanceNormalization()(t2)
    t2_pad = Lambda(
        lambda x: tf.pad(x, [[0, 0], [1, 2], [1, 2], [0, 0]], 'REFLECT'))(t2)
    gen_img = conv2d(t2_pad,
                     opt.out_dim,
                     7,
                     1,
                     padding='VALID',
                     activation='tanh')
    return Model(inputs=img, outputs=gen_img)
Example #6
def residule_block(r_i, layer_output, ks=3, s=1):
    r = Lambda(
        lambda x: tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], 'REFLECT'))(r_i)
    #r = ReflectionPadding2D(padding=(1,1))(r_i)
    r = conv2d(r, layer_output, ks, s, padding='VALID')
    r = InstanceNormalization()(r)
    r = Lambda(
        lambda x: tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], 'REFLECT'))(r)
    #r = ReflectionPadding2D(padding=(1,1))(r)
    r = conv2d(r, layer_output, ks, s, padding='VALID')
    r = InstanceNormalization()(r)
    return add([r_i, r])
Example #7
		def residual_block(layer_input, normalization=self.normalize_G):
			"""Residual block described in paper"""
			d = Conv2D(64, kernel_size=3, strides=1, padding='same')(layer_input)
			if normalization:
				d = InstanceNormalization()(d)
				# d = BatchNormalization(momentum=0.8)(d) # TODO 6/5/2018
			d = Activation('relu')(d)
			d = Conv2D(64, kernel_size=3, strides=1, padding='same')(d)
			if normalization:
				d = InstanceNormalization()(d)
				# d = BatchNormalization(momentum=0.8)(d) # TODO 6/5/2018
			d = Add()([d, layer_input])
			return d
Example #8
        def Rk(layer_input, filters, f_size=3, i_norm=True):
            d = ReflectionPadding2D(padding=(1, 1))(layer_input)
            d = Conv2D(filters, kernel_size=f_size, padding="valid")(d)

            if i_norm:
                d = InstanceNormalization()(d)
            d = ReLU()(d)
            d = ReflectionPadding2D(padding=(1, 1))(d)
            d = Conv2D(filters, kernel_size=f_size, padding="valid")(d)
            if i_norm:
                d = InstanceNormalization()(d)
            d = Add()([layer_input, d])  # residual connection
            d = ReLU()(d)
            return d
Example #9
def discriminator(opt):
    img = Input(shape=(
        opt.data_pix_size,
        opt.data_pix_size,
        opt.in_dim,
    ))
    d1 = LeakyReLU(alpha=0.2)(conv2d(img, opt.d_fir_dim, 4, 2))
    d2 = LeakyReLU(alpha=0.2)(InstanceNormalization()(conv2d(
        d1, opt.d_fir_dim * 2, 4, 2)))
    d3 = LeakyReLU(alpha=0.2)(InstanceNormalization()(conv2d(
        d2, opt.d_fir_dim * 4, 4, 2)))
    d4 = LeakyReLU(alpha=0.2)(InstanceNormalization()(conv2d(
        d3, opt.d_fir_dim * 8, 4, 2)))
    d5 = conv2d(d4, 1, s=1)
    return Model(inputs=img, outputs=d5)
Example #10
 def deconv2d(layer_input, filters, f_size=4, dropout_rate=0):
     u = Conv2DTranspose(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
     if dropout_rate:
         u = Dropout(dropout_rate)(u)
     u = InstanceNormalization()(u)
     u = Activation('relu')(u)
     return u
Example #11
		def clf_layer(layer_input, filters, f_size=4, normalization=self.normalize_C):
			"""Classifier layer"""
			d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
			d = LeakyReLU(alpha=0.2)(d)
			if normalization:
				d = InstanceNormalization()(d)
			return d
Example #12
def create_convolution_block(input_layer,
                             n_filters,
                             batch_normalization=True,
                             kernel=(3, 3, 3),
                             activation=None,
                             padding='same',
                             strides=(1, 1, 1),
                             instance_normalization=False,
                             data_format='channels_last'):

    layer = Conv3D(n_filters,
                   kernel,
                   padding=padding,
                   strides=strides,
                   data_format=data_format)(input_layer)
    # batch_normalization before activation
    if batch_normalization:
        layer = BatchNormalization(axis=4)(layer)
    elif instance_normalization:
        try:
            from keras_contrib.layers.normalization import InstanceNormalization
        except ImportError:
            raise ImportError(
                "Install keras_contrib in order to use instance normalization."
                "\nTry: pip install git+https://www.github.com/farizrahman4u/keras-contrib.git"
            )
        layer = InstanceNormalization(axis=4)(layer)

    if activation is None:
        return Activation('relu')(layer)
    else:
        return activation()(layer)
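A hypothetical call site for create_convolution_block; the volume shape and filter counts are made up for illustration, and Input/Model are the usual Keras imports:

# Hypothetical usage: two 3-D conv blocks on a channels_last volume,
# the first with BatchNormalization, the second with InstanceNormalization.
inputs = Input(shape=(64, 64, 64, 1))
x = create_convolution_block(inputs, n_filters=16)
x = create_convolution_block(x, n_filters=32,
                             batch_normalization=False,
                             instance_normalization=True)
model = Model(inputs, x)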
Example #13
def conv(x,
         n_filters,
         kernel_size=3,
         stride=1,
         relu=True,
         nb_classes=1,
         targets=None):
    '''
    Reflection padding, convolution, instance normalization and (maybe) relu.
    '''
    if not kernel_size % 2:
        raise ValueError('Expected odd kernel size.')
    pad = int((kernel_size - 1) / 2)
    o = ReflectionPadding2D(padding=(pad, pad))(x)
    #o = Convolution2D(n_filters, kernel_size, kernel_size,
    #                  subsample=(stride, stride), init=weights_init)(o)
    o = Conv2D(n_filters, kernel_size, strides=stride, padding='valid')(o)  # 'valid': the reflection padding above already accounts for the kernel size
    #o = BatchNormalization()(o)
    # if nb_classes > 1:
    #     o = ConditionalInstanceNormalization(targets, nb_classes)(o)
    # else:

    o = InstanceNormalization()(o)

    if relu:
        o = Activation('relu')(o)
    return o
Example #14
        def downsample(layer_input, filters, f_size=4):
            d = Conv2D(filters, kernel_size=f_size, strides=2,
                       padding='same')(layer_input)
            d = InstanceNormalization(axis=-1, center=False, scale=False)(d)
            d = Activation('relu')(d)

            return d
Example #15
def decoder_layer(inputs,
                  paired_inputs,
                  filters=16,
                  kernel_size=3,
                  strides=2,
                  activation='relu',
                  instance_norm=True):
    """Builds a generic decoder layer made of Conv2D-IN-LeakyReLU
    IN is optional, LeakyReLU may be replaced by ReLU

    """

    conv = Conv2DTranspose(filters=filters,
                           kernel_size=kernel_size,
                           strides=strides,
                           padding='same')

    x = inputs
    if instance_norm:
        x = InstanceNormalization()(x)
    if activation == 'relu':
        x = Activation('relu')(x)
    else:
        x = LeakyReLU(alpha=0.2)(x)
    x = conv(x)
    x = concatenate([x, paired_inputs])
    return x
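A sketch of how the downsample helper from Example #14 and the decoder_layer defined above are typically wired together in a U-Net-style generator; the input shape and filter counts are illustrative, not from the source:

# Illustrative only: save encoder outputs, then feed them back in as
# U-Net skip connections through decoder_layer.
inputs = Input(shape=(64, 64, 3))
e1 = downsample(inputs, 32)               # 32x32
e2 = downsample(e1, 64)                   # 16x16
e3 = downsample(e2, 128)                  # 8x8 bottleneck
d1 = decoder_layer(e3, e2, filters=64)    # upsample to 16x16, concat with e2
d2 = decoder_layer(d1, e1, filters=32)    # upsample to 32x32, concat with e1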
Example #16
def create_convolution_block(input_layer,
                             n_filters,
                             kernel=(3, 3, 3),
                             activation=LeakyReLU,
                             padding='same',
                             strides=(1, 1, 1),
                             batch_normalization=False,
                             instance_normalization=True):
    """
    :param strides:
    :param input_layer:
    :param n_filters:
    :param batch_normalization:
    :param kernel:
    :param activation: Keras activation layer to use. (default is 'relu')
    :param padding:
    :return:
    """
    layer = Conv3D(n_filters, kernel, padding=padding,
                   strides=strides)(input_layer)
    if batch_normalization:
        layer = BatchNormalization(axis=1)(layer)
    elif instance_normalization:
        layer = InstanceNormalization(axis=1)(layer)
    if activation is None:
        return Activation('relu')(layer)
    else:
        return activation()(layer)
Example #17
 def ck(layer_input, filters, f_size=7, i_norm=True) :
     d = ReflectionPadding2D(padding=(3, 3))(layer_input)
     d = Conv2D(filters, kernel_size=f_size, strides=1, padding="valid")(d)  # 'valid': the 3-pixel reflection padding above already covers the 7x7 kernel
     if i_norm:
         d = InstanceNormalization()(d)
     d = ReLU()(d)
     return d
Example #18
 def deconv2d(layer_input,
              skip_input,
              filters,
              f_size=3,
              dropout_rate=0.2):
     """Layers used during upsampling"""
     u = UpSampling2D(size=2)(layer_input)
     u1 = Conv2D(filters,
                 kernel_size=f_size,
                 strides=1,
                 padding='same',
                 activation='relu')(u)
     u2 = Conv2D(filters,
                 kernel_size=f_size,
                 strides=1,
                 padding='same',
                 activation='relu')(u1)
     u = Conv2D(filters,
                kernel_size=f_size,
                padding='same',
                activation='relu')(u2)
     if dropout_rate:
         u = Dropout(dropout_rate)(u)
     u = InstanceNormalization()(u)
     u = Concatenate()([u, skip_input])
     return u
Example #19
def decoder_layer(inputs,
                  paired_inputs,
                  filters=16,
                  kernel_size=3,
                  strides=2,
                  activation='relu',
                  instance_norm=True):
    """Builds a generic decoder layer made of Conv2D-IN-LeakyReLU
    IN is optional, LeakyReLU may be replaced by ReLU
    Arguments: (partial)
    inputs (tensor): the decoder layer input
    paired_inputs (tensor): the encoder layer output 
          provided by U-Net skip connection &
          concatenated to inputs.

    """

    conv = Conv2DTranspose(filters=filters,
                           kernel_size=kernel_size,
                           strides=strides,
                           padding='same')

    x = inputs
    if instance_norm:
        x = InstanceNormalization()(x)
    if activation == 'relu':
        x = Activation('relu')(x)
    else:
        x = LeakyReLU(alpha=0.2)(x)
    x = conv(x)
    x = concatenate([x, paired_inputs])
    return x
Example #20
def model_init(opts):
    """Simple sequential image-to-image convolutional neural network"""

    init_fn = VarianceScaling(2.)

    model = Sequential()
    isFirst = True
    for ks, nk, a in zip(opts.kernelSizes, opts.numKernels,
                         opts.activations):

        if isFirst:
            model.add(
                layers.Conv2D(nk,
                              kernel_size=ks,
                              strides=opts.strides,
                              padding=opts.padding,
                              kernel_initializer=init_fn,
                              input_shape=opts.inputShape))
            isFirst = False
        else:
            model.add(
                layers.Conv2D(nk,
                              kernel_size=ks,
                              strides=opts.strides,
                              padding=opts.padding,
                              kernel_initializer=init_fn))

        if opts.includeInsNormLayer:
            model.add(InstanceNormalization(axis=opts.insNormAxis))

        model.add(layers.Activation(a))
        if opts.dropRate > 0.0:
            model.add(layers.Dropout(rate=opts.dropRate))
    return model
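model_init expects an options object carrying the attributes referenced above; a hypothetical way to supply them (every attribute name comes from the function body, the values are invented):

# Hypothetical opts object for model_init; values are illustrative only.
from types import SimpleNamespace

opts = SimpleNamespace(
    kernelSizes=[5, 3, 3],
    numKernels=[32, 64, 1],
    activations=['relu', 'relu', 'linear'],
    strides=1,
    padding='same',
    inputShape=(128, 128, 1),
    includeInsNormLayer=True,
    insNormAxis=-1,
    dropRate=0.25,
)
net = model_init(opts)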
Example #21
def create_convolution_block(input_layer,
                             n_filters,
                             kernel=(3, 3, 3),
                             activation=LeakyReLU,
                             padding='same',
                             strides=(1, 1, 1),
                             normMethod='batch_norm'):
    """
    strides:
    input_layer:
    n_filters:
    batch_normalization:
    kernel:
    activation: Keras activation layer to use. (default is 'relu')
    padding:
    :return:
    """
    layer = Conv3D(n_filters, kernel, padding=padding,
                   strides=strides)(input_layer)
    if normMethod == 'batch_norm':
        layer = BatchNormalization(axis=1)(layer)
    elif normMethod == 'instance_norm':
        layer = InstanceNormalization(axis=1)(layer)
    elif normMethod == 'group_norm':
        layer = GroupNormalization(groups=4, axis=1, epsilon=0.1)(layer)
    if activation is None:
        return Activation('relu')(layer)
    else:
        return activation()(layer)
Example #22
 def deconv_layer(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
     u = UpSampling1D(size=2)(layer_input)
     u = Conv1D(filters, f_size, strides=1, padding='same', activation='relu')(u)
     if dropout_rate:
         u = Dropout(dropout_rate)(u)
     u = InstanceNormalization()(u)
     u = Concatenate()([u, skip_input])
     return u
Example #23
	def convdown(x,deep,kernal=(5,5)):
		''' conv 1/2 -> lrelu -> instanceNorm '''
		from keras.layers import Conv2D,LeakyReLU
		from keras_contrib.layers.normalization import InstanceNormalization
		x = Conv2D(deep, kernel_size=kernal, strides=2, padding='same')(x)
		x = LeakyReLU(alpha=0.2)(x)
		x = InstanceNormalization()(x)
		return x
Example #24
 def conv2d(layer_input, filters, f_size=4, stride=2):
     d = Conv2D(filters,
                kernel_size=f_size,
                strides=stride,
                padding='same')(layer_input)
     d = InstanceNormalization()(d)
     d = Activation('relu')(d)
     return d
Example #25
    def model(self):
        input_layer = Input(shape=self.SHAPE)
        
        down_1 = Convolution2D(64  , kernel_size=4, strides=2, padding='same',activation=LeakyReLU(alpha=0.2))(input_layer)
        norm_1 = InstanceNormalization()(down_1)

        down_2 = Convolution2D(64*2, kernel_size=4, strides=2, padding='same',activation=LeakyReLU(alpha=0.2))(norm_1)
        norm_2 = InstanceNormalization()(down_2)

        down_3 = Convolution2D(64*4, kernel_size=4, strides=2, padding='same',activation=LeakyReLU(alpha=0.2))(norm_2)
        norm_3 = InstanceNormalization()(down_3)

        down_4 = Convolution2D(64*8, kernel_size=4, strides=2, padding='same',activation=LeakyReLU(alpha=0.2))(norm_3)
        norm_4 = InstanceNormalization()(down_4)


        upsample_1 = UpSampling2D()(norm_4)
        up_conv_1 = Convolution2D(64*4, kernel_size=4, strides=1, padding='same',activation='relu')(upsample_1)
        norm_up_1 = InstanceNormalization()(up_conv_1)
        add_skip_1 = Concatenate()([norm_up_1,norm_3])

        upsample_2 = UpSampling2D()(add_skip_1)
        up_conv_2 = Convolution2D(64*2, kernel_size=4, strides=1, padding='same',activation='relu')(upsample_2)
        norm_up_2 = InstanceNormalization()(up_conv_2)
        add_skip_2 = Concatenate()([norm_up_2,norm_2])

        upsample_3 = UpSampling2D()(add_skip_2)
        up_conv_3 = Convolution2D(64, kernel_size=4, strides=1, padding='same',activation='relu')(upsample_3)
        norm_up_3 = InstanceNormalization()(up_conv_3)
        add_skip_3 = Concatenate()([norm_up_3,norm_1])

        last_upsample = UpSampling2D()(add_skip_3)
        output_layer = Convolution2D(3, kernel_size=4, strides=1, padding='same',activation='tanh')(last_upsample)
        
        return Model(input_layer,output_layer)