Example 1
def generator_deconv(cat_dim, noise_dim, img_dim, batch_size, model_name="generator_deconv", dset="mnist"):
    """
    Generator model of the DCGAN

    args : nb_classes (int) number of classes
           img_dim (tuple of int) num_chan, height, width
           pretr_weights_file (str) file holding pre trained weights

    returns : model (keras NN) the Neural Net model
    """

    s = img_dim[1]
    f = 128

    if dset == "mnist":
        start_dim = int(s / 4)
        nb_upconv = 2
    else:
        start_dim = int(s / 16)
        nb_upconv = 4

    reshape_shape = (start_dim, start_dim, f)
    output_channels = img_dim[-1]

    cat_input = Input(shape=cat_dim, name="cat_input")
    noise_input = Input(shape=noise_dim, name="noise_input")

    gen_input = Concatenate()([cat_input, noise_input])  # Keras 2 replacement for the removed merge(..., mode="concat")

    x = Dense(1024)(gen_input)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Dense(f * start_dim * start_dim)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)

    x = Reshape(reshape_shape)(x)

    # Transposed conv blocks
    for i in range(nb_upconv - 1):
        x = UpSampling2D(size=(2, 2))(x)
        nb_filters = int(f / (2 ** (i + 1)))
        s = start_dim * (2 ** (i + 1))
        o_shape = (batch_size, s, s, nb_filters)
        x = Deconv2D(nb_filters, (4, 4), output_shape=o_shape, strides=(1, 1), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)

    # Last block
    x = UpSampling2D(size=(2, 2))(x)  # double both spatial dims to match o_shape below
    s = start_dim * (2 ** (nb_upconv))
    o_shape = (batch_size, s, s, output_channels)
    x = Deconv2D(output_channels, (4, 4), output_shape=o_shape, strides=(1, 1), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation("sigmoid")(x)

    generator_model = Model(inputs=[cat_input, noise_input], outputs=[x], name=model_name)

    return generator_model
Example 2
def generator_deconv(noise_dim, img_dim, bn_mode, batch_size, model_name="generator_deconv", dset="mnist"):
    """
    Generator model of the DCGAN

    args : nb_classes (int) number of classes
           img_dim (tuple of int) num_chan, height, width
           pretr_weights_file (str) file holding pre trained weights

    returns : model (keras NN) the Neural Net model
    """

    assert K.backend() == "tensorflow", "Deconv not implemented with theano"

    s = img_dim[1]
    f = 512

    if dset == "mnist":
        start_dim = int(s / 4)
        nb_upconv = 2
    else:
        start_dim = int(s / 16)
        nb_upconv = 4

    reshape_shape = (start_dim, start_dim, f)
    bn_axis = -1
    output_channels = img_dim[-1]

    gen_input = Input(shape=noise_dim, name="generator_input")

    x = Dense(f * start_dim * start_dim, input_dim=noise_dim)(gen_input)
    x = Reshape(reshape_shape)(x)
    x = BatchNormalization(axis=bn_axis)(x)
    x = Activation("relu")(x)

    # Transposed conv blocks
    for i in range(nb_upconv - 1):
        nb_filters = int(f / (2 ** (i + 1)))
        s = start_dim * (2 ** (i + 1))
        o_shape = (batch_size, s, s, nb_filters)
        x = Deconv2D(nb_filters, (3, 3), output_shape=o_shape, strides=(2, 2), padding="same")(x)
        x = BatchNormalization(axis=-1)(x)
        x = Activation("relu")(x)

    # Last block
    s = start_dim * (2 ** (nb_upconv))
    o_shape = (batch_size, s, s, output_channels)
    x = Deconv2D(output_channels, (3, 3), output_shape=o_shape, strides=(2, 2), padding="same")(x)
    x = Activation("tanh")(x)

    generator_model = Model(inputs=[gen_input], outputs=[x], name=model_name)

    return generator_model
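A minimal usage sketch (not part of the original example): it assumes the source module's imports are in scope, with Deconv2D aliased to Conv2DTranspose, and a Keras version whose legacy interface still accepts (and ignores) the output_shape keyword.

import numpy as np

batch_size = 32
noise_dim = (100,)        # shape of the noise vector fed to the Input layer
img_dim = (28, 28, 1)     # MNIST-sized, channels-last output

G = generator_deconv(noise_dim, img_dim, bn_mode=2, batch_size=batch_size, dset="mnist")
z = np.random.normal(size=(batch_size,) + noise_dim).astype("float32")
fake = G.predict(z)       # expected shape (32, 28, 28, 1), values in [-1, 1] from the tanh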
Example 3
def deconv_block_unet(x,
                      x2,
                      f,
                      h,
                      w,
                      batch_size,
                      name,
                      bn_mode,
                      bn_axis,
                      bn=True,
                      dropout=False):

    o_shape = (batch_size, h * 2, w * 2, f)
    x = Activation("relu")(x)
    x = Deconv2D(f, (3, 3),
                 output_shape=o_shape,
                 strides=(2, 2),
                 padding="same")(x)
    if bn:
        x = BatchNormalization(axis=bn_axis)(x)
    if dropout:
        x = Dropout(0.5)(x)
    x = Concatenate(axis=bn_axis)([x, x2])

    return x
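A small wiring sketch for this block (illustrative, not from the original source): the two strided Conv2D layers exist only to create feature maps of plausible sizes, and the layer names are hypothetical. It assumes the Keras layers used inside deconv_block_unet are already imported.

from keras.layers import Input, Conv2D

inp = Input(shape=(128, 128, 3))
skip = Conv2D(256, (3, 3), strides=(8, 8), padding="same")(inp)          # 16x16x256 encoder activation
bottleneck = Conv2D(512, (3, 3), strides=(16, 16), padding="same")(inp)  # 8x8x512 deeper feature map

x = deconv_block_unet(bottleneck, skip, f=256, h=8, w=8, batch_size=16,
                      name="upconv_1", bn_mode=2, bn_axis=-1, dropout=True)
# x is upsampled to 16x16 and ends with 256 + 256 channels after the skip concatenation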
Example 4
def generator(n_nodes=256, noise_dim=100, batch_size=32):
    # generate noise data
    noise_input = Input(shape=(1, noise_dim), name='noise_input')

    # ------------
    # Geometry model
    # -------------

    geometry_hidden1 = Dense(units=1024, input_dim=100, activation='tanh', name='g_dens_1')(noise_input)
    geometry_hidden2 = Dense(4*4*64, activation='tanh', name='g_dens_2')(geometry_hidden1)
    geometry_hidden2_reshape = Reshape(target_shape=(4, 4, 64), name='g_dens_reshape')(geometry_hidden2)
    geometry_hidden3 = Deconv2D(32, kernel_size=(3, 3), strides=(2, 2), name='g_deconv_1', padding='same', data_format='channels_last', kernel_initializer='normal')(geometry_hidden2_reshape)
    geometry_hidden3 = Activation(activation='tanh')(BatchNormalization(name='g_bn_1')(geometry_hidden3))

    geometry_hidden4 = Deconv2D(3, kernel_size=(3, 3), strides=(2, 2), name='g_deconv_2', padding='same', data_format='channels_last', kernel_initializer='normal')(geometry_hidden3)
    geometry_hidden4 = Activation(activation='tanh')(BatchNormalization(name='g_bn_2')(geometry_hidden4))

    geometry_hidden_reshape = Reshape(target_shape=(n_nodes, 3))(geometry_hidden4)
    geometry_output = geometry_hidden_reshape
    geometry_model = Model(inputs=[noise_input],
                           outputs=[geometry_output],
                           name='geometry')

    # -----------
    # morphology
    # ----------

    morphology_hidden1 = Dense(units=512, activation='relu', name='m_den_1', input_dim=100)(noise_input)
    morphology_hidden1 = Dense(8*8*64, activation='relu', name='m_den_2')(morphology_hidden1)
    morphology_hidden1 = Reshape(target_shape=(8, 8, 64), name='m_reshape')(morphology_hidden1)

    morphology_hidden2 = Deconv2D(64, kernel_size=(3, 3), padding='same', strides=(2, 2), name='m_deconv_1', data_format='channels_last', kernel_initializer='uniform')(morphology_hidden1)
    morphology_hidden2 = Activation(activation='relu')(BatchNormalization(name='m_bn_1')(morphology_hidden2))

    morphology_hidden3 = Deconv2D(32, kernel_size=(3, 3), strides=(2, 2), padding='same', name='m_deconv_2', data_format='channels_last', kernel_initializer='uniform')(morphology_hidden2)
    morphology_hidden3 = Activation(activation='relu')(BatchNormalization(name='m_bn_2')(morphology_hidden3))

    morphology_hidden4 = Deconv2D(16, kernel_size=(3, 3), strides=(2, 2), padding='same', name='m_deconv_3', data_format='channels_last', kernel_initializer='uniform')(morphology_hidden3)
    morphology_hidden4 = Activation(activation='relu')(BatchNormalization(name='m_bn_3')(morphology_hidden4))

    morphology_hidden5 = Deconv2D(8, kernel_size=(3, 3), strides=(2, 2), padding='same', name='m_deconv_4', data_format='channels_last', kernel_initializer='uniform')(morphology_hidden4)
    morphology_hidden5 = Activation(activation='relu')(BatchNormalization(name='m_bn_4')(morphology_hidden5))

    morphology_hidden6 = Deconv2D(1, kernel_size=(3, 3), strides=(2, 2), padding='same', name='m_deconv_5', data_format='channels_last', kernel_initializer='uniform')(morphology_hidden5)
    morphology_hidden6 = Activation(activation='sigmoid')(BatchNormalization(name='m_bn_5')(morphology_hidden6))

    morphology_reshape = Reshape(target_shape=(n_nodes, -1))(morphology_hidden6)

    lambda_args = {'n_nodes': n_nodes, 'batch_size': batch_size}

    morphology_output = Lambda(layers.masked_softmax,
                               output_shape=(n_nodes, n_nodes),
                               arguments=lambda_args)(morphology_reshape)  # input mismatch because softmax output is n_nodes-1,n_nodes
    morphology_model = Model(inputs=[noise_input],
                             outputs=[morphology_output],
                             name='morphology')
    geometry_model.summary()  # show geometry network structure
    morphology_model.summary()  # show morphology network structure

    return geometry_model, morphology_model
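A usage sketch (assumption-heavy, not from the original): it presumes the module's layers.masked_softmax helper and the Keras imports are available, and notes the (1, noise_dim) input shape expected by both sub-models.

import numpy as np

geometry_model, morphology_model = generator(n_nodes=256, noise_dim=100, batch_size=32)

z = np.random.normal(size=(32, 1, 100)).astype("float32")
points = geometry_model.predict(z)   # expected (32, 256, 3) node coordinates
probs = morphology_model.predict(z)  # shape depends on layers.masked_softmax (see the inline comment above)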
Example 5
def generator_unet_deconv(img_dim, bn_mode, batch_size, model_name="generator_unet_deconv"):

    assert K.backend() == "tensorflow", "Not implemented with theano backend"

    nb_filters = 64
    bn_axis = -1
    h, w, nb_channels = img_dim
    min_s = min(img_dim[:-1])

    unet_input = Input(shape=img_dim, name="unet_input")

    # Prepare encoder filters
    nb_conv = int(np.floor(np.log(min_s) / np.log(2)))
    list_nb_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)]

    # Encoder
    list_encoder = [Conv2D(list_nb_filters[0], (3, 3),
                           strides=(2, 2), name="unet_conv2D_1", padding="same")(unet_input)]
    # update current "image" h and w
    h, w = h / 2, w / 2
    for i, f in enumerate(list_nb_filters[1:]):
        name = "unet_conv2D_%s" % (i + 2)
        conv = conv_block_unet(list_encoder[-1], f, name, bn_mode, bn_axis)
        list_encoder.append(conv)
        h, w = h / 2, w / 2

    # Prepare decoder filters
    list_nb_filters = list_nb_filters[:-1][::-1]
    if len(list_nb_filters) < nb_conv - 1:
        list_nb_filters.append(nb_filters)

    # Decoder
    list_decoder = [deconv_block_unet(list_encoder[-1], list_encoder[-2],
                                      list_nb_filters[0], h, w, batch_size,
                                      "unet_upconv2D_1", bn_mode, bn_axis, dropout=True)]
    h, w = h * 2, w * 2
    for i, f in enumerate(list_nb_filters[1:]):
        name = "unet_upconv2D_%s" % (i + 2)
        # Dropout only on first few layers
        if i < 2:
            d = True
        else:
            d = False
        conv = deconv_block_unet(list_decoder[-1], list_encoder[-(i + 3)], f, h,
                                 w, batch_size, name, bn_mode, bn_axis, dropout=d)
        list_decoder.append(conv)
        h, w = h * 2, w * 2

    x = Activation("relu")(list_decoder[-1])
    o_shape = (batch_size,) + img_dim
    x = Deconv2D(nb_channels, (3, 3), output_shape=o_shape, strides=(2, 2), padding="same")(x)
    x = Activation("tanh")(x)

    generator_unet = Model(inputs=[unet_input], outputs=[x])

    return generator_unet
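A minimal sketch of how this U-Net generator might be built and called, assuming conv_block_unet and deconv_block_unet (as in the earlier example) plus numpy are in scope, and that the image side is a power of two so the encoder can halve it down to 1.

import numpy as np

img_dim = (256, 256, 3)
G = generator_unet_deconv(img_dim, bn_mode=2, batch_size=4)

x = np.random.uniform(-1, 1, size=(4,) + img_dim).astype("float32")
y = G.predict(x)   # same spatial size as the input: (4, 256, 256, 3), tanh range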
Example 6
    def __init__(self,
                 image_size,
                 filters,
                 lbl_dim=1,
                 latent=8,
                 flows=0,
                 dropout=0.2,
                 use_beta=False):
        super(VAE_Decoder, self).__init__()

        w = image_size[0] // 2**len(filters)
        h = image_size[1] // 2**len(filters)

        fclSiz = w * h * filters[0]
        self.image_size = image_size
        self.lbl_dim = lbl_dim
        self.latent = latent
        self.flows = flows
        self.use_beta = use_beta
        self.flow = Sequential()
        for f in range(flows):
            self.flow.add(Flow(latent))
        self.dec = Sequential([
            Dense(fclSiz),
            BatchNormalization(),
            Activation(K.elu),
            Reshape([w, h, filters[0]]),
        ])
        for f in filters:
            self.dec.add(
                Deconv2D(filters=f,
                         kernel_size=5,
                         strides=2,
                         padding='same',
                         use_bias=False))
            self.dec.add(BatchNormalization(axis=3))
            self.dec.add(Activation(K.elu))
        self.dropout = Dropout(dropout)
        self.img = Sequential([
            # Deconv2D(filters=6, kernel_size=5, strides=1, padding='same', activation=K.tanh ),
            Deconv2D(filters=6, kernel_size=5, strides=1, padding='same'),
            Reshape([image_size[0] * image_size[1] * 6])
        ])
Example 7
def deconv_block_unet(x, x2, f, bn_axis, bn=True, dropout=False, name=""):
    x = Activation("relu")(x)
    x = Deconv2D(f, (3, 3), strides=(2, 2), padding="same", name=name)(x)
    if bn:
        x = BatchNormalization(axis=bn_axis)(x)
    if dropout:
        x = Dropout(0.5)(x)
    x = Concatenate(axis=bn_axis)([x, x2])

    return x
Example 8
def create_decoder(output_dims, base_filters=64, layers=4, latent=512):
    w = output_dims[0] // 2**layers
    h = output_dims[1] // 2**layers
    c = base_filters * 2**(layers - 1)
    decoder = Sequential()
    decoder.add(InputLayer([latent]))
    decoder.add(Dense(w * h * c))
    decoder.add(Reshape([w, h, c]))
    for i in range(layers - 1, 0, -1):
        decoder.add(
            Deconv2D(filters=base_filters * 2**i,
                     kernel_size=(5, 5),
                     strides=(2, 2),
                     padding='same',
                     use_bias=False))
        decoder.add(BatchNormalization(axis=3))
        decoder.add(Activation(K.relu))
    decoder.add(
        Deconv2D(filters=3, kernel_size=(5, 5), strides=(2, 2),
                 padding='same'))
    return decoder
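A usage sketch, assuming the Keras layers and backend K used above are imported; output_dims must be divisible by 2**layers.

import numpy as np

decoder = create_decoder(output_dims=(64, 64, 3), base_filters=64, layers=4, latent=512)
z = np.random.normal(size=(8, 512)).astype("float32")
imgs = decoder.predict(z)   # expected shape (8, 64, 64, 3)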
Example 9
def deconv(x, nf, ks, strides, name, weight_decay):
    kernel_reg = l2(weight_decay[0]) if weight_decay else None
    bias_reg = l2(weight_decay[1]) if weight_decay else None

    x = Deconv2D(nf, (ks, ks),
                 strides=strides,
                 padding='same',
                 use_bias=True,
                 name=name,
                 kernel_regularizer=kernel_reg,
                 bias_regularizer=bias_reg,
                 kernel_initializer=random_normal(stddev=0.01),
                 bias_initializer=constant(0.0))(x)
    return x
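A sketch of a single call (illustrative names), assuming l2 from keras.regularizers and the random_normal/constant initializer aliases are imported as in the original module.

from keras.layers import Input
from keras.models import Model

feat = Input(shape=(19, 19, 128))
up = deconv(feat, nf=256, ks=4, strides=(2, 2), name="up_1", weight_decay=(5e-4, 0.0))
model = Model(feat, up)   # output shape (None, 38, 38, 256): one 2x upsampling step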
Example 10
    def build_generator(self):

        # Input
        latent_z = Input(shape=(self.latent_dim,))
        label_y = Input(shape=(self.num_classes,))
        x = Concatenate()([latent_z, label_y])
        x = Reshape(target_shape=(1, 1, -1))(x)

        # Full Conv 1
        x = Deconv2D(kernel_size=[4, 4], strides=(2, 2), filters=512)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)

        # Full Conv 2
        x = Deconv2D(kernel_size=[4, 4], strides=(2, 2), filters=256, padding='same')(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)

        # Full Conv 3
        x = Deconv2D(kernel_size=[4, 4], strides=(2, 2), filters=128, padding='same')(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        # Full Conv 4
        x = Deconv2D(kernel_size=[4, 4], strides=(2, 2), filters=64, padding='same')(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)

        # Full Conv 5
        x = Deconv2D(kernel_size=[4, 4], strides=(2, 2), filters=1, padding='same')(x)
        x = Activation(activation='tanh')(x)

        model = Model([latent_z, label_y], x)

        model.summary()

        return model
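A hedged usage sketch: gan is assumed to be an instance of the surrounding (unshown) class, with gan.latent_dim == 100 and gan.num_classes == 10.

import numpy as np

G = gan.build_generator()
z = np.random.normal(size=(16, 100)).astype("float32")
y = np.eye(10)[np.random.randint(0, 10, 16)].astype("float32")   # one-hot class labels
imgs = G.predict([z, y])   # expected (16, 64, 64, 1), tanh output in [-1, 1]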
Example 11
def FCN_VGG16_32s(input_shape, class_num):

    # input
    input = Input(shape=input_shape) # (h, w, c)
    
    # block1
    x = Conv2D(64, (3,3), activation='relu', padding='same', name='block1_conv1')(input)
    x = Conv2D(64, (3,3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2,2), strides=(2,2), name='block1_pool')(x)

    # block2
    x = Conv2D(128, (3,3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3,3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2,2), strides=(2,2), name='block2_pool')(x)

    # block3
    x = Conv2D(256, (3,3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3,3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3,3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2,2), strides=(2,2), name='block3_pool')(x)

    # block4
    x = Conv2D(512, (3,3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3,3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3,3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2,2), strides=(2,2), name='block4_pool')(x)

    # block5
    x = Conv2D(512, (3,3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3,3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3,3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2,2), strides=(2,2), name='block5_pool')(x)

    # fc (implemented as conv)
    x = Conv2D(4096, (7,7), activation='relu', padding='same', name='fc1')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(4096, (1,1), activation='relu', padding='same', name='fc2')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(class_num, (1,1), name='fc3')(x) # No activation (i.e. a(x) = x)

    # upsampling (x32)
    x = Deconv2D(class_num, (32, 32), strides=(32, 32), name='deconv', use_bias=False, activation='softmax')(x) # padding?
    
    # define model
    model = Model(input, x)

    return model
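A minimal sketch of building the 32-stride FCN for 21 classes (VOC-like); the spatial size must be a multiple of 32 because of the five poolings plus the 32x transposed convolution.

import numpy as np

model = FCN_VGG16_32s(input_shape=(224, 224, 3), class_num=21)
model.compile(optimizer="adam", loss="categorical_crossentropy")

x = np.random.uniform(size=(2, 224, 224, 3)).astype("float32")
p = model.predict(x)   # expected (2, 224, 224, 21) per-pixel class probabilities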
Example 12
def FCN_VGG16_32s_rgbd(input_shape, class_num):
    # input
    input = Input(shape=input_shape) # (h, w, c)
    
    # VGG16
    vgg16 = VGG16(input_shape=(input_shape[0], input_shape[1], 3), include_top=False, weights='imagenet')

    # first layer
    if input_shape[2] == 3: # RGB
        x = vgg16(input)
    
    elif input_shape[2] == 4: # RGBD
        # split input into RGB and D channels
        rgb = Lambda(lambda x : x[:,:,:,0:3])(input)
        depth = Lambda(lambda x : x[:,:,:,3:4])(input)

        # first layer
        rgb_feat = vgg16.layers[1](rgb) # block1_conv1
        depth_feat = Conv2D(64, (3,3), activation='relu', padding='same', name='depth_conv')(depth)
        x = Add()([rgb_feat, depth_feat])
        for i in range(2, len(vgg16.layers)):
            x = vgg16.layers[i](x)
    
    else:
        print("Error!! wrong # of channels!!")
        sys.exit(1)

    # fc (implemented as conv)
    x = Conv2D(4096, (7,7), activation='relu', padding='same', name='fc1')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(4096, (1,1), activation='relu', padding='same', name='fc2')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(class_num, (1,1), name='fc3')(x) # No activation (i.e. a(x) = x)

    # upsampling (x32)
    x = Deconv2D(class_num, (32, 32), strides=(32, 32), name='deconv', use_bias=False, activation='softmax')(x) # padding?
    
    # define model
    model = Model(input, x)

    return model 
Example 13
def generator_deconv(noise_dim, img_dim, batch_size, model_name="generator_deconv", dset="mnist"):
    """DCGAN generator based on Deconv2D

    Args:
        noise_dim: Dimension of the noise input
        img_dim: dimension of the image output
        batch_size: needed to reshape after the deconv2D
        model_name: model name (default: {"generator_deconv"})
        dset: dataset (default: {"mnist"})

    Returns:
        keras model
    """

    assert K.backend() == "tensorflow", "Deconv not implemented with theano"

    s = img_dim[1]
    f = 512

    if dset == "mnist":
        start_dim = int(s / 4)
        nb_upconv = 2
    elif dset == "celebA":
        start_dim = int(s / 16)
        nb_upconv = 4
    else:
        o = s
        nb_upconv = 0
        while o > 7:
            o = o/2
            nb_upconv += 1
        start_dim = int(o)

    reshape_shape = (start_dim, start_dim, f)
    bn_axis = -1
    output_channels = img_dim[-1]

    gen_input = Input(shape=noise_dim, name="generator_input")

    # Noise input and reshaping
    x = Dense(f * start_dim * start_dim, input_dim=noise_dim, use_bias=False)(gen_input)
    x = Reshape(reshape_shape)(x)
    x = BatchNormalization(axis=bn_axis)(x)
    x = Activation("relu")(x)

    # Transposed conv blocks: Deconv2D->BN->ReLU
    for i in range(nb_upconv - 1):
        nb_filters = int(f / (2 ** (i + 1)))
        s = start_dim * (2 ** (i + 1))
        o_shape = (batch_size, s, s, nb_filters)
        x = Deconv2D(nb_filters, (3, 3),
                     output_shape=o_shape, strides=(2, 2),
                     padding="same", use_bias=False,
                     kernel_initializer=RandomNormal(stddev=0.02))(x)
        x = BatchNormalization(axis=-1)(x)
        x = Activation("relu")(x)

    # Last block
    s = start_dim * (2 ** (nb_upconv))
    o_shape = (batch_size, s, s, output_channels)
    x = Deconv2D(output_channels, (3, 3),
                 output_shape=o_shape, strides=(2, 2),
                 padding="same", use_bias=False,
                 kernel_initializer=RandomNormal(stddev=0.02))(x)
    x = Activation("tanh")(x)

    generator_model = Model(inputs=[gen_input], outputs=[x], name=model_name)
    visualize_model(generator_model)

    return generator_model
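A usage sketch for the celebA branch, assuming RandomNormal and the repo's visualize_model helper are importable and that Deconv2D still tolerates the legacy output_shape keyword.

import numpy as np

batch_size = 16
noise_dim = (100,)
img_dim = (64, 64, 3)    # 64x64 RGB, channels last

G = generator_deconv(noise_dim, img_dim, batch_size, dset="celebA")
z = np.random.normal(size=(batch_size,) + noise_dim).astype("float32")
fake = G.predict(z)      # expected (16, 64, 64, 3) in [-1, 1]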
Example 14
def deep_net(n_ch, patch_height, patch_width):
    inputs = Input(shape=(n_ch, patch_height, patch_width))
    # Block 1
    x1_1 = Conv2D(64, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block1_conv1',
                  data_format='channels_first')(inputs)
    x1_2 = Conv2D(64, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block1_conv2',
                  data_format='channels_first')(x1_1)

    x1_pool = MaxPooling2D((2, 2),
                           strides=(2, 2),
                           name='block1_pool',
                           data_format='channels_first')(x1_2)

    # Block 2
    x2_1 = Conv2D(128, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block2_conv1',
                  data_format='channels_first')(x1_pool)
    x2_2 = Conv2D(128, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block2_conv2',
                  data_format='channels_first')(x2_1)
    x2_pool = MaxPooling2D((2, 2),
                           strides=(2, 2),
                           name='block2_pool',
                           data_format='channels_first')(x2_2)

    # Block 3
    x3_1 = Conv2D(256, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block3_conv1',
                  data_format='channels_first')(x2_pool)
    x3_2 = Conv2D(256, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block3_conv2',
                  data_format='channels_first')(x3_1)
    x3_3 = Conv2D(256, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block3_conv3',
                  data_format='channels_first')(x3_2)
    x3_pool = MaxPooling2D((2, 2),
                           strides=(2, 2),
                           name='block3_pool',
                           data_format='channels_first')(x3_3)

    # Block 4
    # x4_1 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1',data_format='channels_first')(x3_3)
    x4_1 = Conv2D(512, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block4_conv1',
                  data_format='channels_first')(x3_pool)
    #x4_drop1 = Dropout(0.5, name='dr1')(x4_1)
    x4_2 = Conv2D(512, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block4_conv2',
                  data_format='channels_first')(x4_1)
    #x4_drop2 = Dropout(0.5, name='dr2')(x4_2)
    x4_3 = Conv2D(512, (3, 3),
                  activation='relu',
                  padding='same',
                  name='block4_conv3',
                  data_format='channels_first')(x4_2)
    #x4_drop3 = Dropout(0.5, name='dr3')(x4_3)

    x1_2_16 = Conv2D(16, (3, 3),
                     name='x1_2_16',
                     padding='same',
                     data_format='channels_first')(x1_2)
    x2_2_16 = Conv2D(16, (3, 3),
                     name='x2_2_16',
                     padding='same',
                     data_format='channels_first')(x2_2)
    x3_3_16 = Conv2D(16, (3, 3),
                     name='x3_3_16',
                     padding='same',
                     data_format='channels_first')(x3_3)
    x4_3_16 = Conv2D(16, (3, 3),
                     name='x4_3_16',
                     padding='same',
                     data_format='channels_first')(x4_3)

    conv4_to_1 = Conv2D(2, (3, 3),
                        padding='same',
                        data_format='channels_first',
                        name='conv4_to_1')(x4_3)
    # conv4_to_1_up=UpSampling2D(size=(8,8),data_format='channels_first')(conv4_to_1)
    conv4_to_1_up = Deconv2D(filters=2,
                             kernel_size=16,
                             strides=(8, 8),
                             data_format='channels_first')(conv4_to_1)
    # crop4to1_back = Cropping2D(cropping=(patch_height, patch_width), data_format='channels_first')(conv4_to_1_up)
    # conv1_2_17=concatenate([conv4_to_1_up,x1_2_16],axis=1)
    crop4to1_back = cropfunc(conv4_to_1_up, inputs)
    conv1_2_17 = concatenate([crop4to1_back, x1_2_16], axis=1)
    dsn1_in = Conv2D(1, (1, 1), data_format='channels_first',
                     name='dsn1_in')(conv1_2_17)

    conv1_to_2 = Conv2D(2, (1, 1),
                        name='conv1_to_2',
                        data_format='channels_first')(x1_2_16)
    # side_multi2_up=UpSampling2D(size=(2,2),data_format='channels_first')(x2_2_16)
    side_multi2_up = Deconv2D(16,
                              4,
                              strides=(2, 2),
                              padding='same',
                              data_format='channels_first')(x2_2_16)
    # upside_multi2 = Cropping2D(cropping=(patch_height, patch_width), data_format='channels_first')(side_multi2_up)
    # conv2_2_17=concatenate([conv1_to_2,side_multi2_up],axis=1)
    upside_multi2 = cropfunc(side_multi2_up, inputs)
    upside_multi27 = concatenate([conv1_to_2, upside_multi2], axis=1)
    # dsn2_in=Conv2D(1,(1,1),data_format='channels_first',padding='same',name='dsn2_in')(conv2_2_17)
    dsn2_in = Conv2D(1, (1, 1), data_format='channels_first',
                     name='dsn2_in')(upside_multi27)

    # conv2_to_3=Conv2D(1,(1,1),padding='same',name='conv2_to_3',data_format='channels_first')(conv2_2_17)
    # side_multi3_up=UpSampling2D(size=(4,4),data_format='channels_first')(x3_3_16)
    conv2_to_3 = Conv2D(1, (1, 1),
                        name='conv2_to_3',
                        data_format='channels_first')(upside_multi27)
    side_multi3_up = Deconv2D(16,
                              8,
                              strides=(4, 4),
                              data_format='channels_first')(x3_3_16)
    # upside_multi3 = Cropping2D(cropping=(patch_height, patch_width), data_format='channels_first')(side_multi3_up)
    upside_multi3 = cropfunc(side_multi3_up, inputs)
    # conv3_2_17=concatenate([conv2_to_3,side_multi3_up],axis=1)
    upside_multi37 = concatenate([conv2_to_3, upside_multi3], axis=1)
    # dsn3_in=Conv2D(1,(1,1),data_format='channels_first',padding='same',name='dsn3_in')(conv3_2_17)
    dsn3_in = Conv2D(1, (1, 1), data_format='channels_first',
                     name='dsn3_in')(upside_multi37)

    # conv3_to_4=Conv2D(1,(1,1),padding='same',name='conv3_to_4',data_format='channels_first')(conv3_2_17)
    # side_multi4_up=UpSampling2D(size=(8,8),data_format='channels_first')(x4_3_16)
    # conv4_2_17=concatenate([conv3_to_4,side_multi4_up],axis=1)
    # dsn4_in=Conv2D(1,(1,1),data_format='channels_first',padding='same',name='dsn4_in')(conv4_2_17)
    conv3_to_4 = Conv2D(1, (1, 1),
                        name='conv3_to_4',
                        data_format='channels_first')(upside_multi37)
    side_multi4_up = Deconv2D(16,
                              16,
                              strides=(8, 8),
                              data_format='channels_first')(x4_3_16)
    # upside_multi4 = Cropping2D(cropping=(patch_height, patch_width), data_format='channels_first')(side_multi4_up)
    upside_multi4 = cropfunc(side_multi4_up, inputs)
    upside_multi47 = concatenate([conv3_to_4, upside_multi4], axis=1)
    dsn4_in = Conv2D(1, (1, 1), data_format='channels_first',
                     name='dsn4_in')(upside_multi47)

    dsn = concatenate([dsn1_in, dsn2_in, dsn3_in, dsn4_in], axis=1)
    dsn_out = Conv2D(1, (1, 1),
                     data_format='channels_first',
                     padding='same',
                     name='dsn_out')(dsn)
    # dsn_out = Conv2D(1, (1, 1), data_format='channels_first', name='dsn_out', activation='sigmoid')(dsn)

    target = Input(shape=(1, patch_height, patch_width), name="target")
    MyLoss = Lambda(lambda x: MyEntropyLoss(*x), name="complex_loss")\
        ([target, dsn1_in, dsn2_in, dsn3_in, dsn4_in, dsn_out])

    # outputs = [dsn_out, MyLoss]
    outputs = [dsn_out, dsn1_in, dsn2_in, dsn3_in, dsn4_in, MyLoss]
    model = Model(inputs=[inputs, target], outputs=outputs)
    # model = Model(inputs=inputs, outputs=[dsn1_in, dsn2_in, dsn3_in, dsn4_in, dsn_out])

    # model.add_loss(myloss)
    # model.compile(loss=[None] * len(model.outputs), optimizer='SGD')
    # model.compile(optimizer='SGD')

    model._losses = []
    model._per_input_losses = {}
    for loss_name in ["complex_loss"]:
        layer = model.get_layer(loss_name)
        if layer.output in model.losses:
            continue
        loss = tf.reduce_mean(layer.output)
        model.add_loss(loss)

    return model
Example 15
def _residual_block(input, dim, kernel_size, with_bn, resample=None):
    output = input
    output = _bn_relu(output, with_bn)

    if resample == "up":
        input_shape = K.int_shape(input)
        output = Reshape(target_shape=input_shape[1:-1] + (1, ) +
                         input_shape[-1:])(output)
        output = Deconv2D(dim,
                          strides=(2, 1),
                          kernel_size=kernel_size,
                          padding="same",
                          kernel_initializer="he_normal")(output)
        deconvolved_shape = K.int_shape(output)
        output = Reshape(deconvolved_shape[1:-2] +
                         (deconvolved_shape[-1], ))(output)
    else:
        output = Conv1D(dim,
                        kernel_size=kernel_size,
                        padding="same",
                        kernel_initializer="he_normal")(output)

    output = _bn_relu(output, with_bn=with_bn)

    if resample == "down":
        output = Conv1D(dim,
                        kernel_size=kernel_size,
                        padding="same",
                        kernel_initializer="he_normal")(output)
        output = AveragePooling1D(2)(output)
    else:
        output = Conv1D(dim,
                        kernel_size=kernel_size,
                        padding="same",
                        kernel_initializer="he_normal")(output)

    input_shape = K.int_shape(input)
    output_shape = K.int_shape(output)

    if input_shape[-1] == output_shape[-1] and resample is None:
        shortcut = input
    else:
        if resample == "down":
            shortcut = Conv1D(output_shape[-1], kernel_size=1,
                              padding="same")(input)
            shortcut = AveragePooling1D(2)(shortcut)
        elif resample == "up":
            input_shape = K.int_shape(input)
            shortcut = Reshape(input_shape[1:-1] + (1, ) +
                               input_shape[-1:])(input)
            shortcut = Deconv2D(dim,
                                strides=(2, 1),
                                kernel_size=kernel_size,
                                padding="same",
                                kernel_initializer="he_normal")(shortcut)
            deconvolved_shape = K.int_shape(shortcut)
            shortcut = Reshape(deconvolved_shape[1:-2] +
                               (deconvolved_shape[-1], ))(shortcut)
        else:
            shortcut = Conv1D(output_shape[-1], kernel_size=1,
                              padding="same")(input)

    return add([shortcut, output])
Example 16
    def construct_model(self):

        inputs = Input(shape=(self.frequency_bins, self.frames, 1))

        conv1 = Conv2D(16, 5, strides=2, padding=self.padding)(inputs)
        conv1 = BatchNormalization()(conv1)
        conv1 = LeakyReLU(alpha=0.2)(conv1)

        conv2 = Conv2D(32, 5, strides=2, padding=self.padding)(conv1)
        conv2 = BatchNormalization()(conv2)
        conv2 = LeakyReLU(alpha=0.2)(conv2)

        conv3 = Conv2D(64, 5, strides=2, padding=self.padding)(conv2)
        conv3 = BatchNormalization()(conv3)
        conv3 = LeakyReLU(alpha=0.2)(conv3)

        conv4 = Conv2D(128, 5, strides=2, padding=self.padding)(conv3)
        conv4 = BatchNormalization()(conv4)
        conv4 = LeakyReLU(alpha=0.2)(conv4)

        conv5 = Conv2D(256, 5, strides=2, padding=self.padding)(conv4)
        conv5 = BatchNormalization()(conv5)
        conv5 = LeakyReLU(alpha=0.2)(conv5)

        conv6 = Conv2D(512, 5, strides=2, padding=self.padding)(conv5)
        conv6 = BatchNormalization()(conv6)
        conv6 = LeakyReLU(alpha=0.2)(conv6)

        deconv7 = Deconv2D(256, 5, strides=2, padding=self.padding)(conv6)
        deconv7 = BatchNormalization()(deconv7)
        deconv7 = Dropout(0.5)(deconv7)
        deconv7 = Activation('relu')(deconv7)

        deconv8 = Concatenate(axis=3)([deconv7, conv5])
        deconv8 = Deconv2D(128, 5, strides=2, padding=self.padding)(deconv8)
        deconv8 = BatchNormalization()(deconv8)
        deconv8 = Dropout(0.5)(deconv8)
        deconv8 = Activation('relu')(deconv8)

        deconv9 = Concatenate(axis=3)([deconv8, conv4])
        deconv9 = Deconv2D(64, 5, strides=2, padding=self.padding)(deconv9)
        deconv9 = BatchNormalization()(deconv9)
        deconv9 = Dropout(0.5)(deconv9)
        deconv9 = Activation('relu')(deconv9)

        deconv10 = Concatenate(axis=3)([deconv9, conv3])
        deconv10 = Deconv2D(32, 5, strides=2, padding=self.padding)(deconv10)
        deconv10 = BatchNormalization()(deconv10)
        deconv10 = Activation('relu')(deconv10)

        deconv11 = Concatenate(axis=3)([deconv10, conv2])
        deconv11 = Deconv2D(16, 5, strides=2, padding=self.padding)(deconv11)
        deconv11 = BatchNormalization()(deconv11)
        deconv11 = Activation('relu')(deconv11)

        deconv12 = Concatenate(axis=3)([deconv11, conv1])
        deconv12 = Deconv2D(1, 5, strides=2, padding=self.padding)(deconv12)
        deconv12 = Activation('relu')(deconv12)

        model = Model(inputs, deconv12)

        return model
Example 17
def Build_end2end_VGG16_VT_Model(selected_layer_nbr, input_batch_shape):

    img_input = Input(batch_shape=input_batch_shape)

    # encoder part
    masks = {}
    # Block 1
    if selected_layer_nbr >= 0:
        x = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=False,
                   name='block1_conv1')(img_input)
    if selected_layer_nbr >= 1:
        x = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=False,
                   name='block1_conv2')(x)
    if selected_layer_nbr >= 2:
        mask_1 = MaskPooling2D(padding='SAME')(x)
        masks['block1_mask'] = mask_1
        x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    if selected_layer_nbr >= 3:
        x = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=False,
                   name='block2_conv1')(x)
    if selected_layer_nbr >= 4:
        x = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=False,
                   name='block2_conv2')(x)
    if selected_layer_nbr >= 5:
        mask_2 = MaskPooling2D(padding='SAME')(x)
        masks['block2_mask'] = mask_2
        x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

        # Block 3
    if selected_layer_nbr >= 6:
        x = Conv2D(256, (3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=False,
                   name='block3_conv1')(x)
    if selected_layer_nbr >= 7:
        x = Conv2D(256, (3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=False,
                   name='block3_conv2')(x)
    if selected_layer_nbr >= 8:
        x = Conv2D(256, (3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=False,
                   name='block3_conv3')(x)
    if selected_layer_nbr >= 9:
        mask_3 = MaskPooling2D(padding='SAME')(x)
        masks['block3_mask'] = mask_3
        x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

        # Block 4
    if selected_layer_nbr >= 10:
        x = Conv2D(512, (3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=False,
                   name='block4_conv1')(x)
    if selected_layer_nbr >= 11:
        x = Conv2D(512, (3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=False,
                   name='block4_conv2')(x)
    if selected_layer_nbr >= 12:
        x = Conv2D(512, (3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=False,
                   name='block4_conv3')(x)
    if selected_layer_nbr >= 13:
        mask_4 = MaskPooling2D(padding='SAME')(x)
        masks['block4_mask'] = mask_4
        x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

        # Block 5
    if selected_layer_nbr >= 14:
        x = Conv2D(512, (3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=False,
                   name='block5_conv1')(x)
    if selected_layer_nbr >= 15:
        x = Conv2D(512, (3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=False,
                   name='block5_conv2')(x)
    if selected_layer_nbr >= 16:
        x = Conv2D(512, (3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=False,
                   name='block5_conv3')(x)
    if selected_layer_nbr >= 17:
        mask_5 = MaskPooling2D(padding='SAME')(x)
        masks['block5_mask'] = mask_5
        x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    # decoder part

    # Block5
    if selected_layer_nbr == 17:
        x = DePooling2D()([x, mask_5])
    if selected_layer_nbr >= 16:
        x = Deconv2D(512, (3, 3),
                     activation='relu',
                     padding='same',
                     use_bias=False,
                     name='block5_deconv3')(x)
    if selected_layer_nbr >= 15:
        x = Deconv2D(512, (3, 3),
                     activation='relu',
                     padding='same',
                     use_bias=False,
                     name='block5_deconv2')(x)
    if selected_layer_nbr >= 14:
        x = Deconv2D(512, (3, 3),
                     activation='relu',
                     padding='same',
                     use_bias=False,
                     name='block5_deconv1')(x)

    # Block 4
    if selected_layer_nbr >= 13:
        x = DePooling2D()([x, mask_4])
    if selected_layer_nbr >= 12:
        x = Deconv2D(512, (3, 3),
                     activation='relu',
                     padding='same',
                     use_bias=False,
                     name='block4_deconv3')(x)
    if selected_layer_nbr >= 11:
        x = Deconv2D(512, (3, 3),
                     activation='relu',
                     padding='same',
                     use_bias=False,
                     name='block4_deconv2')(x)
    if selected_layer_nbr >= 10:
        x = Deconv2D(256, (3, 3),
                     activation='relu',
                     padding='same',
                     use_bias=False,
                     name='block4_deconv1')(x)

    # Block 3
    if selected_layer_nbr >= 9:
        x = DePooling2D()([x, mask_3])
    if selected_layer_nbr >= 8:
        x = Deconv2D(256, (3, 3),
                     activation='relu',
                     padding='same',
                     use_bias=False,
                     name='block3_deconv3')(x)
    if selected_layer_nbr >= 7:
        x = Deconv2D(256, (3, 3),
                     activation='relu',
                     padding='same',
                     use_bias=False,
                     name='block3_deconv2')(x)
    if selected_layer_nbr >= 6:
        x = Deconv2D(128, (3, 3),
                     activation='relu',
                     padding='same',
                     use_bias=False,
                     name='block3_deconv1')(x)

    # Block 2
    if selected_layer_nbr >= 5:
        x = DePooling2D()([x, mask_2])
    if selected_layer_nbr >= 4:
        x = Deconv2D(128, (3, 3),
                     activation='relu',
                     padding='same',
                     use_bias=False,
                     name='block2_deconv2')(x)
    if selected_layer_nbr >= 3:
        x = Deconv2D(64, (3, 3),
                     activation='relu',
                     padding='same',
                     use_bias=False,
                     name='block2_deconv1')(x)

    # Block 1
    if selected_layer_nbr >= 2:
        x = DePooling2D()([x, mask_1])
    if selected_layer_nbr >= 1:
        x = Deconv2D(64, (3, 3),
                     activation='relu',
                     padding='same',
                     use_bias=False,
                     name='block1_deconv2')(x)
    if selected_layer_nbr >= 0:
        x = Deconv2D(3, (3, 3),
                     activation='relu',
                     padding='same',
                     use_bias=False,
                     name='block1_deconv1')(x)

    predictions = x

    # build the model
    model = Model(inputs=img_input, outputs=predictions)

    return model
Example 18
    def from_scratch(cls, cat_dim, cont_dim, noise_dim, img_size, img_ch,
                     batch_size):
        """
        Generator model of the DCGAN

        :param cat_dim: Latent categorical dimension
        :param cont_dim: Latent continuous dimension
        :param noise_dim: Noise dimension
        :param img_size: Image width == height (only specify for CelebA)
        :param start_size: The side resolution at which the deconvolutions start
        :param batch_size: Batch size that the model can take
        :return: model (keras NN) the Neural Net model
        """

        # Set up modifiable parameters
        f = 128
        nb_upconv = 4
        start_size = 4
        filter_dim = (3, 3)
        stride_dim = (2, 2)

        # Create the network
        cat_input = Input(shape=(cat_dim, ), name="cat_input")
        cont_input = Input(shape=(cont_dim, ), name="cont_input")
        noise_input = Input(shape=(noise_dim, ), name="noise_input")

        gen_input = concatenate([cat_input, cont_input, noise_input])

        x = Dense(1024)(gen_input)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)

        x = Dense(f * start_size * start_size)(x)
        x = BatchNormalization()(x)
        x = Activation("relu")(x)

        x = Reshape((start_size, start_size, f))(x)

        # Transposed conv blocks
        for i in range(nb_upconv - 1):
            nb_filters = int(f / (2**(i + 1)))
            img_size = start_size * (2**(i + 1))
            o_shape = (batch_size, img_size, img_size, nb_filters)
            x = Deconv2D(nb_filters,
                         filter_dim,
                         output_shape=o_shape,
                         strides=stride_dim,
                         padding="same")(x)
            x = BatchNormalization(axis=-1)(x)
            x = Activation("relu")(x)

        # Last block
        img_size = start_size * (2**(nb_upconv))
        o_shape = (batch_size, img_size, img_size, img_ch)
        x = Deconv2D(img_ch, (3, 3),
                     output_shape=o_shape,
                     strides=(2, 2),
                     padding="same")(x)
        x = Activation("tanh")(x)

        generator_model = Model(inputs=[cat_input, cont_input, noise_input],
                                outputs=[x],
                                name=cls.MODEL_NAME)

        return cls(generator_model=generator_model)
Example 19
    def __init__(self, img_dim, architecture="upsampling"):
        unet_input = Input(shape=img_dim, name="unet_input")

        if architecture == "upsampling":
            nb_filters = 64

            if K.image_data_format() == "channels_first":
                bn_axis = 1
                nb_channels = img_dim[0]
                min_s = min(img_dim[1:])
            else:
                bn_axis = -1
                nb_channels = img_dim[-1]
                min_s = min(img_dim[:-1])

            # Prepare encoder filters
            nb_conv = int(np.floor(np.log(min_s) / np.log(2)))
            list_nb_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)]

            # Encoder
            list_encoder = [Conv2D(list_nb_filters[0], (3, 3),
                                   strides=(2, 2), name="unet_conv2D_1", padding="same")(unet_input)]
            for i, filters in enumerate(list_nb_filters[1:]):
                name = "unet_conv2D_%s" % (i + 2)
                conv = conv_block_unet(list_encoder[-1], filters, bn_axis, name=name)
                list_encoder.append(conv)

            # Prepare decoder filters
            list_nb_filters = list_nb_filters[:-2][::-1]
            if len(list_nb_filters) < nb_conv - 1:
                list_nb_filters.append(nb_filters)

            # Decoder
            list_decoder = [up_conv_block_unet(list_encoder[-1], list_encoder[-2],
                                               list_nb_filters[0], bn_axis, dropout=True, name="unet_upconv2D_1")]
            for i, filters in enumerate(list_nb_filters[1:]):
                name = "unet_upconv2D_%s" % (i + 2)
                # Dropout only on first few layers
                conv = up_conv_block_unet(list_decoder[-1], list_encoder[-(i + 3)], filters, bn_axis,
                                          dropout=i < 2, name=name)
                list_decoder.append(conv)

            x = Activation("relu")(list_decoder[-1])
            x = UpSampling2D(size=(2, 2))(x)
            x = Conv2D(nb_channels, (3, 3), name="last_conv", padding="same")(x)
            x = Activation("tanh")(x)
        elif architecture == "deconv":
            assert K.backend() == "tensorflow", "Not implemented with theano backend"

            nb_filters = 64
            bn_axis = -1
            nb_channels = img_dim[-1]
            min_s = min(img_dim[:-1])

            # Prepare encoder filters
            nb_conv = int(np.floor(np.log(min_s) / np.log(2)))
            list_nb_filters = [nb_filters * min(8, (2 ** i)) for i in range(nb_conv)]

            # Encoder
            list_encoder = [Conv2D(list_nb_filters[0], (3, 3),
                                   strides=(2, 2), name="unet_conv2D_1", padding="same")(unet_input)]
            # update current "image" h and w
            for i, filters in enumerate(list_nb_filters[1:]):
                name = "unet_conv2D_%s" % (i + 2)
                conv = conv_block_unet(list_encoder[-1], filters, bn_axis, name=name)
                list_encoder.append(conv)

            # Prepare decoder filters
            list_nb_filters = list_nb_filters[:-1][::-1]
            if len(list_nb_filters) < nb_conv - 1:
                list_nb_filters.append(nb_filters)

            # Decoder
            list_decoder = [deconv_block_unet(list_encoder[-1], list_encoder[-2],
                                              list_nb_filters[0], bn_axis, dropout=True, name="unet_upconv2D_1")]
            for i, filters in enumerate(list_nb_filters[1:]):
                name = "unet_upconv2D_%s" % (i + 2)
                # Dropout only on first few layers
                conv = deconv_block_unet(list_decoder[-1], list_encoder[-(i + 3)], filters, bn_axis, dropout=i < 2,
                                         name=name)
                list_decoder.append(conv)

            x = Activation("relu")(list_decoder[-1])
            # o_shape = (batch_size,) + img_dim
            x = Deconv2D(nb_channels, (3, 3), strides=(2, 2), padding="same")(x)
            x = Activation("tanh")(x)
        else:
            raise ValueError(architecture)

        super(Generator, self).__init__(inputs=[unet_input], outputs=[x])
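An instantiation sketch (assuming numpy as np, the Keras backend K, and the conv_block_unet / up_conv_block_unet / deconv_block_unet helpers from the same module are importable):

import numpy as np

G = Generator(img_dim=(256, 256, 3), architecture="deconv")
x = np.random.uniform(-1, 1, size=(2, 256, 256, 3)).astype("float32")
y = G.predict(x)   # expected (2, 256, 256, 3), tanh output in [-1, 1]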