Example #1
def generator_nw_unet(**kwargs):
    n_x = kwargs['input_dim']
    #first encoder
    #generator params
    fe1 = 64
    fe2 = 64
    fe3 = 32
    BottleneckDim = 16
    fe5 = 32
    fe6 = 64
    a = 0.2  #alpha
    dout = 0.2  #dropout

    X_in = Input(shape=(n_x, ), name='financial_cond_input')
    Y_in = Input(
        shape=(1, ),
        name='financial_manip')  #this is the dimension we are manipulating

    concat_en = concat([X_in, Y_in])

    #image encoder layers
    h1_en = Dense(fe1)(concat_en)
    h1_en = Dropout(dout)(h1_en)

    h2_en = Dense(fe2)(h1_en)
    h2_en = LeakyReLU(alpha=a)(h2_en)
    h2_en = Dropout(dout)(h2_en)

    h3_en = Dense(fe3)(h2_en)
    h3_en = LeakyReLU(alpha=a)(h3_en)
    h3_en = Dropout(dout)(h3_en)

    h4_bot = Dense(BottleneckDim)(h3_en)
    h4_bot = LeakyReLU(alpha=a)(h4_bot)
    h4_bot = Dropout(dout)(h4_bot)

    h5_dec = Dense(fe5)(h4_bot)
    h5_dec = LeakyReLU(alpha=a)(h5_dec)
    h5_dec = Dropout(dout)(h5_dec)
    h5_dec = concat([h5_dec, h2_en])

    h6_dec = Dense(fe6)(h5_dec)
    h6_dec = LeakyReLU(alpha=a)(h6_dec)
    h6_dec = Dropout(dout)(h6_dec)
    h6_dec = concat([h6_dec, h1_en])

    out_dec = Dense(n_x)(h6_dec)
    out_dec = concat([out_dec, Y_in])

    Generator = Model([X_in, Y_in], out_dec)
    Generator.summary()
    Generator.compile(optimizer='adam', loss=recon_loss)
    return Generator
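A minimal usage sketch for the generator above, assuming the surrounding module already imports the Keras layers and defines concat (e.g. keras.layers.concatenate) and recon_loss; the 10-dimensional feature vector is purely illustrative:

import numpy as np

# Hypothetical usage of generator_nw_unet for 10-dimensional financial vectors.
generator = generator_nw_unet(input_dim=10)

# x_batch: (batch, 10) conditioning features, y_batch: (batch, 1) manipulated value.
x_batch = np.random.randn(32, 10).astype('float32')
y_batch = np.random.randn(32, 1).astype('float32')

# The model output is the reconstruction concatenated with Y_in, so the training
# target is the input features concatenated with y along the feature axis.
target = np.concatenate([x_batch, y_batch], axis=1)
generator.train_on_batch([x_batch, y_batch], target)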
Example #2
def CVAE_Model(Optimizer, batch_size=16, lr=0.01, dim_size=32):
    input3D_shape = (dim_size, dim_size, dim_size, 1)
    input2D_shape = (dim_size, dim_size, 1)
    output2D_dim = 32
    output3D_dim = 32
    latent_dim = 10
    decoder3D_dense_shape = (4 * 4 * 4 * 64)
    decoder3D_reshape_shape = (4, 4, 4, 64)

    input_3d = Input(input3D_shape, name='3D_Encoder_input')
    input_2d = Input(input2D_shape, name='2D_Encoder_input')

    Code_3d = Encoder3D(input_3d, output3D_dim)
    Code_2d = Encoder2D(input_2d, output2D_dim)

    inputs = concat([Code_3d, Code_2d], name='input_concat')

    encoder = Dense(512, activation='relu', name='encoder_dense')(inputs)
    mu = Dense(latent_dim, activation='linear', name='mu')(encoder)
    sigma = Dense(latent_dim, activation='linear', name='sigma')(encoder)
    latent = Lambda(sampling, output_shape=(latent_dim, ),
                    name='latent')([mu, sigma])
    latent_concat = concat([latent, Code_2d], name='latent_concat')
    output = Decoder3D(latent_concat, decoder3D_dense_shape,
                       decoder3D_reshape_shape)

    cvae = Model([input_3d, input_2d], output)

    encoder = Model([input_3d, input_2d], mu)

    d_in = Input(shape=(latent_dim + output2D_dim, ))
    d_out = Decoder3D(d_in, decoder3D_dense_shape, decoder3D_reshape_shape)
    decoder = Model(d_in, d_out)

    Optimizer.lr = lr
    cvae.compile(optimizer=Optimizer,
                 loss=vae_loss,
                 metrics=[KL_loss, recon_loss])
    cvae.summary()

    #cvae.save('cvae.h5')
    return cvae
Example #3
def cond_gan_nw(Generator, Discriminator, x_train):

    input_dim = x_train.shape[1]
    #build gan model
    make_trainable(Discriminator, False)
    gan_input = Input(shape=(input_dim, ))
    gan_inputy = Input(shape=(1, ))

    gan_gen_out = Generator([gan_input, gan_inputy])
    gan_discrim_inp = concat([gan_gen_out, gan_input])

    gan_output = Discriminator(gan_discrim_inp)

    GAN = Model([gan_input, gan_inputy], [gan_gen_out, gan_output])

    GAN.summary()

    #gan_loss_weights=[1E2,1]

    return GAN
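The GAN above is returned uncompiled. A minimal compile sketch, assuming recon_loss is the project's reconstruction loss and reusing the weights hinted at by the commented-out gan_loss_weights line (both are assumptions, not confirmed by this snippet):

# Hypothetical compile step: the first output (gan_gen_out) gets a reconstruction
# loss, the second output (the frozen discriminator's score) an adversarial loss.
GAN.compile(optimizer='adam',
            loss=[recon_loss, 'binary_crossentropy'],
            loss_weights=[1e2, 1])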
Example #4
encoder_dim1 = 512  # dim of encoder hidden layer
#encoder_dim2 = 128 # dim of encoder hidden layer
decoder_dim = 512  # dim of decoder hidden layer
decoder_out_dim = 784  # dim of decoder output layer
activ = 'relu'
optim = Adam(lr=0.0005)

n_x = X_train.shape[1]
n_y = y_train.shape[1]

n_epoch = 100

X = Input(shape=(n_x, ))
label = Input(shape=(n_y, ))

inputs = concat([X, label])

encoder_h = Dense(encoder_dim1, activation=activ,
                  activity_regularizer='l2')(inputs)
#encoder_h = Dense(encoder_dim2, activation=activ)(encoder_h)
mu = Dense(n_z, activation='linear')(encoder_h)
l_sigma = Dense(n_z, activation='linear')(encoder_h)


def sample_z(args):
    mu, l_sigma = args
    eps = K.random_normal(shape=(m, n_z), mean=0., stddev=1.)
    return mu + K.exp(l_sigma / 2) * eps


# Sampling latent space
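The snippet stops just before the sampling layer itself. Following the other examples on this page, sample_z would be wrapped in a Lambda layer; the decoder wiring below is a sketch rather than part of this snippet, and it assumes Lambda is imported from keras.layers and that m (the batch size) and n_z are defined earlier in the script:

# Sample z inside the graph, then decode conditioned on the label.
z = Lambda(sample_z, output_shape=(n_z, ))([mu, l_sigma])
zc = concat([z, label])

decoder_hidden = Dense(decoder_dim, activation=activ)
decoder_out = Dense(decoder_out_dim, activation='sigmoid')
outputs = decoder_out(decoder_hidden(zc))
cvae = Model([X, label], outputs)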
Example #5
input3D_shape = (32, 32, 32, 1)
input2D_shape = (32, 32, 1)
output2D_dim = 32
output3D_dim = 32
latent_dim = 10
decoder3D_dense_shape = (4 * 4 * 4 * 64)
decoder3D_reshape_shape = (4, 4, 4, 64)
batch_size = 16

input_3d = Input(input3D_shape, name='3D_Encoder_input')
input_2d = Input(input2D_shape, name='2D_Encoder_input')

Code_3d = Encoder3D(input_3d, output3D_dim)
Code_2d = Encoder2D(input_2d, output2D_dim)

inputs = concat([Code_3d, Code_2d], name='input_concat')

#encoder = Dense(512, activation='relu',name='encoder_dense')(inputs)
mu = Dense(latent_dim, activation='linear', name='mu')(inputs)
sigma = Dense(latent_dim, activation='linear', name='sigma')(inputs)
latent = Lambda(sampling, output_shape=(latent_dim, ),
                name='latent')([mu, sigma])
latent_concat = concat([latent, Code_2d], name='latent_concat')
output = Decoder3D(latent_concat, decoder3D_dense_shape,
                   decoder3D_reshape_shape)

cvae = Model(inputs=[input_3d, input_2d], outputs=output)

encoder = Model([input_3d, input_2d], mu)

d_in = Input(shape=(latent_dim + output2D_dim, ))
Example #6
def generator_nw_5_u(x_train,
                     g_noise=0.00,
                     nodes=[64, 32, 16, 32, 64],
                     y=False,
                     prelu_bias=0.1,
                     drop_ra=0.0,
                     ker_init=None,
                     compile=True,
                     output_dim=None):

    input_dim = x_train.shape[1]
    if output_dim is None:
        output_dim = input_dim

    fe1 = nodes[0]
    fe2 = nodes[1]
    Bottleneck_Dim = nodes[2]
    fe3 = nodes[3]
    fe4 = nodes[4]

    #Maybe 1 or 2 inputs:
    if y:
        X_in = Input(shape=(input_dim, ), name='financial_cond_input')
        Y_in = Input(
            shape=(1, ),
            name='financial_manip')  #this is the dimension we are manipulating
        #concatenate the two inputs
        concat_en = concat([X_in, Y_in])
        inpu = [X_in, Y_in]
    else:
        X_in = Input(shape=(input_dim, ), name='financial_cond_input')
        concat_en = X_in
        inpu = X_in

    #image encoder layers
    h1_en = Dropout(drop_ra, name='H1_dropout')(concat_en)
    h1_en = GaussianNoise(g_noise, name='H1_noise')(h1_en)
    h1_en = Dense(fe1, kernel_initializer=ker_init, name='H1_layer')(h1_en)
    h1_en = PReLU(name='H1_activation',
                  alpha_initializer=Constant(value=prelu_bias))(h1_en)
    h1_en = BatchNormalization(name='H1_batch_norm')(h1_en)

    h2_en = Dropout(drop_ra, name='H2_dropout')(h1_en)
    h2_en = GaussianNoise(g_noise, name='H2_noise')(h2_en)
    h2_en = Dense(fe2, kernel_initializer=ker_init, name='H2_layer')(h2_en)
    h2_en = PReLU(alpha_initializer=Constant(value=prelu_bias),
                  name='H2_activation')(h2_en)
    h2_en = BatchNormalization(name='H2_batch_norm')(h2_en)

    h3_en = Dropout(drop_ra, name='H3_dropout')(h2_en)
    h3_en = GaussianNoise(g_noise, name='H3_noise')(h3_en)
    h3_en = Dense(Bottleneck_Dim, kernel_initializer=ker_init,
                  name='H3_layer')(h3_en)
    h3_en = PReLU(alpha_initializer=Constant(value=prelu_bias),
                  name='H3_activation')(h3_en)
    Latent_space = BatchNormalization(name='H3_batch_norm')(h3_en)

    # decoder with U-net style skip connections; the concatenated tensors
    # feed the dense layers and the output is taken from h5_dec
    h4_dec = concat([Latent_space, h2_en])
    h4_dec = Dense(fe3, kernel_initializer=ker_init,
                   name='H4_layer')(h4_dec)
    h4_dec = PReLU(alpha_initializer=Constant(value=prelu_bias),
                   name='H4_activation')(h4_dec)

    h5_dec = concat([h4_dec, h1_en])
    h5_dec = Dense(fe4, kernel_initializer=ker_init, name='H5_layer')(h5_dec)
    h5_dec = PReLU(alpha_initializer=Constant(value=prelu_bias),
                   name='H5_activation')(h5_dec)

    out_dec = Dense(output_dim, name='Output_layer')(h5_dec)

    if y:
        out_dec = concat([out_dec, Y_in])

    Generator = Model(inpu, out_dec)
    Generator.summary()

    #if compile:
    #	gen_compile_dic={'loss':sparse_recon_loss_mse,'metrics':metrics,'optimizer':'adam','early_stop':ES}
    #	Generator.compile(**gen_compile_dic)

    return Generator
Example #7
    def _configure(self):

        # build the encoder outputs for the whole powerset
        encoder_outputs_powerset = []  # Holds M = |encoder_powerset| - 1 encoder networks
        for encoder_set in self.encoder_powerset:
            element_output = []
            for encoder_element in encoder_set:
                element_output.append(
                    encoder_element[0]
                )  # The first element is always an input layer
                # Append the next layer to the currently appended element in the set
                for encoder_layer in encoder_element[1:]:
                    element_output[-1] = encoder_layer(element_output[-1])
            # Concat all elements of the current set
            # Note: it is "just" a concat so that the parameter layers (z_mean & z_logvar) are forced
            #       to learn linear combinations of the uni-modal cases
            # TODO: extension to add a non-linear layer after the concatenation
            if len(element_output) > 1:
                encoder_outputs_powerset.append(concat(element_output))
                #################################################################################################
                #encoder_outputs_powerset.append(Dense(int(intermediate_dim/2), activation='relu')(concat(element_output)))
                #################################################################################################
            else:
                encoder_outputs_powerset.append(element_output[0])

        # Add sampling for every permutation
        self.Z = []
        self.Z_mean = []
        self.Z_logvar = []
        for encoder_output in encoder_outputs_powerset:
            self.Z_mean.append(Dense(self.z_dim)(encoder_output))
            self.Z_logvar.append(Dense(self.z_dim)(encoder_output))
            self.Z.append(
                self.sampling_layer([self.Z_mean[-1], self.Z_logvar[-1]]))

        # Add a decoder output for every permutation
        self.decoder_outputs_powerset = []
        for z, decoder_set in zip(self.Z, self.decoder_powerset):
            element_output = []
            for decoder_element in decoder_set:
                element_output.append(decoder_element[0](
                    z))  # Input the sample z into the first decoder layer
                # Append the next layer to the currently appended element in the set
                for decoder_layer in decoder_element[1:]:
                    #print(element_output[-1])
                    element_output[-1] = decoder_layer(element_output[-1])
            self.decoder_outputs_powerset.append(element_output)

        # collection of loss layers
        self.loss_layers = []

        # Calculate entropy losses for all sets in the powerset
        reconstruction_loss = []
        # Traverse the sets of the powerset
        for x_set, x_decoded_mean_set, encoder_inputs_dim, reconstruction_loss_metrics in \
                                                             zip(self.encoder_inputs_powerset, \
                                                                 self.decoder_outputs_powerset, \
                                                                 self.encoder_inputs_dim_powerset, \
                                                                 self.reconstruction_loss_metrics_powerset):
            reconstruction_loss_set = []  # Holds the losses for the current set
            # Traverse the elements per set
            for x, x_decoded_mean, encoder_input_dim, reconstruction_loss_metric in zip(x_set, \
                                                                                        x_decoded_mean_set, \
                                                                                        encoder_inputs_dim, \
                                                                                        reconstruction_loss_metrics):

                # Choose the proper reconstruction loss metric
                #print("encoder_input_dim: ", encoder_input_dim)
                rl = {
                    ReconstructionLoss.MSE:
                    LosslayerReconstructionMSE(weight=encoder_input_dim),
                    ReconstructionLoss.BCE:
                    LosslayerReconstructionBCE(weight=encoder_input_dim),
                }
                #print("reconstruction_loss_metric: ", reconstruction_loss_metric)
                loss_layer = rl.get(reconstruction_loss_metric)
                self.loss_layers.append(
                    loss_layer)  # Backup the layer for callbacks, etc.
                loss = loss_layer([x, x_decoded_mean])
                reconstruction_loss_set.append(loss)
            reconstruction_loss.extend(reconstruction_loss_set)

        # Calculate the prior losses for all sets in the powerset
        kl_prior_loss = []
        for z_mean, z_logvar, inputs, encoder_inputs_dim in zip(
                self.Z_mean, self.Z_logvar, self.encoder_powerset,
                self.encoder_inputs_dim_powerset):
            loss_layer = LosslayerDistributionGaussianPrior(
                weight=self.get_beta(x_dim=sum(encoder_inputs_dim)))
            self.loss_layers.append(
                loss_layer)  # Backup the layer for callbacks, etc.
            loss = loss_layer([z_mean, z_logvar])
            kl_prior_loss.append(loss)

        # Calculate the mutual KL divergences for the two sets A and B of the powerset,
        # where |A| = |B| - 1 and A is a proper subset of B (which is always valid for only one pair of sets);
        # a standalone sketch of the closed-form Gaussian KL follows this function
        kl_mutual_loss = []
        subset_idx, superset_idx = setfun.find_proper_subsets(
            self.encoder_inputs_powerset, cardinality_difference=1, debug=True)
        for A_idx, B_idx in zip(subset_idx, superset_idx):
            loss_layer = LosslayerDistributionGaussianMutual(
                weight=self.beta_mutual)
            self.loss_layers.append(
                loss_layer)  # Backup the layer for callbacks, etc.
            loss = loss_layer([
                self.Z_mean[B_idx], self.Z_mean[A_idx], self.Z_logvar[B_idx],
                self.Z_logvar[A_idx]
            ])
            kl_mutual_loss.append(loss)

        loss_list = reconstruction_loss + kl_prior_loss + kl_mutual_loss
        #print("\n +++++++++++++++++++++++ \n")
        #print("loss_list: ", loss_list)
        #print("reconstruction_loss: ", reconstruction_loss)
        #print("kl_prior_loss: ", kl_prior_loss)
        #print("kl_mutual_loss: ", kl_mutual_loss)
        #print("\n +++++++++++++++++++++++ \n")
        return loss_list
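The mutual term above is presumably the closed-form KL divergence between two diagonal Gaussians; the implementation of LosslayerDistributionGaussianMutual is not shown in this snippet, so the following standalone Keras-backend sketch is an assumption, included for reference only:

from keras import backend as K

def kl_between_diagonal_gaussians(mu_b, logvar_b, mu_a, logvar_a):
    # KL( N(mu_b, exp(logvar_b)) || N(mu_a, exp(logvar_a)) ), summed over the latent dimensions
    return 0.5 * K.sum(
        logvar_a - logvar_b
        + (K.exp(logvar_b) + K.square(mu_b - mu_a)) / K.exp(logvar_a)
        - 1.,
        axis=-1)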
Example #8
n_y = ytrain.shape[1]

epochs = 100

# okay, let's define our model

# okay, so the variational autoencoder consists of two parts. the encoder is a neural-network
# representation of the posterior p(z|X), i.e. it maps from the input space X to the latent space Z,
# and it is regularised to stay close to the standard normal N(0, 1)
# the decoder maps a latent sample z back to a reconstruction X^, i.e. it models p(X|z), and it is
# trained by maximising the log-likelihood of the data under that distribution
# (a sketch of the resulting loss follows this example)
# the latent variable is generally multi-dimensional, so mu and sigma are vectors that
# parameterise a diagonal Gaussian over the latent dimensions

X = Input(shape=(n_x,))
label = Input(shape=(n_y,))
#now we concatenate x and the label y so the encoder is conditioned on the label;
# the merge has to happen inside the Keras graph, hence the concat layer
inputs = concat([X, label])

encoder_h = Dense(encoder_dim1, activation=activation, activity_regularizer='l2')(inputs)
mu = Dense(n_z, activation='linear')(encoder_h)
l_sigma = Dense(n_z, activation='linear')(encoder_h) # this is our mapping to mu and sigma via ANNs

#now we have a function which adds random noise to our sampling process, using a Lambda layer in keras for this
# this is the reparametrization trick. we are meant to be sampling from the learned normal N(mu, sigma), whose
# parameters are outputs of the network, but sampling is not differentiable. instead we sample eps from the
# standard normal, which does not depend on anything trainable and so needs no gradients, and then scale and
# shift it with mu and sigma, which are differentiable, so the network stays differentiable end to end

def sample_z(args):
    mu, l_sigma = args
    eps = K.random_normal(shape=(m, n_z), mean=0., stddev=1.)
    return mu + K.exp(l_sigma / 2) * eps

# we use a Lambda layer to wrap this sampling step in the graph
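The comments at the top of this example describe the two terms being optimised; a sketch of the corresponding loss, mirroring the vae_loss used in Example #9 below (variable names follow this example, and the equal weighting of the two terms is the usual default rather than something stated here):

def vae_loss(y_true, y_pred):
    # reconstruction term: -log p(X|z), here a per-dimension binary cross-entropy
    recon = K.sum(K.binary_crossentropy(y_true, y_pred), axis=-1)
    # regularisation term: KL( N(mu, exp(l_sigma)) || N(0, 1) )
    kl = 0.5 * K.sum(K.exp(l_sigma) + K.square(mu) - 1. - l_sigma, axis=-1)
    return recon + kl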
Example #9
def get_mrSabzi_net():
    # ------------------------------------------------------------------------------
    def vae_loss(y_true, y_pred):
        recon = K.sum(K.binary_crossentropy(y_true, y_pred), axis=-1)
        kl = 0.5 * K.sum(K.exp(sigma) + K.square(mu) - 1. - sigma, axis=-1)
        return recon + kl

    def KL_loss(y_true, y_pred):
        return (0.5 * K.sum(K.exp(sigma) + K.square(mu) - 1. - sigma, axis=1))

    def recon_loss(y_true, y_pred):
        return K.sum(K.binary_crossentropy(y_true, y_pred), axis=-1)

    def sampling(args):
        mu, l_sigma = args
        eps = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0.,
                              stddev=1.)
        return mu + K.exp(l_sigma / 2) * eps

    def Encoder3D(input_3d, output_size):
        conv = Conv3D(8, (3, 3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='3D_conv1_1')(input_3d)
        conv = Conv3D(8, (3, 3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='3D_conv1_2')(conv)
        pool = MaxPooling3D(name='3d_pool_1')(conv)
        conv = Conv3D(16, (3, 3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='3D_conv2_1')(pool)
        conv = Conv3D(16, (3, 3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='3D_conv2_2')(conv)
        pool = MaxPooling3D(name='3d_pool_2')(conv)
        conv = Conv3D(32, (3, 3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='3D_conv3_1')(pool)
        conv = Conv3D(32, (3, 3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='3D_conv3_2')(conv)
        pool = MaxPooling3D(name='3d_pool_3')(conv)
        conv = Conv3D(64, (3, 3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='3D_conv4_1')(pool)
        conv = Conv3D(64, (3, 3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='3D_conv4_2')(conv)
        flat = Flatten(name='3d_flatten')(conv)
        dense = Dense(256, activation='relu', name='3d_dense_1')(flat)
        dense = Dense(output_size, activation='relu', name='3d_dense_2')(dense)
        return dense

    def Encoder2D(input_2d, output_size):
        conv = Conv2D(8, (3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='2D_conv1_1')(input_2d)
        conv = Conv2D(8, (3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='2D_conv1_2')(conv)
        pool = MaxPooling2D(name='2d_pool_1')(conv)
        conv = Conv2D(16, (3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='2D_conv2_1')(pool)
        conv = Conv2D(16, (3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='2D_conv2_2')(conv)
        pool = MaxPooling2D(name='2d_pool_2')(conv)
        conv = Conv2D(32, (3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='2D_conv3_1')(pool)
        conv = Conv2D(32, (3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='2D_conv3_2')(conv)
        pool = MaxPooling2D(name='2d_pool_3')(conv)
        conv = Conv2D(64, (3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='2D_conv4_1')(pool)
        conv = Conv2D(64, (3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      activation='relu',
                      name='2D_conv4_2')(conv)
        flat = Flatten(name='2d_flatten')(conv)
        dense = Dense(256, activation='relu', name='2d_dense_1')(flat)
        dense = Dense(output_size, activation='relu', name='2d_dense_2')(dense)
        return dense

    def Decoder3D(latent_output, decoder3D_dense_shape,
                  decoder3D_reshape_shape):
        dense = Dense(decoder3D_dense_shape, activation='relu')(latent_output)
        reshape = Reshape(decoder3D_reshape_shape)(dense)
        conv = Conv3D(64, (3, 3, 3),
                      kernel_initializer='he_normal',
                      padding='same',
                      activation='relu',
                      name='3D_conv5_1')(reshape)
        conv = Conv3D(64, (3, 3, 3),
                      kernel_initializer='he_normal',
                      padding='same',
                      activation='relu',
                      name='3D_conv5_2')(conv)
        up = UpSampling3D(size=(2, 2, 2), name='3D_up1')(conv)
        conv = Conv3D(32, (3, 3, 3),
                      kernel_initializer='he_normal',
                      padding='same',
                      activation='relu',
                      name='3D_conv6_1')(up)
        conv = Conv3D(32, (3, 3, 3),
                      kernel_initializer='he_normal',
                      padding='same',
                      activation='relu',
                      name='3D_conv6_2')(conv)
        up = UpSampling3D(size=(2, 2, 2), name='3D_up2')(conv)
        conv = Conv3D(16, (3, 3, 3),
                      kernel_initializer='he_normal',
                      padding='same',
                      activation='relu',
                      name='3D_conv7_1')(up)
        conv = Conv3D(16, (3, 3, 3),
                      kernel_initializer='he_normal',
                      padding='same',
                      activation='relu',
                      name='3D_conv7_2')(conv)
        up = UpSampling3D(size=(2, 2, 2), name='3D_up3')(conv)
        conv = Conv3D(8, (3, 3, 3),
                      kernel_initializer='he_normal',
                      padding='same',
                      activation='relu',
                      name='3D_conv8_1')(up)
        conv = Conv3D(8, (3, 3, 3),
                      kernel_initializer='he_normal',
                      padding='same',
                      activation='relu',
                      name='3D_conv8_2')(conv)
        conv = Conv3D(2, (3, 3, 3),
                      kernel_initializer='he_normal',
                      padding='same',
                      activation='relu',
                      name='3D_conv8_3')(conv)
        conv = Conv3D(1, (3, 3, 3),
                      kernel_initializer='he_normal',
                      padding='same',
                      activation='sigmoid',
                      name='3D_conv_output')(conv)
        return conv

    input3D_shape = (32, 32, 32, 1)
    input2D_shape = (32, 32, 1)
    output2D_dim = 32
    output3D_dim = 32
    latent_dim = 10
    decoder3D_dense_shape = (4 * 4 * 4 * 64)
    decoder3D_reshape_shape = (4, 4, 4, 64)
    batch_size = 16

    input_3d = Input(input3D_shape, name='3D_Encoder_input')
    input_2d = Input(input2D_shape, name='2D_Encoder_input')

    Code_3d = Encoder3D(input_3d, output3D_dim)
    Code_2d = Encoder2D(input_2d, output2D_dim)

    inputs = concat([Code_3d, Code_2d], name='input_concat')

    encoder = Dense(512, activation='relu', name='encoder_dense')(inputs)
    mu = Dense(latent_dim, activation='linear', name='mu')(encoder)
    sigma = Dense(latent_dim, activation='linear', name='sigma')(encoder)
    latent = Lambda(sampling, output_shape=(latent_dim, ),
                    name='latent')([mu, sigma])
    latent_concat = concat([latent, Code_2d], name='latent_concat')
    output = Decoder3D(latent_concat, decoder3D_dense_shape,
                       decoder3D_reshape_shape)

    cvae = Model([input_3d, input_2d], output)

    encoder = Model([input_3d, input_2d], mu)

    d_in = Input(shape=(latent_dim + output2D_dim, ))
    d_out = Decoder3D(d_in, decoder3D_dense_shape, decoder3D_reshape_shape)
    decoder = Model(d_in, d_out)

    cvae.compile(optimizer='Adam',
                 loss=vae_loss,
                 metrics=[KL_loss, recon_loss])
    cvae.summary()
    cvae.save('cvae.h5')
    return cvae


# ------------------------------------------------------------------------------
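A minimal usage sketch for get_mrSabzi_net; the array names, shapes and epoch count are illustrative assumptions:

import numpy as np

cvae = get_mrSabzi_net()

# Hypothetical training data: paired 3D volumes and 2D views, scaled to [0, 1]
# so they match the sigmoid output and the binary cross-entropy reconstruction term.
x3d = np.random.rand(160, 32, 32, 32, 1).astype('float32')
x2d = np.random.rand(160, 32, 32, 1).astype('float32')

# The sampling layer hard-codes a batch size of 16, so keep batch_size=16 here
# and use a sample count that is a multiple of 16.
cvae.fit([x3d, x2d], x3d, batch_size=16, epochs=10)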