def build_decoder(self, latent_dim, block_size, k_size):
        """Build a 1-D convolutional decoder that upsamples a latent vector.

        Parameters
        ----------
        latent_dim : int
            Size of the latent input vector; after the initial Dense+Reshape
            it is also the starting sequence length.
        block_size : int
            Target sequence length; doubling stops once the current length
            reaches or exceeds it.
        k_size : int
            Kernel size for every Conv1DTranspose layer.

        Returns
        -------
        Sequential
            Decoder ending in a single-channel layer with no activation
            (unbounded output, suited to regression-style reconstruction).
        """
        model = Sequential()
        model.add(Input(shape=(latent_dim, )))
        model.add(Dense(units=latent_dim * 2, activation='relu'))
        model.add(Reshape(target_shape=(latent_dim, latent_dim)))

        # Track the sequence length arithmetically instead of reading the
        # fragile/deprecated `model.layers[-1].output_shape` attribute:
        # each UpSampling1D(size=2) exactly doubles the length, and the
        # 'same'-padded stride-1 transposed convs leave it unchanged.
        current_len = latent_dim
        filters = 256
        while current_len < block_size:
            model.add(
                Conv1DTranspose(filters,
                                strides=1,
                                kernel_size=k_size,
                                padding='same'))
            model.add(UpSampling1D(size=2))
            model.add(Activation('relu'))
            filters = filters // 2  # halve channels as the sequence grows
            current_len *= 2

        # Final projection back to one channel.
        model.add(
            Conv1DTranspose(1,
                            kernel_size=k_size,
                            padding='same',
                            activation=None))
        return model
def define_generator(latent_dim, n_classes=91):
    """Conditional GAN generator: maps (noise, class label) to a 1-D signal.

    Returns a Model taking [latent_vector, label] and emitting a tanh-bounded
    sequence of length 5000 (1250 upsampled twice by stride-2 transposed convs).
    """
    # Small-stddev normal initializer, standard for DCGAN-style models.
    init = RandomNormal(stddev=0.02)

    # --- Label branch: embed the class id, project to a 1250x128 feature map.
    in_label = Input(shape=(1,))
    label_branch = Embedding(n_classes, 50)(in_label)
    label_branch = Dense(1250 * 128, kernel_initializer=init)(label_branch)
    label_branch = Reshape((1250, 128))(label_branch)

    # --- Latent branch: project noise to the same 1250x128 feature map.
    in_lat = Input(shape=(latent_dim,))
    noise_branch = Dense(int(128 * (5000 / 4) * 1),
                         kernel_initializer=init)(in_lat)
    noise_branch = Activation('relu')(noise_branch)
    noise_branch = Reshape((math.ceil(5000 / 4) * 1, 128))(noise_branch)

    # Concatenate along channels, then upsample 4x in two stride-2 stages.
    merged = Concatenate()([noise_branch, label_branch])
    net = Conv1DTranspose(128, 4 * 1, strides=2 * 1, padding='same',
                          kernel_initializer=init)(merged)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)
    net = Conv1DTranspose(1, 4 * 1, strides=2 * 1, padding='same',
                          kernel_initializer=init)(net)
    out_layer = Activation('tanh')(net)

    return Model([in_lat, in_label], out_layer)
Beispiel #3
0
def build_generator(z_dim):
    """Build a 1-D transposed-convolutional GAN generator.

    Args:
        z_dim: Dimension of the latent noise vector; it is reshaped to a
            (z_dim, 1) single-channel sequence before the transposed convs.

    Returns:
        A Sequential model ending in a single-channel tanh output.
    """
    model = Sequential()
    # Treat the latent vector as a length-z_dim, single-channel sequence.
    model.add(Reshape((z_dim, 1), input_dim=z_dim))

    # Two stride-2 stages upsample the sequence; default 'valid' padding means
    # the kernel size also adds a few extra steps per layer.
    model.add(Conv1DTranspose(8, kernel_size=4, strides=2))
    model.add(LayerNormalization())
    model.add(LeakyReLU())

    model.add(Conv1DTranspose(8, kernel_size=6, strides=2))
    model.add(LayerNormalization())
    model.add(LeakyReLU())

    model.add(Conv1DTranspose(8, kernel_size=6, strides=1))
    model.add(LayerNormalization())
    model.add(LeakyReLU())

    # Final projection to one channel, squashed to [-1, 1].
    model.add(Conv1DTranspose(1, kernel_size=7, strides=1, activation='tanh'))

    return model
def create_fcdae(input_shape, activation_function, kernel_initializer, lr):
    """Fully-convolutional 1-D denoising autoencoder, compiled with MSE/Adam."""
    kernel_size = 3
    n_layers = 5

    layers = []

    # Encoder: a stride-1 stem, then stride-2 downsampling convs whose channel
    # counts grow as 2**(i + 5) -> 64, 128, 256, 512.
    layers.append(Conv1D(32, kernel_size=kernel_size, input_shape=input_shape,
                         strides=1, padding='same',
                         activation=activation_function,
                         kernel_initializer=kernel_initializer))
    for i in range(1, n_layers):
        layers.append(Conv1D(2 ** (i + 5), kernel_size=kernel_size,
                             strides=2, padding='same',
                             activation=activation_function,
                             kernel_initializer=kernel_initializer))

    # Decoder: mirror the encoder with stride-2 transposed convs
    # (512 -> 256 -> 128 -> 64 -> 32 reversed as i runs n_layers-2 .. 0).
    for i in range(n_layers - 2, -1, -1):
        layers.append(Conv1DTranspose(2 ** (i + 5), kernel_size=kernel_size,
                                      strides=2, padding='same',
                                      activation=activation_function,
                                      kernel_initializer=kernel_initializer))

    # Linear output head with 4 channels.
    layers.append(Conv1DTranspose(4, kernel_size=kernel_size, strides=1,
                                  padding='same', activation='linear',
                                  kernel_initializer=kernel_initializer))

    autoencoder_model = Sequential(layers)
    autoencoder_model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=lr), loss='mse')

    return autoencoder_model
Beispiel #5
0
 def __design_generator(self):
     """Build the generator network.

     Dense projection of the latent vector, three identical stride-2
     Conv1DTranspose upsampling stages (8x total), then a flat tanh head
     of size ``self.features``.
     """
     w_init = RandomNormal(stddev=0.02)

     inputs = Input(shape=self.latent_features)
     net = Dense(units=self.latent_features * 32,
                 kernel_initializer=w_init)(inputs)
     net = LeakyReLU(alpha=0.2)(net)
     net = Reshape(target_shape=(self.latent_features, 32))(net)

     # Three identical upsampling stages.
     for _ in range(3):
         net = Conv1DTranspose(filters=32,
                               kernel_size=4,
                               strides=2,
                               padding='same',
                               kernel_initializer=w_init)(net)
         net = LeakyReLU(alpha=0.2)(net)

     net = Flatten()(net)
     outputs = Dense(units=self.features,
                     activation='tanh',
                     kernel_initializer=w_init)(net)
     return Model(inputs=inputs, outputs=outputs)
    def __init__(self, input_shape):
        """Build a symmetric 1-D convolutional autoencoder.

        Args:
            input_shape: Shape of one input sample, e.g. (timesteps, channels).
        """
        super(Autoencoder, self).__init__()
        # Encoder: channel counts shrink 128 -> 64 -> 32; stride 1 everywhere,
        # so the sequence length is preserved (compression is in channels only).
        self.encoder = tf.keras.Sequential([
            Input(shape=input_shape),
            Conv1D(128, 3, activation='relu', padding='same', strides=1),
            Conv1D(64, 3, activation='relu', padding='same', strides=1),
            Conv1D(32, 3, activation='relu', padding='same', strides=1),
        ])

        # Decoder mirrors the encoder (32 -> 64 -> 128), then projects back
        # to one channel; sigmoid output suits inputs scaled to [0, 1].
        self.decoder = tf.keras.Sequential([
            Conv1DTranspose(32,
                            kernel_size=3,
                            strides=1,
                            activation='relu',
                            padding='same'),
            Conv1DTranspose(64,
                            kernel_size=3,
                            strides=1,
                            activation='relu',
                            padding='same'),
            Conv1DTranspose(128,
                            kernel_size=3,
                            strides=1,
                            activation='relu',
                            padding='same'),
            Conv1D(1, kernel_size=3, activation='sigmoid', padding='same')
        ])
Beispiel #7
0
    def define_generator_tf2(latent_dim, n_outputs=2):
        '''
        This model only works with TF2.
        Generator model (the tail of the GAN): learns to create a sample that
        can be confused with a real one.
        latent_dim: (int) size of the latent space (random numbers)
        n_outputs: (int) size of the real sample; the generated output tries
            to imitate the real data
        return: model
        '''

        model = Sequential()

        # Dense foundation: 128 channels at length n_outputs // 4.
        # (The "7x7 / 14x14 / 28x28" phrasing is inherited from the 2-D MNIST
        # template this was adapted from; actual lengths are n_outputs/4,
        # n_outputs/2, n_outputs.)
        n_nodes = 128 * int(n_outputs / 4)

        model.add(Dense(n_nodes, input_dim=latent_dim))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Reshape((int(n_outputs / 4), 128)))

        # Upsample to n_outputs / 2.
        model.add(Conv1DTranspose(128, 4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))

        # Upsample to n_outputs, then collapse to one sigmoid channel.
        model.add(Conv1DTranspose(128, 4, strides=2, padding='same'))
        model.add(LeakyReLU(alpha=0.2))
        model.add(
            Conv1D(1, int(n_outputs / 4), activation='sigmoid',
                   padding='same'))
        model.add(Flatten())
        return model
    def __init__(self, kernel_size, block_size):
        """Strided 1-D conv autoencoder over (block_size, 1) inputs.

        Args:
            kernel_size: Kernel size shared by every conv layer.
            block_size: Input sequence length.

        Six stride-2 encoder convs reduce the length by a factor of 64; the
        decoder mirrors them with five stride-2 transposed convs plus an
        output conv (see NOTE below on the apparent asymmetry).
        """
        super(Autoencoder, self).__init__()

        stride = 2

        # Encoder: channels 64 -> 64 -> 16 -> 16 -> 4 -> 1, each stage
        # halving the sequence length via strides (pooling left commented).
        self.encoder = tf.keras.Sequential([
            Input(shape=(block_size, 1)),
            Conv1D(64, kernel_size, strides=stride, padding='same'),
            # AveragePooling1D(pool_size=2),
            Conv1D(64, kernel_size, strides=stride, padding='same'),
            # AveragePooling1D(pool_size=2),
            Conv1D(16, kernel_size, strides=stride, padding='same'),
            # AveragePooling1D(pool_size=2),
            Conv1D(16, kernel_size, strides=stride, padding='same'),
            # AveragePooling1D(pool_size=2),
            Conv1D(4, kernel_size, strides=stride, padding='same'),
            # Conv1D(1, kernel_size, activation='tanh', padding='same',
            # activity_regularizer=tf.keras.regularizers.l1(10e-5))])
            Conv1D(1, kernel_size, activation=None, padding='same')
        ])

        # Decoder: five stride-2 transposed convs (32x upsampling) plus a
        # final stride-1 conv. NOTE(review): the encoder applies `stride`
        # five times while the bottleneck conv is stride-1, so encoder and
        # decoder lengths match only if block_size is divisible accordingly —
        # confirm against the training shapes.
        self.decoder = tf.keras.Sequential([
            Conv1DTranspose(4,
                            kernel_size,
                            strides=stride,
                            activation='tanh',
                            padding='same'),
            # UpSampling1D(size=2),
            Conv1DTranspose(16,
                            kernel_size,
                            strides=stride,
                            activation='tanh',
                            padding='same'),
            # UpSampling1D(size=2),
            Conv1DTranspose(16,
                            kernel_size,
                            strides=stride,
                            activation='tanh',
                            padding='same'),
            # UpSampling1D(size=2),
            Conv1DTranspose(64,
                            kernel_size,
                            strides=stride,
                            activation='tanh',
                            padding='same'),
            # UpSampling1D(size=2),
            Conv1DTranspose(64,
                            kernel_size,
                            strides=stride,
                            activation='tanh',
                            padding='same'),
            Conv1D(1, kernel_size, padding='same', activation='tanh')
        ])

        # Cache the bottleneck shape for callers/debugging.
        self.encoded_shape = self.encoder.layers[-1].output_shape
        print(f'ENCODED SHAPE {self.encoded_shape}')
Beispiel #9
0
    def build_model(self, input_shape, latent_dim=4):
        """Build and return (encoder, decoder) as two separate Models.

        The encoder downsamples by 4**3 = 64 with stride-4 convs, collapses
        to one channel, then a Dense layer widens each timestep to
        `latent_dim` features. The decoder takes that (length, latent_dim)
        code and mirrors the encoder back up, ending in a tanh channel.

        Args:
            input_shape: Shape of one encoder input sample.
            latent_dim: Per-timestep width of the latent code.

        Returns:
            Tuple of (encoder Model, decoder Model); they are *not* chained
            here — the caller composes them.
        """
        encoder_input = Input(shape=input_shape)
        encoder = Conv1D(320,
                         kernel_size=9,
                         strides=4,
                         activation='relu',
                         padding='same')(encoder_input)
        encoder = Conv1D(160,
                         kernel_size=9,
                         strides=4,
                         activation='relu',
                         padding='same')(encoder)
        encoder = Conv1D(8,
                         kernel_size=9,
                         strides=4,
                         activation='relu',
                         padding='same')(encoder)
        # Bottleneck: one channel, no activation, then Dense to latent_dim.
        encoder = Conv1D(1, kernel_size=9, activation=None,
                         padding='same')(encoder)
        encoder_output = Dense(latent_dim)(encoder)
        print(encoder_output.shape)

        # Decoder input matches the encoder's (length, latent_dim) output.
        decoder_input = Input(
            (encoder_output.shape[1], encoder_output.shape[2]))
        decoder = Conv1DTranspose(8,
                                  kernel_size=9,
                                  strides=4,
                                  activation='relu',
                                  padding='same')(decoder_input)
        decoder = Conv1DTranspose(160,
                                  kernel_size=9,
                                  strides=4,
                                  activation='relu',
                                  padding='same')(decoder)
        decoder = Conv1DTranspose(320,
                                  kernel_size=9,
                                  strides=4,
                                  activation='relu',
                                  padding='same')(decoder)
        decoder_output = Conv1D(1,
                                kernel_size=9,
                                strides=1,
                                activation='tanh',
                                padding='same')(decoder)

        return Model(inputs=[encoder_input],
                     outputs=[encoder_output]), Model(inputs=[decoder_input],
                                                      outputs=[decoder_output])
Beispiel #10
0
def up_down(config, name,down_sample, l1, l2, gms, filters_up, filters_down, vocab):
    """Construct (but do not connect) the layers of a down/up-sampling stack.

    Returns a tuple (projection, conv_down, conv_up, outconv1, outconv2);
    the caller is responsible for wiring them together. `config` is accepted
    but unused here. `gms` is the activation for the final conv (presumably
    a (Gumbel-)softmax — confirm against callers).
    """
    # Bias-free linear input projection to the first down-sampling width.
    projection = Dense(filters_down[0],
                                     activation = None,
                                     use_bias = False,
                                     kernel_regularizer = tf.keras.regularizers.L1L2(l1=l1, l2=l2),
                                     name=name+"_in_proj")

    # Stride-2 downsampling convs, one per level.
    conv_down = [Conv1D(filters_down[i], 9,
                 strides=2,
                 padding="same",
                 activation = "relu",
                 kernel_regularizer = tf.keras.regularizers.L1L2(l1=l1, l2=l2),
                 name = name + "_down_{}".format(i)) for i in range(down_sample)]

    # Matching stride-2 upsampling transposed convs.
    # NOTE(review): name + "up_{}" lacks the leading underscore used by
    # "_down_{}" — likely a typo, but changing it would rename layers (and
    # break existing checkpoints); confirm before fixing.
    conv_up = [Conv1DTranspose(filters_up[i], 9,
                                strides=2,
                                padding="same",
                                activation = "relu",
                                kernel_regularizer = tf.keras.regularizers.L1L2(l1=l1, l2=l2),
                                name = name + "up_{}".format(i)) for i in range(down_sample)]

    outconv1 = Conv1D(filters_up[-1],
                    9,
                    padding = 'same',
                    activation = "relu",
                    name = name + "_output1")
    # Spectrally-normalized vocabulary head.
    outconv2 = SpectralNormalization(Conv1D(vocab, 9, padding = 'same', activation = gms, name = name + "_output2"), name = name + "_output2_sn")
    return projection, conv_down, conv_up, outconv1, outconv2
Beispiel #11
0
    def __init__(self):
        """Build the SSRN layer stack as one Sequential model.

        NOTE(review): `c` (hidden channel count), `f` (output feature count)
        and `highway` (a highway-conv layer factory) are taken from module
        scope — confirm their definitions elsewhere in the file.
        """
        super(SSRN, self).__init__()
        # Stem: 1x1 projection to c channels plus two dilated highway blocks.
        self.model = tf.keras.Sequential([
            Conv1D(filters=c, kernel_size=1, padding='same', kernel_initializer='he_normal'),
            highway(filters=c, kernel_size=3, padding='same', dilation_rate=1),
            highway(filters=c, kernel_size=3, padding='same', dilation_rate=3)

        ])

        # Two stride-2 upsampling stages (4x total), each followed by two
        # dilated highway blocks.
        for _ in range(2):
            self.model.add(Conv1DTranspose(filters=c, kernel_size=2, strides=2, padding='same', kernel_initializer='he_normal'))
            self.model.add(highway(filters=c, kernel_size=3, padding='same', dilation_rate=1))
            self.model.add(highway(filters=c, kernel_size=3, padding='same', dilation_rate=3))

        # Widen to 2c channels, refine with two more highway blocks.
        self.model.add(Conv1D(filters=2*c, kernel_size=1, padding='same', kernel_initializer='he_normal'))

        for _ in range(2):
            self.model.add(highway(filters=2*c, kernel_size=3, padding='same', dilation_rate=1))

        # Output head: project to f features, two ReLU 1x1 convs, then a
        # sigmoid 1x1 conv.
        self.model.add(Conv1D(filters=f, kernel_size=1, padding='same', kernel_initializer='he_normal'))

        for _ in range(2):
            self.model.add(Conv1D(filters=f, kernel_size=1, padding='same', activation='relu', kernel_initializer='he_normal'))

        self.model.add(Conv1D(filters=f, kernel_size=1, padding='same', activation='sigmoid', kernel_initializer='he_normal'))
Beispiel #12
0
 def __init__(self, latent_dim, inputShape, temperature=1):
      """
          Parameters
          ----------
          inputShape : tuple
              The shape of the data for which the model is trained on.
              The input shape is the shape of one element in your dataset, 
              not the shape of the dataset itself. 
          latent_dim : int
              Dimension of the encoded matrix.
          temperature : float
              Temperature of the Gumbel-Softmax activation (stored as
              ``self.tau``). Using temperature=0 discretizes the output of
              the decoder; the higher the temperature, the less discrete
              the output will be.
      """
      super(VAE, self).__init__()
      self.latent_dim = latent_dim
      self.inputShape = inputShape
      self.tau = temperature
      #self.encoder = Encoder(latent_dim, encoder_shape)
      # Encoder: four strided convs (2*2*3*3 = 36x length reduction), then a
      # Dense producing mean and log-variance concatenated (hence 2*latent_dim).
      self.encoder = tf.keras.Sequential(
          [
              Input(self.inputShape),
              Conv1D(8, 3, activation="relu", strides=2, padding="same"),
              Conv1D(16, 3, activation="relu", strides=2, padding="same"),
              Conv1D(48, 3, activation="relu", strides=3, padding="same"),
              Conv1D(144, 3, activation="relu", strides=3, padding="same"),
              Flatten(),
              Dense(latent_dim + latent_dim)
          ]
      )
      #self.decoder = Decoder()
      # Decoder: mirror of the encoder starting from a fixed (14, 144) map
      # (assumes inputShape reduces to length 14 — confirm), ending in a
      # 5-channel map passed through the Gumbel-Softmax activation.
      self.decoder = tf.keras.Sequential(
          [
              Input((latent_dim,)),
              Dense((14 * 144), activation="relu"),
              Reshape((14, 144)),
              Conv1DTranspose(48, 3, activation="relu", strides=3, padding="same"),
              Conv1DTranspose(16, 3, activation="relu", strides=3, padding="same"),
              Conv1DTranspose(8, 3, activation="relu", strides=2, padding="same"),
              Conv1DTranspose(8, 3, activation="relu", strides=2, padding="same"),
              Conv1DTranspose(5, 3, activation="relu", padding="same"),
              Activation(Gumbel_Softmax(temperature=self.tau))
          ]
      )
Beispiel #13
0
 def __init__(self, frame_length, **kwargs):
     """Decoder layers: upsample back to `frame_length` steps, then refine.

     Layers are only constructed here; they are wired together in the
     layer's call method (not shown in this chunk).
     """
     super(Decoder, self).__init__(**kwargs)
     self.frame_length = frame_length
     # Upsample by frame_length // 64 (presumably the encoded length is 64 —
     # confirm against the matching encoder).
     self.upsampling = UpSampling1D(size=frame_length // 64)
     self.dense1 = Dense(128, activation='softplus')
     self.dense2 = Dense(64, activation='softplus')
     self.dense3 = Dense(64, activation='softplus')
     self.dense4 = Dense(128, activation='relu')
     # Wide (128-tap) single-channel transposed conv as the output head.
     self.convtranspose1 = Conv1DTranspose(1, 128, padding='same')
Beispiel #14
0
def create_conv_transpose_autoencoder(
    input_dimension,
    optimizer='adam',
    loss='binary_crossentropy',
    kernel_size=7,
    dropout_rate=0.2,
    strides=2,
    activation='relu',
    padding='same',
    number_of_features=1,
) -> Model:
    """Build and compile a symmetric strided-conv 1-D autoencoder.

    Two stride-`strides` Conv1D layers (32 -> 16 channels) compress the
    sequence; two Conv1DTranspose layers (16 -> 32) mirror them back up,
    and a final linear transposed conv restores a single channel. Dropout
    is applied after the first layer of each half.

    Args:
        input_dimension: Input sequence length.
        optimizer, loss: Passed straight to ``model.compile``.
        kernel_size, strides, activation, padding: Shared conv settings.
        dropout_rate: Rate for the two Dropout layers.
        number_of_features: Channels per timestep of the input.

    Returns:
        The compiled Sequential model.
    """
    model = Sequential([
        Input(shape=(input_dimension, number_of_features)),
        Conv1D(filters=32,
               kernel_size=kernel_size,
               padding=padding,
               strides=strides,
               activation=activation),
        Dropout(rate=dropout_rate),
        Conv1D(filters=16,
               kernel_size=kernel_size,
               padding=padding,
               strides=strides,
               activation=activation),
        Conv1DTranspose(filters=16,
                        kernel_size=kernel_size,
                        padding=padding,
                        strides=strides,
                        activation=activation),
        Dropout(rate=dropout_rate),
        Conv1DTranspose(filters=32,
                        kernel_size=kernel_size,
                        padding=padding,
                        strides=strides,
                        activation=activation),
        # Linear (no activation) single-channel reconstruction head.
        Conv1DTranspose(filters=1, kernel_size=kernel_size, padding=padding),
    ])

    model.compile(optimizer=optimizer, loss=loss)

    return model
 def __init__(self, latent_dim):
     """Convolutional VAE over (1, 90001)-shaped inputs.

     The encoder's Dense(latent_dim + latent_dim) packs mean and
     log-variance side by side.

     NOTE(review): the input shape (1, 90001) looks channels-first, so the
     1-wide kernels slide over a length-1 time axis and the strides have no
     effect — confirm this is intended.
     """
     super(CVAE, self).__init__()
     self.latent_dim = latent_dim
     # Encoder: positional args are (filters, kernel_size, strides).
     self.encoder = Sequential(
         [InputLayer(input_shape=(1,90001)),
         Conv1D(64,1,2),
         Conv1D(128,1,2),
         Conv1D(128,1,2),
         Conv1D(256,1,2),
         Flatten(),
         Dense(latent_dim+latent_dim)
         ])
     # Decoder: latent vector treated as a length-1 sequence, widened back
     # to 90001 channels by 1x1 stride-1 transposed convs.
     self.decoder = tf.keras.Sequential(
         [InputLayer(input_shape=(latent_dim,)),
         Reshape(target_shape=(1,latent_dim)),
         Conv1DTranspose(512,1,1),
         Conv1DTranspose(256,1,1),
         Conv1DTranspose(128,1,1),
         Conv1DTranspose(64,1,1),
         Conv1DTranspose(90001,1,1),
         ]
     )
Beispiel #16
0
def test_conv1dtranspose():
    """FLOPs check for Conv1DTranspose; skipped on TF < 2.3 (layer added in 2.3)."""
    major, minor, _ = tf.version.VERSION.split(".")
    # Compare as a version tuple. The previous check
    # (`major >= 2 and minor >= 3`) wrongly skipped any release whose minor
    # component is below 3 — e.g. a hypothetical TF 3.0.
    if (int(major), int(minor)) < (2, 3):
        return
    from tensorflow.keras.layers import Conv1DTranspose

    in_w = 32

    in_ch = 3
    kernel = 32
    ker_w = 3

    model = Sequential(
        Conv1DTranspose(kernel, (ker_w,), padding="same", input_shape=(in_w, in_ch))
    )
    flops = get_flops(model, batch_size=1)
    # 'same' padding with stride 1: each of in_w output positions does
    # ker_w * in_ch multiply-adds (2 flops each) plus a bias add, per filter;
    # the trailing +1 matches get_flops' accounting.
    assert flops == ((2 * ker_w * in_ch) + 1) * in_w * kernel + 1
    def __init__(self,
                 _latent_dim=10,
                 _input_shape=(100, 5, 1),
                 _encoder_filters=[32, 64],
                 _encoder_kernel_sizes=[5, 5],
                 _encoder_strides=[1, 1],
                 _decoder_filters=[64, 32],
                 _decoder_kernel_sizes=[5, 5],
                 _decoder_strides=[1, 1],
                 _encoder_use_bias=True,
                 _decoder_use_bias=True):
        """Configurable two-layer convolutional VAE.

        Encoder/decoder filter counts, kernel sizes, strides, and bias flags
        are injected via the underscore-prefixed keyword arguments; the
        encoder ends in Dense(2 * latent_dim) (mean and log-variance packed
        together), and the decoder starts from a fixed (100, 64) feature map.

        NOTE(review): mutable list defaults are shared across calls — safe
        only because they are never mutated here.
        """
        super(VAE, self).__init__()
        # Shared Hyper Parameters
        self.latent_dim = _latent_dim
        # NOTE(review): a single LeakyReLU layer instance is reused at several
        # positions in both Sequentials — harmless since it is stateless.
        self.leaky_relu = LeakyReLU(alpha=0.5)
        self.weight_initializer = RandomNormal(mean=0.0, stddev=0.02)
        self.lamb = 0.00005
        # Encoder Hyper Parameters
        self.encoder_filters = _encoder_filters
        self.encoder_kernel_sizes = _encoder_kernel_sizes
        self.encoder_strides = _encoder_strides
        self.encoder_use_bias = _encoder_use_bias
        # Decoder Hyper Parameters
        self.decoder_filters = _decoder_filters
        self.decoder_kernel_sizes = _decoder_kernel_sizes
        self.decoder_strides = _decoder_strides
        self.decoder_use_bias = _decoder_use_bias
        self.optimizer = tf.keras.optimizers.Adam(1e-4)

        # Encoder
        self.encoder = tf.keras.Sequential([
            Conv1D(filters=self.encoder_filters[0],
                   kernel_size=self.encoder_kernel_sizes[0],
                   strides=self.encoder_strides[0],
                   padding='SAME',
                   use_bias=self.encoder_use_bias,
                   kernel_initializer=self.weight_initializer,
                   input_shape=_input_shape),
            self.leaky_relu,
            Conv1D(filters=self.encoder_filters[1],
                   kernel_size=self.encoder_kernel_sizes[1],
                   strides=self.encoder_strides[1],
                   padding='SAME',
                   use_bias=self.encoder_use_bias,
                   kernel_initializer=self.weight_initializer),
            self.leaky_relu,
            Flatten(),
            Dense(self.latent_dim + self.latent_dim),
        ])
        # Decoder: Dense to a fixed 100x64 map (assumes the default strides
        # of 1 keep the length at 100 — confirm when overriding strides).
        self.decoder = tf.keras.Sequential([
            InputLayer(input_shape=(self.latent_dim, )),
            Dense(units=100 * 64), self.leaky_relu,
            Reshape(target_shape=(100, 64)),
            Conv1DTranspose(filters=self.decoder_filters[0],
                            kernel_size=self.decoder_kernel_sizes[0],
                            strides=self.decoder_strides[0],
                            padding='SAME',
                            use_bias=self.decoder_use_bias,
                            kernel_initializer=self.weight_initializer),
            self.leaky_relu,
            Conv1DTranspose(filters=self.decoder_filters[1],
                            kernel_size=self.decoder_kernel_sizes[1],
                            strides=self.decoder_strides[1],
                            padding='SAME',
                            use_bias=self.decoder_use_bias,
                            kernel_initializer=self.weight_initializer),
            self.leaky_relu,
            Conv1DTranspose(filters=5,
                            kernel_size=1,
                            strides=1,
                            padding='SAME',
                            activation=tf.nn.tanh)
        ])
Beispiel #18
0
    def __init__(
        self,
        sequence_length=30,
        number_of_vars=10,
        fil1_conv_enc=16,
        fil2_conv_enc=32,
        unit_den_enc=16,
        fil1_conv_dec=32,
        fil2_conv_dec=16,
        kernel=3,
        strs=2,
        drop_rate=0.2,
        batch_size=100,
        latent_dim=1,
        epochs=100,
        learning_rate=1e-2,
        decay_rate=0.96,
        decay_step=1000,
    ):
        """Build a convolutional VAE (encoder + decoder + combined model).

        The encoder maps (sequence_length, number_of_vars) inputs to
        (z_mean, z_log_var, z); the decoder reconstructs the sequence from z.
        The combined model is compiled with an MSE reconstruction loss plus
        the KL divergence (added via `add_loss`) and an Adam optimizer on an
        exponentially decaying learning rate.
        """

        # reparameterization trick
        # instead of sampling from Q(z|X), sample epsilon = N(0,I)
        # z = z_mean + sqrt(var) * epsilon
        def sampling(args):
            """Reparameterization trick by sampling from an isotropic unit Gaussian.
        
            # Arguments
                args (tensor): mean and log of variance of Q(z|X)
        
            # Returns
                z (tensor): sampled latent vector
            """
            z_mean, z_log_var = args
            batch = K.shape(z_mean)[0]
            dim = K.int_shape(z_mean)[1]
            # by default, random_normal has mean = 0 and std = 1.0
            epsilon = K.random_normal(shape=(batch, dim))
            return z_mean + K.exp(0.5 * z_log_var) * epsilon

        # network parameters
        input_shape = (sequence_length, number_of_vars)
        self.batch_size = batch_size
        self.epochs = epochs

        # VAE model = encoder + decoder

        # build encoder model: two stride-`strs` tanh convs with dropout,
        # flattened into a small dense bottleneck.
        inputs = Input(shape=input_shape, name='enc_input')
        h_enc = Conv1D(fil1_conv_enc,
                       kernel,
                       activation='tanh',
                       strides=strs,
                       padding="same",
                       name='enc_conv1d_1')(inputs)
        h_enc = Dropout(drop_rate)(h_enc)
        h_enc = Conv1D(fil2_conv_enc,
                       kernel,
                       activation='tanh',
                       strides=strs,
                       padding="same",
                       name='enc_conv1d_2')(h_enc)
        h_enc = Flatten(name='enc_flatten')(h_enc)
        h_enc = Dense(unit_den_enc, activation='tanh',
                      name='enc_output')(h_enc)

        # reparameterization trick
        z_mean = Dense(latent_dim, name='z_mean')(h_enc)
        z_log_var = Dense(latent_dim, name='z_log_var')(h_enc)

        # use reparameterization trick to push the sampling out as input
        # note that "output_shape" isn't necessary with the TensorFlow backend
        z = Lambda(sampling, output_shape=(latent_dim, ),
                   name='z')([z_mean, z_log_var])

        # instantiate encoder model
        encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
        encoder.summary()

        # build decoder model: undo the two stride-`strs` reductions
        # (reduce = sequence_length // strs**2 only when strs == 2 — the
        # literal `strs * 2` matches strs**2 solely for that default).
        reduce = sequence_length // (strs * 2)
        latent_inputs = Input(shape=(latent_dim, ), name='z_sampling')
        h_dec = Dense(reduce * fil1_conv_dec,
                      activation='tanh',
                      name='dec_input')(latent_inputs)
        h_dec = Reshape((reduce, fil1_conv_dec), name='dec_reshape')(h_dec)
        h_dec = Conv1DTranspose(fil1_conv_dec,
                                kernel,
                                activation='tanh',
                                strides=strs,
                                padding="same",
                                name='dec_conv1d_1')(h_dec)
        h_dec = Dropout(drop_rate)(h_dec)
        h_dec = Conv1DTranspose(fil2_conv_dec,
                                kernel,
                                activation='tanh',
                                strides=strs,
                                padding="same",
                                name='dec_conv1d_2')(h_dec)
        outputs = Conv1DTranspose(number_of_vars,
                                  kernel,
                                  activation=None,
                                  padding="same",
                                  name='dec_conv1d_output')(h_dec)

        # instantiate decoder model
        decoder = Model(latent_inputs, outputs, name='decoder')
        decoder.summary()

        # instantiate VAE model: decoder consumes the sampled z (index 2).
        outputs = decoder(encoder(inputs)[2])
        self.vae = Model(inputs, outputs, name='vae_mlp')

        # Reconstruction term scaled to a per-sequence sum; KL term in its
        # standard closed form for a diagonal Gaussian posterior.
        reconstruction_loss = tf.reduce_mean(mse(inputs, outputs))
        reconstruction_loss *= (sequence_length * number_of_vars)
        kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
        kl_loss = K.sum(kl_loss, axis=-1)
        kl_loss *= -0.5
        vae_loss = K.mean(reconstruction_loss + kl_loss)
        self.vae.add_loss(vae_loss)

        lr_schedule = optimizers.schedules.ExponentialDecay(
            learning_rate,
            decay_steps=decay_step,
            decay_rate=decay_rate,
            staircase=True,
        )

        opt = optimizers.Adam(learning_rate=lr_schedule)
        self.vae.compile(optimizer=opt)
Beispiel #19
0
def get_generator(config, vocab):
    """Build a U-Net-style 1-D generator over (512, 21) inputs.

    Two stride-2 downsampling convs, a stack of residual blocks (optionally
    with self-attention at `attention_loc`), then two stride-2 upsampling
    transposed convs with skip concatenations, ending in a vocab-sized head.
    All conv variants are optionally wrapped in SpectralNormalization.
    """
    # Input layer
    model_input = tf.keras.layers.Input(shape=(512,21))
    
    # Parameters
    n_layers = config['n_layers']
    down_sample = config['down_sample']
    l1 = config['l1']
    l2 = config['l2']
    atte_loc = config['attention_loc']
    use_atte = config['use_attention']
    filters = config['filters']
    kernels = config['kernels']
    dilation = config['dilations']
    use_spectral_norm = config['use_spectral_norm']
    norm=config['norm']
    use_gumbel = config['use_gumbel']
    
    filters_down = [128, 256]
    filters_up = [256, 128]
    
    print(filters)
    print(atte_loc)
    
    # Sampling activation for the output head.
    if use_gumbel:
        gms = GumbelSoftmax(temperature = 1)
    else:
        gms = Softmax()
    

    # Layer construction, spectrally normalized or plain.
    if use_spectral_norm:
        projection = SpectralNormalization(Conv1D(64,9, padding='same', activation = 'relu', use_bias = True ,kernel_regularizer = tf.keras.regularizers.L1L2(l1=l1, l2=l2)))
        att = SelfAttentionSN(filters[atte_loc])
        down = [SpectralNormalization(Conv1D(filters_down[i], 9,
                 strides=2,
                 padding="same",
                 activation = "relu",
                 kernel_regularizer = tf.keras.regularizers.L1L2(l1=l1, l2=l2))) for i in range(down_sample)]
        up = [SpectralNormalization(Conv1DTranspose(filters_up[i], 9,
                                strides=2,
                                padding="same",
                                activation = "relu",
                                kernel_regularizer = tf.keras.regularizers.L1L2(l1=l1, l2=l2))) for i in range(down_sample)]
        
        outconv1 = SpectralNormalization(Conv1D(filters_up[-1],9,padding = 'same', activation = "relu"))
        # NOTE(review): here outconv2's activation is `gms`, yet `gms` is
        # applied AGAIN after outconv2 below — the non-SN branch uses
        # activation=None, so the SN branch applies the softmax twice.
        # Looks like a bug; confirm intended behavior before changing.
        outconv2 = SpectralNormalization(Conv1D(vocab, 9, padding = 'same', activation = gms))
    else:
        projection = Conv1D(64,9, padding='same', activation = 'relu', use_bias = True ,kernel_regularizer = tf.keras.regularizers.L1L2(l1=l1, l2=l2))
        att = SelfAttention(filters[atte_loc])
        down = [Conv1D(filters_down[i], 9,
                 strides=2,
                 padding="same",
                 activation = "relu",
                 kernel_regularizer = tf.keras.regularizers.L1L2(l1=l1, l2=l2)) for i in range(down_sample)]
        up = [Conv1DTranspose(filters_up[i], 9,
                                strides=2,
                                padding="same",
                                activation = "relu",
                                kernel_regularizer = tf.keras.regularizers.L1L2(l1=l1, l2=l2)) for i in range(down_sample)]

        outconv1 = Conv1D(filters_up[-1],9,padding = 'same', activation = "relu")
        outconv2 = Conv1D(vocab, 9, padding = 'same', activation = None)
      
    # Residual trunk blocks.
    res = [residual_mod(filters[i],
                        kernels[i],
                        dilation=dilation[i],
                        l1=l1,
                        l2=l2,
                        use_dout = False,
                        use_bias=True,
                        norm=norm,
                        sn = use_spectral_norm,
                        act="ReLU") for i in range(n_layers)]

    # Normalisations: one instance per projection/down/up position.
    if norm == "Layer":
        norm_up_down = [LayerNormalization(axis = -1, epsilon = 1e-6) for i in range(down_sample*2+1)]
    elif norm == "Batch":
        norm_up_down = [BatchNormalization() for i in range(down_sample*2+1)]
    elif norm == "Instance":
        norm_up_down = [tfa.layers.InstanceNormalization() for i in range(down_sample*2+1)]
    else:
        # `linear` is presumably an identity callable from module scope.
        norm_up_down = [linear for i in range(down_sample*2+1)]

            

    # Wire the graph: projection, two down stages (saving skips), residual
    # trunk with optional attention, then two up stages with skip concats.
    skip = []
    x = model_input
    x = projection(x)       
    x = norm_up_down[0](x)
    
    x = down[0](x)
    x = norm_up_down[1](x) 
    skip.append(x)
    
    x = down[1](x)
    x = norm_up_down[2](x)
    skip.append(x)

           
    for i in range(n_layers):
        x_out  = res[i](x)
        x = tf.keras.layers.Add()([x_out, x])  # residual connection
        if i == atte_loc and use_atte:
            x = att(x)[0]  # attention layer returns (output, weights)
    x = tf.keras.layers.Concatenate()([skip[1], x])
    x = up[0](x)
    x = norm_up_down[3](x)
    x = tf.keras.layers.Concatenate()([skip[0], x])
    x = up[1](x)
    x = norm_up_down[4](x)  
    x = outconv1(x)
    x = outconv2(x)
    x = gms(x) 
    model = tf.keras.Model(inputs=model_input, outputs=x)
    return model
def my_net(k = 1, k_size = 3, num_classes = 6, input_shape= (30, 300)):
    """Build and compile a 1-D VGG-style encoder/decoder segmentation network.

    Architecture: four Conv-BN-ReLU encoder stages (64k/128k/256k/512k filters,
    MaxPooling between the first three), then three Conv1DTranspose upsampling
    stages mirroring the encoder, and a final per-timestep classifier.

    Args:
        k: width multiplier applied to the Conv1D filter counts.
        k_size: kernel size used by every Conv1D layer.
        num_classes: channels of the output segmentation map.
        input_shape: (timesteps, features) of the input signal.

    Returns:
        A compiled ``Model`` (Adam lr=0.0025, categorical crossentropy,
        ``dice_coef`` metric) mapping ``input_shape`` to
        ``(timesteps, num_classes)``.
    """

    def conv_bn_relu(t, filters):
        # Conv1D (kernel k_size, 'same' padding) -> BatchNorm -> ReLU.
        t = Conv1D(filters, k_size, padding='same')(t)
        t = BatchNormalization()(t)
        return Activation('relu')(t)

    def up_bn_relu(t, filters):
        # Stride-2 transposed conv (doubles the length) -> BatchNorm -> ReLU.
        t = Conv1DTranspose(filters, kernel_size=2, strides=2, padding='same')(t)
        t = BatchNormalization()(t)
        return Activation('relu')(t)

    img_input = Input(input_shape)

    # Encoder stage 1
    x = conv_bn_relu(img_input, 64 * k)
    x = conv_bn_relu(x, 64 * k)
    x = MaxPooling1D()(x)

    # Encoder stage 2
    x = conv_bn_relu(x, 128 * k)
    x = conv_bn_relu(x, 128 * k)
    x = MaxPooling1D()(x)

    # Encoder stage 3 (three convs, VGG-style)
    for _ in range(3):
        x = conv_bn_relu(x, 256 * k)
    x = MaxPooling1D()(x)

    # Encoder stage 4 (bottleneck, no pooling afterwards)
    for _ in range(3):
        x = conv_bn_relu(x, 512 * k)

    # Decoder: three upsampling stages undoing the three poolings.
    # NOTE: transpose filter counts (256/128/64) are intentionally not
    # multiplied by k, matching the original implementation.
    x = up_bn_relu(x, 256)
    x = conv_bn_relu(x, 256 * k)
    x = conv_bn_relu(x, 256 * k)

    x = up_bn_relu(x, 128)
    x = conv_bn_relu(x, 128 * k)
    x = conv_bn_relu(x, 128 * k)

    x = up_bn_relu(x, 64)
    x = conv_bn_relu(x, 64 * k)
    x = conv_bn_relu(x, 64 * k)

    # NOTE(review): sigmoid with categorical_crossentropy is unusual —
    # softmax is conventional for mutually-exclusive classes. Kept as-is
    # since multi-label segmentation may be intended; confirm with callers.
    x = Conv1D(num_classes, k_size, activation='sigmoid', padding='same')(x)

    # Fixed: the original built the Model twice; one construction suffices.
    model = Model(img_input, x)
    model.compile(optimizer=Adam(0.0025),
                  loss='categorical_crossentropy',
                  metrics=[dice_coef])

    return model
# ----- Example #21 (separator between pasted snippets) -----
def build_1d_unet(signal_length, channels):
    """Build a 1-D U-Net for binary per-sample segmentation.

    Five double-conv encoder stages (32/32/64/128/256 filters) with 2x
    max-pooling between them, a 512-filter bottleneck, and five decoder
    stages that upsample with Conv1DTranspose, pad to match the skip
    connection (via ``check_and_add_padding``), concatenate, and apply
    another double-conv. Output is a single sigmoid channel.
    """

    def double_conv(t, filters, drop_rate):
        # Conv -> BN -> Dropout -> Conv -> BN, all 'same'-padded 3-kernels.
        t = Conv1D(filters, (3,), activation='relu', kernel_initializer='he_normal', padding='same')(t)
        t = BatchNormalization()(t)
        t = Dropout(drop_rate)(t)
        t = Conv1D(filters, (3,), activation='relu', kernel_initializer='he_normal', padding='same')(t)
        return BatchNormalization()(t)

    def up_and_merge(t, up_filters, skip):
        # Double the length, pad to the skip tensor's length, concatenate.
        t = Conv1DTranspose(up_filters, (2,), strides=(2,), padding='same')(t)
        t = check_and_add_padding(t, skip)
        return concatenate([t, skip])

    inputs = Input((signal_length, channels))

    # Encoder path; keep each stage's output for the skip connections.
    skips = []
    t = inputs
    for filters, drop_rate in ((32, 0.1), (32, 0.1), (64, 0.1), (128, 0.2), (256, 0.2)):
        t = double_conv(t, filters, drop_rate)
        skips.append(t)
        t = MaxPooling1D(2)(t)

    # Bottleneck.
    t = double_conv(t, 512, 0.3)

    # Decoder path, mirroring the encoder stages in reverse.
    decoder_plan = (
        (128, skips[4], 256, 0.2),
        (64, skips[3], 128, 0.2),
        (32, skips[2], 64, 0.1),
        (16, skips[1], 32, 0.1),
        (16, skips[0], 32, 0.1),
    )
    for up_filters, skip, filters, drop_rate in decoder_plan:
        t = up_and_merge(t, up_filters, skip)
        t = double_conv(t, filters, drop_rate)

    outputs = Conv1D(1, (1,), activation='sigmoid')(t)

    return Model(inputs=[inputs], outputs=[outputs])
# ----- Example #22 (separator between pasted snippets) -----
    def __init__(self,
                 sequence_length=30,
                 number_of_vars=1,
                 hidden_units_e=100,
                 hidden_units_d=100,
                 batch_size=100,
                 latent_dim=1,
                 epochs=100,
                 learning_rate=1e-2,
                 decay_rate=0.96,
                 decay_step=1000):
        """Build a convolutional VAE over (sequence_length, number_of_vars) series.

        Constructs an encoder (two stride-2 Conv1D layers -> Dense ->
        z_mean/z_log_var -> sampled z via the reparameterization trick) and a
        decoder (Dense -> Reshape -> Conv1DTranspose stack), then composes
        them into ``self.vae``.

        Args:
            sequence_length: number of timesteps per input window.
            number_of_vars: number of input channels per timestep.
            hidden_units_e / hidden_units_d: widths of the encoder/decoder
                dense layers (hidden_units_d is stored but not referenced in
                the visible construction code).
            batch_size, epochs: training hyperparameters, stored for later use.
            latent_dim: dimensionality of the latent code z.
            learning_rate, decay_rate, decay_step: optimizer schedule
                parameters, stored for later use.
        """

        # reparameterization trick
        # instead of sampling from Q(z|X), sample epsilon = N(0,I)
        # z = z_mean + sqrt(var) * epsilon
        def sampling(args):
            """Reparameterization trick by sampling from an isotropic unit Gaussian.
        
            # Arguments
                args (tensor): mean and log of variance of Q(z|X)
        
            # Returns
                z (tensor): sampled latent vector
            """
            z_mean, z_log_var = args
            batch = K.shape(z_mean)[0]
            dim = K.int_shape(z_mean)[1]
            # by default, random_normal has mean = 0 and std = 1.0
            epsilon = K.random_normal(shape=(batch, dim))
            return z_mean + K.exp(0.5 * z_log_var) * epsilon

        # network parameters
        self.sequence_length = sequence_length
        self.number_of_vars = number_of_vars
        self.learning_rate = learning_rate
        self.decay_rate = decay_rate
        self.decay_step = decay_step
        self.input_shape = (sequence_length, number_of_vars)
        self.hidden_units_e = hidden_units_e
        self.hidden_units_d = hidden_units_d
        self.batch_size = batch_size
        self.latent_dim = latent_dim
        self.epochs = epochs

        # VAE model = encoder + decoder

        # build encoder model
        # Two stride-2 convolutions halve the temporal axis twice before the
        # dense bottleneck.
        self.__inputs = Input(shape=self.input_shape, name='enc_input')
        self.__inputs_h_enc = Conv1D(16,
                                     3,
                                     activation='tanh',
                                     strides=2,
                                     padding="same",
                                     name='enc_conv1d_1')(self.__inputs)
        self.__inputs_h_enc = Dropout(0.2)(self.__inputs_h_enc)
        self.__inputs_h_enc = Conv1D(32,
                                     3,
                                     activation='tanh',
                                     strides=2,
                                     padding="same",
                                     name='enc_conv1d_2')(self.__inputs_h_enc)
        self.__inputs_flatten = Flatten(name='enc_flatten')(
            self.__inputs_h_enc)
        self.__h_enc = Dense(self.hidden_units_e,
                             activation='tanh',
                             name='enc_output')(self.__inputs_flatten)

        # reparameterization trick
        self.__z_mean = Dense(self.latent_dim, name='z_mean')(self.__h_enc)
        self.__z_log_var = Dense(self.latent_dim,
                                 name='z_log_var')(self.__h_enc)

        # use reparameterization trick to push the sampling out as input
        # note that "output_shape" isn't necessary with the TensorFlow backend
        self.__z = Lambda(sampling, output_shape=(self.latent_dim, ),
                          name='z')([self.__z_mean, self.__z_log_var])

        # instantiate encoder model
        encoder = Model(self.__inputs,
                        [self.__z_mean, self.__z_log_var, self.__z],
                        name='encoder')
        logger.info('Encoder. \n %s', encoder.summary())

        # build decoder model
        self.__latent_inputs = Input(shape=(self.latent_dim, ),
                                     name='z_sampling')
        # NOTE(review): the 60x32 shape is hard-coded rather than derived from
        # sequence_length; the two stride-2 transposes below expand 60 -> 240,
        # which does not match the default sequence_length=30 — confirm the
        # intended input length with callers.
        self.__h_z = Dense(60 * 32, activation='tanh',
                           name='dec_input')(self.__latent_inputs)
        self.__h_z = Reshape((60, 32), name='dec_reshape')(self.__h_z)
        self.__h_z = Conv1DTranspose(32,
                                     3,
                                     activation='tanh',
                                     strides=2,
                                     padding="same",
                                     name='dec_conv1d_1')(self.__h_z)
        # NOTE(review): likely bug — this Dropout output is assigned to the
        # encoder's variable (self.__inputs_h_enc) and never used; the next
        # layer consumes the pre-dropout self.__h_z, so this Dropout is dead
        # in the decoder graph. Probably meant
        # ``self.__h_z = Dropout(0.2)(self.__h_z)``.
        self.__inputs_h_enc = Dropout(0.2)(self.__h_z)
        self.__h_z = Conv1DTranspose(16,
                                     3,
                                     activation='tanh',
                                     strides=2,
                                     padding="same",
                                     name='dec_conv1d_2')(self.__h_z)
        # Linear (no activation) reconstruction head back to number_of_vars
        # channels.
        self.__outputs = Conv1DTranspose(self.number_of_vars,
                                         3,
                                         activation=None,
                                         padding="same",
                                         name='dec_conv1d_output')(self.__h_z)
        #self.__x_recons_flatten = Dense(sequence_length * number_of_vars)(self.__h_z)
        #self.__outputs = Reshape((sequence_length, number_of_vars))(self.__x_recons_flatten)

        # instantiate decoder model
        decoder = Model(self.__latent_inputs, self.__outputs, name='decoder')
        logger.info('Decoder. \n %s', decoder.summary())

        # instantiate VAE model
        # encoder(...)[2] is the sampled z; decode it to reconstruct the input.
        self.__outputs = decoder(encoder(self.__inputs)[2])
        self.vae = Model(self.__inputs, self.__outputs, name='vae_mlp')