Exemple #1
0
def define_generator(latent_dim, n_classes=17):
    """Build a conditional 1-D generator.

    The latent noise vector is gated (element-wise product) by an embedding
    of the class label, then expanded by a small upsampling CNN into a
    single-channel tanh-activated sequence.

    Args:
        latent_dim: size of the input noise vector.
        n_classes: number of distinct condition labels.

    Returns:
        A Keras ``Model`` mapping ``[noise, label]`` to the generated sample.
    """
    print("generator architecture")

    # Core upsampling stack applied to the conditioned latent vector.
    backbone = Sequential()
    stack = [
        Dense(512, activation="relu", input_dim=latent_dim),
        Reshape((1, 512)),
        BatchNormalization(momentum=0.8),
        UpSampling1D(),                      # length 1 -> 2
        Conv1D(512, kernel_size=4, padding="same"),
        Activation("relu"),
        BatchNormalization(momentum=0.8),
        UpSampling1D(size=5),                # length 2 -> 10
        Conv1D(128, kernel_size=4, padding="same"),
        Activation("relu"),
        BatchNormalization(momentum=0.8),
        Conv1D(1, kernel_size=4, padding='same'),
        Activation("tanh"),
    ]
    for layer in stack:
        backbone.add(layer)
    backbone.summary()

    # Condition the noise on the label via a learned embedding.
    noise = Input(shape=(latent_dim, ))
    label = Input(shape=(1, ), dtype='int32')
    label_embedding = Flatten()(Embedding(n_classes, latent_dim)(label))
    conditioned = multiply([noise, label_embedding])

    return Model([noise, label], backbone(conditioned))
Exemple #2
0
 def build_split_conv_decoder(self, invec1, invec2, ae_nodes):
     """Decode two latent streams into a single 1-D signal.

     Concatenates the two input tensors along the channel axis, then applies
     a stack of Conv1D + UpSampling1D blocks whose widths follow ``ae_nodes``
     in reverse (mirror of the encoder). A final Conv1D projects to one
     channel.

     Args:
         invec1, invec2: latent tensors to merge (same temporal length).
         ae_nodes: encoder layer widths; traversed in reverse here.

     Returns:
         The decoded single-channel tensor.
     """
     # fx: hidden activation, m: upsampling factor, k: kernel width.
     fx, m, k, l2_val = 'relu', 2, 3, self.l2_val
     # Mirror the encoder: widest layer first.
     ae_nodes = ae_nodes[::-1]
     z = keras.layers.concatenate([invec1, invec2], axis=-1)
     h4 = Conv1D(filters=ae_nodes[0],
                 kernel_size=k,
                 padding='same',
                 activation=fx,
                 kernel_regularizer=regularizers.l2(l2_val))(z)
     print("h4", h4.shape)
     u4 = UpSampling1D(m)(h4)
     print("u4", u4.shape)
     # Middle blocks; note the last entry of the reversed list is skipped.
     for n in ae_nodes[1:-1]:
         h4 = Conv1D(filters=n,
                     kernel_size=k,
                     padding='same',
                     activation=fx,
                     kernel_regularizer=regularizers.l2(l2_val))(u4)
         print("h4", n, h4.shape)
         u4 = UpSampling1D(m)(h4)
         print("u4", n, u4.shape)
     # normalize the values between -1 and 1 ???
     # NOTE(review): no activation here, so the output is linear/unbounded;
     # add a tanh if a [-1, 1] range is actually required.
     decoded = Conv1D(1, kernel_size=k, padding='same')(u4)
     print("decoded", decoded.shape)
     return decoded
    def create_model(self,
                     model_params={
                         'n1': 32,
                         'n2': 64,
                         'n3': 32,
                         'frame_len': 80
                     }):
        """Build and compile a U-Net-like 1-D model with additive skips.

        Two downsampling conv stages, a bottleneck, and two upsampling
        stages whose outputs are added to the matching encoder activations;
        compiled with weight-normalized Adam and an MSE loss.

        Args:
            model_params: filter counts n1/n2/n3 for the three stages.
                NOTE(review): mutable default dict -- harmless here since it
                is only read, but a None sentinel would be safer. Its
                'frame_len' entry is ignored; self.frame_len is used instead.

        Returns:
            The compiled Keras Model.
        """
        frame_len = self.frame_len  # input length comes from the instance
        n1 = model_params['n1']
        n2 = model_params['n2']
        n3 = model_params['n3']

        input_sque = Input(shape=(frame_len, 1))
        # ---- Encoder stage 1 ----
        c1 = Conv1D(n1, 3, padding='same')(input_sque)
        c1 = Activation(selu)(c1)
        c1 = Conv1D(n1, 3, padding='same')(c1)
        c1 = Activation(selu)(c1)
        x = MaxPooling1D(2)(c1)

        # ---- Encoder stage 2 ----
        c2 = Conv1D(n2, 3, padding='same')(x)
        c2 = Activation(selu)(c2)
        c2 = Conv1D(n2, 3, padding='same')(c2)
        c2 = Activation(selu)(c2)
        x = MaxPooling1D(2)(c2)

        # ---- Bottleneck ----
        c3 = Conv1D(n3, 3, padding='same')(x)
        c3 = Activation(selu)(c3)
        x = UpSampling1D(2)(c3)

        # ---- Decoder stage 2 (mirrors encoder stage 2) ----
        c2_2 = Conv1D(n2, 3, padding='same')(x)
        c2_2 = Activation(selu)(c2_2)
        c2_2 = Conv1D(n2, 3, padding='same')(c2_2)
        c2_2 = Activation(selu)(c2_2)

        # Additive skip connection from encoder stage 2.
        m1 = Add()([c2, c2_2])
        m1 = UpSampling1D(2)(m1)

        # ---- Decoder stage 1 (mirrors encoder stage 1) ----
        c1_2 = Conv1D(n1, 3, padding='same')(m1)
        c1_2 = Activation(selu)(c1_2)
        c1_2 = Conv1D(n1, 3, padding='same')(c1_2)
        c1_2 = Activation(selu)(c1_2)

        # Additive skip connection from encoder stage 1.
        m2 = Add()([c1, c1_2])

        # Linear single-channel reconstruction.
        decoded = Conv1D(1, 5, padding='same', activation='linear')(m2)

        model = Model(input_sque, decoded)
        model.summary()

        learning_rate = self.learning_rate
        # adam = optimizers.Adam(lr=learning_rate)
        # model.compile(optimizer=adam, loss='mse', metrics=[SNRLoss])

        # Weight-normalized Adam; `snr` is a project-defined metric.
        adam_wn = AdamWithWeightnorm(lr=learning_rate,
                                     beta_1=0.9,
                                     beta_2=0.999,
                                     epsilon=1e-08)
        model.compile(optimizer=adam_wn, loss='mse', metrics=[snr])

        return model
Exemple #4
0
def load_model_ae1(input_dim, data_set_size, l2_reg=0.01, activation='relu',
                   hidden_layer_size=64, hidden_layer_num=3,
                   dropout_rate=0.25):
    """Build a 1-D convolutional autoencoder (Keras 1.x API).

    Encoder: three Conv1D + MaxPooling1D stages; decoder: three Conv1D +
    UpSampling1D stages mirroring them, followed by a single-channel linear
    Conv1D reconstruction layer.

    NOTE: ``data_set_size``, ``l2_reg``, ``hidden_layer_num`` and
    ``dropout_rate`` are accepted for signature compatibility but are not
    used by this architecture.

    Args:
        input_dim: length of the 1-D input signal (input shape (input_dim, 1)).
        activation: activation for every hidden convolution.
        hidden_layer_size: base filter count; deeper layers use twice this.

    Returns:
        The uncompiled Keras ``Sequential`` model.
    """
    print('Building CNN-Model.')

    model = Sequential()
    pool_length = POOL_LENGTH  # shared pooling/upsampling factor

    # ---- Encoder: filters h, 2h, 2h, each followed by pooling ----
    model.add(Convolution1D(hidden_layer_size, FILTER_SIZE, border_mode='same',
                            activation=activation, input_shape=(input_dim, 1)))
    model.add(MaxPooling1D(pool_length=pool_length, stride=None, border_mode='valid'))

    model.add(Convolution1D(hidden_layer_size * 2, FILTER_SIZE, border_mode='same',
                            activation=activation))
    model.add(MaxPooling1D(pool_length=pool_length, stride=None, border_mode='valid'))

    model.add(Convolution1D(hidden_layer_size * 2, FILTER_SIZE, border_mode='same',
                            activation=activation))
    model.add(MaxPooling1D(pool_length=pool_length, stride=None, border_mode='valid'))

    # ---- Decoder: mirror the encoder with upsampling ----
    model.add(Convolution1D(hidden_layer_size * 2, FILTER_SIZE, border_mode='same',
                            activation=activation))
    model.add(UpSampling1D(length=pool_length))

    model.add(Convolution1D(hidden_layer_size * 2, FILTER_SIZE, border_mode='same',
                            activation=activation))
    model.add(UpSampling1D(length=pool_length))

    model.add(Convolution1D(hidden_layer_size, FILTER_SIZE, border_mode='same',
                            activation=activation))
    model.add(UpSampling1D(length=pool_length))

    # Linear single-channel reconstruction.
    model.add(Convolution1D(1, 3, border_mode='same'))

    model.summary()

    return model
Exemple #5
0
def gen_model_v1(lr_gen=0.001
                 ):  # For the model they used lr 20*desc=10*both=gen=default
    """Generator v1: Dense stack -> (62, 4) reshape -> two 2x upsampling
    conv stages -> single-channel output of length 248.

    Args:
        lr_gen: Adam learning rate used when compiling the model.

    Returns:
        The compiled Keras ``Sequential`` model.
    """
    model = Sequential()
    model.add(Dense(units=64, input_dim=32))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(248))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    # 248 units viewed as 62 timesteps x 4 channels.
    model.add(Reshape((62, 4), input_shape=(248, )))
    model.add(UpSampling1D(size=2))

    model.add(Conv1D(16, 3, padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(UpSampling1D(size=2))
    model.add(Conv1D(1, 3, padding='same'))
    # NOTE(review): softmax over the last axis of a single-channel output is
    # constant 1.0 everywhere -- confirm whether softmax over timesteps (or
    # tanh) was intended.
    model.add(Activation('softmax'))

    Adam = keras.optimizers.Adam(lr=lr_gen)
    model.compile(loss="categorical_crossentropy",
                  optimizer=Adam,
                  metrics=['accuracy'])
    return model
Exemple #6
0
 def build_decoder(self, latent, name='conv', n_layers=1):
     """Build the decoder half of the model.

     Args:
         latent: latent tensor to decode.
         name: decoder family -- 'conv', 'mlp', or 'lstm'.
         n_layers: depth selector for the 'conv' and 'mlp' variants.

     Returns:
         The decoded tensor. For 'conv' and 'mlp' the output is a softmax
         over the last axis, averaged over ``self.n_sampler`` samples to
         shape (batch, window_size, n_channels).
     """
     decoder = latent
     if name == 'conv':
         # Each depth adds a 4x upsampling stage; the Dense/Reshape stem is
         # sized so the final temporal length equals self.window_size.
         # NOTE(review): layer names 'decocer_upsample2' carry a typo; they
         # are kept verbatim because renaming layers breaks weight loading.
         if n_layers == 3:
             decoder = Dense(self.window_size//64*256, activation='relu', name='decoder_dense1')(decoder)
             decoder = Reshape((self.window_size//64, 256), name='decoder_reshape1')(decoder)
             decoder = UpSampling1D(4, name='decoder_upsample1')(decoder)
             decoder = Conv1D(128, 3, padding='same', activation='relu', name='decoder_conv1')(decoder)
             decoder = UpSampling1D(4, name='decocer_upsample2')(decoder)
             decoder = Conv1D(64, 3, padding='same', activation='relu', name='decoder_conv2')(decoder)
         if n_layers == 2:
             decoder = Dense(self.window_size//16*128, activation='relu', name='decoder_dense1')(decoder)
             decoder = Reshape((self.window_size//16, 128), name='decoder_reshape1')(decoder)
             decoder = UpSampling1D(4, name='decocer_upsample2')(decoder)
             decoder = Conv1D(64, 3, padding='same', activation='relu', name='decoder_conv2')(decoder)
         if n_layers == 1:
             decoder = Dense(self.window_size//4*64, activation='relu', name='decoder_dense1')(decoder)
             decoder = Reshape((self.window_size//4, 64), name='decoder_reshape1')(decoder)
         # Shared tail: final 4x upsample, 4-channel projection, softmax,
         # then average over the sampler axis.
         decoder = UpSampling1D(4, name='decoder_upsample3')(decoder)
         decoder = Conv1D(4, 1, padding='same', name='decoder_conv3')(decoder)
         decoder = Lambda(lambda x: K.softmax(x, axis=-1), name='output_softmax')(decoder)
         decoder = Lambda(lambda x: K.mean(K.reshape(x, (-1, self.n_sampler, self.window_size, self.n_channels)), axis=1), 
             name='output_mean')(decoder)
     elif name == 'mlp':
         if n_layers >= 2:
             decoder = Dense(128, activation='relu', name='decoder_dense2')(decoder)
         decoder = Dense(self.window_size*self.n_channels, name='decoder_dense3')(decoder)
         decoder = Lambda(lambda x: K.softmax(x, axis=-1), name='output_softmax')(decoder)
         decoder = Lambda(lambda x: K.mean(K.reshape(x, (-1, self.n_sampler, self.window_size, self.n_channels)), axis=1), 
             name='output_mean')(decoder)
     elif name == 'lstm':
         # NOTE(review): layer name says 'encoder_lstm1' inside the decoder
         # -- likely a copy-paste of the encoder name; confirm before renaming.
         decoder = LSTM(64, name='encoder_lstm1', return_sequences=True)(decoder)
     return decoder 
def ED_TCN():
    """Encoder-decoder temporal conv network producing 4-class probabilities
    for (300, nb_input_vector) inputs (nb_input_vector is module-level).

    Returns:
        The assembled (uncompiled) Keras Model; the per-position softmax is
        averaged over time by GlobalAveragePooling1D to one 4-way output.
    """
    inputs = Input((300, nb_input_vector))  # fixed input size
    # NOTE(review): unused; the class count is hard-coded as 4 below.
    label_classes = 4
    # Encoder
    x = Conv1D(128, 45, strides=1, padding='same', use_bias=True)(inputs)
    x = LeakyReLU(alpha=0.3)(x)
    x = MaxPooling1D(pool_size=2, strides=2, padding='same')(x)

    x = Conv1D(160, 45, strides=1, padding='same', use_bias=True)(x)
    x = LeakyReLU(alpha=0.3)(x)
    x = MaxPooling1D(pool_size=2, strides=2, padding='same')(x)

    # Decoder
    x = UpSampling1D(size=2)(x)
    x = Conv1D(160, 45, strides=1, padding='same', use_bias=True)(x)
    x = LeakyReLU(alpha=0.3)(x)

    x = UpSampling1D(size=2)(x)
    x = Conv1D(128, 45, strides=1, padding='same', use_bias=True)(x)
    x = LeakyReLU(alpha=0.3)(x)

    # combine
    x = Conv1D(4, 1, strides=1, padding='valid', use_bias=True)(x)
    x = LeakyReLU(alpha=0.3)(x)
    x = Activation("softmax")(x)
    outputs = GlobalAveragePooling1D()(x)
    model = Model(inputs, outputs)
    model.summary()
    return model
Exemple #8
0
def build_generator():
    '''
    Put together a model that takes in one-dimensional noise and outputs two-dimensional
    data representing a black and white image, with -1 for black and 1 for white.
    
    returns: the model object
    '''

    model = Sequential()

    # Dense stem projects the noise to 13 timesteps x 128 channels.
    model.add(Dense(img_cols*16*2, activation="relu",
                    input_shape=(noise_len,)))
    model.add(Reshape((13, 128)))
    model.add(BatchNormalization(momentum=0.8))

    # Two 2x-upsampling conv stages (128 then 64 filters).
    for n_filters in (128, 64):
        model.add(UpSampling1D(size=2))
        model.add(Conv1D(n_filters, kernel_size=3, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))

    # Final projection to 300 channels in [-1, 1].
    model.add(Conv1D(300, kernel_size=3, padding="same"))
    model.add(Activation("tanh"))

    return model
Exemple #9
0
def generator_model_44():  # CDNN Model
    """Generator (Keras 1.x API): conv stem on (CODE_LN, 1) followed by two
    4x upsampling stages (16x total), ending in a linear single-channel
    convolution."""
    net = Sequential()

    net.add(Convolution1D(16, 5, border_mode='same', input_shape=(CODE_LN, 1)))
    net.add(Activation('relu'))

    # Two 4x upsampling stages: 32 filters then the 1-channel output.
    net.add(UpSampling1D(length=4))
    net.add(Convolution1D(32, 5, border_mode='same'))
    net.add(Activation('relu'))

    net.add(UpSampling1D(length=4))
    net.add(Convolution1D(1, 5, border_mode='same'))
    # Output is deliberately left linear (no activation).
    return net
    def build_generator(self):
        """Build the conditional generator.

        A Dense + Reshape stem produces a (125, 128) tensor that passes
        through four upsampling/conv stages and a final tanh projection to
        ``self.channels``. The latent noise is conditioned on the class
        label by element-wise multiplication with a label embedding.

        Returns:
            Model mapping [noise, label] to the generated signal.
        """

        model = Sequential()

        # NOTE(review): a LeakyReLU layer instance is passed as the Dense
        # `activation` argument; this works in recent Keras but is unusual.
        model.add(
            Dense(128 * 125, activation=LeakyReLU(),
                  input_dim=self.latent_dim))
        model.add(Reshape((125, 128)))

        model.add(UpSampling1D())  # 125 -> 250
        model.add(Conv1D(128, kernel_size=3, padding="same"))
        model.add(LeakyReLU())
        model.add(InstanceNormalization())
        # model.add(BatchNormalization(momentum=0.8))

        model.add(UpSampling1D())  # 250 -> 500
        model.add(Conv1D(128, kernel_size=3, padding="same"))
        model.add(LeakyReLU())
        model.add(InstanceNormalization())

        # NOTE(review): UpSampling1D followed by a stride-2 conv leaves the
        # temporal length unchanged -- confirm the pairing is intentional.
        model.add(UpSampling1D())
        model.add(Conv1D(256, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU())
        model.add(InstanceNormalization())
        # model.add(BatchNormalization(momentum=0.8))

        model.add(UpSampling1D())
        model.add(Conv1D(128, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU())
        model.add(InstanceNormalization())

        model.add(Conv1D(self.channels, kernel_size=3, padding="same"))
        # change to LeakyRelu since a lot of signal exceeding 1
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim, ))
        # img = model(noise)

        label = Input(shape=(1, ))
        # Gate the noise with a learned label embedding (element-wise
        # product). NOTE(review): the embedding width is
        # np.prod(self.signal_shape) while noise has self.latent_dim
        # entries -- multiply() requires them to match; confirm.
        label_embedding = Flatten()(Embedding(self.num_classes,
                                              np.prod(
                                                  self.signal_shape))(label))
        # label_embedding = Flatten()(RepeatVector(self.latent_dim)(label))
        model_input = multiply([noise, label_embedding])
        img = model(model_input)

        return Model([noise, label], img)
Exemple #11
0
def generator_model():  # CDNN Model
    """Generator (Keras 1.x API): conv stem on (CODE_LN, 1), then two
    upsampling stages with factors N_GEN_l[0] and N_GEN_l[1], ending in a
    single-channel tanh output."""
    print(INPUT_LN, N_GEN_l, CODE_LN)

    net = Sequential()
    net.add(Convolution1D(16, 5, border_mode='same', input_shape=(CODE_LN, 1)))
    net.add(Activation('relu'))

    # (upsample factor, filters, activation) per stage.
    for factor, n_filters, act in ((N_GEN_l[0], 32, 'relu'),
                                   (N_GEN_l[1], 1, 'tanh')):
        net.add(UpSampling1D(length=factor))
        net.add(Convolution1D(n_filters, 5, border_mode='same'))
        net.add(Activation(act))

    return net
def build_annotator(input_channels=1, output_channels=1):
    """Build a 1-D U-Net-style annotator.

    Four stride-2 conv downsampling stages, three upsampling stages with
    skip connections, one final 2x upsample, and a per-position softmax
    projection to ``output_channels``.

    Args:
        input_channels: channel count of the (input_length, C) input
            (``input_length`` is a module-level constant).
        output_channels: channel count of the softmax output.

    Returns:
        Model mapping input samples to per-position class probabilities.
    """
    def conv_layer(layer_input, filters, kernel_size=5, strides=2):
        # Downsampling block: strided conv + LeakyReLU + instance norm.
        d = Conv1D(filters, kernel_size, strides=strides, padding='same')(layer_input)
        d = LeakyReLU(alpha=0.20)(d)
        d = InstanceNormalization()(d)
        return d

    def deconv_layer(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
        # Upsampling block: 2x upsample + conv + optional dropout +
        # instance norm, then concat with the matching encoder activation.
        u = UpSampling1D(size=2)(layer_input)
        u = Conv1D(filters, f_size, strides=1, padding='same', activation='relu')(u)
        if dropout_rate:
            u = Dropout(dropout_rate)(u)
        u = InstanceNormalization()(u)
        u = Concatenate()([u, skip_input])
        return u

    # Input samples
    input_samples = Input(shape=(input_length, input_channels))

    # Downsampling:
    d1 = conv_layer(input_samples, 32, 8, 2)
    d2 = conv_layer(d1, 64, 8, 2)
    d3 = conv_layer(d2, 128, 8, 2)
    d4 = conv_layer(d3, 256, 8, 2)

    # Now Upsample:
    u1 = deconv_layer(d4, d3, 128, f_size=8)
    u2 = deconv_layer(u1, d2, 64, f_size=8)
    u3 = deconv_layer(u2, d1, 32, f_size=8)
    u4 = UpSampling1D(size=2)(u3)
    output_samples = Conv1D(output_channels, kernel_size=8, strides=1, padding='same', activation='softmax')(u4)
    return Model(input_samples, output_samples)
Exemple #13
0
def build_generator():
    """Audio super-resolution generator.

    Encoder: four stride-2 conv blocks (256/512/512/1024 filters) over a
    (32, LRSHAPE) input. Decoder: four conv + 2x upsampling blocks with
    additive skip connections back to the encoder activations. A final
    width-9 conv produces the HRSHAPE-channel output.

    Returns:
        Model mapping low-resolution audio to the high-resolution estimate.
    """
    audio_lr = Input(shape=(32, LRSHAPE))
    # ---- Encoder: each block halves the temporal length ----
    c1 = Conv1D(filters=256, kernel_size=7, strides=2,
                padding='same')(audio_lr)
    b1 = BatchNormalization()(c1)
    a1 = LeakyReLU(alpha=0.2)(b1)

    c2 = Conv1D(filters=512, kernel_size=5, strides=2, padding='same')(a1)
    b2 = BatchNormalization()(c2)
    a2 = LeakyReLU(alpha=0.2)(b2)

    c3 = Conv1D(filters=512, kernel_size=3, strides=2, padding='same')(a2)
    b3 = BatchNormalization()(c3)
    a3 = LeakyReLU(alpha=0.2)(b3)

    c4 = Conv1D(filters=1024, kernel_size=3, strides=2, padding='same')(a3)
    b4 = BatchNormalization()(c4)
    a4 = LeakyReLU(alpha=0.2)(b4)

    # ---- Decoder: each block doubles the length and adds the matching
    # encoder activation as a skip connection ----
    c5 = Conv1D(filters=512, kernel_size=3, strides=1, padding='same')(a4)
    u5 = UpSampling1D(size=2)(c5)
    b5 = BatchNormalization()(u5)
    a5 = LeakyReLU(alpha=0.2)(b5)
    A5 = Add()([a5, a3])

    c6 = Conv1D(filters=512, kernel_size=5, strides=1, padding='same')(A5)
    u6 = UpSampling1D(size=2)(c6)
    b6 = BatchNormalization()(u6)
    a6 = LeakyReLU(alpha=0.2)(b6)
    A6 = Add()([a6, a2])

    c7 = Conv1D(filters=256, kernel_size=7, strides=1, padding='same')(A6)
    u7 = UpSampling1D(size=2)(c7)
    b7 = BatchNormalization()(u7)
    a7 = LeakyReLU(alpha=0.2)(b7)
    A7 = Add()([a7, a1])

    c8 = Conv1D(filters=HRSHAPE, kernel_size=7, strides=1, padding='same')(A7)
    u8 = UpSampling1D(size=2)(c8)
    b8 = BatchNormalization()(u8)
    a8 = LeakyReLU(alpha=0.2)(b8)

    # Final linear projection (no BN/activation on the output).
    c9 = Conv1D(filters=HRSHAPE, kernel_size=9, strides=1, padding='same')(a8)
    # b9 = BatchNormalization()(c9)
    # a9 = LeakyReLU(alpha=0.2)(b9)

    return Model(audio_lr, c9)
Exemple #14
0
def transposeconv(layer, filters, size, stride):
    """Approximate a 1-D transposed convolution: nearest-neighbour upsample
    by ``stride``, then a stride-1 same-padded Conv1D with ``filters``
    output channels and kernel width ``size``."""
    upsampled = UpSampling1D(size=stride)(layer)
    return Conv1D(filters, size, strides=1, padding='same',
                  kernel_initializer='he_normal')(upsampled)
 def deconv_layer(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
     """Upsampling block: 2x upsample, ReLU conv, optional dropout, instance
     normalization, then concatenation with the encoder skip tensor."""
     up = UpSampling1D(size=2)(layer_input)
     up = Conv1D(filters, f_size, strides=1, padding='same',
                 activation='relu')(up)
     if dropout_rate:
         up = Dropout(dropout_rate)(up)
     up = InstanceNormalization()(up)
     return Concatenate()([up, skip_input])
Exemple #16
0
 def build_generator(self):
     """Build the generator network.

     The noise vector is reshaped to (noise_dim, channels), run through a
     conv stack (three blocks with 2x upsampling), flattened, projected by
     a Dense layer to num_steps * channels, and reshaped to the output
     sequence. Optionally smooths the output with a moving-average Lambda
     and, in training mode, writes the architecture JSON and plot to
     ./output.

     Returns:
         The Keras Sequential generator.
     """
     model = Sequential()
     # Treat the flat noise vector as a (noise_dim, channels) sequence.
     model.add(Reshape((self.noise_dim,self.channels), input_shape=(self.noise_dim,)))
     model.add(Conv1D(128, kernel_size=4, padding="same", data_format="channels_last"))
     model.add(BatchNormalization(momentum=0.8))
     model.add(Activation(self.conv_activation))
     model.add(UpSampling1D())
     model.add(Conv1D(128, kernel_size=4, padding="same"))
     model.add(BatchNormalization(momentum=0.8))
     model.add(Activation(self.conv_activation))
     model.add(UpSampling1D())
     model.add(Conv1D(64, kernel_size=4, padding="same"))
     model.add(BatchNormalization(momentum=0.8))
     model.add(Activation(self.conv_activation))
     model.add(UpSampling1D())
     model.add(Conv1D(32, kernel_size=4, padding="same"))
     model.add(BatchNormalization(momentum=0.8))
     model.add(Activation(self.conv_activation))
     model.add(Conv1D(16, kernel_size=4, padding="same"))
     model.add(BatchNormalization(momentum=0.8))
     model.add(Activation(self.conv_activation))
     model.add(Conv1D(self.channels, kernel_size=4, padding="same"))        
     model.add(BatchNormalization(momentum=0.8))
     model.add(Activation(self.conv_activation))
     # Dense head maps the conv features to the exact output size.
     model.add(Flatten())
     model.add(Dense(self.num_steps * self.channels))
     model.add(Activation(self.activation_function))
     model.add(Reshape((self.num_steps,self.channels)))
     
     # Optional moving-average smoothing over the generated sequence.
     if self.sliding_window > 0:
         model.add(Lambda(self.moving_avg, output_shape=self.seq_shape, name='mvg_avg'))
     
     # Training mode: dump the architecture for inspection.
     if self.training_mode:
         print('Generator model:')
         model.summary()
         model_json = model.to_json()
         
         with open('./output/generator.json', "w") as json_file:
             json_file.write(model_json)
             
         file_name = './output/generator.png'
         plot_model(model, to_file=file_name, show_shapes = True)        
 
     return model
Exemple #17
0
def generator_model(noise_dim=100, aux_dim=47, model_name="generator"):
    """Conditional generator (legacy Keras 1.x API: ``merge``,
    ``Convolution1D`` keyword names).

    Noise and auxiliary vectors are concatenated, projected to (100, 10),
    and upsampled twice via width-13 conv blocks to a (400, 1) tanh output.

    Args:
        noise_dim: length of the noise input.
        aux_dim: length of the auxiliary conditioning input.
        model_name: name given to the returned Model.

    Returns:
        Model mapping [gen_input, aux_input] to the generated sequence.
    """
    # Merge noise and auxilary inputs
    gen_input = Input(shape=(noise_dim, ), name="noise_input")
    aux_input = Input(shape=(aux_dim, ), name="auxilary_input")
    x = merge([gen_input, aux_input], mode="concat", concat_axis=-1)

    # Dense Layer 1
    x = Dense(10 * 100)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)  # output shape is 10*100

    # Reshape the tensors to support CNNs
    x = Reshape((100, 10))(x)  # shape is 100 x 10

    # Conv Layer 1
    x = Convolution1D(nb_filter=250,
                      filter_length=13,
                      border_mode='same',
                      subsample_length=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)  # output shape is 100 x 250
    x = UpSampling1D(length=2)(x)  # output shape is 200 x 250

    # Conv Layer 2
    x = Convolution1D(nb_filter=100,
                      filter_length=13,
                      border_mode='same',
                      subsample_length=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)  # output shape is 200 x 100
    x = UpSampling1D(length=2)(x)  # output shape is 400 x 100

    # Conv Layer 3
    x = Convolution1D(nb_filter=1,
                      filter_length=13,
                      border_mode='same',
                      subsample_length=1)(x)
    x = BatchNormalization()(x)
    x = Activation('tanh')(x)  # final output shape is 400 x 1

    generator_model = Model(input=[gen_input, aux_input],
                            output=[x],
                            name=model_name)

    return generator_model
Exemple #18
0
def gen_model_stock(lr=.01):
    """Stock generator: tanh Dense stack reshaped to (62, 4), then two 2x
    upsampling conv stages ending in a tanh single-channel output; compiled
    with Nesterov SGD and categorical cross-entropy."""
    net = Sequential()
    net.add(Dense(units=64, input_dim=32))
    net.add(Activation('tanh'))
    net.add(Dense(248))
    net.add(BatchNormalization())
    net.add(Activation('tanh'))
    net.add(Reshape((62, 4), input_shape=(248, )))
    # Two 2x upsampling conv stages: 16 filters, then the 1-channel output.
    for n_filters in (16, 1):
        net.add(UpSampling1D(size=2))
        net.add(Conv1D(n_filters, 3, padding='same'))
        net.add(Activation('tanh'))
    gen_optim = SGD(lr=lr, momentum=0.9, nesterov=True)
    net.compile(loss='categorical_crossentropy',
                optimizer=gen_optim)  ### lr=0.01
    return net
Exemple #19
0
def gen_model_stock(lr=.01, max_time=128):
    """Stock generator variant with configurable input width ``max_time``:
    Dense stack -> (64, 4) -> two 2x upsampling conv stages -> 2x max-pool
    -> tanh single-channel output; compiled with Nesterov SGD."""
    net = Sequential()
    net.add(Dense(units=64, input_dim=max_time))
    net.add(Activation('tanh'))
    net.add(Dense(256))
    net.add(BatchNormalization())
    net.add(Activation('tanh'))
    net.add(Reshape((64, 4)))
    # First 2x upsampling conv stage.
    net.add(UpSampling1D(size=2))
    net.add(Conv1D(16, 3, padding='same'))
    net.add(Activation('tanh'))
    # Second stage, pooled back down by 2 before the final activation.
    net.add(UpSampling1D(size=2))
    net.add(Conv1D(1, 3, padding='same'))
    net.add(MaxPooling1D(strides=2))
    net.add(Activation('tanh'))
    sgd_optim = SGD(lr=lr, momentum=0.9, nesterov=True)
    net.compile(loss='categorical_crossentropy',
                optimizer=sgd_optim)  ### lr=0.01
    net.summary()
    return net
Exemple #20
0
def build_generator():
    """DCGAN-style 1-D generator: Dense stem -> (seq_init_length, 128) ->
    three 2x upsampling conv stages -> single-channel tanh output; returns
    a Model wrapping the Sequential stack."""
    net = Sequential()
    net.add(Dense(128 * seq_init_length * 1, activation='relu', input_dim=latent_dim))
    net.add(Reshape((seq_init_length, 128)))
    net.add(BatchNormalization(momentum=0.8))
    # Hidden upsampling stages; kernels widen as the sequence grows.
    for n_filters, kernel in ((128, 6), (64, 9)):
        net.add(UpSampling1D(size=2))
        net.add(Conv1D(n_filters, kernel_size=kernel, padding='same'))
        net.add(Activation('relu'))
        net.add(BatchNormalization(momentum=0.8))
    # Output stage: final 2x upsample and tanh projection to one channel.
    net.add(UpSampling1D(size=2))
    net.add(Conv1D(1, kernel_size=12, padding='same'))
    net.add(Activation('tanh'))
    net.summary()

    noise_in = Input(shape=(latent_dim,))
    return Model(noise_in, net(noise_in))
def create_generator(inputs_gen):
    """U-Net-like generator with named layers and additive skip connections.

    Two downsampling conv stages, a bottleneck, and two upsampling stages
    whose outputs are added to the matching encoder activations; a linear
    single-channel conv produces the output.

    Args:
        inputs_gen: input tensor of shape (length, 1).

    Returns:
        The decoded single-channel tensor (not wrapped in a Model).
    """
    n1 = 32
    n2 = 64
    n3 = 32

    # ---- Encoder stage 1 ----
    c1 = Conv1D(n1, 3, padding='same', name='conv_1')(inputs_gen)
    c1 = Activation(selu, name='act_1')(c1)
    c1 = Conv1D(n1, 3, padding='same', name='conv_2')(c1)
    c1 = Activation(selu, name='act_2')(c1)
    x = MaxPooling1D(2, name='mpool_1')(c1)

    # ---- Encoder stage 2 ----
    c2 = Conv1D(n2, 3, padding='same', name='conv_3')(x)
    c2 = Activation(selu, name='act_3')(c2)
    c2 = Conv1D(n2, 3, padding='same', name='conv_4')(c2)
    c2 = Activation(selu, name='act_4')(c2)
    x = MaxPooling1D(2, name='mpool_2')(c2)

    # ---- Bottleneck ----
    c3 = Conv1D(n3, 3, padding='same', name='conv_5')(x)
    c3 = Activation(selu, name='act_5')(c3)
    x = UpSampling1D(2, name='usample_1')(c3)

    # ---- Decoder stage 2 (mirrors encoder stage 2) ----
    c2_2 = Conv1D(n2, 3, padding='same', name='conv_6')(x)
    c2_2 = Activation(selu, name='act_6')(c2_2)
    c2_2 = Conv1D(n2, 3, padding='same', name='conv_7')(c2_2)
    c2_2 = Activation(selu, name='act_7')(c2_2)

    # Additive skip connection from encoder stage 2.
    m1 = Add(name='add_1')([c2, c2_2])
    m1 = UpSampling1D(2, name='usample_2')(m1)

    # ---- Decoder stage 1 (mirrors encoder stage 1) ----
    c1_2 = Conv1D(n1, 3, padding='same', name='conv_8')(m1)
    c1_2 = Activation(selu, name='act_8')(c1_2)
    c1_2 = Conv1D(n1, 3, padding='same', name='conv_9')(c1_2)
    c1_2 = Activation(selu, name='act_9')(c1_2)

    # Additive skip connection from encoder stage 1.
    m2 = Add(name='add_2')([c1, c1_2])

    # Linear single-channel reconstruction.
    decoded = Conv1D(1, 5, padding='same', activation='linear',
                     name='conv_10')(m2)

    return decoded
Exemple #22
0
def generator_model_cnn():
  """CNN generator (Keras 1.x API): noise (100,) -> Dense(128) -> six 2x
  upsampling conv blocks -> multi-scale fusion -> single-channel tanh
  output of length 8192."""
  input_noise = Input(shape=(100,))
  dense_stem = Dense(128)(input_noise)
  x = Reshape((128, 1))(dense_stem)

  # Five 2x upsampling blocks; kernel width shrinks as resolution grows.
  for kernel_width in (35, 25, 15, 15, 15):
      x = UpSampling1D(2)(x)
      x = Convolution1D(64, kernel_width, border_mode='same')(x)
      x = BatchNormalization()(x)
      x = LeakyReLU()(x)

  # Sixth upsample; its conv output feeds the multi-scale fusion below.
  x = UpSampling1D(2)(x)
  x = Convolution1D(64, 15, border_mode='same')(x)

  # Multi-scale fusion: two narrower convs over the same tensor, summed
  # with it.
  branch_mid = Convolution1D(64, 7, border_mode='same')(x)
  branch_fine = Convolution1D(64, 4, border_mode='same')(x)
  x = Add()([x, branch_mid, branch_fine])
  x = BatchNormalization()(x)
  x = LeakyReLU()(x)

  # Single-channel tanh output.
  x = Convolution1D(1, 1, border_mode='same')(x)
  x = Activation('tanh')(x)
  return Model(input_noise, x)
Exemple #23
0
    def create_model(self):
        """Build a U-Net-style 1-D super-resolution graph.

        Stride-2 Conv1D downsampling blocks feed a bottleneck, then
        upsampling blocks consume the stored encoder activations as skip
        connections; a final residual add with the network input produces
        the output tensor.

        Returns:
            The output tensor ``g`` of the assembled graph.
        """
        n_filters = [128,256,512,512,512,512,512,512]
        n_filtersizes = [65,33,17,9,9,9,9,9,9]
        residual = []

        print("Building model...")
        #cubic upsampling 
        x = self.X

        #downsampling layers
        for l, nf, fs in zip(range(self.n_layer), n_filters, n_filtersizes):
            x = Conv1D(filters=nf, kernel_size=fs, strides=2, kernel_initializer=Orthogonal, bias_initializer=Orthogonal)(x)
            x = BatchNormalization()(x)
            x = LeakyReLU(0.2)(x)
            # Store the activation for the matching decoder skip connection.
            # Fix: original appended self.model(x), which wraps the tensor in
            # an unrelated call; the U-Net pattern stores the activation
            # itself. NOTE(review): confirm self.model had no intended role.
            residual.append(x)
            print('D-Block: ', x.get_shape())

        #bottleneck layers
        # Fix: nf/fs are plain ints inside the loop above; index the lists
        # for the deepest filter count / kernel size.
        x = Conv1D(filters=n_filters[-1], kernel_size=n_filtersizes[-1], strides=2, kernel_initializer=Orthogonal, bias_initializer=Orthogonal)(x)
        x = Dropout(rate=0.5)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(0.2)(x)

        #upsampling layers
        # Fix: zip() returns a non-reversible iterator in Python 3 --
        # materialize it before reversing.
        for l, nf, fs, l_in in reversed(list(zip(range(self.n_layer), n_filters, n_filtersizes, residual))):
            x = Conv1D(filters=2*nf, kernel_size=fs, kernel_initializer=Orthogonal, bias_initializer=Orthogonal)(x)
            x = BatchNormalization()(x)
            x = Dropout(rate=0.5)(x)
            x = Activation('relu')(x)
            x = UpSampling1D(size=2)(x)
            # Fix: Concatenate is a layer -- configure it, then call it on
            # the list of inputs.
            x = Concatenate(axis=-1)([x, l_in]) #residual
            print('U-Block: ', x.get_shape())

        #output layer 
        x = Conv1D(filters=2, kernel_size=9, kernel_initializer=RandomNormal, bias_initializer=RandomNormal)(x)
        x = UpSampling1D(size=2)(x)
        print(x.get_shape())
        # Fix: Add is a layer (instantiate then call), and the bare name X
        # was undefined -- the residual is taken against the input self.X.
        g = Add()([x, self.X]) #residual

        return g
Exemple #24
0
def gen_model_standard(lr=.01):
    """Standard generator: ReLU Dense stack -> (62, 4) reshape -> two 2x
    upsampling conv stages -> tanh single-channel output; compiled with
    Adam and categorical cross-entropy."""
    net = Sequential()
    net.add(Dense(units=64, input_dim=32))
    net.add(Activation('relu'))
    net.add(Dense(248))
    net.add(BatchNormalization())
    net.add(Activation('relu'))
    net.add(Reshape((62, 4), input_shape=(248, )))

    # First 2x upsampling conv stage (batch norm intentionally disabled).
    net.add(UpSampling1D(size=2))
    net.add(Conv1D(16, 3, padding='same'))
    net.add(Activation('relu'))

    # Second stage projects to one channel in [-1, 1].
    net.add(UpSampling1D(size=2))
    net.add(Conv1D(1, 3, padding='same'))
    net.add(Activation('tanh'))

    net.compile(loss="categorical_crossentropy",
                optimizer=keras.optimizers.Adam(lr=lr),
                metrics=['accuracy'])
    return net
Exemple #25
0
def generator_model_mlp_cnn():
  """Hybrid MLP+CNN generator.

  A CNN branch upsamples the Dense(128) stem 64x to an 8192-step tanh
  signal; an MLP branch maps the same stem directly to 8192 tanh values.
  The two branches are multiplied element-wise. Also builds a second model
  exposing the per-sample mean and std of the output over the time axis.

  Returns:
      (model, model_statistics): the generator Model and a Model mapping
      the same noise input to [mean, std] of the generated signal.
  """
  input_noise = Input(shape=(100,))
  model = Dense(128)(input_noise)
  # ---- CNN branch: six 2x upsampling conv blocks (128 -> 8192 steps) ----
  model_1 = Reshape((128,1))(model)
  model_1 = UpSampling1D(2) (model_1)
  model_1 = Convolution1D(64,35,border_mode='same')(model_1)
  model_1 = BatchNormalization()(model_1)
  model_1 = LeakyReLU()(model_1)
  model_1 = UpSampling1D(2) (model_1)
  model_1 = Convolution1D(64,25,border_mode='same')(model_1)
  model_1 = BatchNormalization()(model_1)
  model_1 = LeakyReLU()(model_1)
  model_1 = UpSampling1D(2) (model_1)
  model_1 = Convolution1D(64,15,border_mode='same')(model_1)
  model_1 = BatchNormalization()(model_1)
  model_1 = LeakyReLU()(model_1)
  model_1 = UpSampling1D(2) (model_1)
  model_1 = Convolution1D(64,15,border_mode='same')(model_1)
  model_1 = BatchNormalization()(model_1)
  model_1 = LeakyReLU()(model_1)
  model_1 = UpSampling1D(2) (model_1)
  model_1 = Convolution1D(64,15,border_mode='same')(model_1)
  model_1 = BatchNormalization()(model_1)
  model_1 = LeakyReLU()(model_1)
  model_1 = UpSampling1D(2) (model_1)
  model_1 = Convolution1D(64,15,border_mode='same')(model_1)
  # Multi-scale fusion: two narrower convs summed with the wide one.
  model_1_1 = Convolution1D(64,7,border_mode='same')(model_1)
  model_1_1_1 = Convolution1D(64,4,border_mode='same')(model_1)
  model_1 = Add()([model_1,model_1_1,model_1_1_1])
  model_1 = BatchNormalization()(model_1)
  model_1 = LeakyReLU()(model_1)
  model_1 = Convolution1D(1,1,border_mode='same')(model_1)
  model_1 = Activation('tanh')(model_1)
  # ---- MLP branch: direct Dense projection to the full signal ----
  model_2 = Dense(8192)(model)
  model_2 = Activation('tanh')(model_2)
  model_2 = Reshape((8192,1))(model_2)
  # Combine the branches by element-wise multiplication.
  model = Multiply()([model_1,model_2])
  from keras.layers import Lambda
  # Per-sample statistics over the time axis (axis=1).
  def mean_computation(x):
    return K.mean(x,axis=1)

  def mean_computation_output_shape(input_shape):
    # Drop the time axis: (batch, time, ch) -> (batch, ch).
    new_shape = tuple([input_shape[0],input_shape[-1]])
    return new_shape
  
  def std_computation(x):
    return K.std(x,axis=1)

  def std_computation_output_shape(input_shape):
    new_shape = tuple([input_shape[0],input_shape[-1]])
    return new_shape

  mean_layer = Lambda(mean_computation,output_shape=mean_computation_output_shape)
  std_layer = Lambda(std_computation,output_shape=std_computation_output_shape)
  mean = mean_layer(model)
  std = std_layer(model)
  model = Model(input_noise,model)
  model_statistics = Model(input_noise,[mean,std])
  return model,model_statistics
Exemple #26
0
        def UpConv1DModule(input, filters, filters_size=5, dropout = 0.0):
            """Upsampling block: stride-2 1-D transposed convolution
            (emulated via Conv2DTranspose) followed by a further 2x
            nearest-neighbour upsample and batch normalization.

            NOTE(review): the ``dropout`` argument is accepted but never
            applied.
            """
            def Conv1DTranspose(input, filters, filters_size, strides=2, padding='same'):
                # Keras has no 1-D transposed conv here: expand to 4-D,
                # apply Conv2DTranspose over the time axis, squeeze back.
                x = Lambda(lambda x: K.expand_dims(x, axis=2))(input)
                x = Conv2DTranspose(filters=filters, kernel_size=(filters_size, 1), strides=(strides, 1), padding=padding)(x)
                x = Lambda(lambda x: K.squeeze(x, axis=2))(x)
                return x

            x = Conv1DTranspose(input, filters, filters_size)
            # NOTE(review): combined with the stride-2 transpose above this
            # yields 4x upsampling per module -- confirm that is intended.
            x = UpSampling1D(size=2)(x)

            x = BatchNormalization(momentum=0.8)(x)

            return x
Exemple #27
0
def generator_model(noise_dim=100, aux_dim=47, model_name="generator"):
    """Build the conditional generator: (noise, aux) -> 400x1 tanh signal.

    The noise and auxiliary vectors are concatenated, projected to a
    10*100 vector, folded into a (100, 10) sequence, then upsampled twice
    through Conv1D stages to a length-400, single-channel output.
    """
    gen_input = Input(shape=(noise_dim, ), name="noise_input")
    aux_input = Input(shape=(aux_dim, ), name="auxilary_input")

    # Join noise with the auxiliary conditioning vector.
    h = concatenate([gen_input, aux_input], axis=-1)

    # Dense projection, then reshape so the Conv1D stack can run: 100 x 10.
    h = Dense(10 * 100)(h)
    h = BatchNormalization()(h)
    h = LeakyReLU(0.2)(h)
    h = Reshape((100, 10))(h)

    # Two conv + upsample stages: 100x250 -> 200x250, then 200x100 -> 400x100.
    for n_filters in (250, 100):
        h = Conv1D(filters=n_filters, kernel_size=13, padding='same')(h)
        h = BatchNormalization()(h)
        h = LeakyReLU(0.2)(h)
        h = UpSampling1D(size=2)(h)

    # Final single-channel projection squashed to [-1, 1]: 400 x 1.
    h = Conv1D(filters=1, kernel_size=13, padding='same')(h)
    h = BatchNormalization()(h)
    h = Activation('tanh')(h)

    return Model(outputs=[h], inputs=[gen_input, aux_input], name=model_name)
Exemple #28
0
def CAE():
    """Build and compile a 1-D convolutional autoencoder for 300x1 signals.

    Encoder: 300x1 -> Conv1D(128) -> pool -> Conv1D(64) -> pool (75x64
    bottleneck). Decoder mirrors it back to 300x1. Compiled with Adadelta
    and MSE loss.

    Returns:
        A compiled keras Model mapping (300, 1) inputs to (300, 1) outputs.
    """
    # Encoder
    input_sig = Input(shape=(300, 1))
    e = Conv1D(128, 12, strides=1, activation='relu',
               padding="same")(input_sig)
    e1 = MaxPooling1D(2)(e)
    e2 = Conv1D(64, 6, strides=1, activation='relu', padding="same")(e1)
    e3 = MaxPooling1D(2)(e2)

    # (Translated from Vietnamese:) the bottleneck e3 is where data could be
    # taken to feed a downstream 1-D CNN.
    #flat = Flatten()(x3)

    # Decoder
    d3 = UpSampling1D(2)(e3)
    d2 = Conv1D(128, 6, strides=1, activation='relu', padding="same")(d3)
    d1 = UpSampling1D(2)(d2)
    decoded = Conv1D(1, 12, strides=1, activation='relu', padding="same")(d1)

    autoencoder = Model(input_sig, decoded)
    autoencoder.compile(optimizer='adadelta', loss='mse')
    # Model.summary() prints to stdout itself and returns None; wrapping it in
    # print() emitted the table first, then the banner and a stray "None".
    # Print the banner first, then let summary() do its own printing.
    print("^^^^^^^^^^^^^^^^ autoencoder ^^^^^^^^^^^^^^^^ ")
    autoencoder.summary()
    return autoencoder
Exemple #29
0
    def build_generator(self):
        """Build the generator: latent vector -> 196-step 1-D signal.

        Fix: the original reshaped the Dense output to (7, 7, 128) — a
        rank-4 tensor that UpSampling1D and Conv1D (rank-3 layers) cannot
        consume, so the model could never be built. Reshape to (7*7, 128)
        instead, keeping the 1-D layer stack valid. Output length is
        7*7 * 2 * 2 = 196 timesteps with self.channels channels, in [-1, 1].

        Returns:
            Model mapping a (latent_dim,) noise input to the generated signal.
        """
        model = Sequential()

        model.add(
            Dense(128 * 7 * 7, activation="relu", input_dim=self.latent_dim))
        # Rank-3 (steps, channels) so the 1-D layers below accept it.
        model.add(Reshape((7 * 7, 128)))
        model.add(UpSampling1D())  # 49 -> 98
        model.add(Conv1D(128, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(UpSampling1D())  # 98 -> 196
        model.add(Conv1D(64, kernel_size=3, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Activation("relu"))
        model.add(Conv1D(self.channels, kernel_size=3, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim, ))
        img = model(noise)

        return Model(noise, img)
    def build_generator(self):
        """Build the label-conditioned generator sub-model.

        Maps [noise (latent_dim,), label (4,)] to a generated signal. The
        label is projected through Dense(4), repeated np.prod(signal_shape)//4
        times, flattened, and multiplied element-wise with the noise before
        entering the sequential stack.

        NOTE(review): multiply() needs the flattened label embedding
        (np.prod(self.signal_shape) elements) to equal self.latent_dim, and
        the Dense input_dim below also assumes latent_dim-sized input —
        confirm these agree in the caller.
        """

        model = Sequential()

        model.add(Dense(128 * 125, input_dim=self.latent_dim))
        model.add(Reshape((125, 128)))
        # 5
        # NOTE(review): each UpSampling1D (x2) below is immediately undone by
        # the strides=2 convolution, so blocks 5-8 keep the length at 125;
        # only the strides=1 blocks (9 and the output) double it. Presumably
        # intentional smoothing — verify against the discriminator's input.
        model.add(UpSampling1D())
        model.add(Conv1D(128, kernel_size=3, strides=2, padding="same"))
        model.add(InstanceNormalization())
        model.add(LeakyReLU(alpha=0.2))

        model.add(UpSampling1D())
        model.add(Conv1D(64, kernel_size=3, strides=2, padding="same"))
        model.add(InstanceNormalization())
        model.add(LeakyReLU(alpha=0.2))
        # 7
        model.add(UpSampling1D())
        model.add(Conv1D(64, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        # 8
        model.add(UpSampling1D())
        model.add(Conv1D(64, kernel_size=3, strides=2, padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        # 9 (strides=1: length 125 -> 250)
        model.add(UpSampling1D())
        model.add(Conv1D(64, kernel_size=3, strides=1, padding="same"))
        model.add(LeakyReLU(alpha=0.2))

        # Final upsample + projection to self.channels output channels.
        model.add(UpSampling1D())
        model.add(Conv1D(self.channels, kernel_size=3, padding="same"))
        # change to LeakyRelu since a lot of signal exceeding 1
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=(self.latent_dim, ))
        # img = model(noise)

        label = Input(shape=(4, ))

        # Earlier conditioning experiments kept for reference:
        # label_embedding = Flatten()(Embedding(4,np.prod(self.signal_shape)//4,input_length=4)(label))

        # label_embedding = Flatten()(Embedding(4,np.prod(self.signal_shape)//4)(label))
        label_embedding = Flatten()(RepeatVector(
            np.prod(self.signal_shape) // 4)(Dense(4)(label)))
        model_input = multiply([noise, label_embedding])
        # label_embedding = Flatten()(RepeatVector(self.latent_dim)(label))
        # print("@",label.shape,label_embedding.shape,noise.shape)
        # model_input = concatenate([noise, label])
        # model_input = noise
        img = model(model_input)

        return Model([noise, label], img)