Beispiel #1
0
def form_discriminator():
    """Build the LSTM-based discriminator, dump its architecture as JSON, and return it.

    Relies on module-level globals: seq_length, ncell, nlayer, filepath.
    Side effect: writes '{filepath}/model_dis.json' and prints a model summary.
    """
    model = Sequential()
    # Only the first LSTM carries the input shape and non-zero
    # kernel/recurrent L2 regularization.
    model.add(
        LSTM(input_shape=(seq_length, 1),
             units=ncell,
             unit_forget_bias=True,
             return_sequences=True,
             kernel_regularizer=l2(0.01),
             recurrent_regularizer=l2(0.01),
             bias_regularizer=l2(0.00)))
    for _ in range(nlayer - 1):
        model.add(
            LSTM(units=ncell,
                 unit_forget_bias=True,
                 return_sequences=True,
                 kernel_regularizer=l2(0.00),
                 recurrent_regularizer=l2(0.00),
                 bias_regularizer=l2(0.00)))
    model.add(Dense(units=1))
    model.add(Activation('sigmoid'))
    # Average the per-timestep sigmoid scores into one score per sequence.
    model.add(pooling.AveragePooling1D(pool_size=seq_length, strides=None))

    model.summary()
    model_json = model.to_json()
    with open('{0}/model_dis.json'.format(filepath), 'w') as f:
        # BUG FIX: the original rebound the file handle (`f = json.dump(...)`),
        # discarding it mid-`with`; json.dump returns None. Keep the dump call
        # itself unchanged so the on-disk format stays identical.
        json.dump(model_json, f)
    return model
Beispiel #2
0
    def build_network(self):
        """Stack bidirectional LSTMs, then a sigmoid head and average pooling.

        Uses module-level globals: cell_num, lstm_num, signal_len.
        """
        net = Sequential()
        # The first layer must declare the input shape; later layers infer it.
        net.add(Bidirectional(
            LSTM(units=cell_num, unit_forget_bias=True, return_sequences=True),
            input_shape=(signal_len, 1)))
        for _ in range(lstm_num - 1):
            net.add(Bidirectional(
                LSTM(units=cell_num, unit_forget_bias=True,
                     return_sequences=True)))
        net.add(Dense(units=1, activation='sigmoid'))
        # Collapse the per-timestep outputs into a single averaged score.
        net.add(pooling.AveragePooling1D(pool_size=signal_len, strides=None))

        return net
Beispiel #3
0
 def build_time_discriminator(self):
     """Functional-API LSTM discriminator over (seq_length, feature_count + class_num) inputs.

     Uses module-level globals: seq_length, feature_count, class_num, ncell, nlayer.
     """
     seq_in = Input(shape=(seq_length, feature_count + class_num))
     x = LSTM(units=ncell, use_bias=True, unit_forget_bias=True,
              return_sequences=True, recurrent_regularizer=l2(0.01))(seq_in)
     for _ in range(nlayer - 1):
         x = LSTM(units=ncell, use_bias=True, unit_forget_bias=True,
                  return_sequences=True, recurrent_regularizer=l2(0.01))(x)
     x = Dense(units=1, activation='sigmoid')(x)
     # Average the per-timestep scores into one output per sequence.
     x = pooling.AveragePooling1D(pool_size=seq_length, strides=None)(x)
     return Model(inputs=seq_in, outputs=x)
Beispiel #4
0
 def create_emb_layer(self):
     """Embed n-gram ids, convolve, and average-pool.

     Returns a triple: ([input_layer], embedding_layer, pooled_output).
     NOTE(review): uses old Keras 1.x kwargs (init=, border_mode=).
     """
     word_in = Input(shape=(self.max_ngram_num, ),
                     dtype='int32',
                     name="inputword")
     embedding = embeddings.Embedding(output_dim=self.vector_size,
                                      input_dim=self.ngram_size,
                                      init="uniform",
                                      name="input_layer")
     embedded = embedding(word_in)
     convolved = convolutional.Convolution1D(self.vector_size, 30,
                                             border_mode='same')(embedded)
     pooled = pooling.AveragePooling1D(self.max_ngram_num,
                                       border_mode="same")(convolved)
     return ([word_in], embedding, pooled)
Beispiel #5
0
 def build_discriminator(self):
     """LSTM discriminator over (seq_length, output_count) sequences.

     Uses module-level globals: seq_length, output_count, ncell, nlayer.
     """
     seq_in = Input(shape=(seq_length, output_count))
     # nlayer identically-configured stacked LSTMs (the original wrote the
     # first one out of the loop; the configuration is the same for all).
     x = seq_in
     for _ in range(nlayer):
         x = LSTM(units=ncell,
                  use_bias=True,
                  unit_forget_bias=False,
                  return_sequences=True,
                  recurrent_regularizer=l2(0.01))(x)
     x = Dense(units=1, activation='sigmoid')(x)
     # Average the per-timestep scores into a single output.
     x = pooling.AveragePooling1D(pool_size=seq_length, strides=None)(x)
     return Model(seq_in, x)
Beispiel #6
0
 def create_emb_layer(self):
     """Masked embedding -> explicit zero-masking -> Conv1D -> average pooling.

     Returns a triple: ([input_layer], embedding_layer, pooled_output).
     NOTE(review): uses old Keras 1.x kwargs (init=, border_mode=).
     """
     word_in = Input(shape=(self.max_ngram_num, ),
                     dtype='int32',
                     name="inputword")
     embedding = embeddings.Embedding(output_dim=self.vector_size,
                                      input_dim=self.ngram_size,
                                      init="uniform",
                                      mask_zero=True,
                                      name="input_layer")
     embedded = embedding(word_in)
     # Replace masked (padding) timesteps with explicit zeros so the
     # convolution below does not see embedding values for padding ids.
     masker = ZeroMaskedEntries()
     masker.build((None, self.max_ngram_num, self.vector_size))
     masked = masker(embedded)
     convolved = convolutional.Convolution1D(self.vector_size, 30,
                                             border_mode='same')(masked)
     pooled = pooling.AveragePooling1D(self.max_ngram_num,
                                       border_mode="same")(convolved)
     return ([word_in], embedding, pooled)
Beispiel #7
0
    def build_freq_discriminator(self):
        """Build an LSTM discriminator over variable-length feature sequences.

        Also precomputes a DFT cosine/sine basis (cos_mat/sin_mat) that is
        currently unused — presumably intended for the commented-out
        frequency-domain Lambda layer. Uses module-level globals:
        feature_count, ncell, nlayer, seq_length.
        """
        len_src = self.y.shape[1]
        # BUG FIX: original was `np.argmin(0, 1, 1/len_src)` — an invalid
        # call (argmin takes an array, not start/stop/step). The arguments
        # match np.arange: sample times in [0, 1) spaced 1/len_src apart.
        t = np.arange(0, 1, 1 / len_src)
        omega = 2 * np.pi * np.arange(0, len_src)
        # Broadcast t across the frequency axis (len_src x len_src matrix).
        t = np.ones((t.shape[0], omega.shape[0])) * t
        # NOTE(review): cos_mat/sin_mat are never used below — verify intent.
        cos_mat = np.cos(omega * t.T)
        sin_mat = np.sin(omega * t.T)

        inputs = Input(shape=(None, feature_count))
        x = LSTM(units=ncell, use_bias=True, unit_forget_bias=True,
                 return_sequences=True, recurrent_regularizer=l2(0.01))(inputs)
        for _ in range(nlayer - 1):
            x = LSTM(units=ncell, use_bias=True, unit_forget_bias=True,
                     return_sequences=True,
                     recurrent_regularizer=l2(0.01))(x)
        x = Dense(units=1, activation='sigmoid')(x)
        # Average the per-timestep scores into one output per sequence.
        x = pooling.AveragePooling1D(pool_size=seq_length, strides=None)(x)
        return Model(inputs=inputs, outputs=x)
Beispiel #8
0
    def create_emb_layer(self):
        """Shared masked embedding over four n-gram inputs (orders 3..6),
        each with its own Conv1D + average pooling, merged by element-wise
        average.

        Returns a triple: (input_layers, embedding_layer, merged_output).
        NOTE(review): uses old Keras 1.x kwargs (init=, border_mode=, Merge).
        """
        # One Input per n-gram order; layer names stay "inputword3".."inputword6".
        inputs = [Input(shape=(self.max_ngram_one_class, ),
                        dtype='int32',
                        name="inputword%d" % order)
                  for order in range(3, 7)]

        # A single embedding layer shared by all four inputs (tied weights).
        embedding = embeddings.Embedding(output_dim=self.vector_size,
                                         input_dim=self.ngram_size,
                                         init="uniform",
                                         mask_zero=True,
                                         name="input_layer")
        embedded = [embedding(inp) for inp in inputs]

        # Shared zero-masking: replace padded timesteps with explicit zeros.
        masker = ZeroMaskedEntries()
        masker.build((None, self.max_ngram_num, self.vector_size))
        masked = [masker(emb) for emb in embedded]

        # Each branch gets its OWN Conv1D (independent weights) and pooling,
        # matching the original four copy-pasted branches.
        pooled = []
        for branch in masked:
            convolved = convolutional.Convolution1D(self.vector_size, 10,
                                                    border_mode='same')(branch)
            pooled.append(
                pooling.AveragePooling1D(self.max_ngram_one_class,
                                         border_mode="same")(convolved))

        merged = Merge(mode='ave', concat_axis=1)(pooled)
        return (inputs, embedding, merged)