def test_nn_operations(self):
    check_single_tensor_operation('relu', (4, 2), alpha=0.1, max_value=0.5)
    check_single_tensor_operation('softmax', (4, 10))
    check_single_tensor_operation('softplus', (4, 10))
    check_single_tensor_operation('elu', (4, 10), alpha=0.5)
    check_single_tensor_operation('sigmoid', (4, 2))
    check_single_tensor_operation('hard_sigmoid', (4, 2))
    check_single_tensor_operation('tanh', (4, 2))

    # dropout
    val = np.random.random((100, 100))
    xth = KTH.variable(val)
    xtf = KTF.variable(val)
    zth = KTH.eval(KTH.dropout(xth, level=0.2))
    ztf = KTF.eval(KTF.dropout(xtf, level=0.2))
    assert zth.shape == ztf.shape
    # dropout patterns are different, only check mean
    assert np.abs(zth.mean() - ztf.mean()) < 0.05

    check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), from_logits=True)
    check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), from_logits=True)
    check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), from_logits=False)
    check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), from_logits=False)

    check_single_tensor_operation('l2_normalize', (4, 3), axis=-1)
    check_single_tensor_operation('l2_normalize', (4, 3), axis=1)
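# The test above relies on `check_single_tensor_operation` and
# `check_two_tensor_operation`, which are defined elsewhere in the test module.
# A minimal sketch of what such a helper typically does, assuming `KTH` and
# `KTF` are the Theano and TensorFlow backend modules and that the two backends
# should agree numerically; the real helper may differ in details.
def check_single_tensor_operation(function_name, shape, **kwargs):
    # Evaluate the same backend op on identical random input in both backends.
    val = np.random.random(shape) - 0.5
    xth = KTH.variable(val)
    xtf = KTF.variable(val)

    zth = KTH.eval(getattr(KTH, function_name)(xth, **kwargs))
    ztf = KTF.eval(getattr(KTF, function_name)(xtf, **kwargs))

    # Results must have the same shape and (approximately) the same values.
    assert zth.shape == ztf.shape
    assert np.allclose(zth, ztf, atol=1e-05)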
def build_model(self):
    initializer = keras.initializers.glorot_normal(seed=None)

    # Sequential input: (batch, timesteps, input_dim)
    corrupted_poses = Input(shape=(None, self.input_dim))
    # Denoise each frame independently with the pretrained DAE
    recon_poses = layers.TimeDistributed(self.DAE_model)(corrupted_poses)
    print(recon_poses.shape)
    # Apply dropout to the reconstructed poses (the original referenced an
    # undefined `poses` here, which would raise a NameError)
    poses = layers.Lambda(lambda x: K.dropout(x, level=self.dropout_rate))(recon_poses)

    if self.GPU:
        encoder = CuDNNLSTM(self.hidden_dim, return_state=True,
                            kernel_regularizer=regularizers.l2(self.W_regularizer_val),
                            kernel_initializer=initializer)
        decoder = CuDNNLSTM(self.hidden_dim, return_sequences=True,
                            kernel_regularizer=regularizers.l2(self.W_regularizer_val),
                            go_backwards=True, kernel_initializer=initializer)
    else:
        encoder = LSTM(self.hidden_dim, return_state=True,
                       kernel_regularizer=regularizers.l2(self.W_regularizer_val),
                       kernel_initializer=initializer)
        decoder = LSTM(self.hidden_dim, return_sequences=True,
                       kernel_regularizer=regularizers.l2(self.W_regularizer_val),
                       go_backwards=True, kernel_initializer=initializer)

    encoder_outputs, state_h, state_c = encoder(poses)
    encoder_states = [state_h, state_c]  # the final encoder states
    en_out = layers.Lambda(lambda x: x[:, None, :])(encoder_outputs)  # reshape to (?, 1, hidden_dim)

    if self.VAE:
        print('VAE approach indeed')
        z_mean = Dense(self.latent_dim, name='z_mean')(state_h)
        z_log_var = Dense(self.latent_dim, name='z_log_var')(state_h)
        z = layers.Lambda(sampling, name='z', output_shape=(self.latent_dim,))([z_mean, z_log_var])
        if self.latent_dim < self.hidden_dim:
            # Project the sampled latent back up to the decoder's state size
            h = Dense(self.hidden_dim, name='sampled_representation')(z)
        else:
            h = state_h  # note: the sampled z is not used in this branch
        initial_state = [h, state_c]
    else:
        initial_state = encoder_states

    # Decoder consumes the dropped-out poses shifted by one timestep
    decoder_inputs = layers.Lambda(lambda x: x[:, 1:, :])(poses)
    decoder_outputs = decoder(decoder_inputs, initial_state=initial_state)

    if self.concat_h:
        # Prepend the encoder output to the decoder sequence along the time axis
        ts_dense_inputs = layers.concatenate([en_out, decoder_outputs], 1)
    else:
        ts_dense_inputs = decoder_outputs

    ts_dense = layers.TimeDistributed(layers.Dense(self.input_dim, kernel_initializer=initializer))
    ts_dense_inputs = Activation('relu')(ts_dense_inputs)
    pred = ts_dense(ts_dense_inputs)

    if self.VAE:
        if self.kl_weight:
            self.vae_loss = compute_vae_loss(z_log_var, z_mean, self.kl_weight)
        else:
            print('Error: need to input a value for kl_weight in VAE')
    else:
        self.vae_loss = None

    self.model = Model(corrupted_poses, pred)
    if self.VAE:
        self.enc_model_pre_inference = Model(corrupted_poses, en_out)
        self.enc_model = Model(corrupted_poses, [z_mean, z_log_var])
    else:
        self.enc_model = Model(corrupted_poses, en_out)
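# `build_model` references `sampling` and `compute_vae_loss`, which are not
# shown in this section. A minimal sketch, assuming the standard
# reparameterization trick and an MSE reconstruction loss plus a
# kl_weight-scaled KL divergence term; the actual definitions in the original
# code may differ.
def sampling(args):
    # Reparameterization trick: z = mean + exp(0.5 * log_var) * eps
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=K.shape(z_mean))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon


def compute_vae_loss(z_log_var, z_mean, kl_weight):
    # Returns a Keras-compatible loss closure over the latent statistics.
    def vae_loss(y_true, y_pred):
        recon = K.mean(K.square(y_true - y_pred), axis=-1)
        kl = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return recon + kl_weight * kl
    return vae_loss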