def training_step(self, sess, batch_index, piano, orch, mask_orch, summarize_dict):
    """Run one masked training step.

    Builds the feed dict via Standard_trainer, draws a random binary mask of
    "known" orchestra units per sample, then runs the train op.

    Args:
        sess: TensorFlow session.
        batch_index: indices of the frames forming this batch.
        piano, orch: piano-roll matrices (time x pitch).
        mask_orch: orchestra mask forwarded to build_feed_dict.
        summarize_dict: {'bool': whether to fetch summaries,
                         'merged_node': merged TF summary node}.

    Returns:
        (loss_batch, preds_batch, debug_outputs, summary) where summary is
        None when summarize_dict['bool'] is False.
    """
    # NOTE(review): `duration_piano` is neither a parameter nor a local here;
    # it must be a module/class-level name or this raises NameError — confirm
    # against the rest of the file.
    feed_dict, orch_t = Standard_trainer.build_feed_dict(
        self, batch_index, piano, orch, duration_piano, mask_orch)
    feed_dict[self.keras_learning_phase] = True

    # Generate a mask for the input: for each sample, d units (d uniform in
    # [0, orch_dim]) are marked as known.
    batch_size, orch_dim = orch_t.shape
    mask = np.zeros_like(orch_t)
    for batch_ind in range(batch_size):
        # Number of known units for this sample
        d = random.randint(0, orch_dim)
        # Indices of the known units (duplicates possible, which merely
        # lowers the effective number of known units).
        # FIX: np.random.random_integers(0, orch_dim-1, ...) is deprecated
        # (removed in NumPy >= 1.25); randint with a half-open upper bound
        # is the exact drop-in replacement.
        ind = np.random.randint(0, orch_dim, (d,))
        mask[batch_ind, ind] = 1
    feed_dict[self.mask_input] = mask
    # No need to mask orch_t here, it's done in the TensorFlow graph
    feed_dict[self.orch_pred] = orch_t

    SUMMARIZE = summarize_dict['bool']
    merged_node = summarize_dict['merged_node']
    if SUMMARIZE:
        _, loss_batch, preds_batch, sparse_loss_batch, summary = sess.run(
            [self.train_step, self.loss, self.preds,
             self.sparse_loss_mean, merged_node],
            feed_dict)
    else:
        _, loss_batch, preds_batch, sparse_loss_batch = sess.run(
            [self.train_step, self.loss, self.preds, self.sparse_loss_mean],
            feed_dict)
        summary = None

    debug_outputs = [sparse_loss_batch]
    return loss_batch, preds_batch, debug_outputs, summary
def valid_step(self, sess, batch_index, piano, orch, mask_orch, PLOTING_FOLDER):
    """Run one validation step on a subsample of the batch indices.

    Only every 20th index is evaluated (mean-ordering generation is costly);
    returns the batch loss, the predictions and the ground-truth orchestra.
    """
    subsampled_index = batch_index[::20]
    # NOTE(review): `duration_piano` is not defined in this scope — presumably
    # a module/class-level name; verify against the rest of the file.
    feed_dict, orch_t = Standard_trainer.build_feed_dict(
        self, subsampled_index, piano, orch, duration_piano, mask_orch)
    loss_batch, preds_batch = self.generate_mean_ordering(
        sess, feed_dict, orch_t, PLOTING_FOLDER)
    return loss_batch, preds_batch, orch_t
def training_step(self, sess, batch_index, piano, orch, mask_orch, summarize_dict):
    """Run one masked training step.

    Builds the feed dict via Standard_trainer, draws a random binary mask of
    "known" orchestra units per sample, then runs the train op.

    Args:
        sess: TensorFlow session.
        batch_index: indices of the frames forming this batch.
        piano, orch: piano-roll matrices (time x pitch).
        mask_orch: orchestra mask forwarded to build_feed_dict.
        summarize_dict: {'bool': whether to fetch summaries,
                         'merged_node': merged TF summary node}.

    Returns:
        (loss_batch, preds_batch, debug_outputs, summary) where summary is
        None when summarize_dict['bool'] is False.
    """
    # NOTE(review): `duration_piano` is neither a parameter nor a local here;
    # it must be a module/class-level name or this raises NameError — confirm
    # against the rest of the file.
    feed_dict, orch_t = Standard_trainer.build_feed_dict(
        self, batch_index, piano, orch, duration_piano, mask_orch)
    feed_dict[self.keras_learning_phase] = True

    # Generate a mask for the input: for each sample, d units (d uniform in
    # [0, orch_dim]) are marked as known.
    batch_size, orch_dim = orch_t.shape
    mask = np.zeros_like(orch_t)
    for batch_ind in range(batch_size):
        # Number of known units for this sample
        d = random.randint(0, orch_dim)
        # Indices of the known units (duplicates possible, which merely
        # lowers the effective number of known units).
        # FIX: np.random.random_integers(0, orch_dim-1, ...) is deprecated
        # (removed in NumPy >= 1.25); randint with a half-open upper bound
        # is the exact drop-in replacement.
        ind = np.random.randint(0, orch_dim, (d,))
        mask[batch_ind, ind] = 1

    # (A commented-out Jacobian check used to live here; it confirmed that
    # gradients of masked units are zero. Removed as dead debug code.)

    feed_dict[self.mask_input] = mask
    # No need to mask orch_t here, it's done in the TensorFlow graph
    feed_dict[self.orch_pred] = orch_t

    SUMMARIZE = summarize_dict['bool']
    merged_node = summarize_dict['merged_node']
    if SUMMARIZE:
        _, loss_batch, preds_batch, sparse_loss_batch, summary = sess.run(
            [self.train_step, self.loss, self.preds,
             self.sparse_loss_mean, merged_node],
            feed_dict)
    else:
        _, loss_batch, preds_batch, sparse_loss_batch = sess.run(
            [self.train_step, self.loss, self.preds, self.sparse_loss_mean],
            feed_dict)
        summary = None

    debug_outputs = [sparse_loss_batch]
    return loss_batch, preds_batch, debug_outputs, summary