def virtual_adversarial_loss(self, x, adj, logit):
    """Virtual adversarial loss using the precomputed perturbation ``self.r_vadv``.

    Perturbs the node features ``x`` by ``self.r_vadv``, re-runs the forward
    pass on the perturbed input, and returns the KL divergence between the
    clean logits (treated as a constant target) and the perturbed logits.

    Args:
        x: node feature tensor.
        adj: graph adjacency structure passed through to ``self.forward``.
        logit: logits from the clean (unperturbed) forward pass.

    Returns:
        Scalar KL-divergence loss between clean and perturbed predictions.
    """
    # The clean prediction is the fixed target: block gradients through it so
    # only the perturbed branch contributes to parameter updates.
    target_logit = tf.stop_gradient(logit)
    perturbed_logit = self.forward(x + self.r_vadv, adj)
    return kl_divergence_with_logit(target_logit, perturbed_logit)
def virtual_adversarial_loss(self, x, adj, logit, epsilon):
    """Virtual adversarial loss with a purely random perturbation direction.

    Draws an isotropic Gaussian direction, normalizes it, scales it to
    magnitude ``epsilon``, and measures the KL divergence between the clean
    logits (held constant) and the logits of the perturbed input. Unlike the
    power-iteration variant, the direction is not refined adversarially.

    Args:
        x: node feature tensor.
        adj: graph adjacency structure passed through to ``self.forward``.
        logit: logits from the clean (unperturbed) forward pass.
        epsilon: perturbation magnitude.

    Returns:
        Scalar KL-divergence loss between clean and perturbed predictions.
    """
    noise = tf.random.normal(shape=tf.shape(x), dtype=self.floatx)
    perturbation = get_normalized_vector(noise) * epsilon
    # Stop gradients through the clean logits so they act as a fixed target.
    target_logit = tf.stop_gradient(logit)
    perturbed_logit = self.forward(x + perturbation, adj)
    return kl_divergence_with_logit(target_logit, perturbed_logit)
def virtual_adversarial_loss(self, x, adj, logit, adv_mask):
    """Virtual adversarial loss with power-iteration perturbation search.

    Estimates the adversarial direction via ``self.n_power_iterations`` rounds
    of power iteration: at each round the current direction is normalized,
    scaled by ``self.xi``, and replaced by the gradient of the masked KL
    divergence with respect to that direction. The final direction, scaled to
    ``self.epsilon``, perturbs the input for the loss computation.

    Args:
        x: node feature tensor.
        adj: graph adjacency structure passed through to ``self.forward``.
        logit: logits from the clean (unperturbed) forward pass.
        adv_mask: mask selecting which nodes contribute to the divergence.

    Returns:
        Scalar KL-divergence loss between clean and adversarial predictions.
    """
    # Start from a random isotropic direction.
    direction = tf.random.normal(shape=tf.shape(x), dtype=self.floatx)
    for _ in range(self.n_power_iterations):
        # Small step of size xi along the (normalized) current direction.
        direction = get_normalized_vector(direction) * self.xi
        with tf.GradientTape() as tape:
            tape.watch(direction)
            perturbed_logit = self.forward(x + direction, adj)
            divergence = kl_divergence_with_logit(logit, perturbed_logit, adv_mask)
        # The gradient w.r.t. the direction approximates the dominant
        # eigenvector of the local Hessian; detach it from the graph.
        direction = tf.stop_gradient(tape.gradient(divergence, direction))
    # Final adversarial perturbation of magnitude epsilon.
    r_vadv = get_normalized_vector(direction) * self.epsilon
    clean_logit = tf.stop_gradient(logit)
    adv_logit = self.forward(x + r_vadv, adj)
    return tf.identity(kl_divergence_with_logit(clean_logit, adv_logit, adv_mask))
def virtual_adversarial_loss(self, x, adj, logit, adv_mask):
    """Virtual adversarial loss (power iteration) driven through ``self.model``.

    Same power-iteration scheme as the ``self.forward``-based variant, but the
    forward passes go through the Keras model directly as
    ``model([features, adj, self.index_all])``.

    Args:
        x: node feature tensor.
        adj: graph adjacency structure fed to the model.
        logit: logits from the clean (unperturbed) forward pass.
        adv_mask: mask selecting which nodes contribute to the divergence.

    Returns:
        Scalar KL-divergence loss between clean and adversarial predictions.
    """
    direction = tf.random.normal(shape=tf.shape(x), dtype=self.floatx)
    net = self.model
    for _ in range(self.n_power_iterations):
        direction = get_normalized_vector(direction) * self.xi
        with tf.GradientTape() as tape:
            tape.watch(direction)
            perturbed_logit = net([x + direction, adj, self.index_all], training=True)
            divergence = kl_divergence_with_logit(logit, perturbed_logit, adv_mask)
        # Replace the direction by the (detached) gradient of the divergence.
        direction = tf.stop_gradient(tape.gradient(divergence, direction))
    r_vadv = get_normalized_vector(direction) * self.epsilon
    clean_logit = tf.stop_gradient(logit)
    # NOTE(review): the power-iteration passes use training=True while this
    # final pass uses the default training mode — confirm this asymmetry is
    # intentional (e.g. dropout noise wanted only during direction search).
    adv_logit = net([x + r_vadv, adj, self.index_all])
    return kl_divergence_with_logit(clean_logit, adv_logit, adv_mask)