    def make_loss(self, ll_constraint=True, ll_cutoff=0.004):
        """
        Construct the update of the adjacency matrix based on the meta gradients.

        Parameters
        ----------
        ll_constraint: bool
            Whether to enforce the unnoticeability constraint on the degree distribution.

        ll_cutoff: float
            Cutoff value for the unnoticeability constraint. Smaller means stricter constraint. 0.004 corresponds to a
            p-value of roughly 0.95 in the Chi-square distribution with one degree of freedom.

        """
        with self.graph.as_default():

            logits_attack = tf.gather(self.logits_final, self.idx_attack)
            labels_atk = tf.gather(self.labels_onehot, self.idx_attack)
            attack_loss_per_node = tf.nn.softmax_cross_entropy_with_logits_v2(
                logits=logits_attack, labels=labels_atk)
            attack_loss = tf.reduce_mean(attack_loss_per_node)

            # Meta gradient computation.
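            # Multiplying by (A * -2 + 1) flips the gradient sign for existing
            # edges: entries with A_ij = 1 can only be removed, entries with
            # A_ij = 0 can only be inserted, so a large positive score always
            # means "flipping this entry increases the attack loss".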
            self.adjacency_meta_grad = tf.multiply(
                tf.gradients(attack_loss, self.adjacency_changes)[0],
                tf.reshape(self.modified_adjacency, [-1]) * -2 + 1,
                name="Meta_gradient")

            # Make sure that the minimum entry is 0.
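            # (Shifting all scores to be non-negative ensures that entries
            # zeroed out by the masks below are never preferred by the argmax.)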
            self.adjacency_meta_grad -= tf.reduce_min(self.adjacency_meta_grad)

            # Set entries to 0 that could lead to singleton nodes.
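            # (Removing a node's last remaining edge would create a singleton,
            # which is easy to detect; the mask zeroes those candidate flips.)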
            singleton_mask = self.filter_potential_singletons()
            self.adjacency_meta_grad = tf.multiply(self.adjacency_meta_grad,
                                                   singleton_mask)

            if ll_constraint:
                print("Enforcing likelihood ratio constraint with cutoff {}".
                      format(ll_cutoff))
                allowed_mask, self.ll_ratio = self.log_likelihood_constraint(
                    ll_cutoff)

                # Set entries to 0 that would violate the log likelihood constraint.
                self.adjacency_meta_grad = tf.multiply(
                    self.adjacency_meta_grad, allowed_mask)

            # Get argmax of the meta gradients.
            adj_meta_grad_argmax = tf.argmax(self.adjacency_meta_grad)

            # Compute the index corresponding to the reverse direction of the edge (i.e. in the other triangle of the
            # matrix).
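            # Example (assuming row-major order, as in np.unravel_index): for
            # N = 3, flat index 5 unravels to (1, 2); reversing to (2, 1) and
            # raveling again gives flat index 7, the transposed entry.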
            adj_argmax_transpose_ix = utils.ravel_index(
                utils.unravel_index_tf(adj_meta_grad_argmax,
                                       [self.N, self.N])[::-1],
                [self.N, self.N])
            # Stack both indices to make sure our matrix remains symmetric.
            adj_argmax_combined = tf.stack(
                [adj_meta_grad_argmax, adj_argmax_transpose_ix],
                name="Meta_grad_argmax_combined")

            # Add the change to the perturbations.
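            # -2 * a + 1 maps a current value of 0 to +1 (insert the edge) and
            # a current value of 1 to -1 (remove it), applied at both symmetric
            # positions so the perturbed adjacency stays symmetric.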
            self.adjacency_meta_update = tf.scatter_add(
                self.adjacency_changes,
                indices=adj_argmax_combined,
                updates=-2 *
                tf.gather(tf.reshape(self.modified_adjacency, [-1]),
                          adj_argmax_combined) + 1)

            if self.attack_features:
                # Get meta gradients of the attributes.
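                # Same sign-flip trick as for the adjacency; note that this
                # (and the +1/-1 update below) assumes binary attributes.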
                self.attribute_meta_grad = tf.multiply(
                    tf.gradients(attack_loss, self.attribute_changes)[0],
                    tf.reshape(self.attributes, [-1]) * -2 + 1)
                self.attribute_meta_grad -= tf.reduce_min(
                    self.attribute_meta_grad)

                attribute_meta_grad_argmax = tf.argmax(
                    self.attribute_meta_grad)

                self.attribute_meta_update = tf.scatter_add(
                    self.attribute_changes,
                    indices=attribute_meta_grad_argmax,
                    updates=-2 * tf.gather(tf.reshape(self.attributes, [-1]),
                                           attribute_meta_grad_argmax) + 1)

                adjacency_meta_grad_max = tf.reduce_max(
                    self.adjacency_meta_grad)
                attribute_meta_grad_max = tf.reduce_max(
                    self.attribute_meta_grad)

                # If the largest structure meta gradient exceeds the largest
                # attribute meta gradient, we perform a structure perturbation;
                # otherwise we change an attribute.
                cond = adjacency_meta_grad_max > attribute_meta_grad_max

                self.combined_update = tf.cond(
                    cond, lambda: self.adjacency_meta_update,
                    lambda: self.attribute_meta_update)
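
    # Usage sketch (hypothetical driver, not part of the original code): after
    # running the inner training steps, one outer perturbation evaluates the
    # update op, e.g.
    #
    #     for _ in range(n_perturbations):   # n_perturbations: assumed name
    #         run_inner_training(session)    # hypothetical helper
    #         session.run(model.combined_update if model.attack_features
    #                     else model.adjacency_meta_update)
    #
    # assuming an active tf.Session `session` and a built instance `model`.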

    def make_loss(self, ll_constraint=True, ll_cutoff=0.004):
        """
        Construct the update of the adjacency matrix based on the (approximate) meta gradients.

        Parameters
        ----------
        ll_constraint: bool
            Whether to enforce the unnoticeability constraint on the degree distribution.

        ll_cutoff: float
            Cutoff value for the unnoticeability constraint. Smaller means stricter constraint. 0.004 corresponds to a
            p-value of roughly 0.95 in the Chi-square distribution with one degree of freedom.

        """

        with self.graph.as_default():

            logits_labeled = tf.gather(self.logits, self.idx_labeled)
            labels_train = tf.gather(self.labels_onehot, self.idx_labeled)
            logits_attack = tf.gather(self.logits, self.idx_unlabeled)
            labels_attack = tf.gather(self.labels_onehot, self.idx_unlabeled)

            loss_labeled = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(
                    logits=logits_labeled, labels=labels_train))
            loss_attack = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(
                    logits=logits_attack, labels=labels_attack))

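            # lambda_ trades off the training loss on the labeled nodes against
            # the attack loss on the unlabeled nodes (whose labels_onehot
            # entries are presumably self-training predictions); the special
            # cases below avoid adding unused ops to the graph.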
            if self.lambda_ == 1:
                attack_loss = loss_labeled
            elif self.lambda_ == 0:
                attack_loss = loss_attack
            else:
                attack_loss = self.lambda_ * loss_labeled + (
                    1 - self.lambda_) * loss_attack

            # This variable accumulates the gradients over all inner training steps.
            self.grad_sum = tf.Variable(np.zeros(self.N * self.N),
                                        dtype=self.dtype)
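            # Summing these per-step gradients is what makes the meta gradient
            # "approximate": instead of differentiating through the entire
            # inner training loop, the gradient of each inner step is
            # accumulated into grad_sum.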

            self.adjacency_grad = tf.multiply(
                tf.gradients(attack_loss, self.adjacency_changes)[0],
                tf.reshape(self.modified_adjacency, [-1]) * -2 + 1,
                name="Adj_gradient")
            # Add the current gradient to the sum.
            self.grad_sum_add = tf.assign_add(self.grad_sum,
                                              self.adjacency_grad)

            # Make sure that the minimum entry is 0.
            self.grad_sum_mod = self.grad_sum - tf.reduce_min(self.grad_sum)

            # Set entries to 0 that could lead to singleton nodes.
            singleton_mask = self.filter_potential_singletons()
            self.grad_sum_mod = tf.multiply(self.grad_sum_mod, singleton_mask)

            if ll_constraint:
                print("Enforcing likelihood ratio constraint with cutoff {}".
                      format(ll_cutoff))
                allowed_mask, self.ll_ratio = self.log_likelihood_constraint(
                    ll_cutoff)

                # Set entries to 0 that would violate the log likelihood constraint.
                self.grad_sum_mod = tf.multiply(self.grad_sum_mod,
                                                allowed_mask)

            # Get argmax of the approximate meta gradients.
            adj_meta_approx_argmax = tf.argmax(self.grad_sum_mod)

            # Compute the index corresponding to the reverse direction of the edge (i.e. in the other triangle of the
            # matrix).
            adj_argmax_transpose_ix = utils.ravel_index(
                utils.unravel_index_tf(adj_meta_approx_argmax,
                                       [self.N, self.N])[::-1],
                [self.N, self.N])
            # Stack both indices to make sure our matrix remains symmetric.
            adj_argmax_combined = tf.stack(
                [adj_meta_approx_argmax, adj_argmax_transpose_ix],
                name="Meta_approx_argmax_combined")

            # Add the change to the perturbations.
            self.adjacency_update = tf.scatter_add(
                self.adjacency_changes,
                indices=adj_argmax_combined,
                updates=-2 *
                tf.gather(tf.reshape(self.modified_adjacency, [-1]),
                          adj_argmax_combined) + 1)
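
    # Usage sketch (hypothetical driver, not part of the original code): the
    # accumulation op runs once per inner training step, the update op once
    # per perturbation, e.g.
    #
    #     for _ in range(n_perturbations):             # assumed names throughout
    #         session.run(model.grad_sum.initializer)  # reset the accumulator
    #         for _ in range(train_iters):
    #             session.run([model.train_op, model.grad_sum_add])
    #         session.run(model.adjacency_update)
    #
    # assuming an active tf.Session `session`; `train_op` and the reset between
    # perturbations are assumptions about the surrounding training loop.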