Example #1
0
    def build_loss(self):
        """Build the training loss: task loss plus L2 weight decay.

        Reads from ``self.config``:
          - "weight_decay": scalar multiplier for the L2 penalty.
          - "loss_type": "cross_entropy" or "focal_loss".
          - "num_classes" (optional, default 2): one-hot depth for the
            focal-loss target (default preserves the original binary case).

        Side effects: sets ``self.wd`` and ``self.loss``, and registers the
        final loss tensor in the 'ema/scalar' collection.
        """
        self.wd = self.config["weight_decay"]

        # L2 weight decay over every variable registered in 'reg_vars'
        # within this model's scope; set() guards against duplicates.
        self.loss = 0
        for var in set(tf.get_collection('reg_vars', self.scope)):
            weight_decay = tf.multiply(
                tf.nn.l2_loss(var),
                self.wd,
                # '/' is not legal in an op name, so flatten the variable's
                # scoped name with '-' (same result as split/join).
                name="{}-wd".format(str(var.op.name).replace('/', '-')))
            self.loss += weight_decay

        with tf.name_scope("loss"):
            if self.config["loss_type"] == "cross_entropy":
                self.loss += tf.reduce_mean(
                    tf.nn.sparse_softmax_cross_entropy_with_logits(
                        logits=self.estimation, labels=self.gold_label))
            elif self.config["loss_type"] == "focal_loss":
                # Generalized: class count is configurable; default 2 keeps
                # the original behavior for existing configs.
                num_classes = self.config.get("num_classes", 2)
                one_hot_target = tf.one_hot(self.gold_label, num_classes)
                print(one_hot_target.get_shape())
                self.loss += focal_loss(self.estimation, one_hot_target)

        tf.add_to_collection('ema/scalar', self.loss)
        print("List of Variables:")
        for v in tf.trainable_variables():
            print(v.name)
    def build_loss(self):
        """Build the training loss for the configured loss type.

        Reads ``self.config["loss_type"]`` ("cross_entropy", "focal_loss",
        or "contrastive_loss") and, for the contrastive case,
        ``self.config["distance_metric"]``. Cross-entropy and contrastive
        losses also add the sum of the graph's collected regularization
        losses. Registers the loss tensor in the 'ema/scalar' collection.
        """

        def _reg_term():
            # Sum of all regularization losses accumulated during graph
            # construction (e.g. from layer kernel regularizers).
            return tf.reduce_sum(
                tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

        with tf.name_scope("loss"):
            if self.config["loss_type"] == "cross_entropy":
                self.loss = tf.add(
                    tf.reduce_mean(
                        tf.nn.sparse_softmax_cross_entropy_with_logits(
                            logits=self.estimation, labels=self.gold_label)),
                    _reg_term(),
                    name="loss")

            elif self.config["loss_type"] == "focal_loss":
                # NOTE(review): hard-coded binary one-hot depth — presumably
                # a two-class task; confirm against the model config.
                one_hot_target = tf.one_hot(self.gold_label, 2)
                print(one_hot_target.get_shape())
                self.loss = focal_loss(self.estimation, one_hot_target)

            elif self.config["loss_type"] == "contrastive_loss":
                if self.config["distance_metric"] == "l1_similarity":
                    self.loss = tf.add(
                        tf.reduce_mean(
                            tf.losses.softmax_cross_entropy(
                                self.one_hot_label, self.pred_probs)),
                        _reg_term(),
                        name="loss")

                # "cosine_constrastive" spelling kept as-is: it is a runtime
                # config value matched elsewhere. Both metrics below compute
                # the identical loss, so the branches are merged.
                elif self.config["distance_metric"] in (
                        "cosine_constrastive", "l2_similarity"):
                    self.loss = tf.add(
                        tf.reduce_mean(self.contrastive_distance),
                        _reg_term(),
                        name="loss")

        tf.add_to_collection('ema/scalar', self.loss)
        print("List of Variables:")
        for v in tf.trainable_variables():
            print(v.name)
Example #3
0
    def build_loss(self):
        """Assemble ``self.loss`` from the configured loss type, then
        optionally add L2 weight decay over the 'reg_vars' collection.

        Reads ``self.config["loss_type"]``, ``self.config["distance_metric"]``
        (contrastive case only) and ``self.config["l2_reg"]``; registers the
        final loss tensor in the 'ema/scalar' collection.
        """
        self.wd = self.config.get("l2_reg", None)

        with tf.name_scope("loss"):
            loss_type = self.config["loss_type"]

            if loss_type == "cross_entropy":
                xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=self.estimation, labels=self.gold_label)
                self.loss = tf.reduce_mean(xent)

            elif loss_type == "focal_loss":
                one_hot_target = tf.one_hot(self.gold_label, 2)
                print(one_hot_target.get_shape())
                self.loss = focal_loss(self.estimation, one_hot_target)

            elif loss_type == "contrastive_loss":
                metric = self.config["distance_metric"]
                if metric == "l1_similarity":
                    self.loss = tf.reduce_mean(
                        tf.losses.softmax_cross_entropy(
                            self.one_hot_label, self.pred_probs))
                elif metric == "cosine_constrastive":
                    self.loss = tf.reduce_mean(self.contrastive_distance)
                elif metric == "l2_similarity":
                    self.loss = tf.reduce_mean(self.contrastive_distance)

            # Optional L2 weight decay, applied only when "l2_reg" is set.
            wd_loss = 0
            print("------------wd--------------", self.wd)
            if self.wd is not None:
                for var in set(tf.get_collection('reg_vars', self.scope)):
                    # '/' is illegal in op names; flatten scoped name with '-'.
                    penalty = tf.multiply(
                        tf.nn.l2_loss(var),
                        self.wd,
                        name="{}-wd".format('-'.join(
                            str(var.op.name).split('/'))))
                    wd_loss += penalty
                print("---------using l2 regualarization------------")
            self.loss += wd_loss

        tf.add_to_collection('ema/scalar', self.loss)
        print("List of Variables:")
        for v in tf.trainable_variables():
            print(v.name)