Example #1
    def build_loss(self, *args, **kargs):
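        # pick the point-wise classification loss named by config.loss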
        if self.config.loss == "softmax_loss":
            self.loss, _ = point_wise_loss.softmax_loss(self.logits, self.gold_label, 
                                    *args, **kargs)
        elif self.config.loss == "sparse_amsoftmax_loss":
            self.loss, _ = point_wise_loss.sparse_amsoftmax_loss(self.logits, self.gold_label, 
                                        self.config, *args, **kargs)
        elif self.config.loss == "focal_loss_multi_v1":
            self.loss, _ = point_wise_loss.focal_loss_multi_v1(self.logits, self.gold_label, 
                                        self.config, *args, **kargs)
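        # optionally add a center loss over the sentence representation, scaled by center_gamma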
        if self.config.with_center_loss:
            self.center_loss, _ = point_wise_loss.center_loss_v2(self.sent_repres, 
                                            self.gold_label, centers=self.memory,
                                            config=self.config, 
                                            *args, **kargs)
            self.loss = self.loss + self.config.center_gamma * self.center_loss

        if self.config.get("mode", "train") == "train":
            if self.config.with_label_regularization:
                print("===with class regularization===")
                self.class_loss, _ = point_wise_loss.focal_loss_multi_v1(
                                        self.class_logits, self.gold_label, 
                                        self.config, *args, **kargs)
                self.loss += self.config.class_penalty * self.class_loss
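        # log the trainable variables that live under this model's variable scope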
        trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope) 
        print("List of Variables:")
        for v in trainable_vars:
            print(v.name)
Example #2
    def build_loss(self, *args, **kargs):
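        # the point-wise losses consume the logits; the contrastive losses consume the pairwise distance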
        if self.config.loss == "softmax_loss":
            self.loss, _ = point_wise_loss.softmax_loss(
                self.logits, self.gold_label, *args, **kargs)
        elif self.config.loss == "sparse_amsoftmax_loss":
            self.loss, _ = point_wise_loss.sparse_amsoftmax_loss(
                self.logits, self.gold_label, self.config, *args, **kargs)
        elif self.config.loss == "focal_loss_binary_v2":
            self.loss, _ = point_wise_loss.focal_loss_binary_v2(
                self.logits, self.gold_label, self.config, *args, **kargs)
        elif self.config.loss == "contrastive_loss":
            if self.config.metric in ["Hyperbolic"]:
                self.loss = pair_wise_loss.hyper_contrastive_loss(
                    self.dist, self.gold_label, self.config, is_quardic=True)
            elif self.config.metric in ["Euclidean", "Arccosine", "Cosine"]:
                self.loss = pair_wise_loss.contrastive_loss(self.dist,
                                                            self.gold_label,
                                                            self.config,
                                                            is_quardic=True)

        if self.config.get("weight_decay", None):
            model_vars = tf.trainable_variables()
            lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in model_vars \
                                if 'bias' not in v.name ])
            lossL2 *= self.config.weight_decay
            self.loss += lossL2
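        # the universal transformer encoder contributes half of its act_loss term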
        if self.config.rnn == "universal_transformer":
            print("====encoder type====", self.config.rnn)
            self.loss += (self.act_loss / 2.0)
Example #3
 def build_loss(self, *args, **kargs):
     if self.config.loss == "softmax_loss":
         self.loss, _ = point_wise_loss.softmax_loss(self.logits, self.gold_label, 
                                 *args, **kargs)
     elif self.config.loss == "sparse_amsoftmax_loss":
         self.loss, _ = point_wise_loss.sparse_amsoftmax_loss(self.logits, self.gold_label, 
                                     self.config, *args, **kargs)
     elif self.config.loss == "focal_loss_binary_v2":
         self.loss, _ = point_wise_loss.focal_loss_binary_v2(self.logits, self.gold_label, 
                                     self.config, *args, **kargs)
Example #4
 def _get_loss(self, *args, **kargs):
     if self.config.loss == "softmax_loss":
         self.loss, _ = point_wise_loss.softmax_loss(self.logits, self.labels, 
                                 *args, **kargs)
     elif self.config.loss == "sparse_amsoftmax_loss":
         self.loss, _ = point_wise_loss.sparse_amsoftmax_loss(self.logits, self.labels, 
                                     self.config, *args, **kargs)
     elif self.config.loss == "focal_loss_binary_v2":
         self.loss, _ = point_wise_loss.focal_loss_binary_v2(self.logits, self.labels, 
                                     self.config, *args, **kargs)
     elif self.config.loss == "focal_loss_multi_v1":
         self.loss, _ = point_wise_loss.focal_loss_multi_v1(self.logits, self.labels, 
                                     self.config, *args, **kargs)
Example #5
 def build_loss(self, *args, **kargs):
     if self.config.loss == "softmax_loss":
         self.loss, _ = point_wise_loss.softmax_loss(
             self.logits, self.gold_label, *args, **kargs)
     elif self.config.loss == "sparse_amsoftmax_loss":
         self.loss, _ = point_wise_loss.sparse_amsoftmax_loss(
             self.logits, self.gold_label, self.config, *args, **kargs)
     elif self.config.loss == "focal_loss_multi_v1":
         self.loss, _ = point_wise_loss.focal_loss_multi_v1(
             self.logits, self.gold_label, self.config, *args, **kargs)
     if self.config.with_center_loss:
         self.center_loss, _ = point_wise_loss.center_loss_v2(
             self.sent_repres, self.gold_label, self.config, *args, **kargs)
         self.loss = self.loss + self.config.center_gamma * self.center_loss
Example #6
    def build_loss(self, *args, **kargs):
        with tf.device('/device:GPU:%s' % gpu_id):
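            # note: gpu_id is expected to be defined in the enclosing scope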
            if self.args.loss == "softmax_loss":
                soft_loss, _ = point_wise_loss.softmax_loss(
                    self.estimation, self.target, *args, **kargs)
                self.loss = soft_loss
            elif self.args.loss == "sparse_amsoftmax_loss":
                soft_loss, _ = point_wise_loss.sparse_amsoftmax_loss(
                    self.estimation, self.target, *args, **kargs)
                self.loss = soft_loss
            elif self.args.loss == "focal_loss_binary_v2":
                soft_loss, _ = point_wise_loss.focal_loss_binary_v2(
                    self.estimation, self.target, *args, **kargs)
                self.loss = soft_loss
Example #7
    def build_loss(self, *args, **kargs):
        if self.config.loss == "softmax_loss":
            self.loss, _ = point_wise_loss.softmax_loss(
                self.logits, self.gold_label, *args, **kargs)
        elif self.config.loss == "sparse_amsoftmax_loss":
            self.loss, _ = point_wise_loss.sparse_amsoftmax_loss(
                self.logits, self.gold_label, self.config, *args, **kargs)
        elif self.config.loss == "focal_loss_binary_v2":
            self.loss, _ = point_wise_loss.focal_loss_binary_v2(
                self.logits, self.gold_label, self.config, *args, **kargs)

        if self.config.get("weight_decay", None):
            for var in set(tf.get_collection('reg_vars', self.scope)):
                weight_decay = tf.multiply(tf.nn.l2_loss(var),
                                           self.config.weight_decay,
                                           name="{}-wd".format('-'.join(
                                               str(var.op.name).split('/'))))
                self.loss += weight_decay
Example #8
 def build_loss(self, *args, **kargs):
     if self.config.loss == "softmax_loss":
         self.loss, _ = point_wise_loss.softmax_loss(self.logits, self.gold_label, 
                                 *args, **kargs)
     elif self.config.loss == "sparse_amsoftmax_loss":
         self.loss, _ = point_wise_loss.sparse_amsoftmax_loss(self.logits, self.gold_label, 
                                     self.config, *args, **kargs)
     elif self.config.loss == "focal_loss_binary_v2":
         self.loss, _ = point_wise_loss.focal_loss_binary_v2(self.logits, self.gold_label, 
                                     self.config, *args, **kargs)
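     # optional L2 regularization over weight/kernel variables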
     if self.config.l2_loss:
         if self.config.sigmoid_growing_l2loss:
             weights_added = tf.add_n([tf.nn.l2_loss(tensor) for tensor in tf.trainable_variables()
                                       if tensor.name.endswith("weights:0")
                                       and not tensor.name.endswith("weighted_sum/weights:0")
                                       or tensor.name.endswith('kernel:0')])
             full_l2_step = tf.constant(self.config.weight_l2loss_step_full_reg,
                                        dtype=tf.int32, shape=[], name='full_l2reg_step')
             full_l2_ratio = tf.constant(self.config.l2_regularization_ratio,
                                         dtype=tf.float32, shape=[], name='l2_regularization_ratio')
             gs_flt = tf.cast(self.global_step, tf.float32)
             half_l2_step_flt = tf.cast(full_l2_step / 2, tf.float32)
             # the regularization ratio grows with a sigmoid centered at half of
             # weight_l2loss_step_full_reg steps, saturating at l2_regularization_ratio
             l2loss_ratio = tf.sigmoid(((gs_flt - half_l2_step_flt) * 8) / half_l2_step_flt) * full_l2_ratio
             l2loss = weights_added * l2loss_ratio
         else:
             l2loss = tf.add_n([tf.nn.l2_loss(tensor) for tensor in tf.trainable_variables()
                                if tensor.name.endswith("weights:0")
                                or tensor.name.endswith('kernel:0')]) * \
                      tf.constant(self.config.l2_regularization_ratio,
                                  dtype='float', shape=[], name='l2_regularization_ratio')

         self.loss += l2loss