Example #1
0
    def build_loss(self, *args, **kargs):
        """Build ``self.loss`` from the loss named in ``self.config``.

        Point-wise losses ("softmax_loss", "sparse_amsoftmax_loss",
        "focal_loss_binary_v2") consume (logits, gold_label); the pair-wise
        "contrastive_loss" consumes the precomputed ``self.dist`` and picks
        the hyperbolic variant when ``config.metric`` is "Hyperbolic".
        Optionally adds L2 weight decay over non-bias variables and, for the
        universal-transformer encoder, half of ``self.act_loss``
        (presumably the ACT ponder cost — TODO confirm).
        """
        loss_name = self.config.loss
        if loss_name == "softmax_loss":
            self.loss, _ = point_wise_loss.softmax_loss(
                self.logits, self.gold_label, *args, **kargs)
        elif loss_name == "sparse_amsoftmax_loss":
            self.loss, _ = point_wise_loss.sparse_amsoftmax_loss(
                self.logits, self.gold_label, self.config, *args, **kargs)
        elif loss_name == "focal_loss_binary_v2":
            self.loss, _ = point_wise_loss.focal_loss_binary_v2(
                self.logits, self.gold_label, self.config, *args, **kargs)
        elif loss_name == "contrastive_loss":
            metric = self.config.metric
            if metric in ["Hyperbolic"]:
                self.loss = pair_wise_loss.hyper_contrastive_loss(
                    self.dist, self.gold_label, self.config, is_quardic=True)
            elif metric in ["Euclidean", "Arccosine", "Cosine"]:
                self.loss = pair_wise_loss.contrastive_loss(
                    self.dist, self.gold_label, self.config, is_quardic=True)

        # Optional L2 weight decay over every trainable variable whose name
        # does not contain 'bias'.
        if self.config.get("weight_decay", None):
            penalties = [tf.nn.l2_loss(v) for v in tf.trainable_variables()
                         if 'bias' not in v.name]
            self.loss += tf.add_n(penalties) * self.config.weight_decay

        if self.config.rnn == "universal_transformer":
            print("====encoder type====", self.config.rnn)
            self.loss += (self.act_loss / 2.0)
Example #2
0
 def build_loss(self, *args, **kargs):
     """Set ``self.loss`` to the point-wise loss named by ``self.config.loss``.

     "softmax_loss" takes (logits, gold_label); the other variants also
     receive ``self.config``. An unrecognized name leaves ``self.loss``
     untouched. The auxiliary second return value is discarded.
     """
     # Losses that take an extra positional config argument.
     with_config = {
         "sparse_amsoftmax_loss": point_wise_loss.sparse_amsoftmax_loss,
         "focal_loss_binary_v2": point_wise_loss.focal_loss_binary_v2,
     }
     name = self.config.loss
     if name == "softmax_loss":
         self.loss, _ = point_wise_loss.softmax_loss(
             self.logits, self.gold_label, *args, **kargs)
     elif name in with_config:
         self.loss, _ = with_config[name](
             self.logits, self.gold_label, self.config, *args, **kargs)
Example #3
0
 def _get_loss(self, *args, **kargs):
     """Resolve ``self.loss`` from the point-wise loss named in the config.

     Every variant consumes (logits, labels); all except "softmax_loss"
     additionally receive ``self.config``. Unknown names are a no-op.
     """
     name = self.config.loss
     if name == "softmax_loss":
         self.loss, _ = point_wise_loss.softmax_loss(
             self.logits, self.labels, *args, **kargs)
         return
     if name == "sparse_amsoftmax_loss":
         loss_fn = point_wise_loss.sparse_amsoftmax_loss
     elif name == "focal_loss_binary_v2":
         loss_fn = point_wise_loss.focal_loss_binary_v2
     elif name == "focal_loss_multi_v1":
         loss_fn = point_wise_loss.focal_loss_multi_v1
     else:
         return
     self.loss, _ = loss_fn(
         self.logits, self.labels, self.config, *args, **kargs)
Example #4
0
    def build_loss(self, *args, **kargs):
        """Construct ``self.loss`` on the configured GPU device.

        Chooses the point-wise loss named by ``self.args.loss`` over
        (estimation, target); an unrecognized name leaves ``self.loss``
        unset. ``gpu_id`` comes from an enclosing scope not visible here.
        """
        with tf.device('/device:GPU:%s' % gpu_id):
            name = self.args.loss
            if name == "softmax_loss":
                loss_fn = point_wise_loss.softmax_loss
            elif name == "sparse_amsoftmax_loss":
                loss_fn = point_wise_loss.sparse_amsoftmax_loss
            elif name == "focal_loss_binary_v2":
                loss_fn = point_wise_loss.focal_loss_binary_v2
            else:
                return
            soft_loss, _ = loss_fn(
                self.estimation, self.target, *args, **kargs)
            self.loss = soft_loss
Example #5
0
File: drcn.py  Project: yyht/simnet
    def build_loss(self, *args, **kargs):
        """Build ``self.loss``: the configured point-wise loss plus, when
        ``weight_decay`` is set, an L2 penalty for each variable registered
        in this scope's 'reg_vars' collection.
        """
        name = self.config.loss
        if name == "softmax_loss":
            self.loss, _ = point_wise_loss.softmax_loss(
                self.logits, self.gold_label, *args, **kargs)
        elif name == "sparse_amsoftmax_loss":
            self.loss, _ = point_wise_loss.sparse_amsoftmax_loss(
                self.logits, self.gold_label, self.config, *args, **kargs)
        elif name == "focal_loss_binary_v2":
            self.loss, _ = point_wise_loss.focal_loss_binary_v2(
                self.logits, self.gold_label, self.config, *args, **kargs)

        if self.config.get("weight_decay", None):
            # De-duplicate the collection, then add one named L2 term per
            # variable (op name flattened with '-' so it is a legal op name).
            reg_vars = set(tf.get_collection('reg_vars', self.scope))
            for var in reg_vars:
                wd_name = "{}-wd".format('-'.join(str(var.op.name).split('/')))
                self.loss += tf.multiply(tf.nn.l2_loss(var),
                                         self.config.weight_decay,
                                         name=wd_name)
Example #6
0
 def build_loss(self, *args, **kargs):
     """Build ``self.loss``: the configured point-wise loss plus an
     optional L2 penalty over weight/kernel variables.

     Bug fixes versus the previous revision:
       * the ``sigmoid_growing_l2loss`` branch computed ``weights_added``
         and ``l2loss_ratio`` but never assigned ``l2loss``, so the final
         ``self.loss += l2loss`` raised ``NameError``;
       * the flat-ratio branch referenced a bare ``config`` (undefined
         name) instead of ``self.config``.
     """
     if self.config.loss == "softmax_loss":
         self.loss, _ = point_wise_loss.softmax_loss(
             self.logits, self.gold_label, *args, **kargs)
     elif self.config.loss == "sparse_amsoftmax_loss":
         self.loss, _ = point_wise_loss.sparse_amsoftmax_loss(
             self.logits, self.gold_label, self.config, *args, **kargs)
     elif self.config.loss == "focal_loss_binary_v2":
         self.loss, _ = point_wise_loss.focal_loss_binary_v2(
             self.logits, self.gold_label, self.config, *args, **kargs)

     if self.config.l2_loss:
         if self.config.sigmoid_growing_l2loss:
             # L2 over weight matrices (*weights:0, minus the weighted-sum
             # layer) and conv/dense kernels (*kernel:0).
             weights_added = tf.add_n([
                 tf.nn.l2_loss(tensor)
                 for tensor in tf.trainable_variables()
                 if (tensor.name.endswith("weights:0")
                     and not tensor.name.endswith("weighted_sum/weights:0"))
                 or tensor.name.endswith('kernel:0')])
             full_l2_step = tf.constant(
                 self.config.weight_l2loss_step_full_reg, dtype=tf.int32,
                 shape=[], name='full_l2reg_step')
             full_l2_ratio = tf.constant(
                 self.config.l2_regularization_ratio, dtype=tf.float32,
                 shape=[], name='l2_regularization_ratio')
             gs_flt = tf.cast(self.global_step, tf.float32)
             half_l2_step_flt = tf.cast(full_l2_step / 2, tf.float32)
             # Sigmoid ramp centred at half of full_l2_step: the ratio
             # grows from ~0 toward full_l2_ratio as training progresses.
             l2loss_ratio = tf.sigmoid(
                 ((gs_flt - half_l2_step_flt) * 8) / half_l2_step_flt
             ) * full_l2_ratio
             # Fix: apply the ramped ratio to the accumulated L2 norm;
             # previously ``l2loss`` was never assigned in this branch.
             l2loss = weights_added * l2loss_ratio
         else:
             # NOTE(review): this branch does not exclude
             # weighted_sum/weights:0 like the branch above — confirm
             # whether that asymmetry is intentional.
             # Fix: was bare ``config.l2_regularization_ratio``.
             l2loss = tf.add_n([
                 tf.nn.l2_loss(tensor)
                 for tensor in tf.trainable_variables()
                 if tensor.name.endswith("weights:0")
                 or tensor.name.endswith('kernel:0')
             ]) * tf.constant(
                 self.config.l2_regularization_ratio, dtype='float',
                 shape=[], name='l2_regularization_ratio')

         self.loss += l2loss