def __init__(self, opt):
     super(CenterLandmarkLoss, self).__init__()
     self.crit = FocalLoss()
     self.crit_hm_hp = nn.MSELoss() if opt.mse_loss else FocalLoss()
     self.crit_kp = RegWeightedL1Loss() if not opt.dense_hp else nn.L1Loss(
         reduction='sum')  # reduction='sum': in CenterNet-style code the dense keypoint loss is later divided by the mask sum
     self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else RegLoss(
     ) if opt.reg_loss == 'sl1' else None
     self.opt = opt
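For orientation, here is a minimal sketch of how criteria like these are usually applied in a CenterNet-style forward pass. The output/batch keys ('hm', 'hm_hp', 'hps', 'reg') and the (output, mask, ind, target) signature of the regression criteria are assumptions borrowed from the common CenterNet losses, not taken from this snippet; per-term loss weights and the clamped sigmoid normally applied to heatmap outputs are omitted.

 def forward(self, outputs, batch):  # illustrative sketch only, not this project's forward()
     output = outputs[-1]
     hm_loss = self.crit(output['hm'], batch['hm'])                  # center heatmap
     hm_hp_loss = self.crit_hm_hp(output['hm_hp'], batch['hm_hp'])   # landmark heatmap
     kp_loss = self.crit_kp(output['hps'], batch['hps_mask'],
                            batch['ind'], batch['hps'])              # landmark offsets from centers
     off_loss = self.crit_reg(output['reg'], batch['reg_mask'],
                              batch['ind'], batch['reg'])            # center sub-pixel offset
     return hm_loss + hm_hp_loss + kp_loss + off_loss                # weights omitted in this sketch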
Example #2
 def __init__(self, opt):
     super(MultiPoseLoss, self).__init__()
     self.crit = FocalLoss()
     self.crit_hm_hp = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
     self.crit_kp = RegWeightedL1Loss() if not opt.dense_hp else \
                    torch.nn.L1Loss(reduction='sum')
     self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
                     RegLoss() if opt.reg_loss == 'sl1' else None
     self.opt = opt
Example #3
 def __init__(self, opt):
     super(CtdetLoss, self).__init__()
     self.crit = MSELoss() if opt.mse_loss else FocalLoss()
     self.crit_obj = FocalLoss()
     self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
               RegLoss() if opt.reg_loss == 'sl1' else None
     self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
               NormRegL1Loss() if opt.norm_wh else \
               RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
     self.opt = opt
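The nested conditional expressions above (used throughout these examples) pick one criterion per option; purely as a readability sketch with the same option names and identical behaviour, the selection could also be written as:

     # Readability sketch only; behaviour identical to the nested conditionals above.
     if opt.reg_loss == 'l1':
         self.crit_reg = RegL1Loss()
     elif opt.reg_loss == 'sl1':
         self.crit_reg = RegLoss()
     else:
         self.crit_reg = None

     if opt.dense_wh:
         self.crit_wh = torch.nn.L1Loss(reduction='sum')
     elif opt.norm_wh:
         self.crit_wh = NormRegL1Loss()
     elif opt.cat_spec_wh:
         self.crit_wh = RegWeightedL1Loss()
     else:
         self.crit_wh = self.crit_reg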
Example #4
 def __init__(self, cfg, local_rank):
     super(MultiPoseLoss, self).__init__()
     self.crit = FocalLoss()
     self.crit_hm_hp = torch.nn.MSELoss() if cfg.LOSS.MSE_LOSS else FocalLoss()
     self.crit_kp = RegWeightedL1Loss() if not cfg.LOSS.DENSE_HP else \
                    torch.nn.L1Loss(reduction='sum')
     self.crit_reg = RegL1Loss() if cfg.LOSS.REG_LOSS == 'l1' else \
                     RegLoss() if cfg.LOSS.REG_LOSS == 'sl1' else None                       
     self.cfg = cfg
     self.local_rank = local_rank
Example #5
 def __init__(self, opt):
     super(CtdetLoss, self).__init__()
     self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
     self.crit_centerness = FocalLoss()
     self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
       RegLoss() if opt.reg_loss == 'sl1' else None
     self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
       NormRegL1Loss() if opt.norm_wh else \
         RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
      self.crit_scale = torch.nn.SmoothL1Loss(reduction='sum')  # size_average=False is deprecated; reduction='sum' is the equivalent
     self.opt = opt
Example #6
 def __init__(self, opt):
   super(MultiPoseLoss, self).__init__()
   self.crit = FocalLoss()
   self.crit_hm_hp = FocalLoss()
   if opt.mdn:
     self.crit_kp = th_mdn_loss_dense if opt.dense_hp else \
                     th_mdn_loss_ind
   else:
     self.crit_kp = torch.nn.L1Loss(reduction='sum')  if opt.dense_hp else \
                     RegWeightedL1Loss()
   self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
                     RegLoss() if opt.reg_loss == 'sl1' else None
   self.opt = opt
Example #7
    def __init__(self, opt):
        super(MultiPoseLoss, self).__init__()
        self.crit = FocalLoss()
        self.crit_hm_hp = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
        self.crit_kp = RegWeightedL1Loss() if not opt.dense_hp else \
                       torch.nn.L1Loss(reduction='sum')
        self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
                        RegLoss() if opt.reg_loss == 'sl1' else None

        ##changed
        self.crit_view_side = CrossEntropyLossWMask()
        self.crit_view_front_rear = CrossEntropyLossWMask()

        self.opt = opt
Example #8
    def __init__(self, opt):
        super(MultiKPSLoss, self).__init__()
        self.crit = FocalLoss()  # center-point heatmap
        self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
            RegLoss() if opt.reg_loss == 'sl1' else None  # center-point regression
        self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
            NormRegL1Loss() if opt.norm_wh else \
            RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg  # width/height regression
        self.crit_hm_hp = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()  # keypoint heatmap
        self.crit_kp = RegWeightedL1Loss() if not opt.dense_hp else \
            torch.nn.L1Loss(reduction='sum')  # keypoint regression

        self.opt = opt
Example #9
    def __init__(self, opt):
        super(MotLoss, self).__init__()
        self.crit = paddle.nn.MSELoss() if opt.mse_loss else FocalLoss()
        self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
            RegLoss() if opt.reg_loss == 'sl1' else None
        self.crit_wh = paddle.nn.L1Loss(reduction='sum') if opt.dense_wh else \
            NormRegL1Loss() if opt.norm_wh else \
                RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
        self.opt = opt
        self.emb_dim = opt.reid_dim
        self.nID = opt.nID

        # param_attr = paddle.ParamAttr(initializer=KaimingUniform())
        # bound = 1 / math.sqrt(self.emb_dim)
        # bias_attr = paddle.ParamAttr(initializer=Uniform(-bound, bound))
        # self.classifier = nn.Linear(self.emb_dim, self.nID, weight_attr=param_attr, bias_attr=bias_attr)
        self.classifier = nn.Linear(self.emb_dim, self.nID, bias_attr=True)
        if opt.id_loss == 'focal':  # rarely used
            # torch.nn.init.normal_(self.classifier.weight, std=0.01)
            prior_prob = 0.01
            bias_value = -math.log((1 - prior_prob) / prior_prob)
            # torch.nn.init.constant_(self.classifier.bias, bias_value)

            weight_attr = paddle.framework.ParamAttr(initializer=nn.initializer.Normal(std=0.01))
            bias_attr = paddle.framework.ParamAttr(initializer=nn.initializer.Constant(bias_value))
            self.classifier = nn.Linear(self.emb_dim, self.nID, weight_attr=weight_attr, bias_attr=bias_attr)
        self.IDLoss = nn.CrossEntropyLoss(ignore_index=-1)
        self.emb_scale = math.sqrt(2) * math.log(self.nID - 1)
        # self.s_det = nn.Parameter(-1.85 * torch.ones(1))
        # self.s_id = nn.Parameter(-1.05 * torch.ones(1))
        self.s_det = paddle.create_parameter([1], dtype='float32', default_initializer = nn.initializer.Constant(value=-1.85))
        self.s_id = paddle.create_parameter([1], dtype='float32', default_initializer = nn.initializer.Constant(value=-1.05))
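For reference, the focal-style bias initialisation above reduces to a fixed constant; a quick self-contained check:

import math
prior_prob = 0.01
bias_value = -math.log((1 - prior_prob) / prior_prob)
# bias_value = -ln(99) = -4.595 (approx.), so sigmoid(bias_value) = 1 / (1 + 99) = 0.01,
# i.e. every identity class starts out predicted at the chosen prior probability.
print(bias_value)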
Example #10
 def __init__(self, opt):
     super(DddLoss, self).__init__()
     self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
     self.crit_reg = Reg3dLoss_IOU()
     # self.crit_reg = L1Loss()
     # self.crit_rot = BinRotLoss()
     self.opt = opt
Example #11
    def __init__(self, opt):
        super(CtdetLoss, self).__init__()
        self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
        self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
                  RegLoss() if opt.reg_loss == 'sl1' else None

        self.opt = opt
Example #12
File: ddd.py  Project: Godaddy-xie/UM3D_TUM
 def __init__(self, opt):
     super(DddLoss, self).__init__()
     self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
     self.crit_reg = L1Loss()
     self.crit_rot = BinRotLoss()
     self.dept_reg = ConfidenceLoss()
     self.opt = opt
Example #13
 def __init__(self, backbone='res50', num_classes=2, num_refining=1):
     super(STELA, self).__init__()
     self.anchor_generator = Anchors()
     self.num_anchors = self.anchor_generator.num_anchors
     self.init_backbone(backbone)
     self.fpn = FPN(in_channels_list=self.fpn_in_channels,
                    out_channels=256,
                    top_blocks=LastLevelP6P7(self.fpn_in_channels[-1], 256))
     self.cls_head = CLSHead(in_channels=256,
                             feat_channels=256,
                             num_stacked=1,
                             num_anchors=self.num_anchors,
                             num_classes=num_classes)
     self.reg_head = REGHead(in_channels=256,
                             feat_channels=256,
                             num_stacked=1,
                             num_anchors=self.num_anchors,
                             num_regress=5)
     self.num_refining = num_refining
     if self.num_refining > 0:
         self.ref_heads = nn.ModuleList([
             REGHead(in_channels=256,
                     feat_channels=256,
                     num_stacked=1,
                     num_anchors=self.num_anchors,
                     num_regress=5) for _ in range(self.num_refining)
         ])
         self.loss_ref = RegressLoss(func='smooth')
     self.loss_cls = FocalLoss()
     self.loss_reg = RegressLoss(func='smooth')
     self.box_coder = BoxCoder()
Example #14
 def __init__(self, opt):
     super().__init__()
     self.hau = hau_iou.WeightedHausdorffDistance(
         [x // opt.down_ratio for x in (opt.input_h, opt.input_w)])
     self.crit = FocalLoss()
     self.iou = hau_iou.bounded_iou_loss
     self.opt = opt
Example #15
File: mot.py  Project: jinyeom/FairMOT
 def __init__(self, opt):
     super(MotLoss, self).__init__()
     self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
     self.crit_reg = (
         RegL1Loss()
         if opt.reg_loss == "l1"
         else RegLoss()
         if opt.reg_loss == "sl1"
         else None
     )
     self.crit_wh = (
         torch.nn.L1Loss(reduction="sum")
         if opt.dense_wh
         else NormRegL1Loss()
         if opt.norm_wh
         else RegWeightedL1Loss()
         if opt.cat_spec_wh
         else self.crit_reg
     )
     self.opt = opt
     self.emb_dim = opt.reid_dim
     self.nID = opt.nID
     self.classifier = nn.Linear(self.emb_dim, self.nID)
     self.IDLoss = nn.CrossEntropyLoss(ignore_index=-1)
     # self.TriLoss = TripletLoss()
     self.emb_scale = math.sqrt(2) * math.log(self.nID - 1)
     self.s_det = nn.Parameter(-1.85 * torch.ones(1))
     self.s_id = nn.Parameter(-1.05 * torch.ones(1))
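The learnable scalars `s_det` and `s_id` are FairMOT's uncertainty-based task weights. A minimal sketch of how they are typically combined, written as a hypothetical helper (`_weighted_total` and its arguments are illustrative names, not from this project):

 def _weighted_total(self, det_loss, id_loss):
     # Uncertainty weighting: each task loss is scaled by exp(-s), and the raw s terms
     # act as a regulariser so the learned weights cannot grow without bound.
     loss = (torch.exp(-self.s_det) * det_loss
             + torch.exp(-self.s_id) * id_loss
             + self.s_det + self.s_id)
     return 0.5 * loss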
Example #16
 def __init__(self, opt):
     super(Det3dLoss, self).__init__()
     print("Using MSE loss for clasification = ", opt.mse_loss)
     self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
     self.crit_view_side = CrossEntropyLossWMask()
     self.crit_view_front_rear = CrossEntropyLossWMask()
     self.crit_reg = L1Loss()
     self.opt = opt
Example #17
 def __init__(self, opt):
     super(CtsegLoss, self).__init__()
     self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
     self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
         RegLoss() if opt.reg_loss == 'sl1' else None
     self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
         NormRegL1Loss() if opt.norm_wh else \
         RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
     self.crit_mask = DiceLoss(opt.seg_feat_channel)
     self.opt = opt
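`DiceLoss(opt.seg_feat_channel)` is project-specific; for reference only, a generic soft Dice loss over per-instance binary masks (an assumed stand-in, not the repository's implementation) can be written as:

import torch

def soft_dice_loss(pred_logits, target, eps=1e-5):
    # pred_logits, target: (N, H, W); target is a {0, 1} mask.
    prob = torch.sigmoid(pred_logits)
    inter = (prob * target).sum(dim=(1, 2))
    union = prob.sum(dim=(1, 2)) + target.sum(dim=(1, 2))
    return (1.0 - (2.0 * inter + eps) / (union + eps)).mean()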
Example #18
    def __init__(self, opt):
        super(DddLoss, self).__init__()
        self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
        self.crit_reg = L1Loss()
        self.crit_rot = BinRotLoss()

        self.depth_reg = L2Loss()
        self.dim_reg = L2Loss()
        self.vec_reg = L1Loss_ver()
        self.opt = opt
Example #19
 def __init__(self, opt):
     super(CtdetLoss_doublehm, self).__init__()
     self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
     self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
         RegLoss() if opt.reg_loss == 'sl1' else None
     self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
         NormRegL1Loss() if opt.norm_wh else \
             RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
     self.crit_reconstruct_img = torch.nn.L1Loss()
     self.opt = opt
     self.test_reconstruct_conflict_with_class = True
Example #20
    def __init__(self, opt):
        super(DddLoss, self).__init__()
        # define the loss functions
        self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()

        # L1 loss
        self.crit_reg = L1Loss()

        # hybrid continuous-discrete loss: multi-bin (multiBin) rotation loss
        self.crit_rot = BinRotLoss()
        self.opt = opt
Example #21
    def __init__(self, opt):
        super().__init__()
        self.alpha = 0.54
        self.beta = 0.54
        self.wh_gaussian = True
        self.sample = 0
        self.crit = FocalLoss()
        # self.wh_crit = IOULoss('diou')
        self.wh_crit = L1Loss()

        self.opt = opt
Example #22
 def __init__(self, opt):
     super(MotLoss, self).__init__()
     self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
     self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
         RegLoss() if opt.reg_loss == 'sl1' else None
     self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
         NormRegL1Loss() if opt.norm_wh else \
             RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
     self.crit_density_focal = FocalLoss()
     self.crit_density_ssim = NORMMSSSIM()
     self.crit_count = torch.nn.MSELoss()
     self.opt = opt
     self.emb_dim = opt.reid_dim
     self.nID = opt.nID
     self.classifier = nn.Linear(self.emb_dim, self.nID)
     self.IDLoss = nn.CrossEntropyLoss(ignore_index=-1)
     self.emb_scale = math.sqrt(2) * math.log(self.nID -
                                              1) if self.nID != 1 else 0
     self.s_det = nn.Parameter(-1.85 * torch.ones(1))
     self.s_id = nn.Parameter(-1.05 * torch.ones(1))
Example #23
File: rodet.py  Project: dinglinghu/DRN2020
 def __init__(self, opt):
     super(RodetLoss, self).__init__()
     self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
     self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
               RegLoss() if opt.reg_loss == 'sl1' else None
     self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
               NormRegL1Loss() if opt.norm_wh else \
               RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
     self.crit_angle = BinRotLoss() if opt.rotate_binloss else RegMSELoss()
     self.crit_dense_angle = DenseRegL1Loss() if opt.dense_angle else None
     self.opt = opt
Example #24
 def __init__(self, opt):
     super(DetLoss, self).__init__()
     self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
     self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
         RegLoss() if opt.reg_loss == 'sl1' else None
     self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
         NormRegL1Loss() if opt.norm_wh else \
             RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
     self.opt = opt
     #self.TriLoss = TripletLoss()
     self.s_det = nn.Parameter(-1.85 * torch.ones(1))
Example #25
 def __init__(self, opt):
     super(CtdetLossSpotNetVid, self).__init__()
      self.crit_seg = torch.nn.BCEWithLogitsLoss()  # torch.nn.MSELoss()  # hughes
     self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
     self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
               RegLoss() if opt.reg_loss == 'sl1' else None
     self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
               NormRegL1Loss() if opt.norm_wh else \
               RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
     self.opt = opt
Example #26
File: fvdet.py  Project: zhangyin497/FVNet
 def __init__(self, opt):
     super(FvdetLoss, self).__init__()
     self.crit = FocalLoss()
     #self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
     #RegLoss() if opt.reg_loss == 'sl1' else None
     self.crit_reg = RegL1Loss()
     #self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
     #NormRegL1Loss() if opt.norm_wh else \
     #RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
     self.crit_dep_iou = RegL1LossDepIOU()
     self.opt = opt
Example #27
 def __init__(self, opt):
   super(CtdetLoss, self).__init__()
    ### mean squared error, i.e. L2 loss
   self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
    ### several forms of the regression loss are defined
   self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
             RegLoss() if opt.reg_loss == 'sl1' else None
    ### several forms of the width/height (wh) loss are defined
   self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
             NormRegL1Loss() if opt.norm_wh else \
             RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
   self.opt = opt
Example #28
 def __init__(self, opt):
     super(GCtdetLoss, self).__init__()
     self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
     # self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
     #           RegLoss() if opt.reg_loss == 'sl1' else None
     # print("INIT")
     self.crit_reg = NLL()
     self.crit_wh = NLL()
     # self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
     #           NormRegL1Loss() if opt.norm_wh else \
     #           RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
     self.opt = opt
Example #29
    def __init__(self, opt):
        super(CircleLoss, self).__init__()
        # if opt.mask_focal_loss:
        self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
        # else:
        #   self.crit = FocalLoss_mask()

        self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
                  RegLoss() if opt.reg_loss == 'sl1' else None
        self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
                  NormRegL1Loss() if opt.norm_wh else \
                  RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
        self.opt = opt
Example #30
 def __init__(self, opt):
     super(HoidetLoss, self).__init__()
     self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
     self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
         RegLoss() if opt.reg_loss == 'sl1' else None
     self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
         NormRegL1Loss() if opt.norm_wh else \
             RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
     if opt.hard_negative:
         self.crit_h = FocalLossHardNeg(7)
     if opt.hm_rel_dcn25_i_casc_match or opt.hm_rel_dcn25_i_match:
         self.crit_reg_offset = RegL1LossOffset()
         self.bce = torch.nn.BCELoss()
     self.opt = opt