Example #1
def __init__(self, opt):
    super(MultiPoseLoss, self).__init__()
    self.crit = FocalLoss()
    self.crit_hm_hp = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
    self.crit_kp = RegWeightedL1Loss() if not opt.dense_hp else \
                   torch.nn.L1Loss(reduction='sum')
    self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
                    RegLoss() if opt.reg_loss == 'sl1' else None
    self.opt = opt
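
In the CenterNet family of codebases, `FocalLoss` is usually the penalty-reduced pixelwise focal loss applied to Gaussian-splatted center heatmaps. A minimal sketch of that formulation (my reconstruction from the CenterNet paper, not this repository's exact implementation):

import torch

def centernet_focal_loss(pred, gt, alpha=2, beta=4):
    # pred, gt: (B, C, H, W); gt is a Gaussian heatmap with 1 at object centers.
    pos_mask = gt.eq(1).float()
    neg_mask = gt.lt(1).float()
    pred = pred.clamp(1e-6, 1 - 1e-6)  # avoid log(0)
    pos_loss = torch.log(pred) * (1 - pred) ** alpha * pos_mask
    neg_loss = torch.log(1 - pred) * pred ** alpha * (1 - gt) ** beta * neg_mask
    # normalize by the number of positive (center) locations
    return -(pos_loss.sum() + neg_loss.sum()) / pos_mask.sum().clamp(min=1)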
Example #2
def __init__(self, opt):
    super(CtdetLoss, self).__init__()
    self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
    self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
        RegLoss() if opt.reg_loss == 'sl1' else None
    self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
        NormRegL1Loss() if opt.norm_wh else \
            RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
    self.opt = opt
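
The chained conditional expressions above are compact but hard to scan. The `crit_wh` selection is equivalent to the following if/elif ladder (a readability rewrite using the same names from this snippet, shown for clarity only):

# Equivalent, more readable selection for self.crit_wh:
if opt.dense_wh:
    self.crit_wh = torch.nn.L1Loss(reduction='sum')  # dense width/height map
elif opt.norm_wh:
    self.crit_wh = NormRegL1Loss()                   # normalized L1 on box size
elif opt.cat_spec_wh:
    self.crit_wh = RegWeightedL1Loss()               # category-specific box sizes
else:
    self.crit_wh = self.crit_reg                     # reuse the regression criterion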
Example #3
    def __init__(self, opt):
        super(McMotLoss, self).__init__()

        self.opt = opt

        self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
        self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
            RegLoss() if opt.reg_loss == 'sl1' else None  # L1 loss or smooth l1 loss
        self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
            NormRegL1Loss() if opt.norm_wh else \
                RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg  # box size loss

        # @even: Test additional loss functions for re-id
        self.circle_loss = CircleLoss(m=0.25, gamma=80)
        self.ghm_c = GHMC()  # GHM-C loss for multi-class classification (for ReID)

        if opt.id_weight > 0:
            self.emb_dim = opt.reid_dim

            # @even: replace nID with nID_dict for MCMOT (multi-class multi-object tracking) training
            self.nID_dict = opt.nID_dict

            # Layers with learnable parameters: fully connected layers for Re-ID
            # @even: define one classifier per class that needs ReID
            self.classifiers = nn.ModuleDict()  # ModuleList/ModuleDict is needed so parameters get registered automatically
            self.focal_loss_dict = nn.ModuleDict()
            for cls_id, nID in self.nID_dict.items():
                # Option 1: a plain fully connected layer
                self.classifiers[str(cls_id)] = nn.Linear(self.emb_dim, nID)  # FC layer

                # Option 2: an ArcMargin fully connected layer
                # self.classifiers[str(cls_id)] = ArcMarginFc(in_features=self.emb_dim,
                #                                             out_features=nID,
                #                                             device=self.opt.device,
                #                                             m=0.4)

                # Option 3: focal loss
                # self.focal_loss_dict[str(cls_id)] = McFocalLoss(nID, self.opt.device)

            # using CE loss to do ReID classification
            self.ce_loss = nn.CrossEntropyLoss(ignore_index=-1)
            # self.TriLoss = TripletLoss()

            # @even: define an embedding scale for each class that needs ReID
            self.emb_scale_dict = dict()
            for cls_id, nID in self.nID_dict.items():
                self.emb_scale_dict[cls_id] = math.sqrt(2) * math.log(nID - 1)

            # loss scale factor for track re-id classification
            self.s_id = nn.Parameter(-1.05 * torch.ones(1))  # -1.05

        # scale factor of detection loss
        self.s_det = nn.Parameter(-1.85 * torch.ones(1))
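
The snippet defines per-class classifiers, embedding scales, and `ce_loss`, but not how they combine. A hedged sketch of a typical forward-pass fragment, modeled on FairMOT-style training (`id_feat`, `id_target`, and the normalization step are assumptions, not code from this class):

import torch.nn.functional as F

# Re-id loss for one class cls_id (assumed usage):
# id_feat:   (N, emb_dim) embeddings gathered at ground-truth object centers
# id_target: (N,) track-id labels for this class; -1 marks ignored samples
id_feat = self.emb_scale_dict[cls_id] * F.normalize(id_feat, dim=1)
id_logits = self.classifiers[str(cls_id)](id_feat)
id_loss = self.ce_loss(id_logits, id_target)  # ignore_index=-1 skips the -1 labels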
Example #4
def __init__(self, stages=2, use_gt=0) -> None:
    super(SQGARelMod, self).__init__()
    self.rel = CmpRelMod(in_channels=256,
                         feat_dim=256,
                         groups=1,
                         stages=stages,
                         use_gt=bool(use_gt))
    self.pre_cls = nn.Sequential(
        nn.Linear(256, 256),
        nn.ReLU(),
        nn.Linear(256, 1),
    )
    nn.init.normal_(self.pre_cls[0].weight, std=0.1)
    nn.init.normal_(self.pre_cls[2].weight, std=0.1)
    self.binary_loss = FocalLoss()
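
`pre_cls` is a two-layer MLP that scores each 256-d relation feature with a single binary logit. A minimal usage sketch (the batch size and `targets` tensor are illustrative, not from this snippet):

import torch

feats = torch.randn(8, 256)   # 8 candidate relation features (illustrative shape)
logits = self.pre_cls(feats)  # -> (8, 1) binary logits
# loss = self.binary_loss(logits, targets)  # targets: 0/1 tensor matching logits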
Example #5
    def __init__(self, opt):
        super(McMotLoss, self).__init__()

        self.opt = opt

        self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
        self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
            RegLoss() if opt.reg_loss == 'sl1' else None  # L1 loss or smooth l1 loss
        self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
            NormRegL1Loss() if opt.norm_wh else \
                RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg  # box size loss
        self.circle_loss = CircleLoss(m=0.25, gamma=80)

        self.emb_dim = opt.reid_dim

        # @even: replace nID with nID_dict for MCMOT (multi-class multi-object tracking) training
        self.nID_dict = opt.nID_dict

        # Layers with learnable parameters: fully connected layers for Re-ID
        # @even: define one classifier per class that needs ReID
        self.classifiers = nn.ModuleDict()  # ModuleList/ModuleDict is needed so parameters get registered automatically
        for cls_id, nID in self.nID_dict.items():
            self.classifiers[str(cls_id)] = nn.Linear(self.emb_dim, nID)  # FC layer
            # Alternative: an ArcMargin fully connected layer
            # self.classifiers[str(cls_id)] = ArcMarginFc(in_features=self.emb_dim,
            #                                             out_features=nID,
            #                                             device=self.opt.device,
            #                                             m=0.4)

        self.IDLoss = nn.CrossEntropyLoss(ignore_index=-1)  # cross-entropy loss over track ids
        # self.TriLoss = TripletLoss()

        # Classification loss at each feature-map location
        self.ClsLoss = nn.CrossEntropyLoss()  # classifies the detected category at every (y, x) position

        # @even: define an embedding scale for each class that needs ReID
        self.emb_scale_dict = dict()
        for cls_id, nID in self.nID_dict.items():
            self.emb_scale_dict[cls_id] = math.sqrt(2) * math.log(nID - 1)

        self.s_det = nn.Parameter(-1.85 * torch.ones(1))  # detection loss scale factor
        self.s_id = nn.Parameter(-1.05 * torch.ones(1))  # track-id classification loss scale factor
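
The embedding scale `sqrt(2) * log(nID - 1)` grows only logarithmically with the number of identities. A quick worked example (the identity count is hypothetical):

import math

nID = 1000                                    # hypothetical identity count
emb_scale = math.sqrt(2) * math.log(nID - 1)  # ~= 9.77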
Example #6
    def __init__(self, opt):
        super(MotLoss, self).__init__()
        self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
        self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
            RegLoss() if opt.reg_loss == 'sl1' else None
        self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
            NormRegL1Loss() if opt.norm_wh else \
                RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg

        # classification (re-id)
        self.opt = opt
        self.emb_dim = opt.reid_dim
        self.nID = opt.nID
        self.classifier = nn.Linear(self.emb_dim, self.nID)
        self.IDLoss = nn.CrossEntropyLoss(ignore_index=-1)
        # self.TriLoss = TripletLoss()
        self.emb_scale = math.sqrt(2) * math.log(self.nID - 1)
        self.s_det = nn.Parameter(-1.85 * torch.ones(1))
        self.s_id = nn.Parameter(-1.05 * torch.ones(1))
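
`s_det` and `s_id` are learnable log-variance terms for uncertainty-based loss balancing (Kendall et al.), which FairMOT uses to weight the detection and re-id branches. The forward pass typically combines them roughly as below; this is a sketch of the scheme, with `hm_loss`, `wh_loss`, `off_loss`, and `id_loss` assumed to be the per-branch totals:

# Uncertainty-weighted total loss (sketch, not necessarily this repo's exact code):
det_loss = hm_loss + wh_loss + off_loss
loss = (torch.exp(-self.s_det) * det_loss
        + torch.exp(-self.s_id) * id_loss
        + (self.s_det + self.s_id))
loss *= 0.5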
Example #7
    def __init__(self, opt):
        super(MotLoss, self).__init__()
        self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
        self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
            RegLoss() if opt.reg_loss == 'sl1' else None  # L1 loss or smooth l1 loss
        self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
            NormRegL1Loss() if opt.norm_wh else \
                RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg  # box size loss

        self.opt = opt
        self.emb_dim = opt.reid_dim
        self.nID = opt.nID

        # The only layer with learnable parameters: the fully connected layer for Re-ID
        self.classifier = nn.Linear(self.emb_dim, self.nID)
        self.IDLoss = nn.CrossEntropyLoss(ignore_index=-1)
        # self.TriLoss = TripletLoss()

        self.emb_scale = math.sqrt(2) * math.log(self.nID - 1)
        self.s_det = nn.Parameter(-1.85 * torch.ones(1))  # detection loss scale factor
        self.s_id = nn.Parameter(-1.05 * torch.ones(1))  # track-id classification loss scale factor
Example #8
def __init__(self, opt):
    super(DddLoss, self).__init__()
    self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
    self.crit_reg = L1Loss()
    self.crit_rot = BinRotLoss()
    self.opt = opt
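
`RegL1Loss` and its relatives recur throughout these examples; the common pattern is an L1 loss averaged only over locations that contain a ground-truth object. A simplified sketch of that pattern (the gather-at-index step of the real implementation is omitted):

import torch.nn.functional as F

def masked_reg_l1(pred, mask, target):
    # pred, target: (B, N, D) regressed values at N candidate positions
    # mask: (B, N), 1 where a ground-truth object exists
    mask = mask.unsqueeze(2).expand_as(pred).float()
    loss = F.l1_loss(pred * mask, target * mask, reduction='sum')
    return loss / (mask.sum() + 1e-4)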