Example #1
import math

import paddle
import paddle.nn as nn
from paddle.nn.initializer import KaimingUniform


class ConvLayer(nn.Layer):
    def __init__(self,
                 ch_in,
                 ch_out,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=False):
        super(ConvLayer, self).__init__()
        bias_attr = False
        # The uniform bias-init bound follows the conv's fan-in, Kaiming-style.
        fan_in = ch_in * kernel_size**2
        bound = 1 / math.sqrt(fan_in)
        param_attr = paddle.ParamAttr(initializer=KaimingUniform())
        if bias:
            bias_attr = paddle.ParamAttr(
                initializer=nn.initializer.Uniform(-bound, bound))
        self.conv = nn.Conv2D(in_channels=ch_in,
                              out_channels=ch_out,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=padding,
                              dilation=dilation,
                              groups=groups,
                              weight_attr=param_attr,
                              bias_attr=bias_attr)
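
A minimal usage sketch (the channel counts and input shape are assumptions for illustration; the snippet above defines no forward, so the wrapped conv is called directly):

layer = ConvLayer(ch_in=3, ch_out=64, kernel_size=3, padding=1, bias=True)
x = paddle.randn([1, 3, 224, 224])  # NCHW input, shape chosen for illustration
y = layer.conv(x)                   # -> [1, 64, 224, 224]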
Example #2
import paddle
import paddle.nn as nn
from paddle.nn.initializer import KaimingUniform

# ConvNormLayer (conv + normalization, optionally deformable) is defined
# elsewhere in the project; in PaddleDetection it lives in ppdet.modeling.layers.


class IDAUp(nn.Layer):
    def __init__(self, ch_ins, ch_out, up_strides, dcn_v2=True):
        super(IDAUp, self).__init__()
        for i in range(1, len(ch_ins)):
            ch_in = ch_ins[i]
            up_s = int(up_strides[i])
            # Project each input level to the common channel count ch_out.
            proj = nn.Sequential(
                ConvNormLayer(ch_in,
                              ch_out,
                              filter_size=3,
                              stride=1,
                              use_dcn=dcn_v2,
                              bias_on=dcn_v2,
                              norm_decay=None,
                              dcn_lr_scale=1.,
                              dcn_regularizer=None), nn.ReLU())
            # Fuse the upsampled level with the previous node's output.
            node = nn.Sequential(
                ConvNormLayer(ch_out,
                              ch_out,
                              filter_size=3,
                              stride=1,
                              use_dcn=dcn_v2,
                              bias_on=dcn_v2,
                              norm_decay=None,
                              dcn_lr_scale=1.,
                              dcn_regularizer=None), nn.ReLU())

            param_attr = paddle.ParamAttr(initializer=KaimingUniform())
            # Depthwise transposed conv (groups=ch_out) with kernel 2*s,
            # stride s, padding s//2 upsamples the feature map by factor s.
            up = nn.Conv2DTranspose(ch_out,
                                    ch_out,
                                    kernel_size=up_s * 2,
                                    weight_attr=param_attr,
                                    stride=up_s,
                                    padding=up_s // 2,
                                    groups=ch_out,
                                    bias_attr=False)
            # TODO: uncomment fill_up_weights
            #fill_up_weights(up)
            setattr(self, 'proj_' + str(i), proj)
            setattr(self, 'up_' + str(i), up)
            setattr(self, 'node_' + str(i), node)
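
The fill_up_weights call left in the TODO conventionally initializes that depthwise transposed conv to a bilinear upsampling kernel, as in the original DLA/CenterNet code. A hedged Paddle sketch, assuming up.weight has shape [ch_out, 1, k, k] as produced by the grouped Conv2DTranspose above:

import math

import numpy as np
import paddle


def fill_up_weights(up):
    # Build one bilinear interpolation kernel and copy it to every channel.
    w = np.zeros(up.weight.shape, dtype='float32')  # assumed [ch_out, 1, k, k]
    k = w.shape[2]
    f = math.ceil(k / 2)
    c = (2 * f - 1 - f % 2) / (2.0 * f)
    for i in range(k):
        for j in range(k):
            w[0, 0, i, j] = (1 - abs(i / f - c)) * (1 - abs(j / f - c))
    w[:, 0, :, :] = w[0, 0, :, :]
    up.weight.set_value(paddle.to_tensor(w))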
Example #3
import math

import paddle
import paddle.nn as nn
from paddle.nn.initializer import KaimingUniform, Uniform

# ConvLayer is the conv wrapper from Example #1.


class FairMOTEmbeddingHead(nn.Layer):
    def __init__(self,
                 in_channels,
                 ch_head=256,
                 ch_emb=128,
                 num_classes=1,
                 num_identities_dict={0: 14455}):
        super(FairMOTEmbeddingHead, self).__init__()
        assert num_classes >= 1
        self.num_classes = num_classes
        self.ch_emb = ch_emb
        self.num_identities_dict = num_identities_dict
        # Two-layer conv head that maps backbone features to ReID embeddings.
        self.reid = nn.Sequential(
            ConvLayer(
                in_channels, ch_head, kernel_size=3, padding=1, bias=True),
            nn.ReLU(),
            ConvLayer(
                ch_head, ch_emb, kernel_size=1, stride=1, padding=0, bias=True))
        param_attr = paddle.ParamAttr(initializer=KaimingUniform())
        bound = 1 / math.sqrt(ch_emb)
        bias_attr = paddle.ParamAttr(initializer=Uniform(-bound, bound))
        self.reid_loss = nn.CrossEntropyLoss(ignore_index=-1, reduction='sum')

        if num_classes == 1:
            nID = self.num_identities_dict[0]  # single class
            self.classifier = nn.Linear(
                ch_emb, nID, weight_attr=param_attr, bias_attr=bias_attr)
            # When num_identities (nID) is 1, emb_scale is set to 1.
            self.emb_scale = math.sqrt(2) * math.log(nID - 1) if nID > 1 else 1
        else:
            # One identity classifier and one embedding scale per category.
            self.classifiers = dict()
            self.emb_scale_dict = dict()
            for cls_id, nID in self.num_identities_dict.items():
                self.classifiers[str(cls_id)] = nn.Linear(
                    ch_emb, nID, weight_attr=param_attr, bias_attr=bias_attr)
                # When num_identities (nID) is 1, emb_scale is set to 1.
                self.emb_scale_dict[str(cls_id)] = math.sqrt(2) * math.log(
                    nID - 1) if nID > 1 else 1
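
A hedged instantiation sketch for the single-class case (in_channels and the feature-map shape are assumptions; the head's forward and loss wiring are not part of the snippet):

head = FairMOTEmbeddingHead(in_channels=64)
feat = paddle.randn([2, 64, 152, 272])  # e.g. a stride-4 neck output, shape assumed
emb = head.reid(feat)                   # -> [2, 128, 152, 272] per-pixel embeddings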
Example #4
import math

import paddle
import paddle.nn as nn
from paddle.nn.initializer import KaimingUniform, Uniform

# ConvLayer is the conv wrapper from Example #1; this is a single-class
# variant of the head in Example #3.


class FairMOTEmbeddingHead(nn.Layer):
    def __init__(self,
                 in_channels,
                 ch_head=256,
                 ch_emb=128,
                 num_identifiers=14455):
        super(FairMOTEmbeddingHead, self).__init__()
        self.reid = nn.Sequential(
            ConvLayer(
                in_channels, ch_head, kernel_size=3, padding=1, bias=True),
            nn.ReLU(),
            ConvLayer(
                ch_head, ch_emb, kernel_size=1, stride=1, padding=0, bias=True))
        param_attr = paddle.ParamAttr(initializer=KaimingUniform())
        bound = 1 / math.sqrt(ch_emb)
        bias_attr = paddle.ParamAttr(initializer=Uniform(-bound, bound))
        self.classifier = nn.Linear(
            ch_emb,
            num_identifiers,
            weight_attr=param_attr,
            bias_attr=bias_attr)
        self.reid_loss = nn.CrossEntropyLoss(ignore_index=-1, reduction='sum')
        # When num_identifiers is 1, emb_scale is set to 1.
        self.emb_scale = math.sqrt(2) * math.log(
            num_identifiers - 1) if num_identifiers > 1 else 1
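
For a concrete sense of the scale factor: with the default num_identifiers=14455, emb_scale = sqrt(2) * ln(14454) ≈ 13.55 (math.log is the natural logarithm). A quick check:

import math
emb_scale = math.sqrt(2) * math.log(14455 - 1)
print(round(emb_scale, 2))  # 13.55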