コード例 #1
0
ファイル: model.py プロジェクト: robingong/person-reid-3d
 def __init__(self, k, feature_dims, emb_dims, output_classes, init_points = 512, input_dims=3,
              dropout_prob=0.5, npart=1, id_skip=False, drop_connect_rate=0, res_scale=1.0,
              light=False, bias = False, cluster='xyz', conv='EdgeConv', use_xyz=True, graph_jitter = False):
     """Multi-scale-grouping variant: delegate all common setup to the parent,
     then replace every set-abstraction stage with a three-branch
     PointnetSAModuleMSG whose branch outputs are concatenated."""
     super().__init__(k, feature_dims, emb_dims, output_classes, init_points, input_dims,
              dropout_prob, npart, id_skip, drop_connect_rate, res_scale,
              light, bias, cluster, conv, use_xyz, graph_jitter)
     self.sa = nn.ModuleList()
     npoint = init_points
     for layer in range(self.num_layers):
         width = feature_dims[layer]
         # Halve the point count whenever the channel width grows.
         if layer > 0 and width > feature_dims[layer - 1]:
             npoint //= 2
         # Three grouping scales; the widest-radius branch keeps more
         # channels (//2) than the two narrow ones (//4).
         branch_mlps = [
             [width, width // 2, width // 4],
             [width, width // 2, width // 4],
             [width, width // 2, width // 2],
         ]
         self.sa.append(PointnetSAModuleMSG(
             npoint=npoint,
             radii=[0.1, 0.2, 0.4],
             nsamples=[8, 16, 32],
             mlps=branch_mlps,
             fuse='concat',  # fuse = 'add'
             norml='bn',
             activation='relu',
             use_se=True,
             use_xyz=use_xyz,
             use_neighbor=False,
             light=light,
         ))
     # Re-initialize the SA stages with depth-aware Kaiming init
     # (since add 3 branch).
     self.sa.apply(lambda m: weights_init_kaiming(m, L=self.num_layers))
コード例 #2
0
    def __init__(self,
                 k,
                 feature_dims,
                 emb_dims,
                 output_classes,
                 init_points=512,
                 input_dims=3,
                 dropout_prob=0.5,
                 npart=1,
                 id_skip=False,
                 drop_connect_rate=0,
                 res_scale=1.0,
                 light=False,
                 bias=False,
                 cluster='xyz',
                 conv='EdgeConv',
                 use_xyz=True,
                 use_se=True,
                 graph_jitter=False,
                 pre_act=False,
                 norm='bn',
                 stride=2,
                 layer_drop=0,
                 num_conv=1):
        """Build the ModelE point-cloud backbone.

        Per entry of ``feature_dims`` this constructs:
          * ``num_conv`` edge-convolution layers (plain 1x1 convs when k == 1),
          * a matching norm layer (BatchNorm1d or LayerNorm, optionally
            followed by Dropout when ``layer_drop`` > 0),
          * a PointnetSAModule whenever the point resolution changes,
        then the embedding/classifier heads (a single global head when
        ``npart`` == 1, otherwise one 512-d head per part), and finally
        applies Kaiming / classifier weight initialization.

        Args:
            k: neighborhood size for the KNN graph (k == 1 disables it).
            feature_dims: per-layer channel widths; its length is the depth.
            emb_dims: embedding widths; only ``emb_dims[0]`` is used here.
            output_classes: number of identity classes.
            init_points: initial point count, divided by ``stride`` whenever
                the channel width grows.
            input_dims: dimensionality of the raw point features.
            dropout_prob: dropout rate in the embedding heads.
            npart: number of horizontal parts (1 = global head only).
            id_skip: if set, add Linear shortcuts (``conv_s2``) at each
                resolution change.
            drop_connect_rate, res_scale, cluster, graph_jitter: stored on
                ``self`` for the forward pass; unused during construction.
            light: use grouped (2-group) convs after the first layer.
            bias: whether the embedding Linear layers carry a bias.
            conv: graph-conv flavor; only 'EdgeConv' builds conv layers here.
            use_xyz, use_se: forwarded to PointnetSAModule.
            pre_act: normalize the conv *input* instead of its output
                (changes which width the norm layer receives).
            norm: 'bn' for BatchNorm1d, 'ln' for LayerNorm.
            stride: downsampling factor for the point count.
            layer_drop: dropout rate appended after each norm layer when > 0.
            num_conv: number of stacked convs per stage.
        """
        super(ModelE, self).__init__()

        self.npart = npart
        self.norm = norm
        self.graph_jitter = graph_jitter
        self.res_scale = res_scale
        self.id_skip = id_skip
        self.drop_connect_rate = drop_connect_rate
        self.nng = KNNGraphE(k)  # with random neighbor
        self.conv = nn.ModuleList()
        self.conv_s1 = nn.ModuleList()
        self.conv_s2 = nn.ModuleList()
        self.bn = nn.ModuleList()
        self.sa = nn.ModuleList()
        self.cluster = cluster
        self.feature_dims = feature_dims
        self.conv_type = conv
        self.init_points = init_points
        self.k = k
        self.light = light
        self.pre_act = pre_act
        self.num_conv = num_conv
        #self.proj_in = nn.Linear(input_dims, input_dims)

        self.num_layers = len(feature_dims)
        npoint = init_points
        last_npoint = -1
        for i in range(self.num_layers):
            # k == 1: no neighborhood to aggregate, so fall back to a
            # per-point 1x1 conv instead of an edge conv.
            if k == 1:
                self.conv.append(
                    nn.Conv2d(feature_dims[i - 1] if i > 0 else input_dims,
                              feature_dims[i],
                              kernel_size=1,
                              bias=True))
                self.bn.append(nn.BatchNorm1d(feature_dims[i]))
            elif conv == 'EdgeConv':
                # 'light' variant saves parameters with 2-group convs,
                # except for the very first layer.
                group_num = 2 if light and i > 0 else 1
                for j in range(self.num_conv):
                    # Edge convs consume concatenated (center, neighbor)
                    # features, hence the doubled input width.
                    if j == 0:
                        self.conv.append(
                            nn.Conv2d(feature_dims[i - 1] *
                                      2 if i > 0 else input_dims * 2,
                                      feature_dims[i],
                                      kernel_size=1,
                                      groups=group_num,
                                      bias=True))
                    else:
                        self.conv.append(
                            nn.Conv2d(feature_dims[i] * 2,
                                      feature_dims[i],
                                      kernel_size=1,
                                      groups=group_num,
                                      bias=True))

                    # With pre-activation the norm runs on the conv INPUT,
                    # so its width follows the incoming channels.
                    if i == 0 and j == 0 and pre_act:
                        norm_dim = input_dims
                    else:
                        norm_dim = feature_dims[
                            i - 1] if pre_act and j == 0 else feature_dims[i]

                    if norm == 'ln':
                        if layer_drop > 0:
                            self.bn.append(
                                nn.Sequential(nn.LayerNorm(norm_dim),
                                              nn.Dropout(layer_drop)))
                        else:
                            self.bn.append(nn.LayerNorm(norm_dim))
                    else:
                        if layer_drop > 0:
                            self.bn.append(
                                nn.Sequential(nn.BatchNorm1d(norm_dim),
                                              nn.Dropout(layer_drop)))
                        else:
                            self.bn.append(nn.BatchNorm1d(norm_dim))

            # Downsample the point set whenever the channel width grows.
            if i > 0 and feature_dims[i] > feature_dims[i - 1]:
                npoint = npoint // stride

            # Add a set-abstraction stage (and the optional id-skip shortcut)
            # only when the resolution actually changed.
            if npoint != last_npoint:
                if id_skip:
                    self.conv_s2.append(
                        nn.Linear(feature_dims[i - 1] if i > 0 else input_dims,
                                  feature_dims[i]))
                self.sa.append(
                    PointnetSAModule(npoint=npoint,
                                     radius=0.2,
                                     nsample=64,
                                     mlp=[
                                         feature_dims[i], feature_dims[i],
                                         feature_dims[i]
                                     ],
                                     fuse='add',
                                     norml='bn',
                                     activation='relu',
                                     use_se=use_se,
                                     use_xyz=use_xyz,
                                     use_neighbor=False,
                                     light=False))
                last_npoint = npoint
            #if id_skip:
            #    self.conv_s1.append( nn.Linear(feature_dims[i], feature_dims[i] ))

        self.embs = nn.ModuleList()
        self.bn_embs = nn.ModuleList()
        self.dropouts = nn.ModuleList()

        self.partpool = nn.AdaptiveAvgPool1d(self.npart)
        if self.npart == 1:
            # Single global head: emb -> BN -> dropout -> classifier.
            self.embs.append(
                nn.Linear(
                    # * 2 because of concatenation of max- and mean-pooling
                    feature_dims[-1] * 2,
                    emb_dims[0],
                    bias=bias))
            self.bn_embs.append(nn.BatchNorm1d(emb_dims[0]))
            self.dropouts.append(nn.Dropout(dropout_prob, inplace=True))
            self.proj_output = nn.Linear(emb_dims[0], output_classes)
            self.proj_output.apply(weights_init_classifier)
        else:
            # Part-based heads: one 512-d embedding + classifier per part.
            self.proj_outputs = nn.ModuleList()
            for i in range(0, self.npart):
                self.embs.append(nn.Linear(feature_dims[-1], 512, bias=bias))
                self.bn_embs.append(nn.BatchNorm1d(512))
                self.dropouts.append(nn.Dropout(dropout_prob, inplace=True))
                self.proj_outputs.append(nn.Linear(512, output_classes))
            self.proj_outputs.apply(weights_init_classifier)

        # initial
        #self.proj_in.apply(weights_init_kaiming)
        self.conv.apply(weights_init_kaiming)
        self.conv_s1.apply(weights_init_kaiming)
        self.conv_s2.apply(weights_init_kaiming)
        # Depth-aware Kaiming init for the SA stages.
        weights_init_kaiming2 = lambda x: weights_init_kaiming(
            x, L=self.num_layers)
        self.sa.apply(weights_init_kaiming2)
        #self.proj.apply(weights_init_kaiming)
        self.embs.apply(weights_init_kaiming)
        self.bn.apply(weights_init_kaiming)
        self.bn_embs.apply(weights_init_kaiming)
        self.npart = npart  # (redundant: npart was already stored above)
コード例 #3
0
ファイル: model.py プロジェクト: robingong/person-reid-3d
    def __init__(self, k, feature_dims, emb_dims, output_classes, init_points = 512, input_dims=3,
                 dropout_prob=0.5, npart=1, id_skip=False, drop_connect_rate=0, res_scale = 1.0,
                 light = False, bias = False, cluster='xyz', conv='EdgeConv', use_xyz=True, use_se = True, graph_jitter = False):
        """Build the Model point-cloud backbone.

        For each entry of ``feature_dims`` this creates one graph-conv layer
        (flavor chosen by ``conv``: EdgeConv / GATConv / GraphConv / SAGEConv /
        SGConv / GatedGCN, or a plain Linear when k == 1) and one
        PointnetSAModule, then the embedding/classifier heads (a single
        global head when ``npart`` == 1, otherwise one 512-d head per part),
        and finally applies Kaiming / classifier weight initialization.

        ``drop_connect_rate``, ``res_scale``, ``cluster`` and
        ``graph_jitter`` are only stored on ``self`` for the forward pass.
        """
        super(Model, self).__init__()

        self.npart = npart
        self.graph_jitter = graph_jitter
        self.res_scale = res_scale
        self.id_skip = id_skip
        self.drop_connect_rate = drop_connect_rate
        self.nng = KNNGraphE(k)  # with random neighbor
        self.conv = nn.ModuleList()
        self.conv_s1 = nn.ModuleList()
        self.conv_s2 = nn.ModuleList()
        self.bn = nn.ModuleList()
        self.sa = nn.ModuleList()
        self.cluster = cluster
        self.feature_dims = feature_dims
        self.conv_type = conv
        self.init_points = init_points
        self.k = k
        #self.proj_in = nn.Linear(input_dims, input_dims)

        self.num_layers = len(feature_dims)
        npoint = init_points
        for i in range(self.num_layers):
            # Every conv takes the previous layer's width as input
            # (raw input_dims for the first layer).
            # k == 1: no neighborhood, degenerate to a per-point Linear.
            if k==1: 
                    self.conv.append(nn.Linear(feature_dims[i-1] if i > 0 else input_dims, 
                                     feature_dims[i] ))
            elif conv == 'EdgeConv':
                if light:
                    self.conv.append(EdgeConv_Light(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i],
                        batch_norm=True))
                else: 
                    self.conv.append(EdgeConv(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i],
                        batch_norm=True))
            elif conv == 'GATConv':
                    self.conv.append(GATConv(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i],
                        feat_drop=0.2, attn_drop=0.2,
                        residual=True,
                        num_heads=1))
            elif conv == 'GraphConv':
                    self.conv.append( GraphConv(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i]))
            elif conv == 'SAGEConv':
                    self.conv.append( SAGEConv(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i],
                        feat_drop=0.2,
                        aggregator_type='mean', 
                        norm = nn.BatchNorm1d(feature_dims[i])
                        ) )
            elif conv == 'SGConv':
                    self.conv.append( SGConv(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i]) )
            elif conv == 'GatedGCN': # missing etypes
                    self.conv.append( GatedGCNLayer(
                        feature_dims[i - 1] if i > 0 else input_dims,
                        feature_dims[i], 
                        dropout=0.0, 
                        graph_norm=True, batch_norm=True, residual=True)
                        )


            # Halve the point count whenever the channel width grows.
            if i>0 and feature_dims[i]>feature_dims[i-1]:
                npoint = npoint//2
                if id_skip and  npoint <= self.init_points//4: # Only work on high level
                    self.conv_s2.append( nn.Linear(feature_dims[i-1], feature_dims[i] ))

            # NOTE: unlike ModelE, a set-abstraction stage is appended for
            # EVERY layer, not only when the resolution changes.
            self.sa.append(PointnetSAModule(
                npoint=npoint,
                radius=0.2,
                nsample=64,
                mlp=[feature_dims[i], feature_dims[i], feature_dims[i]],
                fuse = 'add',
                norml = 'bn',
                activation = 'relu',
                use_se = use_se,
                use_xyz = use_xyz,
                use_neighbor = False,
                light = light
            ))
            #if id_skip:
            #    self.conv_s1.append( nn.Linear(feature_dims[i], feature_dims[i] ))

        self.embs = nn.ModuleList()
        self.bn_embs = nn.ModuleList()
        self.dropouts = nn.ModuleList()

        self.partpool =  nn.AdaptiveAvgPool1d(self.npart)
        if self.npart == 1: 
            # Single global head: emb -> BN -> dropout -> classifier.
            self.embs.append(nn.Linear(
                # * 2 because of concatenation of max- and mean-pooling
                feature_dims[-1]*2, emb_dims[0], bias=bias))
            self.bn_embs.append(nn.BatchNorm1d(emb_dims[0]))
            self.dropouts.append(nn.Dropout(dropout_prob, inplace=True))
            self.proj_output = nn.Linear(emb_dims[0], output_classes)
            self.proj_output.apply(weights_init_classifier)
        else: 
            # Part-based heads: one 512-d embedding + classifier per part.
            self.proj_outputs = nn.ModuleList()
            for i in range(0, self.npart):
                # NOTE(review): input width is hard-coded to 512 here, while
                # ModelE uses feature_dims[-1] — presumably feature_dims[-1]
                # is 512 for part-based configs; confirm against callers.
                self.embs.append(nn.Linear(512, 512, bias=bias))
                self.bn_embs.append(nn.BatchNorm1d(512))
                self.dropouts.append(nn.Dropout(dropout_prob, inplace=True))
                self.proj_outputs.append(nn.Linear(512, output_classes))
            self.proj_outputs.apply(weights_init_classifier)

        # initial
        #self.proj_in.apply(weights_init_kaiming)
        self.conv.apply(weights_init_kaiming)
        self.conv_s1.apply(weights_init_kaiming)
        self.conv_s2.apply(weights_init_kaiming)
        # Depth-aware Kaiming init for the SA stages.
        weights_init_kaiming2 = lambda x:weights_init_kaiming(x,L=self.num_layers)
        self.sa.apply(weights_init_kaiming2) 
        #self.proj.apply(weights_init_kaiming)
        self.embs.apply(weights_init_kaiming)
        self.bn.apply(weights_init_kaiming)
        self.bn_embs.apply(weights_init_kaiming)
        self.npart = npart  # (redundant: npart was already stored above)
コード例 #4
0
    def __init__(self,
                 k,
                 feature_dims,
                 emb_dims,
                 output_classes,
                 init_points=512,
                 input_dims=3,
                 dropout_prob=0.5,
                 npart=1,
                 id_skip=False,
                 drop_connect_rate=0,
                 res_scale=1.0,
                 light=False,
                 bias=False,
                 cluster='xyz',
                 conv='EdgeConv',
                 use_xyz=True,
                 use_se=True,
                 graph_jitter=False,
                 pre_act=False,
                 norm='bn',
                 stride=2,
                 layer_drop=0,
                 num_conv=1,
                 shuffle=0):
        """Multi-scale-grouping variant: reuse the parent's setup, then swap
        each set-abstraction stage for a three-branch PointnetSAModuleMSG
        (concat fusion) added only when the point resolution changes."""
        super().__init__(k, feature_dims, emb_dims, output_classes,
                         init_points, input_dims, dropout_prob, npart, id_skip,
                         drop_connect_rate, res_scale, light, bias, cluster,
                         conv, use_xyz, use_se, graph_jitter, pre_act, norm,
                         stride, layer_drop, num_conv, shuffle)
        self.sa = nn.ModuleList()
        npoint, prev_npoint = init_points, -1
        for idx, width in enumerate(feature_dims):
            # Downsample whenever the channel width grows.
            if idx > 0 and width > feature_dims[idx - 1]:
                npoint //= stride

            if npoint == prev_npoint:
                # Resolution unchanged: no new SA stage for this layer.
                continue

            # The third branch absorbs the //3 rounding remainder so the
            # concatenated output width equals `width` exactly.
            tail = width - 2 * (width // 3)
            self.sa.append(PointnetSAModuleMSG(
                npoint=npoint,
                radii=[0.1, 0.2, 0.4],
                nsamples=[4, 8, 12],
                mlps=[[width, width // 3, width // 3],
                      [width, width // 3, width // 3],
                      [width, width // 3, tail]],
                fuse='concat',  # fuse = 'add'
                norml='bn',
                activation='relu',
                use_se=use_se,
                use_xyz=use_xyz,
                use_neighbor=False,
                light=light))
            prev_npoint = npoint
        # Depth-aware Kaiming init; since add 3 branch.
        self.sa.apply(lambda m: weights_init_kaiming(m, L=self.num_layers))
コード例 #5
0
    def init_layers_kaiming(self):
        """Apply Kaiming initialization to the encoder FC layers and the
        four generator layers, in the same order as before."""
        for module in (self.en1_fc, self.en2_fc,
                       self.mean_fc, self.logvar_fc,
                       self.generator1, self.generator2,
                       self.generator3, self.generator4):
            weights_init_kaiming(module)
コード例 #6
0
ファイル: model_efficient.py プロジェクト: layumi/dgcnn
    def __init__(self,
                 k,
                 feature_dims,
                 emb_dims,
                 output_classes,
                 init_points=512,
                 input_dims=3,
                 dropout_prob=0.5,
                 npart=1,
                 id_skip=False,
                 drop_connect_rate=0,
                 res_scale=1.0,
                 light=False,
                 bias=False,
                 cluster='xyz',
                 conv='EdgeConv',
                 use_xyz=True,
                 use_se=True,
                 graph_jitter=False,
                 pre_act=False,
                 norm='bn',
                 stride=2,
                 layer_drop=0,
                 num_conv=1,
                 temp=False,
                 gem=False,
                 ASPP=0):
        """Multi-scale-grouping variant with additive fusion: parent handles
        the common setup; here each set-abstraction stage becomes a
        three-branch PointnetSAModuleMSG (fuse='add'), added only when the
        point resolution changes. ``temp`` adds a learnable logit scale."""
        super().__init__(k, feature_dims, emb_dims, output_classes,
                         init_points, input_dims, dropout_prob, npart, id_skip,
                         drop_connect_rate, res_scale, light, bias, cluster,
                         conv, use_xyz, use_se, graph_jitter, pre_act, norm,
                         stride, layer_drop, num_conv, temp, gem, ASPP)
        self.sa = nn.ModuleList()
        if temp:
            # Learnable temperature, initialized to 1.
            self.logit_scale = nn.Parameter(torch.ones(()), requires_grad=True)
        npoint, prev_npoint = init_points, -1
        for idx, width in enumerate(feature_dims):
            # Downsample whenever the channel width grows.
            if idx > 0 and width > feature_dims[idx - 1]:
                npoint //= stride

            if npoint == prev_npoint:
                # Resolution unchanged: no new SA stage for this layer.
                continue

            # All three grouping scales share the same MLP widths; distinct
            # list literals keep the per-branch specs independent objects.
            self.sa.append(PointnetSAModuleMSG(
                npoint=npoint,
                radii=[0.1, 0.2, 0.4],
                nsamples=[8, 16, 32],
                mlps=[[width, width // 2, width],
                      [width, width // 2, width],
                      [width, width // 2, width]],
                fuse='add',  # fuse = 'add'
                norml='bn',
                activation='relu',
                use_se=use_se,
                use_xyz=use_xyz,
                use_neighbor=False,
                light=False))
            prev_npoint = npoint
        # Depth-aware Kaiming init; since add 3 branch.
        self.sa.apply(lambda m: weights_init_kaiming(m, L=self.num_layers))