Example #1
    def init_dilated_res_block(self, d_in, d_out, name):
        # First MLP halves the channel count before local feature aggregation.
        f_pc = helper_torch_util.conv2d(True, d_in, d_out // 2)
        setattr(self, name + 'mlp1', f_pc)

        self.init_building_block(d_out // 2, d_out, name + 'LFA')

        # Post-aggregation MLP; no activation here, since the residual sum
        # is activated after the shortcut is added.
        f_pc = helper_torch_util.conv2d(True, d_out, d_out * 2, activation=False)
        setattr(self, name + 'mlp2', f_pc)

        # Shortcut branch projected to the same width as mlp2's output.
        shortcut = helper_torch_util.conv2d(True, d_in, d_out * 2, activation=False)
        setattr(self, name + 'shortcut', shortcut)
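
All of these examples build their layers with helper_torch_util.conv2d, whose implementation is not shown on this page. The sketch below is a plausible reconstruction inferred from the call sites; the bn flag's meaning, the activation choice, and the kernel_size default are assumptions, not the repository's actual code.

import torch
import torch.nn as nn

def conv2d(bn, d_in, d_out, kernel_size=(1, 1), activation=True):
    # Hypothetical stand-in for helper_torch_util.conv2d: a Conv2d block
    # with optional batch norm and a (assumed) leaky-ReLU activation.
    layers = [nn.Conv2d(d_in, d_out, kernel_size=kernel_size)]
    if bn:
        layers.append(nn.BatchNorm2d(d_out, eps=1e-6, momentum=0.99))
    if activation:
        layers.append(nn.LeakyReLU(0.2))
    return nn.Sequential(*layers)

# Point features are laid out as (batch, channels, num_points, 1).
block = conv2d(True, 8, 16)
out = block(torch.randn(2, 8, 1024, 1))   # -> (2, 16, 1024, 1)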
Example #2
    def init_building_block(self, d_in, d_out, name):
        # Encodes the 10-channel relative neighbor positions
        # (see the sketch after this example).
        f_pc = helper_torch_util.conv2d(True, 10, d_in)
        setattr(self, name + 'mlp1', f_pc)

        self.init_att_pooling(d_in * 2, d_out // 2, name + 'att_pooling_1')

        f_xyz = helper_torch_util.conv2d(True, d_in, d_out // 2)
        setattr(self, name + 'mlp2', f_xyz)

        self.init_att_pooling(d_in * 2, d_out, name + 'att_pooling_2')
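
The conv2d(True, 10, d_in) above consumes a 10-channel input; in RandLA-Net this is the relative position encoding of each point's K neighbors: center coordinates, neighbor coordinates, their offset, and the Euclidean distance (3 + 3 + 3 + 1 = 10). A hedged sketch of that encoding follows; the function name and exact tensor layout are assumptions.

import torch

def relative_pos_encoding(xyz, neigh_xyz):
    # xyz: (B, N, 1, 3) center coordinates; neigh_xyz: (B, N, K, 3).
    offset = xyz - neigh_xyz                          # broadcasts to (B, N, K, 3)
    dist = torch.norm(offset, dim=-1, keepdim=True)   # (B, N, K, 1)
    return torch.cat(
        [xyz.expand_as(neigh_xyz), neigh_xyz, offset, dist], dim=-1)

enc = relative_pos_encoding(torch.randn(2, 1024, 1, 3),
                            torch.randn(2, 1024, 16, 3))   # -> (2, 1024, 16, 10)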
Example #3
    def __init__(self, cfg):
        super(RandLANet, self).__init__()
        self.cfg = cfg

        d_feature = cfg.d_feature

        self.fc0 = nn.Linear(cfg.d_in, d_feature)
        self.batch_normalization = nn.BatchNorm2d(d_feature,
                                                  eps=1e-6,
                                                  momentum=0.99)

        f_encoder_list = []
        d_encoder_list = []

        # ###########################Encoder############################
        for i in range(cfg.num_layers):
            name = 'Encoder_layer_' + str(i)
            self.init_dilated_res_block(d_feature, cfg.d_out[i], name)
            d_feature = cfg.d_out[i] * 2
            if i == 0:
                # The first width is recorded twice so the decoder's
                # skip indexing (d_encoder_list[-j-2]) lines up.
                d_encoder_list.append(d_feature)

            d_encoder_list.append(d_feature)
        # ###########################Encoder############################

        feature = helper_torch_util.conv2d(True, d_feature, d_feature)
        setattr(self, 'decoder_0', feature)

        # ###########################Decoder############################
        f_decoder_list = []
        for j in range(cfg.num_layers):
            name = 'Decoder_layer_' + str(j)
            # Concatenate upsampled features with the matching encoder skip.
            d_in = d_encoder_list[-j - 2] + d_feature
            d_out = d_encoder_list[-j - 2]

            f_decoder_i = helper_torch_util.conv2d_transpose(True, d_in, d_out)
            setattr(self, name, f_decoder_i)

            d_feature = d_encoder_list[-j - 2]
        # ###########################Decoder############################

        # Classification head: two hidden layers, then per-point logits
        # (no batch norm or activation on the final layer).
        f_layer_fc1 = helper_torch_util.conv2d(True, d_feature, 64)
        setattr(self, 'fc1', f_layer_fc1)
        f_layer_fc2 = helper_torch_util.conv2d(True, 64, 32)
        setattr(self, 'fc2', f_layer_fc2)
        f_layer_fc3 = helper_torch_util.conv2d(False, 32, cfg.num_classes,
                                               activation=False)
        setattr(self, 'fc', f_layer_fc3)
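
The double append for i == 0 in the encoder loop is easy to misread, so here is a worked run of the same bookkeeping with hypothetical config values: duplicating the first width is exactly what makes d_encoder_list[-j-2] pick the correct skip width in the decoder loop.

# Hypothetical values standing in for cfg.d_out and cfg.d_feature.
d_out = [16, 64, 128, 256]
d_encoder_list = []
for i in range(len(d_out)):
    d_feature = d_out[i] * 2
    if i == 0:
        d_encoder_list.append(d_feature)
    d_encoder_list.append(d_feature)

print(d_encoder_list)        # [32, 32, 128, 256, 512]
# Decoder step j = 0 then reads d_encoder_list[-2] == 256 as its skip width.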
Example #4
    def init_att_pooling(self, d, d_out, name):
        # Scoring layer: produces per-neighbor attention weights.
        att_activation = nn.Linear(d, d)
        setattr(self, name + 'fc', att_activation)

        # Shared MLP applied to the attention-weighted aggregate.
        f_agg = helper_torch_util.conv2d(True, d, d_out)
        setattr(self, name + 'mlp', f_agg)
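
Example #4 only registers the two modules; the forward logic that uses them is not shown on this page. Below is a hedged sketch of attentive pooling as described in the RandLA-Net paper: the linear layer scores each of the K neighbors, and the score-weighted sum passes through the shared MLP. The att_pooling function, tensor shapes, and softmax placement are assumptions, not this repository's exact code.

import torch
import torch.nn as nn

def att_pooling(x, fc, mlp):
    # x: (B, d, N, K) features of the K neighbors of each of N points.
    scores = torch.softmax(fc(x.permute(0, 2, 3, 1)), dim=2)    # attend over K
    agg = (x.permute(0, 2, 3, 1) * scores).sum(dim=2, keepdim=True)
    return mlp(agg.permute(0, 3, 1, 2))                         # (B, d_out, N, 1)

d, d_out = 8, 16
fc = nn.Linear(d, d)
mlp = nn.Sequential(nn.Conv2d(d, d_out, 1), nn.BatchNorm2d(d_out), nn.ReLU())
out = att_pooling(torch.randn(2, d, 1024, 16), fc, mlp)         # -> (2, 16, 1024, 1)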
Example #5
    def __init__(self, cfg):

        super(LapCluster, self).__init__()
        self.cfg = cfg
        self.compute_lap = Laplacian(cot=True)

        self.criterion = torch.nn.CrossEntropyLoss()

        ld = 1  # running channel width through the network

        concat_feature = []

        for i, d in enumerate(cfg.d_in):
            if i == 0:
                # First layer collapses the feature axis with its kernel.
                f_conv2d = helper_torch_util.conv2d(
                    True, ld, d, kernel_size=[1, cfg.d_feature])
            else:
                f_conv2d = helper_torch_util.conv2d(True,
                                                    ld,
                                                    d,
                                                    kernel_size=[1, 1])
            setattr(self, 'mlp_in_' + str(i), f_conv2d)
            ld = d

        for i in range(len(cfg.d_b1)):
            # Branch 1: shared MLP stack on the block input.
            ld1 = ld
            for j, d in enumerate(cfg.d_b1[i]):
                f_conv2d = helper_torch_util.conv2d(True, ld1, d)
                setattr(self, "mlp_{}_{}".format(i, j), f_conv2d)
                ld1 = d

            # Branch 2: correlation MLP stack on branch 1's output.
            ld2 = ld1
            for j, d in enumerate(cfg.d_b2[i]):
                f_conv2d = helper_torch_util.conv2d(True, ld2, d)
                setattr(self, "mlp_corr_{}_{}".format(i, j), f_conv2d)
                ld2 = d

            ld = ld1
            if cfg.pooling:
                # Branch 2's output is concatenated when pooling is enabled.
                ld += ld2

        # Total width after concatenating the input with every block's output.
        ld = cfg.d_in[-1]
        for i in range(len(cfg.d_b1)):
            ld += cfg.d_b1[i][-1]
            if cfg.pooling:
                ld += cfg.d_b2[i][-1]

        # Output MLPs applied to the concatenated features.
        for i, d in enumerate(cfg.d_outmlp):
            f_conv2d = helper_torch_util.conv2d(True,
                                                ld,
                                                d,
                                                kernel_size=[1, 1])
            setattr(self, 'mlp_out_' + str(i), f_conv2d)
            ld = d

        # Final classifier MLPs; batch norm and dropout are inserted between
        # layers but not after the last one.
        for i, d in enumerate(cfg.d_finalmlp):
            f_conv2d = helper_torch_util.conv2d(True,
                                                ld,
                                                d,
                                                kernel_size=[1, 1])
            setattr(self, 'final_' + str(i), f_conv2d)
            ld = d

            if i != len(cfg.d_finalmlp) - 1:
                bn = nn.BatchNorm2d(d)
                setattr(self, 'final_bn_' + str(i), bn)
                f_dropout = nn.Dropout(0.5)
                setattr(self, 'final_dropout_' + str(i), f_dropout)
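
A pattern common to all five snippets: layers are attached with setattr(self, name, module), which registers them as ordinary nn.Module children, and they can later be fetched by name with getattr in forward. A minimal standalone sketch of that pattern (TinyBlock is hypothetical, not from the repository):

import torch
import torch.nn as nn

class TinyBlock(nn.Module):
    def __init__(self):
        super(TinyBlock, self).__init__()
        # setattr registers each layer as a child module, exactly as the
        # snippets above do, so its parameters are tracked automatically.
        for i in range(3):
            setattr(self, 'mlp' + str(i), nn.Conv2d(4, 4, kernel_size=1))

    def forward(self, x):
        # Layers are recovered by name with getattr.
        for i in range(3):
            x = getattr(self, 'mlp' + str(i))(x)
        return x

net = TinyBlock()
out = net(torch.randn(1, 4, 16, 1))
print(len(list(net.parameters())))   # 6: a weight and a bias per conv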