Example #1
import torch

import utils  # project-local layer wrappers (Conv2d/Linear); import path assumed

# Transform2d is the T-Net module shown in Example #2.


class PointNet2d(torch.nn.Module):
    def __init__(self, nChannels, nPoints, nCoords, nClasses):
        super(PointNet2d, self).__init__()

        # Input transform: a T-Net that predicts a 3x3 alignment matrix
        # for the raw input coordinates.
        self.input_trans = Transform2d(nPoints,
                                       nCoords,
                                       nChannels,
                                       3,
                                       initial_reshape=True)

        # Shared per-point MLP: lift each point from nCoords inputs to 64 features.
        self.conv64A = utils.Conv2d(nChannels, 64, (1, nCoords), pool=False)
        self.conv64B = utils.Conv2d(64, 64, (1, 1), pool=False)

        # Feature transform: a second T-Net that aligns the 64-d point features.
        self.feature_trans = Transform2d(nPoints, 1, 64, 64)

        # Expand per-point features to 1024 channels, then max-pool over all
        # points to obtain a single order-invariant global feature vector.
        self.conv64C = utils.Conv2d(64, 64, (1, 1), pool=False)
        self.conv128 = utils.Conv2d(64, 128, (1, 1), pool=False)
        self.conv1024 = utils.Conv2d(128, 1024, (1, 1), pool=False)

        self.pool = torch.nn.MaxPool2d((nPoints, 1))

        # Classifier head: two fully connected layers with dropout, then a
        # final layer that emits raw class logits (no BN, no activation).
        self.linear512 = utils.Linear(1024, 512)
        self.dropoutA = torch.nn.Dropout(0.7)

        self.linear256 = utils.Linear(512, 256)
        self.dropoutB = torch.nn.Dropout(0.7)

        self.linearID = utils.Linear(256, nClasses, bn=False, activation=None)
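
A hypothetical construction of this module; only __init__ is shown above, so
the forward() call and the (batch, channels, points, coords) input layout are
assumptions, not code from the source:

model = PointNet2d(nChannels=1, nPoints=1024, nCoords=3, nClasses=10)
points = torch.randn(8, 1, 1024, 3)  # illustrative shapes
logits = model(points)               # assumes a forward() wired through the layers above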
Example #2
import torch

import utils  # project-local layer wrappers (Conv2d/Linear); import path assumed


class Transform2d(torch.nn.Module):
    # T-Net: predicts a K x K transform matrix from a point set.
    def __init__(self, height, width, channels, K, initial_reshape=False):
        super(Transform2d, self).__init__()

        self.K = K
        self.initial_reshape = initial_reshape

        # Shared per-point MLP (64 -> 128 -> 1024), then a max-pool over all
        # points to build one global feature vector.
        self.conv64 = utils.Conv2d(channels, 64, (1, width), pool=False)
        self.conv128 = utils.Conv2d(64, 128, (1, 1), pool=False)
        self.conv1024 = utils.Conv2d(128, 1024, (1, 1), pool=False)

        self.pool = torch.nn.MaxPool2d((height, 1))

        # Regress the global feature down to 256 dims before the final transform.
        self.linear512 = utils.Linear(1024, 512)
        self.linear256 = utils.Linear(512, 256)

        # Final regression to the K*K transform. Wrapping these in Parameter
        # registers them with the module, so they are trained and moved by
        # .to()/.cuda(); the original plain tensors with requires_grad=True
        # were never registered. The bias starts as a flattened identity so
        # the predicted transform is I at initialization.
        self.weights = torch.nn.Parameter(torch.zeros(256, K * K))
        self.biases = torch.nn.Parameter(torch.eye(K).flatten())
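
The forward pass is not part of this example; a conventional T-Net would
combine these tensors as in the sketch below (an assumption based on the
standard PointNet design, not code from the source):

def apply_transform(features, weights, biases, K):
    # features: (batch, 256) global feature after linear256.
    transform = features.matmul(weights) + biases  # (batch, K*K)
    return transform.view(-1, K, K)                # one K x K matrix per sample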
Example #3
import torch

import utils  # project-local layer wrappers (Conv1d/Linear); import path assumed


class Transform1d(torch.nn.Module):
    # T-Net: predicts a width x width transform matrix from a point set.
    def __init__(self, height, width, bn=False):
        super(Transform1d, self).__init__()

        self.width = width

        # Shared per-point MLP (64 -> 128 -> 1024), then a max-pool over the
        # height (point) dimension for a single global feature vector.
        self.conv64 = utils.Conv1d(width, 64, bn=bn, pool=False)
        self.conv128 = utils.Conv1d(64, 128, bn=bn, pool=False)
        self.conv1024 = utils.Conv1d(128, 1024, bn=bn, pool=False)

        self.pool = torch.nn.MaxPool1d(height)

        self.linear512 = utils.Linear(1024, 512, bn=bn)
        self.linear256 = utils.Linear(512, 256, bn=bn)

        # Regress the width*width transform matrix; the bias is initialized to
        # a flattened identity, biasing the prediction toward I early in training.
        self.linearK = torch.nn.Linear(256, width * width)
        self.linearK.bias = torch.nn.Parameter(
            torch.eye(width).view(width * width))
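
A quick, hypothetical check of the identity bias; the layer's weight matrix
keeps PyTorch's default initialization, only the bias is overridden:

t = Transform1d(height=1024, width=3)
print(t.linearK.bias.view(3, 3))
# tensor([[1., 0., 0.],
#         [0., 1., 0.],
#         [0., 0., 1.]])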
Example #4
import logging

import torch

import utils  # project-local layer wrappers (Conv1d/Linear); import path assumed

# Transform1d is the T-Net module shown in Example #3.

logger = logging.getLogger(__name__)


class PointNet1d(torch.nn.Module):
    def __init__(self, config, bn=False):
        super(PointNet1d, self).__init__()

        input_shape = config['data_handling']['image_shape']

        assert len(input_shape) == 2

        nPoints = input_shape[0]
        nCoords = input_shape[1]
        nClasses = len(config['data_handling']['classes'])

        logger.debug('nPoints = %s, nCoords = %s, nClasses = %s', nPoints,
                     nCoords, nClasses)

        # Input transform: align the raw nCoords inputs with a learned transform.
        self.input_trans = Transform1d(nPoints, nCoords, bn=bn)

        # Build the point-wise conv stack from config: each entry is an
        # (N_in, N_out, pool) triple.
        self.input_to_feature = torch.nn.Sequential()
        for x in config['model']['input_to_feature']:
            N_in, N_out, pool = x
            self.input_to_feature.add_module(
                'conv_%d_to_%d' % (N_in, N_out),
                utils.Conv1d(N_in, N_out, bn=bn, pool=pool))

        # Feature transform: a second T-Net sized to the conv stack's output width.
        self.feature_trans = Transform1d(
            nPoints, config['model']['input_to_feature'][-1][1], bn=bn)

        # Second conv stack from config, then a max-pool over all points to
        # obtain the order-invariant global feature.
        self.feature_to_pool = torch.nn.Sequential()
        for x in config['model']['feature_to_pool']:
            N_in, N_out, pool = x
            self.feature_to_pool.add_module(
                'conv_%d_to_%d' % (N_in, N_out),
                utils.Conv1d(N_in, N_out, bn=bn, pool=pool))

        self.pool = torch.nn.MaxPool1d(nPoints)

        # Dense head from config: each entry is (N_in, N_out, dropout, bn, activation).
        self.dense_layers = torch.nn.Sequential()
        for x in config['model']['dense_layers']:
            N_in, N_out, dropout, layer_bn, act = x  # per-layer bn; renamed to avoid shadowing the bn argument
            if N_out is None:
                N_out = nClasses  # a None output width means "number of classes"
            self.dense_layers.add_module(
                'dense_%d_to_%d' % (N_in, N_out),
                utils.Linear(N_in, N_out, bn=layer_bn, activation=act))
            if dropout > 0:
                # Name each dropout module by its percentage, e.g. dropout_070;
                # the original int(dropout * 3) produced colliding names.
                self.dense_layers.add_module('dropout_%03d' % int(dropout * 100),
                                             torch.nn.Dropout(dropout))
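
The config structure this constructor expects, reconstructed from the lookups
above; every value here is illustrative, and the activation entries are
placeholders since the type utils.Linear expects is not shown:

config = {
    'data_handling': {
        'image_shape': [1024, 3],  # (nPoints, nCoords)
        'classes': ['class_a', 'class_b', 'class_c'],
    },
    'model': {
        # (N_in, N_out, pool) per conv layer
        'input_to_feature': [[3, 64, False], [64, 64, False]],
        'feature_to_pool': [[64, 128, False], [128, 1024, False]],
        # (N_in, N_out, dropout, bn, activation) per dense layer
        'dense_layers': [[1024, 512, 0.7, False, 'relu'],
                         [512, 256, 0.7, False, 'relu'],
                         [256, None, 0, False, None]],
    },
}
model = PointNet1d(config)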