# Example #1
def MultiscaleShapeContext(dimension,
                           n_features=1,
                           n_layers=3,
                           shape_context_size=3,
                           downsample_size=2,
                           downsample_stride=2,
                           bn=True):
    """Build a multiscale shape-context feature extractor.

    Recursively stacks ``n_layers`` ShapeContext layers, each applied to a
    progressively average-pooled (coarser) copy of the input, unpooled back
    to the original resolution and concatenated channel-wise.  Optionally
    appends batch normalization over the concatenated features.
    """
    net = sparseconvnet.Sequential()
    if n_layers == 1:
        # Base case: a single shape-context layer at the current scale.
        net.add(
            sparseconvnet.ShapeContext(dimension, n_features,
                                       shape_context_size))
    else:
        # Fine branch: shape context at the current resolution.
        fine = sparseconvnet.ShapeContext(dimension, n_features,
                                          shape_context_size)
        # Coarse branch: pool, recurse one level (without BN — BN is only
        # applied once, at the top of the recursion), then unpool back.
        coarse = sparseconvnet.Sequential(
            sparseconvnet.AveragePooling(dimension, downsample_size,
                                         downsample_stride),
            MultiscaleShapeContext(dimension, n_features, n_layers - 1,
                                   shape_context_size, downsample_size,
                                   downsample_stride, False),
            sparseconvnet.UnPooling(dimension, downsample_size,
                                    downsample_stride))
        net.add(sparseconvnet.ConcatTable().add(fine).add(coarse))
        net.add(sparseconvnet.JoinTable())
    if bn:
        # Each shape-context layer emits shape_context_size**dimension *
        # n_features channels; n_layers of them are concatenated.
        net.add(
            sparseconvnet.BatchNormalization(shape_context_size**dimension *
                                             n_features * n_layers))
    return net
# Example #2
 def __init__(self, flags):
     """Sparse ResNet classifier: SCN feature extractor plus a dense linear head."""
     torch.nn.Module.__init__(self)
     import sparseconvnet as scn
     self._flags = flags
     dim = self._flags.DATA_DIM
     n_class = self._flags.NUM_CLASS
     spatial = self._flags.SPATIAL_SIZE
     nf = self._flags.BASE_NUM_FILTERS
     # The pooling/striding schedule below is tuned for a 128^dim volume.
     assert spatial == 128
     model = scn.Sequential()
     model.add(scn.InputLayer(dim, spatial, mode=3))
     model.add(scn.SubmanifoldConvolution(dim, 1, nf, 3, False))
     model.add(scn.MaxPooling(dim, 2, 2))
     # Four ResNet stages; each entry is [out_channels, reps, stride].
     model.add(
         SparseResNet(dim, nf,
                      [[nf * 1, 2, 1], [nf * 2, 2, 2],
                       [nf * 4, 2, 2], [nf * 8, 2, 2]]))
     # Final strided convolution, BN+ReLU, pooling, then densify.
     model.add(scn.Convolution(dim, nf * 8, nf * 16, 3, 1, False))
     model.add(scn.BatchNormReLU(nf * 16))
     model.add(scn.AveragePooling(dim, 6, 6))
     model.add(scn.SparseToDense(dim, nf * 16))
     self._net = model
     self.linear = torch.nn.Linear(nf * 16, n_class)
# Example #3
    def __init__(self,
                 dimension,
                 reps,
                 n_layers,
                 leakiness=0,
                 input_layer=None,
                 name='encoder',
                 device=None):
        """Sparse convolutional encoder built from stacked ResNet stages.

        Args:
            dimension: spatial dimensionality of the sparse tensors.
            reps: resnet blocks per stage (>= 1); the first block of each
                stage also adapts the incoming channel count.
            n_layers: per-stage specs. n_layers[i][1] is the stage's output
                channel count; an optional n_layers[i][2] selects the
                downsampling op ('submanifoldconvolution', 'maxpool',
                'avgpool'); with no third entry a strided scn.Convolution
                is used.
            leakiness: negative slope for the LeakyReLU activations.
            input_layer: spatial size for an optional scn.InputLayer (its
                length is used as that layer's dimension), or None to skip.
            name: prefix used when generating per-block names.
            device: stored on the instance; not used in construction.
        """
        super(Encoder, self).__init__()
        self.dimension = dimension
        self.reps = reps
        self.n_layers = n_layers
        self.leakiness = leakiness
        self.name = name
        self.device = device

        # Fix: compare against None with an identity check, not `!=`.
        if input_layer is not None:
            self.input_layer = scn.InputLayer(len(input_layer), input_layer)

        self.blocks = []
        self.block_names = {}
        n_in, n_out = 1, 1
        for i in range(len(n_layers)):
            block = scn.Sequential()
            # Add `reps` resnet blocks; after the first, channels stay at
            # n_out for the remaining repetitions.
            for rep in range(reps):
                block.add(
                    resnet_block(dimension,
                                 n_in,
                                 n_out,
                                 1,
                                 leakiness,
                                 computation='submanifoldconvolution'))
                n_in = n_out
            n_out = n_layers[i][1]
            block.add(scn.LeakyReLU(leakiness))
            # Downsampling op: strided convolution by default, otherwise
            # whichever op the stage spec names in its third entry.
            if len(n_layers[i]) == 2:
                block.add(scn.Convolution(dimension, n_in, n_out, 2, 2, False))
            elif len(n_layers[i]
                     ) == 3 and n_layers[i][2] == 'submanifoldconvolution':
                block.add(
                    scn.SubmanifoldConvolution(dimension, n_in, n_out, 2,
                                               False))
            elif len(n_layers[i]) == 3 and n_layers[i][2] == 'maxpool':
                block.add(scn.MaxPooling(dimension, 2, 2))
            elif len(n_layers[i]) == 3 and n_layers[i][2] == 'avgpool':
                block.add(scn.AveragePooling(dimension, 2, 2))
            block_name = get_block_name(name, dimension, reps, n_in, n_out,
                                        leakiness)
            n_in = n_out
            self.blocks.append(block)
            self.block_names[block_name] = len(self.blocks) - 1
        # Register the blocks so their parameters are visible to torch.
        self.blocks = torch.nn.ModuleList(self.blocks)
# Example #4
 def __init__(self):
     """2-D sparse ResNet classifier over 3-channel inputs, 10 output classes."""
     nn.Module.__init__(self)
     backbone = scn.Sequential(
         scn.SubmanifoldConvolution(2, 3, 16, 3, False),
         scn.BatchNormReLU(16),
         scn.SparseResNet(2, 16,
                          [['b', 16, 3, 1], ['b', 32, 3, 2], ['b', 64, 3, 2]]),
         scn.AveragePooling(2, 8, 8),
         scn.SparseToDense(2, 64))
     self.sparseModel = backbone
     # Ask the network what input size shrinks to a 1x1 spatial output.
     self.spatial_size = backbone.input_spatial_size(torch.LongTensor([1, 1]))
     self.inputLayer = scn.InputLayer(2, self.spatial_size, 2)
     self.linear = nn.Linear(64, 10)
# Example #5
    def __init__(self, options):
        """Fixed neighbor-gather convolution plus multi-scale (un)pooling.

        The submanifold convolution's weights are frozen to a one-hot
        pattern so that each output channel copies one face-adjacent
        neighbor from the 3x3x3 stencil (only numNeighbors == 6 is
        supported).

        Raises:
            ValueError: if options.numNeighbors != 6.  (Previously this
                case fell through to an undefined `offsets` and raised a
                confusing NameError.)
        """
        nn.Module.__init__(self)
        self.options = options
        dimension = 3
        self.input_layer = scn.InputLayer(dimension,
                                          options.inputScale,
                                          mode=4)
        self.conv = scn.SubmanifoldConvolution(dimension,
                                               1,
                                               options.numNeighbors,
                                               3,
                                               bias=False)

        # Average pool / unpool pairs at power-of-two scales 2..32.
        self.pool_1 = scn.AveragePooling(dimension, 2, 2)
        self.pool_2 = scn.AveragePooling(dimension, 4, 4)
        self.pool_3 = scn.AveragePooling(dimension, 8, 8)
        self.pool_4 = scn.AveragePooling(dimension, 16, 16)
        self.pool_5 = scn.AveragePooling(dimension, 32, 32)
        self.unpool_1 = scn.UnPooling(dimension, 2, 2)
        self.unpool_2 = scn.UnPooling(dimension, 4, 4)
        self.unpool_3 = scn.UnPooling(dimension, 8, 8)
        self.unpool_4 = scn.UnPooling(dimension, 16, 16)
        self.unpool_5 = scn.UnPooling(dimension, 32, 32)

        with torch.no_grad():
            weight = torch.zeros(27, 1, options.numNeighbors).cuda()
            if options.numNeighbors == 6:
                # Flat indices of the 6 face neighbors of the center (13)
                # in a 3x3x3 kernel: 13 +/- 9, +/- 3, +/- 1.
                offsets = [4, 22, 10, 16, 12, 14]
            else:
                raise ValueError(
                    'unsupported numNeighbors: %d (only 6 is supported)' %
                    options.numNeighbors)
            for index, offset in enumerate(offsets):
                weight[offset, 0, index] = 1
            # Freeze the hand-built gather pattern into the conv weights.
            self.conv.weight = nn.Parameter(weight)
        self.output_layer = scn.OutputLayer(dimension)
# Example #6
def get_down_avgpooling(
        num_dims, sparse, input_channels, output_channels=None, *, stride=2):
    """Build an average-pooling downsampling layer (sparse or dense).

    Args:
        num_dims: number of spatial dimensions.
        sparse: if True return an scn.AveragePooling, else a dense
            average-pooling module for `num_dims` dimensions.
        input_channels: channel count (pooling preserves it).
        output_channels: optional; must equal input_channels when given.
        stride: int or length-num_dims sequence; used as both the pooling
            kernel size and its stride (non-overlapping windows).

    Returns:
        Tuple (sparse, stride_array, input_channels, layer).
    """
    if isinstance(stride, int):
        stride = np.full(num_dims, stride)
    else:
        assert len(stride) == num_dims
    stride_tuple = tuple(stride)

    # Pooling cannot change the channel count.
    assert output_channels is None or output_channels == input_channels

    if sparse:
        layer = scn.AveragePooling(
            num_dims, pool_size=stride_tuple, pool_stride=stride_tuple)
    else:
        # Fix: renamed from `maxpool_class` — this is an *average*-pooling
        # class; the old name was a copy-paste leftover.
        avgpool_class = get_dense_avgpool_class(num_dims)
        layer = avgpool_class(
            kernel_size=stride_tuple, stride=stride_tuple, padding=0)

    return sparse, stride, input_channels, layer
# Example #7
 def __init__(self,
              spatial_size=(97, 97, 199),
              n_initial_filters=15,
              mom=0.99):
     """Sparse 3D conv network: stem conv -> strided downsample ->
     4-stage SparseResNet -> average pool -> dense output.

     Args:
         spatial_size: input spatial extent; the default (97, 97, 199) is
             anisotropic and the z axis gets larger kernels/strides below.
         n_initial_filters: channel count of the stem convolution.
         mom: momentum passed to every BatchNorm layer.
     """
     torch.nn.Module.__init__(self)
     self.input_tensor = scn.InputLayer(dimension=3,
                                        spatial_size=(spatial_size))
     # Stem: anisotropic 3x3x5 submanifold conv on a 1-channel input.
     self.initial_convolution1 = scn.SubmanifoldConvolution(
         dimension=3,
         nIn=1,
         nOut=n_initial_filters,
         filter_size=(3, 3, 5),
         bias=False)
     self.relu1 = scn.BatchNormReLU(n_initial_filters,
                                    momentum=mom,
                                    eps=1e-5)
     # self.initial_convolution2 = scn.SubmanifoldConvolution(
     #         dimension   = 3,
     #         nIn         = n_initial_filters,
     #         nOut        = n_initial_filters,
     #         filter_size = (7, 7, 15),
     #         bias        = False
     #     )
     # self.relu2 = scn.BatchNormReLU(n_initial_filters, momentum=mom, eps=1e-5)
     n_filters = 2 * n_initial_filters
     # Strided downsample; the z stride of 4 presumably compensates for
     # the elongated z extent of the default input — TODO confirm.
     self.initial_downsample = scn.Convolution(dimension=3,
                                               nIn=n_initial_filters,
                                               nOut=n_filters,
                                               filter_size=(5, 5, 15),
                                               filter_stride=(2, 2, 4),
                                               bias=False)
     # Four 'b'-type stages, doubling channels at each (2x .. 16x).
     self.resnet_block = SparseResNet(
         3,
         n_filters,
         [['b', 2 * n_filters, 2, 2], ['b', 4 * n_filters, 2, 2],
          ['b', 8 * n_filters, 2, 2], ['b', 16 * n_filters, 2, 2]],
         mom=mom)
     # After the resnet stages the channel count is 16x the stage input.
     n_filters = 16 * n_filters
     sz_pool_fin = 2
     self.n_final_filters = n_filters
     # Size-2 average pooling with stride 1, then densify the output.
     self.pool = scn.AveragePooling(3, sz_pool_fin, 1)
     self.sparse_to_dense = scn.SparseToDense(dimension=3,
                                              nPlanes=n_filters)