Example #1
    def __init__(self):
        super(Net, self).__init__()
        self.szp = SparseZeroPad2d(1)
        self.bn = nn.BatchNorm1d(3)
        self.cv1 = spconv.SparseConv2d(3, 3, 5, 5)
        self.ss1 = SparseScale2d(5)
        self.cv2 = spconv.SparseConv2d(3, 3, 5, 5)
        self.ss2 = SparseScale2d(5)
        self.mp = spconv.SparseMaxPool2d(2, 2)
        self.td = spconv.ToDense()

        self.net = spconv.SparseSequential(
            SparseZeroPad2d(1),
            nn.BatchNorm1d(3),
            spconv.SparseConv2d(3, 32, 3, 1),
            nn.ReLU(),
            spconv.SparseConv2d(32, 64, 3, 1),
            nn.ReLU(),
            spconv.SparseMaxPool2d(2, 2),
            spconv.ToDense(),
        )

        self.fc1 = nn.Linear(10816, 128)
        self.fc2 = nn.Linear(128, 10)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
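
A forward pass is not shown in the snippet above. The sketch below is an assumption based on the spconv 1.x API (usual torch/spconv imports assumed): the input is assembled as a SparseConvTensor from [N, C] features and [N, 3] integer indices with the batch index first, and self.spatial_shape is a hypothetical attribute holding the dense H x W grid.

    def forward(self, features, indices, batch_size):
        # features: [num_active, 3] float, indices: [num_active, 3] int (batch, y, x)
        x = spconv.SparseConvTensor(features, indices,
                                    self.spatial_shape, batch_size)
        x = self.net(x)          # ends in ToDense() -> [B, 64, H', W'], 64*H'*W' == 10816
        x = torch.flatten(x, 1)  # [B, 10816], matching fc1 below
        x = torch.relu(self.fc1(self.dropout1(x)))
        return self.fc2(self.dropout2(x))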
Example #2
 def __init__(self, in_planes, pointwise_layers=2):
     super(Pointwise2DForZ, self).__init__()
     self.log = logging.getLogger(__name__)
     layers = []
     n_layers = pointwise_layers
     if not isinstance(n_layers, int) or n_layers < 2:
         raise ValueError("n_layers must be  integer >= 2")
     increment = int(round(float(in_planes) / float(n_layers - 1)))
     out = in_planes
     for i in range(n_layers):
         if i == (n_layers - 1):
             out = 1
         elif i == 0:
             out = in_planes
         else:
             out -= increment
         self.log.debug(
             "appending layer {0} -> {1} planes, kernel size of {2}, padding of {3}"
             .format(in_planes, out, 1, 0))
         layers.append(spconv.SparseConv2d(in_planes, out, 1, 1, 0))
         layers.append(nn.BatchNorm1d(out))
         layers.append(nn.ReLU())
         in_planes = out
     layers.append(spconv.ToDense())
     self.network = spconv.SparseSequential(*layers)
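
Worth noting how plain torch.nn modules (BatchNorm1d, ReLU) sit inside spconv.SparseSequential: the container applies non-sparse layers to the SparseConvTensor's 2-D features tensor of shape [num_active_sites, channels], which is why BatchNorm1d rather than BatchNorm2d appears throughout these examples. Roughly (an illustration of the behavior, not the library's code):

     # x is a spconv.SparseConvTensor; bn = nn.BatchNorm1d(out)
     x.features = bn(x.features)   # features: [num_active_sites, out]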
Example #3
    def __init__(self, shape):
        super().__init__()

        self.net = spconv.SparseSequential(
            spconv.SparseConv3d(1, 32, 4),
            nn.LeakyReLU(),
            spconv.SparseConv3d(32, 32, 4),
            nn.LeakyReLU(),
            spconvpool.SparseMaxPool(3, 5),
            spconv.SparseConv3d(32, 32, 4),
            nn.LeakyReLU(),
            spconv.SparseConv3d(32, 32, 4),
            nn.LeakyReLU(),
            spconvpool.SparseMaxPool(3, 5),
            spconv.SparseConv3d(32, 32, 4),
            nn.LeakyReLU(),
            spconv.SparseConv3d(32, 32, 4),
            nn.LeakyReLU(),
            spconv.SparseConv3d(32, 1, 4),
            nn.LeakyReLU(),
            spconvpool.SparseMaxPool(3, 5),
            spconv.ToDense(),
            nn.Flatten(),
            nn.Linear(14688, 1000),
            nn.LeakyReLU(),
            nn.Linear(1000, 1000),
            nn.LeakyReLU(),
            nn.Linear(1000, 1),
            nn.LeakyReLU(),
            #             nn.Sigmoid()
        )
        self.shape = shape
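
The hard-coded nn.Linear(14688, 1000) pins this network to a single input size: 14688 must equal the per-sample element count produced by ToDense() and nn.Flatten(), so changing self.shape (presumably the dense spatial shape used to build the input SparseConvTensor; forward() is not shown) requires recomputing that constant.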
Example #4
 def __init__(self):
     super().__init__()
     self.net = spconv.SparseSequential(
         spconv.SubMConv3d(3, 8, 3, indice_key="subm1", padding=1, use_hash=False),
         nn.BatchNorm1d(8),
         nn.ReLU(),
         spconv.SparseConv3d(8, 16, 3, stride=2, padding=1, use_hash=False),
         nn.BatchNorm1d(16),
         nn.ReLU(),
         spconv.SubMConv3d(16, 16, 3, indice_key="subm2", padding=1, use_hash=False),
         nn.BatchNorm1d(16),
         nn.ReLU(),
         spconv.SparseConv3d(16, 32, 3, stride=2, padding=1, use_hash=False),
         nn.BatchNorm1d(32),
         nn.ReLU(),
         spconv.SubMConv3d(32, 32, 3, indice_key="subm3", padding=1, use_hash=False),
         nn.BatchNorm1d(32),
         nn.ReLU(),
         spconv.SparseConv3d(32, 64, 3, stride=2, padding=1, use_hash=False),
         nn.BatchNorm1d(64),
         nn.ReLU(),
         spconv.SubMConv3d(64, 64, 3, indice_key="subm4", padding=1, use_hash=False),
         nn.BatchNorm1d(64),
         nn.ReLU(),
         spconv.ToDense()  # [64, 2, 8, 8]
     )
     self.linear = nn.Linear(64 * 2 * 8 * 8, 4)
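
The indice_key arguments are doing real work here: SubMConv3d layers that share a key reuse the indice pairs computed by the first layer registered under it, and each key ("subm1" through "subm4") is confined to one resolution level, because every strided SparseConv3d in between changes the set of active sites. A usage sketch (assumed; self.spatial_shape is hypothetical and must be a shape that the three stride-2 stages reduce to the [2, 8, 8] grid in the comment):

     def forward(self, features, indices, batch_size):
         x = spconv.SparseConvTensor(features, indices,
                                     self.spatial_shape, batch_size)
         x = self.net(x)                          # dense [B, 64, 2, 8, 8]
         return self.linear(x.view(x.shape[0], -1))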
Example #5
 def __init__(self,
              in_planes,
              kernel_size=3,
              n_layers=2,
              pointwise_layers=0,
              pointwise_factor=0.8,
              todense=True):
     super(SparseConv2DForZ, self).__init__()
     self.log = logging.getLogger(__name__)
     layers = []
     if pointwise_layers > 0:
         if n_layers == 1:
             raise ValueError(
                 "n_layers must be > 1 if using pointwise convolution")
         increment = int(
             round(
                 int(round(in_planes * pointwise_factor)) /
                 float(n_layers - 1)))
     else:
         increment = int(round(float(in_planes) / float(n_layers)))
     if kernel_size % 2 != 1:
         raise ValueError("Kernel size must be an odd integer")
     if not isinstance(n_layers, int) or n_layers < 1:
         raise ValueError("n_layers must be  integer >= 1")
     out = in_planes
     reset_kernel = False
     orig_kernel = kernel_size
     for i in range(n_layers):
         if i == (n_layers - 1):
             out = 1
         else:
             out -= increment
             if i == 0 and pointwise_layers > 0:
                 if pointwise_factor > 0:
                     out = int(round(pointwise_factor * in_planes))
         pd = int((kernel_size - 1) / 2)
         if pointwise_layers > 0:
             pd = 0
             kernel_size = 1
             pointwise_layers -= 1
             if pointwise_layers == 0:
                 reset_kernel = True
         self.log.debug(
             "appending layer {0} -> {1} planes, kernel size of {2}, padding of {3}"
             .format(in_planes, out, kernel_size, pd))
         layers.append(
             spconv.SparseConv2d(in_planes, out, kernel_size, 1, pd))
         if reset_kernel:
             kernel_size = orig_kernel
             reset_kernel = False
         if i != (n_layers - 1):
             layers.append(nn.BatchNorm1d(out))
         layers.append(nn.ReLU())
         in_planes = out
         if kernel_size > 1:
             kernel_size -= 2
     if todense:
         layers.append(spconv.ToDense())
     self.network = spconv.SparseSequential(*layers)
Example #6
 def _make_deblock(self, num_out_filters, idx):
     print("CUSTOM MAKE DEBLOCK")
     stride = self._upsample_strides[idx - self._upsample_start_idx]
     if self._use_norm:
         if self._use_groupnorm:
             SparseBatchNorm2d = change_default_args(
                 num_groups=self._num_groups, eps=1e-3)(GroupNorm)
             DenseBatchNorm2d = change_default_args(
                 num_groups=self._num_groups, eps=1e-3)(GroupNorm)
         else:
             SparseBatchNorm2d = change_default_args(eps=1e-3,
                                                     momentum=0.01)(
                                                         nn.BatchNorm1d)
             DenseBatchNorm2d = change_default_args(
                 eps=1e-3, momentum=0.01)(nn.BatchNorm2d)
         SparseConvTranspose2d = change_default_args(bias=False)(
             spconv.SparseConvTranspose2d)
         DenseConvTranspose2d = change_default_args(bias=False)(
             nn.ConvTranspose2d)
     else:
         SparseBatchNorm2d = Empty
         DenseBatchNorm2d = Empty
         SparseConvTranspose2d = change_default_args(bias=True)(
             spconv.SparseConvTranspose2d)
         DenseConvTranspose2d = change_default_args(bias=True)(
             nn.ConvTranspose2d)
     stride = np.round(stride).astype(np.int64)
      if idx <= LAST_SPARSE_IDX:
         deblock = spconv.SparseSequential(
             SparseConvTranspose2d(
                 num_out_filters,
                 self._num_upsample_filters[idx - self._upsample_start_idx],
                 stride,
                 stride=stride),
             SparseBatchNorm2d(
                 self._num_upsample_filters[idx -
                                            self._upsample_start_idx]),
             nn.ReLU(), spconv.ToDense())
     else:
         deblock = Sequential(
             DenseConvTranspose2d(
                 num_out_filters,
                 self._num_upsample_filters[idx - self._upsample_start_idx],
                 stride,
                 stride=stride),
             DenseBatchNorm2d(
                 self._num_upsample_filters[idx -
                                            self._upsample_start_idx]),
             nn.ReLU(),
         )
     return deblock
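
change_default_args is assumed here to be the helper from SECOND's torchplus: it returns the given layer class with new default keyword arguments, much like functools.partial. A rough equivalent (a sketch, not the actual torchplus code):

     from functools import partial

     def change_default_args(**kwargs):
         def wrapper(layer_class):
             # e.g. change_default_args(bias=False)(nn.Conv2d)(3, 8, 1)
             return partial(layer_class, **kwargs)
         return wrapper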
Example #7
 def _make_deblock(self, num_out_filters, idx):
     stride = self._upsample_strides[idx - self._upsample_start_idx]
     if self._use_norm:
         if self._use_groupnorm:
             BatchNorm2d = change_default_args(num_groups=self._num_groups,
                                               eps=1e-3)(GroupNorm)
         else:
             BatchNorm2d = change_default_args(eps=1e-3, momentum=0.01)(
                 nn.BatchNorm1d)
         Conv2d = change_default_args(bias=False)(spconv.SparseConv2d)
         ConvTranspose2d = change_default_args(bias=False)(
             spconv.SparseConvTranspose2d)
     else:
         BatchNorm2d = Empty
         Conv2d = change_default_args(bias=True)(spconv.SparseConv2d)
         ConvTranspose2d = change_default_args(bias=True)(
             spconv.SparseConvTranspose2d)
     if stride >= 1:
         stride = np.round(stride).astype(np.int64)
         print("DEBLOCK CONV_TRANSPOSE STRIDE:", stride)
         deblock = spconv.SparseSequential(
             # PrintLayer(stride),
             ConvTranspose2d(
                 num_out_filters,
                 self._num_upsample_filters[idx - self._upsample_start_idx],
                 stride,
                 stride=stride),
             # PrintLayer(stride),
             BatchNorm2d(
                 self._num_upsample_filters[idx - self._upsample_start_idx]
             ),
             nn.ReLU(),
         )
     else:
         stride = np.round(1 / stride).astype(np.int64)
         print("DEBLOCK CONV STRIDE:", stride)
         deblock = spconv.SparseSequential(
             # PrintLayer(stride),
             Conv2d(num_out_filters,
                    self._num_upsample_filters[idx -
                                               self._upsample_start_idx],
                    stride,
                    stride=stride),
             # PrintLayer(stride),
             BatchNorm2d(
                 self._num_upsample_filters[idx - self._upsample_start_idx]
             ),
             nn.ReLU(),
         )
     deblock.add(spconv.ToDense())
     return deblock
Example #8
 def __init__(self):
     super(Net, self).__init__()
     self.net = spconv.SparseSequential(
         nn.BatchNorm1d(1),
         spconv.SparseConv2d(1, 32, 3, 1),
         nn.ReLU(),
         spconv.SparseConv2d(32, 64, 3, 1),
         nn.ReLU(),
         spconv.SparseMaxPool2d(2, 2),
         spconv.ToDense(),
     )
     self.fc1 = nn.Linear(9216, 128)
     self.fc2 = nn.Linear(128, 10)
     self.dropout1 = nn.Dropout2d(0.25)
     self.dropout2 = nn.Dropout2d(0.5)
Example #9
 def __init__(self, num_layers, ndim, shape, in_channels, out_channels,
              kernel_size, stride):
     super().__init__()
     self.net = spconv.SparseSequential(
         spconv.SparseConv3d(in_channels,
                             out_channels,
                             kernel_size,
                             stride,
                             indice_key="cp0",
                             bias=False),
         spconv.SparseInverseConv3d(out_channels,
                                    in_channels,
                                    kernel_size,
                                    indice_key="cp0",
                                    bias=False),
     )
     self.todense = spconv.ToDense()
     self.shape = shape
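
Pairing SparseConv3d and SparseInverseConv3d through indice_key="cp0" makes the second layer undo the first one's downsampling exactly: the inverse convolution restores the input's active sites and spatial shape rather than deriving them from kernel geometry. A forward sketch (assumed; the snippet stores self.todense and self.shape but does not show forward()):

     def forward(self, features, indices, batch_size):
         x = spconv.SparseConvTensor(features, indices, self.shape, batch_size)
         y = self.net(x)           # same indices as x, via indice_key="cp0"
         return self.todense(y)    # dense [B, in_channels, *self.shape]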
Example #10
	def __init__(self, shape):
		super().__init__()
		
		# Batch normalization destabilizes the outputs here, for reasons unknown.
		# Reportedly this resolves once model.eval() is called, rather than querying single outputs in training mode.
		
		self.net = spconv.SparseSequential(            
			spconv.SparseConv3d(1, 32, 4),
			nn.LeakyReLU(),
			spconv.SparseConv3d(32, 32, 4),
			nn.LeakyReLU(),
			spconvpool.SparseMaxPool(3, 5),
			
			spconv.SparseConv3d(32, 32, 4),
			nn.LeakyReLU(),
			spconv.SparseConv3d(32, 32, 4),
			nn.LeakyReLU(),
			spconvpool.SparseMaxPool(3, 5),

			spconv.SparseConv3d(32, 32, 4),
			nn.LeakyReLU(),
			spconv.SparseConv3d(32, 32, 4),
			nn.LeakyReLU(),
			spconv.SparseConv3d(32, 1, 4),
			nn.LeakyReLU(),
			spconvpool.SparseMaxPool(3, 5),
			
			spconv.ToDense(),
			
			nn.Flatten(),
			
			nn.Linear(14688, 1000),
			nn.LeakyReLU(),
			nn.Linear(1000, 1000),
			nn.LeakyReLU(),
			nn.Linear(1000, 1),
			nn.LeakyReLU(),
#             nn.Sigmoid()
		)        
		self.shape = shape
Example #11
 def generate_block(self,
                    in_dim,
                    out_dim,
                    ksize,
                    stride,
                    padding,
                    do_subm=True):
     block = spconv.SparseSequential(
         spconv.SubMConv3d(in_channels=in_dim,
                           out_channels=out_dim,
                           kernel_size=1,
                           stride=1,
                           indice_key="subm0"),
         nn.BatchNorm1d(num_features=out_dim),
         nn.LeakyReLU(),
         spconv.SubMConv3d(in_channels=in_dim,
                           out_channels=out_dim,
                           kernel_size=3,
                           stride=1,
                           padding=1,
                           indice_key='subm0')
         if do_subm else spconv.SparseConv3d(in_channels=in_dim,
                                             out_channels=out_dim,
                                             kernel_size=3,
                                             stride=1,
                                             padding=1),
         nn.BatchNorm1d(num_features=out_dim),
         nn.LeakyReLU(),
         spconv.SubMConv3d(in_channels=in_dim,
                           out_channels=out_dim,
                           kernel_size=1,
                           stride=1,
                           indice_key="subm0"),
         nn.BatchNorm1d(num_features=out_dim),
         spconv.ToDense(),
     )
     return block
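
Two observations on this block (not part of the source): the inline conditional only swaps the middle 3x3 layer between a submanifold convolution (which preserves the active sites) and a regular sparse convolution (which dilates them), and all three convolutions declare in_channels=in_dim even though the first already emits out_dim channels, so the block only composes cleanly when in_dim == out_dim.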
Example #12
 def __init__(self,
              in_planes,
              out_planes=2,
              kernel_size=3,
              n_conv=1,
              n_point=3,
              conv_position=3,
              pointwise_factor=0.8,
              batchnorm=True):
     """
     @type out_planes: int
     """
     super(SparseConv2DForEZ, self).__init__()
     self.log = logging.getLogger(__name__)
     layers = []
     n_layers = n_conv + n_point
     if n_conv > 0:
         if conv_position < 1:
             raise ValueError("conv position must be >= 1 if n_conv > 0")
     if n_point > 0:
         if n_layers == 1:
             raise ValueError(
                 "n_layers must be > 1 if using pointwise convolution")
         increment = int(
             round(
                 int(round(in_planes * pointwise_factor - out_planes)) /
                 float(n_layers - 1)))
     else:
         increment = int(
             round(float(in_planes - out_planes) / float(n_layers)))
     if kernel_size % 2 != 1:
         raise ValueError("Kernel size must be an odd integer")
     if not isinstance(n_layers, int) or n_layers < 1:
         raise ValueError("n_layers must be  integer >= 1")
     out = copy(in_planes)
     inp = copy(in_planes)
     curr_kernel = copy(kernel_size)
     if n_conv > 0:
         conv_positions = [
             i for i in range(conv_position - 1, conv_position - 1 + n_conv)
         ]
     else:
         conv_positions = []
     for i in range(n_layers):
         if i == (n_layers - 1):
             out = copy(out_planes)
         else:
             out -= increment
             if i == 0 and n_point > 0:
                 if pointwise_factor > 0:
                     out = int(round(pointwise_factor * in_planes))
          if i not in conv_positions:
             curr_kernel = 1
         else:
             curr_kernel = kernel_size - int((i + 1 - conv_position) * 2)
             if curr_kernel < 3:
                 curr_kernel = 3
         if curr_kernel % 2 == 0:
             raise ValueError("error: kernel size is even")
         pd = int((curr_kernel - 1) / 2)
         self.log.debug(
             "appending layer {0} -> {1} planes, kernel size of {2}, padding of {3}"
             .format(inp, out, curr_kernel, pd))
         layers.append(spconv.SparseConv2d(inp, out, curr_kernel, 1, pd))
         if i != (n_layers - 1):
             if batchnorm:
                 layers.append(nn.BatchNorm1d(out))
         layers.append(nn.ReLU())
         inp = out
     layers.append(spconv.ToDense())
     self.network = spconv.SparseSequential(*layers)
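
A hypothetical instantiation (argument values are illustrative, not from the source):

     # 64 input planes, three pointwise layers with one 3x3 conv in the third
     # slot, tapering to the default two output planes, then ToDense().
     head = SparseConv2DForEZ(in_planes=64, n_conv=1, n_point=3, conv_position=3)
     # head.network consumes a spconv.SparseConvTensor and returns a dense map.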
Example #13
    def __init__(self,
                 output_shape,
                 num_input_features=128,
                 num_filters_down1=[64],
                 num_filters_down2=[64, 64],
                 use_norm=True,
                 num_class=2,
                 layer_nums=[3, 5, 5],
                 layer_strides=[2, 2, 2],
                 num_filters=[128, 128, 256],
                 upsample_strides=[1, 2, 4],
                 num_upsample_filters=[256, 256, 256],
                 num_anchor_per_loc=2,
                 encode_background_as_zeros=True,
                 use_direction_classifier=True,
                 use_groupnorm=False,
                 num_groups=32,
                 use_bev=False,
                 box_code_size=7,
                 use_rc_net=False,
                 name='sparse_rpn'):
        super(SparseRPN, self).__init__()
        self._num_anchor_per_loc = num_anchor_per_loc
        self._use_direction_classifier = use_direction_classifier
        self.name = name
        if use_norm:
            BatchNorm2d = change_default_args(eps=1e-3,
                                              momentum=0.01)(nn.BatchNorm2d)
            BatchNorm1d = change_default_args(eps=1e-3,
                                              momentum=0.01)(nn.BatchNorm1d)
            Conv2d = change_default_args(bias=False)(nn.Conv2d)
            SpConv3d = change_default_args(bias=False)(spconv.SparseConv3d)
            SubMConv3d = change_default_args(bias=False)(spconv.SubMConv3d)
            ConvTranspose2d = change_default_args(bias=False)(
                nn.ConvTranspose2d)
        else:
            BatchNorm2d = Empty
            BatchNorm1d = Empty
            Conv2d = change_default_args(bias=True)(nn.Conv2d)
            SpConv3d = change_default_args(bias=True)(spconv.SparseConv3d)
            SubMConv3d = change_default_args(bias=True)(spconv.SubMConv3d)
            ConvTranspose2d = change_default_args(bias=True)(
                nn.ConvTranspose2d)
        sparse_shape = np.array(output_shape[1:4]) + [1, 0, 0]
        # sparse_shape[0] = 11
        print(sparse_shape)
        self.sparse_shape = sparse_shape
        self.voxel_output_shape = output_shape
        # [11, 400, 352]
        self.block1 = spconv.SparseSequential(
            SpConv3d(num_input_features,
                     num_filters[0],
                     3,
                     stride=[2, layer_strides[0], layer_strides[0]],
                     padding=[0, 1, 1]), BatchNorm1d(num_filters[0]),
            nn.ReLU())
        # [5, 200, 176]
        for i in range(layer_nums[0]):
            self.block1.add(
                SubMConv3d(num_filters[0],
                           num_filters[0],
                           3,
                           padding=1,
                           indice_key="subm0"))
            self.block1.add(BatchNorm1d(num_filters[0]))
            self.block1.add(nn.ReLU())

        self.deconv1 = spconv.SparseSequential(
            SpConv3d(num_filters[0],
                     num_filters[0], (3, 1, 1),
                     stride=(2, 1, 1)), BatchNorm1d(num_filters[0]), nn.ReLU(),
            SpConv3d(num_filters[0],
                     num_upsample_filters[0], (2, 1, 1),
                     stride=1), BatchNorm1d(num_upsample_filters[0]),
            nn.ReLU(), spconv.ToDense(), Squeeze())  # [1, 200, 176]

        # [5, 200, 176]
        self.block2 = spconv.SparseSequential(
            SpConv3d(num_filters[0],
                     num_filters[1],
                     3,
                     stride=[2, layer_strides[1], layer_strides[1]],
                     padding=[0, 1, 1]), BatchNorm1d(num_filters[1]),
            nn.ReLU())

        for i in range(layer_nums[1]):
            self.block2.add(
                SubMConv3d(num_filters[1],
                           num_filters[1],
                           3,
                           padding=1,
                           indice_key="subm1"))
            self.block2.add(BatchNorm1d(num_filters[1]))
            self.block2.add(nn.ReLU())
        # [2, 100, 88]
        self.deconv2 = spconv.SparseSequential(
            SpConv3d(num_filters[1], num_filters[1], (2, 1, 1), stride=1),
            BatchNorm1d(num_filters[1]), nn.ReLU(), spconv.ToDense(),
            Squeeze(),
            ConvTranspose2d(num_filters[1],
                            num_upsample_filters[1],
                            upsample_strides[1],
                            stride=upsample_strides[1]),
            BatchNorm2d(num_upsample_filters[1]), nn.ReLU())  # [1, 200, 176]

        self.block3 = spconv.SparseSequential(
            SpConv3d(num_filters[1],
                     num_filters[2], [2, 3, 3],
                     stride=[1, layer_strides[2], layer_strides[2]],
                     padding=[0, 1, 1]), BatchNorm1d(num_filters[2]),
            nn.ReLU())

        for i in range(layer_nums[2]):
            self.block3.add(
                SubMConv3d(num_filters[2],
                           num_filters[2],
                           3,
                           padding=1,
                           indice_key="subm2"))
            self.block3.add(BatchNorm1d(num_filters[2]))
            self.block3.add(nn.ReLU())

        self.deconv3 = Sequential(
            spconv.ToDense(),
            Squeeze(),
            ConvTranspose2d(num_filters[2],
                            num_upsample_filters[2],
                            upsample_strides[2],
                            stride=upsample_strides[2]),
            BatchNorm2d(num_upsample_filters[2]),
            nn.ReLU(),
        )  # [1, 200, 176]
        self.post = Sequential(
            Conv2d(sum(num_upsample_filters), 128, 3, stride=1, padding=1),
            BatchNorm2d(128),
            nn.ReLU(),
            Conv2d(128, 64, 3, stride=1, padding=1),
            BatchNorm2d(64),
            nn.ReLU(),
        )  # [1, 200, 176]
        if encode_background_as_zeros:
            num_cls = num_anchor_per_loc * num_class
        else:
            num_cls = num_anchor_per_loc * (num_class + 1)
        '''self.conv_cls = nn.Conv2d(sum(num_upsample_filters), num_cls, 1)
        self.conv_box = nn.Conv2d(
            sum(num_upsample_filters), num_anchor_per_loc * box_code_size, 1)
        if use_direction_classifier:
            self.conv_dir_cls = nn.Conv2d(
                sum(num_upsample_filters), num_anchor_per_loc * 2, 1)
        '''
        self.conv_cls = nn.Conv2d(64, num_cls, 1)
        self.conv_box = nn.Conv2d(64, num_anchor_per_loc * box_code_size, 1)
        if use_direction_classifier:
            self.conv_dir_cls = nn.Conv2d(64, num_anchor_per_loc * 2, 1)
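
Only the layers are built above; a forward pass (an assumption modeled on SECOND-style RPNs and the inline shape comments, not shown in the source) would concatenate the three upsampled maps before the shared post trunk and the 1x1 prediction heads:

    def forward(self, voxel_features, coors, batch_size):
        x = spconv.SparseConvTensor(voxel_features, coors,
                                    self.sparse_shape, batch_size)
        x1 = self.block1(x)
        up1 = self.deconv1(x1)   # dense [B, num_upsample_filters[0], 200, 176]
        x2 = self.block2(x1)
        up2 = self.deconv2(x2)   # dense [B, num_upsample_filters[1], 200, 176]
        x3 = self.block3(x2)
        up3 = self.deconv3(x3)   # dense [B, num_upsample_filters[2], 200, 176]
        x = self.post(torch.cat([up1, up2, up3], dim=1))
        return self.conv_box(x), self.conv_cls(x)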
Example #14
    def __init__(self,
                 nin,
                 nout,
                 n,
                 size,
                 expansion_factor=10.,
                 size_factor=3,
                 pad_factor=0.0,
                 stride_factor=1,
                 dil_factor=1,
                 dropout=0,
                 trainable_weights=False):
        super(ExtractedFeatureConv, self).__init__()
        assert n > 1
        self.alg = []
        self.out_size = size
        self.dropout = dropout
        self.log = logging.getLogger(__name__)
        self.log.debug(
            "Initializing convolution block with nin {0}, nout {1}, size {2}".
            format(nin, nout, size))
        self.ndim = len(size) - 1
        nframes = [nin, int(round(nin * expansion_factor))]
        diff = float(nframes[1] - nout) / (n - 1)
        nframes += [int(floor(nframes[1] - diff * i)) for i in range(1, n)]
        for i in range(n):
            decay_factor = 1. - (i - 1) / (n - 1)
            fs = int(floor(size_factor / (i + 1.)))
            if fs < 2:
                fs = 2
            st = int(round(stride_factor * i / (n - 1)))
            if st < 1:
                st = 1
            dil = int(round(dil_factor**i))
            pd = int(round(pad_factor * (fs - 1) * dil_factor * decay_factor))
            self.alg.append(
                spconv.SparseConv2d(nframes[i], nframes[i + 1], fs, st, pd,
                                    dil, 1, trainable_weights))
            self.log.debug(
                "added regular convolution, frames: {0} -> {1}".format(
                    nframes[i], nframes[i + 1]))
            self.log.debug(
                "filter size: {0}, stride: {1}, pad: {2}, dil: {3}".format(
                    fs, st, pd, dil))
            self.alg.append(nn.BatchNorm1d(nframes[i + 1]))
            self.alg.append(nn.ReLU())
            if self.dropout:
                self.alg.append(nn.Dropout(self.dropout))
            arg_dict = {
                DIM: self.ndim,
                NIN: nframes[i],
                NOUT: nframes[i + 1],
                FS: [fs] * 4,
                STR: [st] * 4,
                PAD: [pd] * 4,
                DIL: [dil] * 4
            }
            self.out_size = ModelValidation.calc_output_size(
                arg_dict, self.out_size, "cur", "prev", self.ndim)
            self.log.debug("Loop {0}, output size is {1}".format(
                i, self.out_size))

        self.alg.append(spconv.ToDense())
        self.network = spconv.SparseSequential(*self.alg)
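
One call-site detail worth flagging (an observation, assuming the standard spconv 1.x signature): in spconv.SparseConv2d(nframes[i], nframes[i + 1], fs, st, pd, dil, 1, trainable_weights), the positional parameters after dilation are groups and bias, so the literal 1 fixes groups and trainable_weights actually toggles the bias term, not weight trainability.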
Example #15
    def __init__(self, in_channel, pred_dim, chans=64):
        super(SparseNet3D, self).__init__()
        conv3d = []
        up_bn = []  # batch norm layers for the deconvolutions
        conv3d_transpose = []

        # self.conv3d = torch.nn.Conv3d(4, 32, (4,4,4), stride=(2,2,2), padding=(1,1,1))
        # self.layers = []
        self.down_in_dims = [in_channel, chans, 2 * chans]
        self.down_out_dims = [chans, 2 * chans, 4 * chans]
        self.down_ksizes = [4, 4, 4]
        self.down_strides = [2, 2, 2]
        padding = 1  #Note: this only holds for ksize=4 and stride=2!
        print('down dims: ', self.down_out_dims)

        for i, (in_dim, out_dim, ksize, stride) in enumerate(
                zip(self.down_in_dims, self.down_out_dims, self.down_ksizes,
                    self.down_strides)):
            # print('3D CONV', end=' ')

            conv3d += [
                spconv.SparseConv3d(in_channels=in_dim,
                                    out_channels=out_dim,
                                    kernel_size=ksize,
                                    stride=stride,
                                    padding=padding),
                nn.LeakyReLU(),
                nn.BatchNorm1d(num_features=out_dim),
            ]

        # self.conv3d = nn.ModuleList(conv3d)
        self.conv3d = spconv.SparseSequential(*conv3d)

        self.up_in_dims = [4 * chans, 6 * chans]
        self.up_bn_dims = [6 * chans, 3 * chans]
        self.up_out_dims = [4 * chans, 2 * chans]
        self.up_ksizes = [4, 4]
        self.up_strides = [2, 2]
        padding = 1  #Note: this only holds for ksize=4 and stride=2!
        print('up dims: ', self.up_out_dims)

        for i, (in_dim, bn_dim, out_dim, ksize, stride) in enumerate(
                zip(self.up_in_dims, self.up_bn_dims, self.up_out_dims,
                    self.up_ksizes, self.up_strides)):

            conv3d_transpose += [
                spconv.SparseConvTranspose3d(in_channels=in_dim,
                                             out_channels=out_dim,
                                             kernel_size=ksize,
                                             stride=stride,
                                             padding=padding),
                nn.LeakyReLU(),
                nn.BatchNorm1d(num_features=out_dim),
            ]
            up_bn.append(nn.BatchNorm1d(num_features=bn_dim))

        # final 1x1x1 conv to get our desired pred_dim
        self.final_feature = spconv.SparseSequential(
            spconv.ToDense(),
            nn.Conv3d(in_channels=2 * chans,
                      out_channels=pred_dim,
                      kernel_size=1,
                      stride=1,
                      padding=0))
        self.conv3d_transpose = spconv.SparseSequential(*conv3d_transpose)
        self.up_bn = nn.ModuleList(up_bn)
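
The channel bookkeeping implies U-Net style skip connections, even though forward() is not shown: the first transposed convolution maps 4 * chans to 4 * chans, and concatenating the 2 * chans encoder output gives 6 * chans (up_bn_dims[0], and the input width of the second transposed convolution); the second maps 6 * chans to 2 * chans, and concatenating the chans-wide encoder output gives 3 * chans (up_bn_dims[1]). The separate up_bn list presumably normalizes those concatenated features between stages.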
Example #16
    def _version1(self,
                  nin,
                  nout,
                  n,
                  size,
                  to_dense,
                  size_factor=3,
                  pad_factor=0.0,
                  stride_factor=1,
                  dil_factor=1,
                  pointwise_factor=0,
                  depth_factor=0,
                  dropout=0,
                  trainable_weights=False):
        assert n > 0
        self.alg = []
        self.out_size = size
        self.dropout = dropout
        self.log = logging.getLogger(__name__)
        self.log.debug(
            "Initializing convolution block with nin {0}, nout {1}, size {2}".
            format(nin, nout, size))
        self.ndim = len(size) - 1
        if nin != nout:
            if pointwise_factor > 0:
                nframes = [
                    nin, nin - int(floor((nin - nout) * pointwise_factor))
                ]
                if n > 1:
                    diff = float(nin - nout) / n
                    for i in range(n - 1):
                        val = int(floor(nframes[-1] - diff))
                        if val > nout:
                            nframes += [val]
                        else:
                            nframes += [nout]
            elif depth_factor > 0:
                nframes = [nin, int(nin * depth_factor)]
                if n > 1:
                    diff = float(nframes[-1] - nout) / (n - 1)
                    for i in range(n - 1):
                        val = int(floor(nframes[-1] - diff))
                        if val > nout:
                            nframes += [val]
                        else:
                            nframes += [nout]
            else:
                diff = float(nin - nout) / n
                nframes = [int(floor(nin - diff * i)) for i in range(n + 1)]
        else:
            nframes = [nin] * (n + 1)
        for i in range(n):
            if pointwise_factor > 0:
                if n > 1:
                    decay_factor = 1. - (i - 1) / (n - 1)
                else:
                    decay_factor = 1.
            else:
                if n > 1:
                    decay_factor = 1. - i / (n - 1)
                else:
                    decay_factor = 1.
            fs = int(floor(size_factor / (i + 1.)))
            if fs < 2:
                fs = 2
            st = int(round(stride_factor * i / (n - 1)))
            if st < 1:
                st = 1
            dil = int(round(dil_factor**i))
            pd = int(round(pad_factor * (fs - 1) * dil_factor * decay_factor))
            if i == 0 and pointwise_factor > 0:
                pd, fs, dil, st = 0, 1, 1, 1
                self.alg.append(
                    spconv.SparseConv2d(nframes[i], nframes[i + 1], fs, st, pd,
                                        dil, 1, trainable_weights))
                # self.alg.append(spconv.SubMConv2d(nframes[i], nframes[i + 1], fs, st, pd, dil, 1, trainable_weights))
                self.log.debug(
                    "added pointwise convolution, frames: {0} -> {1}".format(
                        nframes[i], nframes[i + 1]))
            else:
                self.alg.append(
                    spconv.SparseConv2d(nframes[i], nframes[i + 1], fs, st, pd,
                                        dil, 1, trainable_weights))
                self.log.debug(
                    "added regular convolution, frames: {0} -> {1}".format(
                        nframes[i], nframes[i + 1]))
            self.log.debug(
                "filter size: {0}, stride: {1}, pad: {2}, dil: {3}".format(
                    fs, st, pd, dil))
            self.alg.append(nn.BatchNorm1d(nframes[i + 1]))
            self.alg.append(nn.ReLU())
            if self.dropout:
                self.alg.append(nn.Dropout(self.dropout))
            arg_dict = {
                DIM: self.ndim,
                NIN: nframes[i],
                NOUT: nframes[i + 1],
                FS: [fs] * 4,
                STR: [st] * 4,
                PAD: [pd] * 4,
                DIL: [dil] * 4
            }
            self.out_size = ModelValidation.calc_output_size(
                arg_dict, self.out_size, "cur", "prev", self.ndim)
            self.log.debug("Loop {0}, output size is {1}".format(
                i, self.out_size))

        if to_dense:
            self.alg.append(spconv.ToDense())
        self.func = spconv.SparseSequential(*self.alg)
Example #17
    def _version3(self,
                  nin,
                  nout,
                  n,
                  size,
                  to_dense,
                  size_factor=3,
                  pad_factor=0.0,
                  stride_factor=1,
                  dil_factor=1,
                  expansion_factor=0,
                  n_expansion=0,
                  pointwise_factor=0,
                  dropout=0,
                  trainable_weights=False):
        self.alg = []
        self.out_size = size
        self.dropout = dropout
        self.log = logging.getLogger(__name__)
        self.log.debug(
            "Initializing convolution block with nin {0}, nout {1}, size {2}".
            format(nin, nout, size))
        self.ndim = len(size) - 1
        if pointwise_factor > 0:
            n_contraction = n - 1 - n_expansion
            if n_contraction < 1:
                raise ValueError("n_contraction too large, must be < n - 1")

        else:
            n_contraction = n - n_expansion
            if n_contraction < 1:
                raise ValueError("n_contraction too large, must be < n")
        nframes = [nin]
        if pointwise_factor > 0:
            nframes.append(nin - int(floor((nin - nout) * pointwise_factor)))
        if n_expansion > 0:
            nframes += _get_frame_expansion(nframes[-1], expansion_factor,
                                            n_expansion)
        if n_contraction > 0:
            nframes += _get_frame_contraction(nframes[-1], nout, n_contraction)
        for i in range(n):
            if pointwise_factor > 0:
                if n > 1:
                    decay_factor = 1. - (i - 1) / (n - 1)
                else:
                    decay_factor = 1.
            else:
                if n > 1:
                    decay_factor = 1. - i / (n - 1)
                else:
                    decay_factor = 1.
            fs = int(ceil(size_factor * decay_factor))
            if fs < 2:
                fs = 2
            st = int(round(stride_factor * i / (n - 1)))
            if st < 1:
                st = 1
            dil = int(round(dil_factor**i))
            pd = int(
                round(pad_factor * ((fs - 1) / 2.) * dil_factor *
                      decay_factor))
            if i == 0 and pointwise_factor > 0:
                pd, fs, dil, st = 0, 1, 1, 1
                self.alg.append(
                    spconv.SparseConv2d(nframes[i], nframes[i + 1], fs, st, pd,
                                        dil, 1, trainable_weights))
                # self.alg.append(spconv.SubMConv2d(nframes[i], nframes[i + 1], fs, st, pd, dil, 1, trainable_weights))
                self.log.debug(
                    "added pointwise convolution, frames: {0} -> {1}".format(
                        nframes[i], nframes[i + 1]))
            else:
                self.alg.append(
                    spconv.SparseConv2d(nframes[i], nframes[i + 1], fs, st, pd,
                                        dil, 1, trainable_weights))
                self.log.debug(
                    "added regular convolution, frames: {0} -> {1}".format(
                        nframes[i], nframes[i + 1]))
            self.log.debug(
                "filter size: {0}, stride: {1}, pad: {2}, dil: {3}".format(
                    fs, st, pd, dil))
            self.alg.append(nn.BatchNorm1d(nframes[i + 1]))
            self.alg.append(nn.ReLU())
            if self.dropout:
                self.alg.append(nn.Dropout(self.dropout))
            arg_dict = {
                DIM: self.ndim,
                NIN: nframes[i],
                NOUT: nframes[i + 1],
                FS: [fs] * 4,
                STR: [st] * 4,
                PAD: [pd] * 4,
                DIL: [dil] * 4
            }
            self.out_size = ModelValidation.calc_output_size(
                arg_dict, self.out_size, "cur", "prev", self.ndim)
            self.log.debug("Loop {0}, output size is {1}".format(
                i, self.out_size))
        if to_dense:
            self.alg.append(spconv.ToDense())
        self.func = spconv.SparseSequential(*self.alg)