def __init__(self, inputshape, reps, nin_features, nout_features, nplanes, show_sizes):
    nn.Module.__init__(self)
    """
    inputs
    ------
    inputshape [list of int]: dimensions of the matrix or image
    reps [int]: number of residual modules per layer (for both encoder and decoder)
    nin_features [int]: number of features in the first convolutional layer
    nout_features [int]: number of features that feed into the regression layer
    nPlanes [int]: the depth of the U-Net
    show_sizes [bool]: if True, print sizes while running forward
    """
    # mode=0 controls how scn.InputLayer handles duplicate coordinates
    self._mode = 0
    self._dimension = 2  # 2D sparse images only
    self._inputshape = inputshape
    if len(self._inputshape) != self._dimension:
        raise ValueError(
            "expected inputshape to contain size of 2 dimensions only." +
            "given %d values" % (len(self._inputshape)))
    self._reps = reps
    self._nin_features = nin_features
    self._nout_features = nout_features
    # NOTE(review): the `nplanes` argument is ignored here — the U-Net depth
    # is hard-coded to 5 levels (widths nin_features .. 5*nin_features);
    # confirm whether callers expect `nplanes` to take effect.
    self._nplanes = [
        nin_features, 2 * nin_features, 3 * nin_features, 4 * nin_features,
        5 * nin_features
    ]
    self._show_sizes = show_sizes
    # Fused pipeline: voxelize -> 3x3 submanifold conv (1 -> nin_features)
    # -> residual U-Net -> BN+ReLU -> dense output rows.
    self.sparseModel = scn.Sequential().add(
        scn.InputLayer(
            self._dimension, self._inputshape, mode=self._mode)).add(
                scn.SubmanifoldConvolution(
                    self._dimension, 1, self._nin_features, 3, False)).add(
                        scn.UNet(
                            self._dimension,
                            self._reps,
                            self._nplanes,
                            residual_blocks=True,
                            downsample=[2, 2])).add(
                                scn.BatchNormReLU(self._nin_features)).add(
                                    scn.OutputLayer(self._dimension))
    # The same stages are also kept as individual attributes — presumably so
    # forward() can run them one at a time (e.g. to print intermediate sizes
    # when show_sizes is True) — TODO confirm against forward().
    self.input = scn.InputLayer(self._dimension, self._inputshape, mode=self._mode)
    self.conv1 = scn.SubmanifoldConvolution(self._dimension, 1,
                                            self._nin_features, 3, False)
    self.unet = scn.UNet(self._dimension, self._reps, self._nplanes,
                         residual_blocks=True, downsample=[2, 2])
    self.batchnorm = scn.BatchNormReLU(self._nin_features)
    self.output = scn.OutputLayer(self._dimension)
    # extra 3x3 conv collapsing nin_features -> 1 channel
    self.conv2 = scn.SubmanifoldConvolution(self._dimension,
                                            self._nin_features, 1, 3, False)
def __init__(self, flags):
    """Sparse U-Net segmentation network configured entirely from `flags`."""
    import sparseconvnet as scn
    super(UResNet, self).__init__()
    self._flags = flags
    dim = flags.DATA_DIM
    n_filters = flags.URESNET_FILTERS        # base U-Net feature count
    conv_reps = 2                            # conv block repetition factor
    down_kernel = 2                          # downsample filter size
    # linearly growing feature count per U-Net level
    feature_ladder = [level * n_filters
                      for level in range(1, flags.URESNET_NUM_STRIDES + 1)]
    net = scn.Sequential()
    net.add(scn.InputLayer(dim, flags.SPATIAL_SIZE, mode=3))
    net.add(scn.SubmanifoldConvolution(dim, 1, n_filters, 3, False))  # kernel 3, no bias
    net.add(scn.UNet(dim, conv_reps, feature_ladder,
                     residual_blocks=True,
                     downsample=[down_kernel, 2]))  # [filter size, filter stride]
    net.add(scn.BatchNormReLU(n_filters))
    net.add(scn.OutputLayer(dim))
    self.sparseModel = net
    self.linear = torch.nn.Linear(n_filters, flags.NUM_CLASS)
def __init__(self, cfg):
    """Sparse U-Net built from the 'uresnet' section of a config dict."""
    import sparseconvnet as scn
    super(UResNet, self).__init__()
    self._model_config = cfg['modules']['uresnet']
    cfg_get = self._model_config.get
    self._dimension = cfg_get('data_dim', 3)
    depth = cfg_get('num_strides', 5)
    grid_size = cfg_get('spatial_size', 512)
    n_classes = cfg_get('num_classes', 5)
    n_filters = cfg_get('filters', 16)       # base U-Net feature count
    n_in = cfg_get('features', 1)            # channels of the input tensor
    conv_reps = 2                            # conv block repetition factor
    down_kernel = 2                          # downsample filter size
    # feature count grows linearly with level
    planes = [level * n_filters for level in range(1, depth + 1)]
    model = scn.Sequential()
    model.add(scn.InputLayer(self._dimension, grid_size, mode=3))
    model.add(scn.SubmanifoldConvolution(self._dimension, n_in,
                                         n_filters, 3, False))  # kernel 3, no bias
    model.add(scn.UNet(self._dimension, conv_reps, planes,
                       residual_blocks=True,
                       downsample=[down_kernel, 2]))  # [filter size, filter stride]
    model.add(scn.BatchNormReLU(n_filters))
    model.add(scn.OutputLayer(self._dimension))
    self.sparseModel = model
    self.linear = torch.nn.Linear(n_filters, n_classes)
def __init__(self, dimension=3, size=1536, nFeatures=16, depth=5, nClasses=1):
    """Sparse U-Net with a small two-layer regression/classification head.

    dimension: spatial dimensionality of the sparse tensor (3 = voxels).
    size: spatial extent of the input grid.
    nFeatures: base number of U-Net features (multiplied per level).
    depth: number of U-Net levels.
    nClasses: number of outputs of the final head.
    """
    super(UResNet, self).__init__()
    self.dimension = dimension
    self.size = size
    self.nFeatures = nFeatures
    self.depth = depth
    self.nClasses = nClasses
    reps = 2  # Conv block repetition factor
    kernel_size = 2  # Use input_spatial_size method for other values?
    m = nFeatures  # Unet number of features
    nPlanes = [i * m for i in range(1, depth + 1)]  # UNet features per level
    nInputFeatures = 6  # channels of each input point
    self.sparseModel = scn.Sequential().add(
        scn.InputLayer(dimension, size, mode=3)).add(
            scn.SubmanifoldConvolution(
                dimension, nInputFeatures, m, 3, False)).add(  # Kernel size 3, no bias
                    scn.UNet(dimension,
                             reps,
                             nPlanes,
                             residual_blocks=True,
                             downsample=[kernel_size, 2])
                ).add(  # downsample = [filter size, filter stride]
                    scn.BatchNormReLU(m)).add(scn.OutputLayer(dimension))
    # BUG FIX: the original used torch.nn.ReLU(0.1).  ReLU's first (and only)
    # argument is the boolean `inplace` flag, so 0.1 silently meant
    # inplace=True rather than a 0.1 negative slope.  A LeakyReLU with
    # negative_slope=0.1 was clearly intended.
    self.linear = torch.nn.Sequential(torch.nn.Linear(m, m // 2),
                                      torch.nn.LeakyReLU(0.1),
                                      torch.nn.Linear(m // 2, nClasses))
def __init__(self, is_3d, num_strides=3, base_num_outputs=16, num_classes=3, spatialSize=192):
    """Sparse U-Net classifier for 2D or 3D sparse inputs.

    is_3d: build a 3D network when True, otherwise 2D.
    num_strides: number of U-Net levels.
    base_num_outputs: base feature count, multiplied per level.
    num_classes: output size of the final linear layer.
    spatialSize: spatial extent of the input grid.
    """
    nn.Module.__init__(self)
    dimension = 3 if is_3d else 2
    reps = 2  # Conv block repetition factor
    kernel_size = 2  # Use input_spatial_size method for other values?
    m = base_num_outputs  # Unet number of features
    nPlanes = [i * m for i in range(1, num_strides + 1)]  # features per level
    nInputFeatures = 1
    # CONSISTENCY FIX: `UNet` was referenced without the `scn.` prefix, unlike
    # every other sparseconvnet layer in this file; unless UNet was separately
    # imported, that raised a NameError at construction time.
    self.sparseModel = scn.Sequential().add(
        scn.InputLayer(dimension, spatialSize, mode=3)).add(
            scn.SubmanifoldConvolution(
                dimension, nInputFeatures, m, 3, False)).add(  # Kernel size 3, no bias
                    scn.UNet(dimension,
                             reps,
                             nPlanes,
                             residual_blocks=True,
                             downsample=[kernel_size, 2])
                ).add(  # downsample = [filter size, filter stride]
                    scn.BatchNormReLU(m)).add(scn.OutputLayer(dimension))
    self.linear = nn.Linear(m, num_classes)
def __init__(self):
    """Sparse fully-convolutional backbone plus a 1x1-conv part-segmentation head."""
    super(get_model, self).__init__()
    self.part_num = 3
    self.resolution = 150
    self.dimension = 3
    self.reps = 2  # conv block repetition factor
    self.m = 32    # base number of U-Net features
    # feature count per level: m, 2m, ..., 5m
    self.nPlanes = [self.m * level for level in range(1, 6)]
    grid = torch.LongTensor([self.resolution * 8 + 15] * 3)
    backbone = scn.Sequential()
    backbone.add(scn.InputLayer(self.dimension, grid, mode=3))
    backbone.add(scn.SubmanifoldConvolution(self.dimension, 1, self.m, 3, False))
    backbone.add(scn.FullyConvolutionalNet(self.dimension, self.reps, self.nPlanes,
                                           residual_blocks=False,
                                           downsample=[3, 2]))
    # the FCN concatenates every level's features, hence sum(nPlanes) channels
    backbone.add(scn.BatchNormReLU(sum(self.nPlanes)))
    backbone.add(scn.OutputLayer(self.dimension))
    self.sparseModel = backbone
    self.nc = 64
    self.linear = nn.Linear(sum(self.nPlanes), self.nc)
    # point-wise (1x1) conv head over concatenated (x3) per-point features
    self.convs1 = torch.nn.Conv1d(self.nc * 3, 128, 1)
    self.convs2 = torch.nn.Conv1d(128, 64, 1)
    self.convs3 = torch.nn.Conv1d(64, self.part_num, 1)
    self.bns1 = nn.BatchNorm1d(128)
    self.bns2 = nn.BatchNorm1d(64)
def __init__(self, num_dims, spatial_size_extention):
    """Holds the sparse output layer plus the ROI cut / extra-cut helpers."""
    super().__init__()
    # converts sparse scn tensors back into dense per-point feature rows
    self.output_layer = scn.OutputLayer(num_dims)
    roi_combiner = RawToTensorFeatureExtractorCombiner()
    self.output_roi_cut = SparseRoiCut(roi_combiner)
    scene_combiner = RawToFeaturesSceneFeatureExtractorCombiner()
    self.scene_roi_extra_cut = SparseRoiExtraCut(scene_combiner)
    # NOTE: caller-facing attribute name (including its spelling) kept unchanged
    self.spatial_size_extention = spatial_size_extention
def forward(self, x, loss_weights=None):
    """Coarse-to-fine sparse completion forward pass.

    x: encoder input (dense); loss_weights: per-stage weights — stages whose
    weight is 0 are skipped entirely (their slot in `outputs` gets [[], []]).

    Returns ([locs, surface_features], outputs) when surface prediction runs,
    otherwise ([[], []], outputs).  `outputs` holds the coarse prediction
    followed by one occupancy output per refinement level.
    """
    # Fix for PL summary: PyTorch-Lightning calls forward without weights
    if loss_weights is None:
        loss_weights = self._loss_weights
    outputs = []
    # encode: dense features, coarse prediction, and per-level sparse skips
    x, out, feats_sparse = self.encoder(x)
    batch_size = x.shape[0]
    if self.use_skip_sparse:
        # Repack each sparse skip as ([locations, dense features], spatial_size)
        # so concat_skip below can consume plain tensors.
        for k in range(len(feats_sparse)):
            feats_sparse[k] = ([
                feats_sparse[k].metadata.getSpatialLocations(
                    feats_sparse[k].spatial_size),
                scn.OutputLayer(3)(feats_sparse[k])
            ], feats_sparse[k].spatial_size)
    # convert the dense coarse prediction into sparse locs/feats
    locs, feats, out = self.dense_coarse_to_sparse(x, out, truncation=3)
    outputs.append(out)
    x_sparse = [locs, feats]
    # hierarchical refinement, coarsest to finest
    for h in range(len(self.refinement)):
        if loss_weights[h + 1] > 0:
            if self.use_skip_sparse:
                # skip features are indexed from the fine end, hence the
                # len(self.refinement) - h reversal
                x_sparse = self.concat_skip(
                    feats_sparse[len(self.refinement) - h][0], x_sparse,
                    feats_sparse[len(self.refinement) - h][1], batch_size)
            x_sparse, occ = self.refinement[h](x_sparse)
            outputs.append(occ)
        else:
            # stage disabled by its loss weight: keep output slots aligned
            outputs.append([[], []])
    # surface prediction at the finest resolution
    locs = x_sparse[0]
    if self.PRED_SURF and loss_weights[-1] > 0:
        if self.use_skip_sparse:
            x_sparse = self.concat_skip(feats_sparse[0][0], x_sparse,
                                        feats_sparse[0][1], batch_size)
        x_sparse = self.surfacepred(x_sparse)
        return [locs, x_sparse], outputs
    return [[], []], outputs
def __init__(self):
    """Sparse U-Net over the module-level config globals
    (dimension, spatialSize, m, reps, nPlanes, classes_total)."""
    nn.Module.__init__(self)
    model = scn.Sequential()
    model.add(scn.InputLayer(dimension, spatialSize, mode=3))
    model.add(scn.SubmanifoldConvolution(dimension, 1, m, 3, False))
    model.add(scn.UNet(dimension, reps, nPlanes,
                       residual_blocks=False, downsample=[2, 2]))
    model.add(scn.BatchNormReLU(m))
    model.add(scn.OutputLayer(dimension))
    self.sparseModel = model
    self.linear = nn.Linear(m, classes_total)
def __init__(self, nrows, ncols, calc_consistency=True, intersectiondata=None, larcv_version=None, nsource_wires=3456, ntarget_wires=2400, goodrange=None, return_pos_images=False, reduce=True):
    super(SparseLArFlow3DConsistencyLoss, self).__init__()
    """
    inputs
    ------
    nrows [int]: image rows
    ncols [int]: image cols
    calc_consistency [bool]: if True, load the wire-intersection table and
        enable the 3D consistency term; otherwise only the flow loss is used
    intersectiondata [str]: path to rootfile which stores a table for what
        Y-position corresponds to some wire crossing. if none, only the
        flow prediction loss is used
    larcv_version [None or int]: 1 or 2 for larcv 1 or 2
    nsource_wires [int]: per the name, wire count of the source plane
        (original note: "don't remember")
    ntarget_wires [int]: per the name, wire count of the target plane
        (original note: "don't remember")
    goodrange [None or (int,int)]: row interval marked valid in the mask
    return_pos_images [bool]: stored flag, consumed elsewhere
    reduce [bool]: stored flag, presumably controls loss reduction
    """
    self.calc_consistency = calc_consistency
    if self.calc_consistency:
        # BUG FIX: this previously passed the undefined name
        # `intersectiondatafile`, raising NameError whenever
        # calc_consistency=True; the constructor parameter is
        # `intersectiondata`.
        IntersectUB.load_intersection_data(intersectiondata,
                                           larcv_version=larcv_version,
                                           nsource_wires=nsource_wires,
                                           ntarget_wires=ntarget_wires)
        IntersectUB.set_img_dims(nrows, ncols)
    if goodrange is not None:
        # binary mask: 1.0 inside [goodrange[0], goodrange[1]), 0.0 elsewhere
        self.goodrange_t = torch.zeros((ncols, nrows), dtype=torch.float)
        self.goodrange_t[goodrange[0]:goodrange[1], :] = 1.0
    else:
        self.goodrange_t = None
    self._return_pos_images = return_pos_images
    self._reduce = reduce
    #self.truth1_input = scn.InputLayer(2,(nrows,ncols),mode=0)
    #self.truth2_input = scn.InputLayer(2,(nrows,ncols),mode=0)
    # sparse-to-dense converters for the two 2D flow predictions
    self.outlayer1 = scn.OutputLayer(2)
    self.outlayer2 = scn.OutputLayer(2)
def __init__(self, class_num, full_scale, m=16, dimension=3):
    """Two-branch (main + support) sparse encoder/decoder with cross-branch
    guidance, ending in a linear head over `class_num` classes.

    class_num: number of output classes of the final linear layer.
    full_scale: spatial extent of the voxel grid.
    m: base feature width; deeper blocks use multiples of m.
    dimension: spatial dimensionality (3 = voxels).
    """
    nn.Module.__init__(self)
    self.dimension = dimension
    # separate voxelizers / stem convs for the main and support inputs
    self.input = scn.InputLayer(dimension, full_scale, mode=4)
    self.input_s = scn.InputLayer(dimension, full_scale, mode=4)
    self.down_in = scn.SubmanifoldConvolution(dimension, 1, m, 3, False)
    self.down_in_s = scn.SubmanifoldConvolution(dimension, 1, m, 3, False)
    # main-branch encoder, widths m .. 7m
    # NOTE(review): self.block's parameter meaning isn't visible here —
    # presumably (nIn, nOut, reps, stride); confirm against its definition.
    self.main_block1 = self.block(m, m, 2, 1)
    self.main_block2 = self.block(m, 2 * m, 1, 2)
    self.main_block3 = self.block(2 * m, 3 * m, 1, 2)
    self.main_block4 = self.block(3 * m, 4 * m, 1, 2)
    self.main_block5 = self.block(4 * m, 5 * m, 1, 2)
    self.main_block6 = self.block(5 * m, 6 * m, 1, 2)
    self.main_block7 = self.block(6 * m, 7 * m, 2, 2)
    self.main_block8 = self.block(7 * m, 7 * m, 2, 1)
    # support-branch encoder mirrors the main widths (different rep counts)
    self.support_block1 = self.block(m, m, 2, 1)
    self.support_block2 = self.block(m, 2 * m, 1, 2)
    self.support_block3 = self.block(2 * m, 3 * m, 1, 2)
    self.support_block4 = self.block(3 * m, 4 * m, 1, 2)
    self.support_block5 = self.block(4 * m, 5 * m, 1, 2)
    self.support_block6 = self.block(5 * m, 6 * m, 1, 2)
    self.support_block7 = self.block(6 * m, 7 * m, 1, 2)
    self.support_block8 = self.block(7 * m, 7 * m, 1, 1)
    # guidance taps from the support branch at three depths
    self.support_block2_tune = self.guide_tune(dimension, 2 * m, 2 * m, 1, False)
    self.support_block2_out = GlobalMeanAttentionPooling(dimension)
    self.support_block3_tune = self.guide_tune(dimension, 4 * m, 4 * m, 1, False)
    self.support_block3_out = GlobalMeanAttentionPooling(dimension)
    self.support_block4_tune = self.guide_tune(dimension, 7 * m, 7 * m, 1, False)
    self.support_block4_out = GlobalMeanAttentionPooling(dimension)
    self.global_add2 = GlobalMaskLayer(dimension)
    self.global_add3 = GlobalMaskLayer(dimension)
    self.global_add4 = GlobalMaskLayer(dimension)
    # cross-branch spatial matching at the deepest resolution
    self.spatial_pick = DistMatchLayer_v4(dimension, 7 * m, topk=3)
    # self.join_sub = scn.JoinTable()
    self.tune_sub = self.guide_tune(dimension, 14 * m, 7 * m, 1, False)
    # decoder with skip joins at levels 6, 4 and 2 (joined widths 12m/8m/4m)
    self.deconv7 = self.decoder(7 * m, 6 * m)
    self.join6 = scn.JoinTable()
    self.deconv6 = self.decoder(12 * m, 5 * m)
    self.deconv5 = self.decoder(5 * m, 4 * m)
    self.join4 = scn.JoinTable()
    self.deconv4 = self.decoder(8 * m, 3 * m)
    self.deconv3 = self.decoder(3 * m, 2 * m)
    self.join2 = scn.JoinTable()
    self.deconv2 = self.decoder(4 * m, 2 * m)
    # self.deconv1 = self.decoder(2 * m, m)
    self.output = scn.OutputLayer(dimension)
    self.linear = nn.Linear(2 * m, class_num)
def __init__(self, dimension=3, device='cuda', spatialSize=4096, nIn=3, nOut=2, reps=REPS, nPlanes=NPLANES):
    """Sparse U-Net placed on `device`.

    NOTE: the stem/head width comes from the module-level global `m`,
    not from a constructor argument.
    """
    nn.Module.__init__(self)
    net = scn.Sequential()
    net.add(scn.InputLayer(dimension, torch.LongTensor([spatialSize] * 3), mode=3))
    net.add(scn.SubmanifoldConvolution(dimension, nIn, m, 3, False))
    net.add(scn.UNet(dimension, reps, nPlanes,
                     residual_blocks=False, downsample=[2, 2]))
    net.add(scn.BatchNormReLU(m))
    net.add(scn.OutputLayer(dimension))
    self.sparseModel = net.to(device)
    # standalone input layer with the identical spec to the one inside sparseModel
    self.inputLayer = scn.InputLayer(dimension, torch.LongTensor([spatialSize] * 3), mode=3)
    self.linear = nn.Linear(m, nOut).to(device)
def __init__(
    self,
    in_channels,
    m=16,  # number of unet features (multiplied in each layer)
    block_reps=1,  # depth
    residual_blocks=False,  # ResNet style basic blocks
    full_scale=4096,
    num_planes=7,
    DIMENSION=3,
    downsample=[2, 2],
    leakiness=0,
    n_input_planes=-1,
    lout=5,
):
    """U-Net for contrastive training, built layer-by-layer via iter_unet."""
    super(UNetSCNContra, self).__init__()
    # --- bookkeeping -------------------------------------------------
    self.in_channels = in_channels
    self.out_channels = m
    self.block_reps = block_reps
    self.residual_blocks = residual_blocks
    self.full_scale = full_scale
    # widths m, 2m, ..., num_planes*m
    self.n_planes = [m * (level + 1) for level in range(num_planes)]
    self.dimension = DIMENSION
    self.downsample = downsample
    self.leakiness = leakiness
    self.lout = lout
    # self.n_input_planes = n_input_planes
    # --- before the U-Net --------------------------------------------
    self.input_layer = scn.InputLayer(DIMENSION, full_scale, mode=4)
    self.SC1 = scn.SubmanifoldConvolution(DIMENSION, in_channels,
                                          self.out_channels, 3, False)
    # --- U-Net body --------------------------------------------------
    self.enc_convs, self.middle_conv, self.dec_convs = self.iter_unet(n_input_planes)
    # --- after the U-Net ---------------------------------------------
    self.BNReLU = scn.BatchNormReLU(self.out_channels)
    self.inter_out_layer = scn.OutputLayer(self.dimension)
    self.output_layer = scn.OutputLayer(self.dimension)
def __init__(self, num_dims, sparse, stride_list, channels_list, num_classes):
    """Classification head: adapts the last feature width to num_classes."""
    super().__init__()
    last_width = channels_list[-1]
    changer = get_channel_changer(num_dims, sparse, last_width, num_classes)
    (self.sparse, stride, channels, self.channel_changer) = changer
    if self.sparse:
        # sparse path needs an explicit densifying output layer
        self.output_layer = scn.OutputLayer(num_dims)
    else:
        self.output_layer = nn.Identity()
def __init__(self):
    # Mixed scn/spconv model: scn handles the sparse input/output conversion
    # while the trainable backbone is built with spconv layers.
    nn.Module.__init__(self)
    self.input0 = scn.InputLayer(data.dimension, data.full_scale, mode=4)
    # NOTE(review): chaining `.add(...)` on spconv.SparseSequential assumes
    # the installed spconv version exposes an `add` method — confirm.
    self.sparseModel = spconv.SparseSequential(
        spconv.SubMConv3d(3, m, 3, bias=False, indice_key="start_")).add(
            nn.BatchNorm1d(m, eps=1e-3, momentum=0.01)).add(nn.ReLU()).add(
                UNet_vgg(
                    block_reps,
                    [m, 2 * m, 3 * m, 4 * m, 5 * m, 6 * m, 7 * m],
                    residual_blocks))
    # [m, 2*m, 3*m, 4*m, 5*m, 6*m, 7*m]
    self.out0 = scn.OutputLayer(data.dimension)
    # 20-way linear head (presumably the ScanNet semantic classes — confirm)
    self.linear = nn.Linear(m, 20)
def __init__(self):
    """Seven-level sparse U-Net over module-level globals; 20-way linear head."""
    nn.Module.__init__(self)
    planes = [m * level for level in range(1, 8)]  # m, 2m, ..., 7m
    net = scn.Sequential()
    net.add(scn.InputLayer(data.dimension, data.full_scale, mode=4))
    net.add(scn.SubmanifoldConvolution(data.dimension, 3, m, 3, False))
    net.add(scn.UNet(data.dimension, block_reps, planes, residual_blocks))
    net.add(scn.BatchNormReLU(m))
    net.add(scn.OutputLayer(data.dimension))
    self.sparseModel = net
    self.linear = nn.Linear(m, 20)
def __init__(self, inputshape, reps, nfeatures, nplanes, noutput_classes):
    """2D sparse U-Net ending in a 1x1 conv to `noutput_classes` + softmax.

    inputshape: size of each spatial dimension (LongTensor/tuple/list,
        handled by scn.InputLayer).
    reps: residual-block repetitions per level.
    nfeatures: base feature count; level n has nfeatures*(n+1) features.
    nplanes: number of U-Net levels.
    noutput_classes: channels of the per-pixel classification output.

    Raises ValueError if inputshape is not 2-dimensional.
    """
    nn.Module.__init__(self)
    # set parameters
    self.dimensions = 2  # not playing with 3D for now
    self.inputshape = inputshape
    if len(self.inputshape) != self.dimensions:
        raise ValueError(
            "expected inputshape to contain size of 2 dimensions only. given %d values"
            % (len(self.inputshape)))
    # mode variable: how to deal with repeated data
    self.mode = 0
    self.nfeatures = nfeatures
    # BUG FIX: `xrange` is Python 2 only and raises NameError on Python 3
    # (which the rest of this file targets); use `range`.
    self.nPlanes = [self.nfeatures * (n + 1) for n in range(nplanes)]
    # repetitions (per plane)
    self.reps = reps
    # output classes
    self.noutput_classes = noutput_classes
    # model: input -> stem conv -> U-Net -> BN/ReLU -> 1x1 conv to classes
    self.sparseModel = scn.Sequential().add(
        scn.InputLayer(
            self.dimensions, self.inputshape, mode=self.mode)).add(
                scn.SubmanifoldConvolution(
                    self.dimensions, 1, self.nfeatures, 3, False)).add(
                        scn.UNet(
                            self.dimensions,
                            self.reps,
                            self.nPlanes,
                            residual_blocks=True,
                            downsample=[2, 2])).add(
                                scn.BatchNormReLU(self.nfeatures)).add(
                                    scn.SubmanifoldConvolution(
                                        self.dimensions, self.nfeatures,
                                        self.noutput_classes, 1, False)).add(
                                            scn.OutputLayer(self.dimensions))
    self.softmax = torch.nn.Softmax(dim=1)
def __init__(self):
    """Fully-convolutional sparse net; concatenated multi-level features feed
    a linear classifier over data.nClassesTotal classes."""
    nn.Module.__init__(self)
    total_feats = sum(nPlanes)  # FCN concatenates every level's output
    net = scn.Sequential()
    net.add(scn.InputLayer(dimension, data.spatialSize, mode=3))
    net.add(scn.SubmanifoldConvolution(dimension, 1, m, 3, False))
    net.add(scn.FullyConvolutionalNet(dimension, reps, nPlanes,
                                      residual_blocks=False,
                                      downsample=[3, 2]))
    net.add(scn.BatchNormReLU(total_feats))
    net.add(scn.OutputLayer(dimension))
    self.sparseModel = net
    self.linear = nn.Linear(total_feats, data.nClassesTotal)
def __init__(self, Feature_channel, num_class):
    """Compress input features to 3 channels, then run a 7-level sparse U-Net."""
    nn.Module.__init__(self)
    dimension = 3
    # project arbitrary per-point features down to 3 channels
    self.compress_fn = nn.Linear(Feature_channel, dimension)
    self.bn = nn.BatchNorm1d(3)
    planes = [m * level for level in range(1, 8)]  # m .. 7m
    net = scn.Sequential()
    net.add(scn.InputLayer(dimension, full_scale, mode=4))
    net.add(scn.SubmanifoldConvolution(dimension, 3, m, 3, False))
    net.add(scn.UNet(dimension, block_reps, planes, residual_blocks))
    net.add(scn.BatchNormReLU(m))
    net.add(scn.OutputLayer(dimension))
    self.sparseModel = net
    self.linear = nn.Linear(m, num_class)
def __init__(self, nf_in, nf, pass_occ, pass_feats, max_data_size, truncation=3):
    # Refinement stage: a sparse FCN over the current-resolution input plus a
    # small head (`n*` layers) operating on the upsampled result.
    nn.Module.__init__(self)
    data_dim = 3
    self.pass_occ = pass_occ      # forward occupancy predictions to next stage
    self.pass_feats = pass_feats  # forward features to next stage
    self.nf_in = nf_in            # input feature channels
    self.nf = nf                  # working feature width
    self.truncation = truncation  # presumably TSDF truncation — confirm usage
    self.p0 = scn.InputLayer(data_dim, max_data_size, mode=0)
    self.p1 = scn.SubmanifoldConvolution(data_dim, nf_in, nf,
                                         filter_size=FSIZE0, bias=False)
    # FCN over 3 levels of nf features each; concatenated output is nf*3 wide
    self.p2 = scn.FullyConvolutionalNet(data_dim, reps=1,
                                        nPlanes=[nf, nf, nf],
                                        residual_blocks=True)
    self.p3 = scn.BatchNormReLU(nf * 3)
    self.p4 = scn.OutputLayer(data_dim)
    # upsampled
    self.n0 = scn.InputLayer(data_dim, max_data_size, mode=0)
    self.n1 = scn.SubmanifoldConvolution(data_dim, nf * 3, nf,
                                         filter_size=FSIZE0, bias=False)
    self.n2 = scn.BatchNormReLU(nf)
    self.n3 = scn.OutputLayer(data_dim)
    self.linear = nn.Linear(nf, 1)     # occupancy logit (per the usage pattern)
    self.linearsdf = nn.Linear(nf, 1)  # sdf value (per the attribute name)
def __init__(self):
    """Frozen fully-convolutional sparse feature extractor (448-dim output)."""
    nn.Module.__init__(self)
    planes = [m * level for level in range(1, 8)]  # m .. 7m
    net = scn.Sequential()
    net.add(scn.InputLayer(dimension, full_scale, mode=4))
    net.add(scn.SubmanifoldConvolution(dimension, 3, m, 3, False))
    net.add(scn.FullyConvolutionalNet(dimension, block_reps, planes,
                                      residual_blocks))
    net.add(scn.BatchNormReLU(448))  # 448 == sum(planes) when m == 16
    net.add(scn.OutputLayer(dimension))
    self.sparseModel = net
    self.linearx = nn.Linear(448, 448)
    # backbone stays frozen; only layers outside sparseModel can train
    for param in self.sparseModel.parameters():
        param.requires_grad = False
def __init__(self):
    """Stem-only sparse classifier (the ResNet trunk is currently disabled):
    input -> 3x3 submanifold conv -> BN/ReLU -> dense output -> linear head."""
    nn.Module.__init__(self)
    grid = torch.LongTensor([nvox] * 3)
    net = scn.Sequential()
    net.add(scn.InputLayer(dimension, grid, mode=3))
    net.add(scn.SubmanifoldConvolution(dimension, nPlanes, 16, 3, False))
    # Disabled trunk, kept for reference:
    # scn.SparseResNet(dimension, 16, [['b', 16, 2, 1],
    #                                  ['b', 32, 2, 2],
    #                                  ['b', 48, 2, 2],
    #                                  ['b', 96, 2, 2]])
    net.add(scn.BatchNormReLU(16))
    net.add(scn.OutputLayer(dimension))
    self.sparseModel = net
    self.linear = nn.Linear(16, global_Nclass)
def __init__(self, nf_in, nf, nf_out, max_data_size):
    """Sparse FCN encoder: nf_in -> nf over 3 levels, then linear to nf_out."""
    nn.Module.__init__(self)
    data_dim = 3
    self.p0 = scn.InputLayer(data_dim, max_data_size, mode=0)
    self.p1 = scn.SubmanifoldConvolution(data_dim, nf_in, nf,
                                         filter_size=FSIZE0, bias=False)
    # three levels of nf features each; the FCN concatenates them -> nf*3
    self.p2 = scn.FullyConvolutionalNet(
        data_dim, reps=1, nPlanes=[nf, nf, nf], residual_blocks=True
    )  #nPlanes=[nf, nf*2, nf*2], residual_blocks=True)
    self.p3 = scn.BatchNormReLU(nf * 3)
    self.p4 = scn.OutputLayer(data_dim)
    self.linear = nn.Linear(nf * 3, nf_out)
def __init__(self, config):
    """Segmentation branch: sparse U-Net + linear class head, plus an optional
    shape-embedding conv when completion interaction is enabled."""
    super().__init__()
    self.config = config
    seg_cfg = config['Segmentation']
    m = seg_cfg['m']
    # 4 channels when xyz coordinates are appended to the intensity, else 1
    input_dim = 4 if seg_cfg['use_coords'] else 1
    net = scn.Sequential()
    net.add(scn.InputLayer(3, seg_cfg['full_scale'][1], mode=4))
    net.add(scn.SubmanifoldConvolution(3, input_dim, m, 3, False))
    net.add(scn.UNet(dimension=3,
                     reps=seg_cfg['block_reps'],
                     nPlanes=[m * level for level in range(1, 8)],
                     residual_blocks=seg_cfg['block_residual'],
                     groups=seg_cfg['seg_groups']))
    net.add(scn.BatchNormReLU(m))
    net.add(scn.OutputLayer(3))
    self.sparseModel = net
    self.linear = nn.Linear(m, self.config['DATA']['classes_seg'])
    if self.config['Completion']['interaction']:
        self.shape_embedding = conv_base.Conv1d(
            m, m, kernel_size=1, bn=True, activation=nn.LeakyReLU(0.2))
def __init__(self, size):
    """5-level sparse U-Net binary classifier over a size^3 voxel grid."""
    self.dimension = 3
    self.reps = 1  # Conv block repetition factor
    self.m = 32  # Unet number of features
    # BUG FIX: the original read the bare name `m` here and in the final
    # Linear layer, while only `self.m` is defined in this scope (the rest of
    # the method consistently uses `self.m`).  That raised a NameError unless
    # an unrelated module-level `m` happened to exist — and silently used the
    # wrong width if it did.
    self.nPlanes = [self.m, 2 * self.m, 3 * self.m, 4 * self.m,
                    5 * self.m]  # UNet number of features per level
    nn.Module.__init__(self)
    self.sparseModel = scn.Sequential().add(
        scn.InputLayer(
            self.dimension, torch.LongTensor([size] * 3), mode=3)).add(
                scn.SubmanifoldConvolution(
                    self.dimension, 1, self.m, 3, False)).add(
                        scn.UNet(self.dimension,
                                 self.reps,
                                 self.nPlanes,
                                 residual_blocks=False,
                                 downsample=[2, 2])).add(
                                     scn.BatchNormReLU(self.m)).add(
                                         scn.OutputLayer(self.dimension))
    self.linear = nn.Linear(self.m, 2)
def __init__(self, in_channels,
             m=16,  # number of unet features (multiplied in each layer)
             block_reps=1,  # depth
             residual_blocks=False,  # ResNet style basic blocks
             full_scale=4096,
             num_planes=7
             ):
    """Plain sparse U-Net wrapper; the output carries `m` feature channels."""
    super(UNetSCN, self).__init__()
    self.in_channels = in_channels
    self.out_channels = m
    planes = [m * (level + 1) for level in range(num_planes)]  # m .. num_planes*m
    net = scn.Sequential()
    net.add(scn.InputLayer(DIMENSION, full_scale, mode=4))
    net.add(scn.SubmanifoldConvolution(DIMENSION, in_channels, m, 3, False))
    net.add(scn.UNet(DIMENSION, block_reps, planes, residual_blocks))
    net.add(scn.BatchNormReLU(m))
    net.add(scn.OutputLayer(DIMENSION))
    self.sparseModel = net
def __init__(self, options):
    """Fixed neighbor-gather model: a non-learned 3x3x3 submanifold conv whose
    kernel copies each of the `options.numNeighbors` face-adjacent voxels into
    its own output channel, plus average-pool / unpool pyramids (strides 2-32).
    """
    nn.Module.__init__(self)
    self.options = options
    dimension = 3
    self.input_layer = scn.InputLayer(dimension, options.inputScale, mode=4)
    self.conv = scn.SubmanifoldConvolution(dimension, 1, options.numNeighbors,
                                           3, bias=False)
    self.pool_1 = scn.AveragePooling(dimension, 2, 2)
    self.pool_2 = scn.AveragePooling(dimension, 4, 4)
    self.pool_3 = scn.AveragePooling(dimension, 8, 8)
    self.pool_4 = scn.AveragePooling(dimension, 16, 16)
    self.pool_5 = scn.AveragePooling(dimension, 32, 32)
    self.unpool_1 = scn.UnPooling(dimension, 2, 2)
    self.unpool_2 = scn.UnPooling(dimension, 4, 4)
    self.unpool_3 = scn.UnPooling(dimension, 8, 8)
    self.unpool_4 = scn.UnPooling(dimension, 16, 16)
    self.unpool_5 = scn.UnPooling(dimension, 32, 32)
    with torch.no_grad():
        # kernel weight layout: (27 flattened 3x3x3 offsets, 1 in-channel,
        # numNeighbors out-channels); channel i selects neighbor offset i
        weight = torch.zeros(27, 1, options.numNeighbors).cuda()
        if options.numNeighbors == 6:
            # flat indices of the six face neighbors inside the 3x3x3 kernel
            offsets = [4, 22, 10, 16, 12, 14]
        else:
            # ROBUSTNESS FIX: any other value previously left `offsets`
            # undefined and crashed below with a confusing NameError.
            raise ValueError(
                "unsupported numNeighbors: %d (only 6 is implemented)"
                % options.numNeighbors)
        for index, offset in enumerate(offsets):
            weight[offset, 0, index] = 1
        self.conv.weight = nn.Parameter(weight)
    self.output_layer = scn.OutputLayer(dimension)
    return
def __init__(self, cfg):
    """U-Net backbone with a Point Proposal Network (PPN) head.

    The encoder/decoder is built explicitly (rather than via scn.UNet) so the
    PPN branches can tap intermediate feature maps at the deepest, half and
    full resolutions.  Configuration comes from cfg['modules']['uresnet_ppn_type'].
    """
    super(PPNUResNet, self).__init__()
    import sparseconvnet as scn
    self._model_config = cfg['modules']['uresnet_ppn_type']
    # --- configuration -------------------------------------------------
    self._dimension = self._model_config.get('data_dim', 3)
    nInputFeatures = self._model_config.get('features', 1)
    spatial_size = self._model_config.get('spatial_size', 512)
    num_classes = self._model_config.get('num_classes', 5)
    m = self._model_config.get('filters', 16)  # Unet number of features
    num_strides = self._model_config.get('num_strides', 5)
    reps = 2  # Conv block repetition factor
    kernel_size = 2  # Use input_spatial_size method for other values?
    nPlanes = [i * m for i in range(1, num_strides + 1)]  # features per level
    downsample = [kernel_size, 2]  # downsample = [filter size, filter stride]
    self.last = None
    leakiness = 0

    def block(m, a, b):  # ResNet style block: shortcut (identity or 1x1) + two 3x3 convs, summed
        m.add(scn.ConcatTable().add(scn.Identity(
        ) if a == b else scn.NetworkInNetwork(a, b, False)).add(
            scn.Sequential().add(
                scn.BatchNormLeakyReLU(a, leakiness=leakiness)).add(
                    scn.SubmanifoldConvolution(
                        self._dimension, a, b, 3, False)).add(
                            scn.BatchNormLeakyReLU(
                                b, leakiness=leakiness)).add(
                                    scn.SubmanifoldConvolution(
                                        self._dimension, b, b, 3,
                                        False)))).add(scn.AddTable())

    # stem: voxelize + 3x3 submanifold conv (no bias)
    self.input = scn.Sequential().add(
        scn.InputLayer(self._dimension, spatial_size, mode=3)).add(
            scn.SubmanifoldConvolution(self._dimension, nInputFeatures, m, 3,
                                       False))  # Kernel size 3, no bias
    self.concat = scn.JoinTable()
    # --- encoder -------------------------------------------------------
    self.bn = scn.BatchNormLeakyReLU(nPlanes[0], leakiness=leakiness)
    self.encoding_block = scn.Sequential()
    self.encoding_conv = scn.Sequential()
    module = scn.Sequential()
    for i in range(num_strides):
        module = scn.Sequential()
        for _ in range(reps):
            block(module, nPlanes[i], nPlanes[i])
        self.encoding_block.add(module)
        # strided conv to the next level (none after the deepest level)
        module2 = scn.Sequential()
        if i < num_strides - 1:
            module2.add(
                scn.BatchNormLeakyReLU(nPlanes[i], leakiness=leakiness)).add(
                    scn.Convolution(self._dimension, nPlanes[i],
                                    nPlanes[i + 1], downsample[0],
                                    downsample[1], False))
        self.encoding_conv.add(module2)
    # NOTE(review): keeps only the LAST level's block (loop variable) —
    # presumably unused in forward(); confirm.
    self.encoding = module
    # --- decoder -------------------------------------------------------
    self.decoding_conv, self.decoding_blocks = scn.Sequential(
    ), scn.Sequential()
    for i in range(num_strides - 2, -1, -1):
        module1 = scn.Sequential().add(
            scn.BatchNormLeakyReLU(nPlanes[i + 1],
                                   leakiness=leakiness)).add(
                scn.Deconvolution(self._dimension, nPlanes[i + 1],
                                  nPlanes[i], downsample[0], downsample[1],
                                  False))
        self.decoding_conv.add(module1)
        module2 = scn.Sequential()
        for j in range(reps):
            # first rep sees concatenated skip features, hence 2x channels
            block(module2, nPlanes[i] * (2 if j == 0 else 1), nPlanes[i])
        self.decoding_blocks.add(module2)
    self.output = scn.Sequential().add(scn.BatchNormReLU(m)).add(
        scn.OutputLayer(self._dimension))
    self.linear = torch.nn.Linear(m, num_classes)
    # --- PPN head ------------------------------------------------------
    self.half_stride = int(num_strides / 2.0)
    # proposals at the deepest resolution
    self.ppn1_conv = scn.SubmanifoldConvolution(self._dimension, nPlanes[-1],
                                                nPlanes[-1], 3, False)
    self.ppn1_scores = scn.SubmanifoldConvolution(self._dimension,
                                                  nPlanes[-1], 2, 3, False)
    self.selection1 = Selection()
    self.selection2 = Selection()
    # unpool deepest-level proposals up to the half-resolution level
    self.unpool1 = scn.Sequential()
    for i in range(num_strides - self.half_stride - 1):
        self.unpool1.add(
            scn.UnPooling(self._dimension, downsample[0], downsample[1]))
    # unpool half-resolution proposals up to full resolution
    self.unpool2 = scn.Sequential()
    for i in range(self.half_stride):
        self.unpool2.add(
            scn.UnPooling(self._dimension, downsample[0], downsample[1]))
    # channel count at the half-stride level: m * sum(1..half_stride)
    middle_filters = int(m * self.half_stride * (self.half_stride + 1) / 2.0)
    self.ppn2_conv = scn.SubmanifoldConvolution(self._dimension,
                                                middle_filters,
                                                middle_filters, 3, False)
    self.ppn2_scores = scn.SubmanifoldConvolution(self._dimension,
                                                  middle_filters, 2, 3, False)
    self.multiply1 = Multiply()
    self.multiply2 = Multiply()
    # full-resolution predictions: point offsets, objectness scores, class type
    self.ppn3_conv = scn.SubmanifoldConvolution(self._dimension, nPlanes[0],
                                                nPlanes[0], 3, False)
    self.ppn3_pixel_pred = scn.SubmanifoldConvolution(
        self._dimension, nPlanes[0], self._dimension, 3, False)
    self.ppn3_scores = scn.SubmanifoldConvolution(self._dimension,
                                                  nPlanes[0], 2, 3, False)
    self.ppn3_type = scn.SubmanifoldConvolution(self._dimension, nPlanes[0],
                                                num_classes, 3, False)
    self.add_labels1 = AddLabels()
    self.add_labels2 = AddLabels()
def __init__(self, cfg, name="uresnet_lonely"):
    """U-Net with an explicitly-built encoder/decoder (ResNet-style blocks).

    Reads hyper-parameters from cfg['modules'][name] when the 'modules' key
    exists, otherwise treats cfg itself as the model config.
    """
    super(UResNet, self).__init__()
    import sparseconvnet as scn
    if 'modules' in cfg:
        self.model_config = cfg['modules'][name]
    else:
        self.model_config = cfg
    # Whether to compute ghost mask separately or not
    self._dimension = self.model_config.get('data_dim', 3)
    reps = self.model_config.get('reps', 2)  # Conv block repetition factor
    kernel_size = self.model_config.get('kernel_size', 2)
    num_strides = self.model_config.get('num_strides', 5)
    m = self.model_config.get('filters', 16)  # Unet number of features
    nInputFeatures = self.model_config.get('features', 1)
    spatial_size = self.model_config.get('spatial_size', 512)
    leakiness = self.model_config.get('leak', 0.0)
    nPlanes = [i * m for i in range(1, num_strides + 1)
               ]  # UNet number of features per level
    print("nPlanes: ", nPlanes)
    downsample = [kernel_size, 2]  # [filter size, filter stride]
    self.last = None

    def block(m, a, b):  # ResNet style block: shortcut (identity or 1x1) + two 3x3 convs, summed
        m.add(scn.ConcatTable().add(scn.Identity(
        ) if a == b else scn.NetworkInNetwork(a, b, False)).add(
            scn.Sequential().add(
                scn.BatchNormLeakyReLU(a, leakiness=leakiness)).add(
                    scn.SubmanifoldConvolution(
                        self._dimension, a, b, 3, False)).add(
                            scn.BatchNormLeakyReLU(
                                b, leakiness=leakiness)).add(
                                    scn.SubmanifoldConvolution(
                                        self._dimension, b, b, 3,
                                        False)))).add(scn.AddTable())

    # stem: voxelize + 3x3 submanifold conv
    self.input = scn.Sequential().add(
        scn.InputLayer(self._dimension, spatial_size, mode=3)).add(
            scn.SubmanifoldConvolution(self._dimension, nInputFeatures, m, 3,
                                       False))  # Kernel size 3, no bias
    self.concat = scn.JoinTable()
    # Encoding
    self.bn = scn.BatchNormLeakyReLU(nPlanes[0], leakiness=leakiness)
    self.encoding_block = scn.Sequential()
    self.encoding_conv = scn.Sequential()
    module = scn.Sequential()
    for i in range(num_strides):
        module = scn.Sequential()
        for _ in range(reps):
            block(module, nPlanes[i], nPlanes[i])
        self.encoding_block.add(module)
        # strided conv down to the next level (none after the deepest level)
        module2 = scn.Sequential()
        if i < num_strides - 1:
            module2.add(
                scn.BatchNormLeakyReLU(nPlanes[i], leakiness=leakiness)).add(
                    scn.Convolution(self._dimension, nPlanes[i],
                                    nPlanes[i + 1], downsample[0],
                                    downsample[1], False))
        self.encoding_conv.add(module2)
    # NOTE(review): keeps only the LAST level's block (loop variable) —
    # presumably unused in forward(); confirm.
    self.encoding = module
    # Decoding
    self.decoding_conv, self.decoding_blocks = scn.Sequential(
    ), scn.Sequential()
    for i in range(num_strides - 2, -1, -1):
        module1 = scn.Sequential().add(
            scn.BatchNormLeakyReLU(nPlanes[i + 1],
                                   leakiness=leakiness)).add(
                scn.Deconvolution(self._dimension, nPlanes[i + 1],
                                  nPlanes[i], downsample[0], downsample[1],
                                  False))
        self.decoding_conv.add(module1)
        module2 = scn.Sequential()
        for j in range(reps):
            # first rep sees concatenated skip features, hence 2x channels
            block(module2, nPlanes[i] * (2 if j == 0 else 1), nPlanes[i])
        self.decoding_blocks.add(module2)
    self.output = scn.Sequential().add(scn.BatchNormReLU(m)).add(
        scn.OutputLayer(self._dimension))
def __init__(self, cfg, name='ynet_full'):
    """Y-shaped network: two YResNet encoders (primary + secondary), a
    mapping stage joining them, and twin decoders producing seediness and
    clustering-embedding outputs."""
    super().__init__(cfg, name)
    self.model_config = cfg[name]
    self.num_filters = self.model_config.get('filters', 16)
    self.seed_dim = self.model_config.get('seed_dim', 1)
    self.sigma_dim = self.model_config.get('sigma_dim', 1)
    self.embedding_dim = self.model_config.get('embedding_dim', 3)
    self.inputKernel = self.model_config.get('input_kernel_size', 3)
    self.coordConv = self.model_config.get('coordConv', False)
    # YResNet Configurations
    # operation for mapping latent secondary features to primary features
    self.mapping_op = self.model_config.get('mapping_operation', 'pool')
    assert self.mapping_op in self.supported_mapping_ops
    # Network Freezing Options
    self.encoder_freeze = self.model_config.get('encoder_freeze', False)
    self.embedding_freeze = self.model_config.get('embedding_freeze', False)
    self.seediness_freeze = self.model_config.get('seediness_freeze', False)
    # Input Layer Configurations and commonly used scn operations.
    self.input = scn.Sequential().add(
        scn.InputLayer(self.dimension, self.spatial_size, mode=3)).add(
            scn.SubmanifoldConvolution(self.dimension, self.nInputFeatures, \
            self.num_filters, self.inputKernel, self.allow_bias)) # Kernel size 3, no bias
    self.add = scn.AddTable()
    # Preprocessing logic for secondary input: BN + one resnet block 1 -> num_filters
    self.t_bn = scn.BatchNormLeakyReLU(1, leakiness=self.leakiness)
    self.netinnet = scn.Sequential()
    self._resnet_block(self.netinnet, 1, self.num_filters)
    # Timing information (sinusoidal positional encoding over the sequence)
    max_seq_len = self.model_config.get('max_seq_len', 5)
    self.pe = SinusoidalPositionalEncoding(max_seq_len, 1)
    # Backbone YResNet. Do NOT change namings!
    self.primary_encoder = YResNetEncoder(cfg, name='yresnet_encoder')
    self.secondary_encoder = YResNetEncoder(cfg, name='yresnet_encoder')
    # secondary -> primary latent mapping: strided conv or pooling
    if self.mapping_op == 'conv':
        self.mapping = ConvolutionalFeatureMapping(self.dimension,
                                                   self.nPlanes[-1],
                                                   self.nPlanes[-1],
                                                   2, 2, False)
    elif self.mapping_op == 'pool':
        self.mapping = PoolFeatureMapping(self.dimension, 2, 2, )
    self.seed_net = YResNetDecoder(cfg, name='seediness_decoder')
    self.cluster_net = YResNetDecoder(cfg, name='embedding_decoder')
    # Encoder-Decoder 1x1 Connections
    encoder_planes = [i for i in self.primary_encoder.nPlanes]
    cluster_planes = [i for i in self.cluster_net.nPlanes]
    seed_planes = [i for i in self.seed_net.nPlanes]
    self.skip_mode = self.model_config.get('skip_mode', 'default')
    self.cluster_skip = scn.Sequential()
    self.seed_skip = scn.Sequential()
    # Output Layers
    self.output_cluster = scn.Sequential()
    self._nin_block(self.output_cluster, self.cluster_net.num_filters, 4)
    self.output_cluster.add(scn.OutputLayer(self.dimension))
    self.output_seediness = scn.Sequential()
    self._nin_block(self.output_seediness, self.seed_net.num_filters, 1)
    self.output_seediness.add(scn.OutputLayer(self.dimension))
    if self.skip_mode == 'default':
        # identity skips: channel counts must already match
        for p1, p2 in zip(encoder_planes, cluster_planes):
            self.cluster_skip.add(scn.Identity())
        for p1, p2 in zip(encoder_planes, seed_planes):
            self.seed_skip.add(scn.Identity())
    elif self.skip_mode == '1x1':
        # learned 1x1 (network-in-network) channel adapters
        for p1, p2 in zip(encoder_planes, cluster_planes):
            self._nin_block(self.cluster_skip, p1, p2)
        for p1, p2 in zip(encoder_planes, seed_planes):
            self._nin_block(self.seed_skip, p1, p2)
    else:
        raise ValueError('Invalid skip connection mode!')
    # Freeze Layers
    # NOTE(review): `self.encoder` is never assigned in this __init__ —
    # encoder_freeze=True presumably relies on the base class defining it;
    # confirm, otherwise this raises AttributeError.
    if self.encoder_freeze:
        for p in self.encoder.parameters():
            p.requires_grad = False
    if self.embedding_freeze:
        for p in self.cluster_net.parameters():
            p.requires_grad = False
        for p in self.output_cluster.parameters():
            p.requires_grad = False
    if self.seediness_freeze:
        for p in self.seed_net.parameters():
            p.requires_grad = False
        for p in self.output_seediness.parameters():
            p.requires_grad = False
    # Pytorch Activations
    self.tanh = nn.Tanh()
    self.sigmoid = nn.Sigmoid()