def _make_transpose(self, transblock, planes, blocks, stride=1):
    upsample = None
    if stride != 1:
        # Strided upsampling: bridge to a dense tensor for ConvTranspose2d,
        # then return to the sparse representation.
        upsample = scn.Sequential(
            scn.SparseToDense(2, self.inplanes * transblock.expansion),
            nn.ConvTranspose2d(self.inplanes * transblock.expansion, planes,
                               kernel_size=2, stride=stride, padding=0,
                               bias=False),
            scn.DenseToSparse(2),
            scn.BatchNormalization(planes)
        )
    elif self.inplanes * transblock.expansion != planes:
        # Channel-count mismatch only: a 1x1 (NetworkInNetwork) projection.
        upsample = scn.Sequential(
            scn.NetworkInNetwork(self.inplanes * transblock.expansion,
                                 planes, False),
            scn.BatchNormalization(planes)
        )

    layers = []
    for i in range(1, blocks):
        layers.append(transblock(self.inplanes,
                                 self.inplanes * transblock.expansion))
    layers.append(transblock(self.inplanes, planes, stride, upsample))
    self.inplanes = planes // transblock.expansion

    return scn.Sequential(*layers)
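# The sparse -> dense -> sparse round trip above is easy to miss. A minimal
# standalone sketch of that bridge (not from the source; channel counts are
# made up, and it assumes torch and sparseconvnet are installed):

import torch.nn as nn
import sparseconvnet as scn

bridge = scn.Sequential(
    scn.SparseToDense(2, 16),                   # sparse 2D tensor -> dense NCHW
    nn.ConvTranspose2d(16, 8, kernel_size=2,
                       stride=2, bias=False),   # dense learned upsampling
    scn.DenseToSparse(2),                       # back to a sparse tensor
    scn.BatchNormalization(8),
)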
def __init__(self, inplanes, outplanes, nplanes=1):
    nn.Module.__init__(self)

    self.conv1 = scn.SubmanifoldConvolution(dimension=3,
        nIn=inplanes,
        nOut=outplanes,
        filter_size=[nplanes, 3, 3],
        bias=False)
    # if FLAGS.BATCH_NORM:
    self.bn1 = scn.BatchNormReLU(outplanes)
    self.conv2 = scn.SubmanifoldConvolution(dimension=3,
        nIn=outplanes,
        nOut=outplanes,
        filter_size=[nplanes, 3, 3],
        bias=False)
    # if FLAGS.BATCH_NORM:
    self.bn2 = scn.BatchNormalization(outplanes)
    self.residual = scn.Identity()
    self.relu = scn.ReLU()
    self.add = scn.AddTable()
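# The constructor only wires up the layers. A hedged sketch (not from the
# source) of the forward pass these modules imply; scn.AddTable sums a list
# of sparse tensors with identical metadata:

def forward(self, x):
    residual = self.residual(x)
    out = self.conv1(x)
    out = self.bn1(out)              # fused BatchNorm + ReLU
    out = self.conv2(out)
    out = self.bn2(out)
    out = self.add([out, residual])  # skip connection
    return self.relu(out)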
def MultiscaleShapeContext(dimension,
                           n_features=1,
                           n_layers=3,
                           shape_context_size=3,
                           downsample_size=2,
                           downsample_stride=2,
                           bn=True):
    m = sparseconvnet.Sequential()
    if n_layers == 1:
        m.add(
            sparseconvnet.ShapeContext(dimension, n_features,
                                       shape_context_size))
    else:
        # Recurse: shape context at this scale, concatenated with the same
        # features computed on a pooled (coarser) copy of the input.
        m.add(sparseconvnet.ConcatTable().add(
            sparseconvnet.ShapeContext(
                dimension, n_features, shape_context_size)).add(
                    sparseconvnet.Sequential(
                        sparseconvnet.AveragePooling(dimension,
                                                     downsample_size,
                                                     downsample_stride),
                        MultiscaleShapeContext(dimension, n_features,
                                               n_layers - 1,
                                               shape_context_size,
                                               downsample_size,
                                               downsample_stride, False),
                        sparseconvnet.UnPooling(dimension, downsample_size,
                                                downsample_stride)))).add(
                                                    sparseconvnet.JoinTable())
    if bn:
        # BatchNorm is applied once, at the top level, over the full
        # multiscale feature stack.
        m.add(
            sparseconvnet.BatchNormalization(shape_context_size**dimension *
                                             n_features * n_layers))
    return m
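# A hedged usage sketch (not from the source; sizes are made up). With
# dimension=2, shape_context_size=3, n_features=1, n_layers=3, the output has
# 3**2 * 1 * 3 = 27 channels:

import torch
import sparseconvnet

net = sparseconvnet.Sequential(
    sparseconvnet.InputLayer(2, 64),             # 2D, 64x64 spatial size
    MultiscaleShapeContext(dimension=2, n_features=1, n_layers=3),
    sparseconvnet.SparseToDense(2, 27),          # densify the 27 channels
)
coords = torch.randint(0, 64, (100, 2)).long()   # active sites
batch = torch.zeros(100, 1).long()               # single sample in the batch
features = torch.randn(100, 1)
out = net([torch.cat([coords, batch], dim=1), features])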
def __init__(self, inplanes, outplanes, batch_norm, leaky_relu):
    nn.Module.__init__(self)

    self.batch_norm = batch_norm
    self.leaky_relu = leaky_relu

    self.conv1 = scn.SubmanifoldConvolution(dimension=3,
        nIn=inplanes,
        nOut=outplanes,
        filter_size=3,
        bias=False)

    if self.batch_norm:
        if self.leaky_relu:
            self.bn1 = scn.BatchNormLeakyReLU(outplanes)
        else:
            self.bn1 = scn.BatchNormReLU(outplanes)

    self.conv2 = scn.SubmanifoldConvolution(dimension=3,
        nIn=outplanes,
        nOut=outplanes,
        filter_size=3,
        bias=False)

    if self.batch_norm:
        self.bn2 = scn.BatchNormalization(outplanes)

    self.residual = scn.Identity()

    if self.leaky_relu:
        self.relu = scn.LeakyReLU()
    else:
        self.relu = scn.ReLU()

    self.add = scn.AddTable()
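# Since bn1/bn2 exist only when batch_norm is set, a matching forward pass
# has to branch. A hedged sketch (not from the source); note that bn1 already
# includes the activation when batch norm is on:

def forward(self, x):
    residual = self.residual(x)
    out = self.conv1(x)
    if self.batch_norm:
        out = self.bn1(out)          # fused BatchNorm + (Leaky)ReLU
    else:
        out = self.relu(out)
    out = self.conv2(out)
    if self.batch_norm:
        out = self.bn2(out)
    out = self.add([out, residual])
    return self.relu(out)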
def __init__(self, *, inplanes, outplanes, nplanes=1, params):
    nn.Module.__init__(self)

    self.conv1 = scn.SubmanifoldConvolution(dimension=3,
        nIn=inplanes,
        nOut=outplanes,
        filter_size=[nplanes, 3, 3],
        bias=params.use_bias)

    self.do_batch_norm = False
    if params.batch_norm:
        self.do_batch_norm = True
        self.bn1 = scn.BatchNormReLU(outplanes)

    # Note: conv2's bias is hard-coded to False, unlike conv1, which
    # follows params.use_bias.
    self.conv2 = scn.SubmanifoldConvolution(dimension=3,
        nIn=outplanes,
        nOut=outplanes,
        filter_size=[nplanes, 3, 3],
        bias=False)

    if params.batch_norm:
        self.bn2 = scn.BatchNormalization(outplanes)

    self.residual = scn.Identity()
    self.relu = scn.ReLU()
    self.add = scn.AddTable()
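# params only needs use_bias and batch_norm attributes. A hedged
# instantiation sketch; the class name SparseResidualBlock is assumed for
# illustration, not taken from the source:

from types import SimpleNamespace

params = SimpleNamespace(use_bias=False, batch_norm=True)
block = SparseResidualBlock(inplanes=16, outplanes=16,
                            nplanes=1, params=params)  # hypothetical name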
def __init__(self, inplanes, outplanes, nplanes=1):
    nn.Module.__init__(self)

    # Strided convolution: halves the two image dimensions and leaves the
    # nplanes axis unchanged.
    self.conv = scn.Convolution(dimension=3,
        nIn=inplanes,
        nOut=outplanes,
        filter_size=[nplanes, 2, 2],
        filter_stride=[1, 2, 2],
        bias=False)
    # if FLAGS.BATCH_NORM:
    self.bn = scn.BatchNormalization(outplanes)
    self.relu = scn.ReLU()
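# The corresponding forward pass is a straight conv -> BN -> ReLU chain.
# A minimal hedged sketch (not from the source):

def forward(self, x):
    out = self.conv(x)
    out = self.bn(out)
    return self.relu(out)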
def __init__(self, *, inplanes, outplanes, nplanes=1, params):
    nn.Module.__init__(self)

    self.conv = scn.Deconvolution(dimension=3,
        nIn=inplanes,
        nOut=outplanes,
        filter_size=[nplanes, 2, 2],
        filter_stride=[1, 2, 2],
        bias=params.use_bias)

    self.do_batch_norm = False
    if params.batch_norm:
        self.do_batch_norm = True
        self.bn = scn.BatchNormalization(outplanes)

    self.relu = scn.ReLU()
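# do_batch_norm is stored precisely so a forward pass can branch on it.
# A hedged sketch (not from the source):

def forward(self, x):
    out = self.conv(x)
    if self.do_batch_norm:   # bn only exists when params.batch_norm was set
        out = self.bn(out)
    return self.relu(out)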
def __init__(self, inplanes, outplanes, batch_norm, leaky_relu, nplanes=1):
    nn.Module.__init__(self)

    self.batch_norm = batch_norm
    self.leaky_relu = leaky_relu

    self.conv = scn.Convolution(dimension=3,
        nIn=inplanes,
        nOut=outplanes,
        filter_size=[nplanes, 2, 2],
        filter_stride=[1, 2, 2],
        bias=False)

    if self.batch_norm:
        self.bn = scn.BatchNormalization(outplanes)

    if self.leaky_relu:
        self.relu = scn.LeakyReLU()
    else:
        self.relu = scn.ReLU()
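# The forward pass mirrors the deconvolution block above, branching on
# self.batch_norm instead of do_batch_norm. A hedged instantiation sketch;
# the class name SparseDownsample is assumed for illustration:

down = SparseDownsample(inplanes=32, outplanes=64,
                        batch_norm=True, leaky_relu=True,
                        nplanes=1)  # hypothetical class name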
def __init__(self, inplanes, planes, stride=1, upsample=None, **kwargs):
    super(TransBasicBlockSparse, self).__init__()

    self.conv1 = conv3x3_sparse(inplanes, inplanes)
    self.bn1 = scn.BatchNormReLU(inplanes)
    self.relu = scn.ReLU()
    if upsample is not None and stride != 1:
        # Strided branch: go dense for ConvTranspose2d, then back to sparse.
        self.conv2 = scn.Sequential(
            scn.SparseToDense(2, inplanes),
            nn.ConvTranspose2d(inplanes, planes,
                               kernel_size=2, stride=stride,
                               padding=0, output_padding=0,
                               bias=False),
            scn.DenseToSparse(2)
        )
    else:
        self.conv2 = conv3x3_sparse(inplanes, planes, stride)
    self.bn2 = scn.BatchNormalization(planes)
    self.add = scn.AddTable()
    self.upsample = upsample
    self.stride = stride
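# A hedged sketch (not from the source) of the forward pass, following the
# standard transposed-basic-block pattern the constructor implies; upsample,
# when present, projects the skip path to the output shape:

def forward(self, x):
    residual = x if self.upsample is None else self.upsample(x)
    out = self.conv1(x)
    out = self.bn1(out)              # fused BatchNorm + ReLU
    out = self.conv2(out)            # sparse conv, or the dense ConvTranspose2d bridge
    out = self.bn2(out)
    out = self.add([out, residual])  # skip connection
    return self.relu(out)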
def createConvBatchNormLayers(nLayers,
                              nChannels,
                              use_bias,
                              use_batch_norm,
                              dimension=2,
                              facebook_layer=True,
                              kernel_size=3):
    """Creates sparse conv (and optional batch-norm) layers with shared random
    parameters, so the Facebook (scn) and asynchronous implementations can be
    compared against each other."""
    kernels = []
    if dimension == 1:
        asyn_conv_layer = ascn1.asynSparseConvolution1D
    elif dimension == 2:
        asyn_conv_layer = ascn2.asynSparseConvolution2D
    if use_bias:
        bias = []
    if use_batch_norm:
        batch_norm_param = []

    # Draw one shared set of random parameters per layer.
    for i_layer in range(1, nLayers + 1):
        kernels.append(
            np.random.uniform(-10.0, 10.0, [
                kernel_size**dimension, 1, nChannels[i_layer - 1],
                nChannels[i_layer]
            ]))
        if use_bias:
            bias.append(np.random.uniform(-10.0, 10.0, [nChannels[i_layer]]))
        if use_batch_norm:
            learned_scale = np.random.uniform(-1.0, 1.0, [nChannels[i_layer]])
            learned_shift = np.random.uniform(-1.0, 1.0, [nChannels[i_layer]])
            running_mean = np.random.uniform(-1.0, 1.0, [nChannels[i_layer]])
            running_var = np.random.uniform(0.1, 1.0, [nChannels[i_layer]])
            batch_norm_param.append(
                [learned_scale, learned_shift, running_mean, running_var])

    asyn_conv_layers = []
    asyn_bn_layers = []
    for i_layer in range(1, nLayers + 1):
        asyn_conv_layers.append(
            asyn_conv_layer(dimension=dimension,
                            nIn=nChannels[i_layer - 1],
                            nOut=nChannels[i_layer],
                            filter_size=kernel_size,
                            first_layer=(i_layer == 1),
                            use_bias=use_bias))
        asyn_conv_layers[i_layer - 1].weight.data = torch.squeeze(
            torch.tensor(kernels[i_layer - 1], dtype=torch.float32), dim=1)
        if use_bias:
            asyn_conv_layers[i_layer - 1].bias.data = torch.tensor(
                bias[i_layer - 1], dtype=torch.float32)
        if use_batch_norm:
            # BatchNorm acts on the layer's output channels, nChannels[i_layer].
            # (The original passed nChannels[i_layer - 1], which does not match
            # the parameter shapes drawn above.) The asynchronous path stores
            # its BN parameters in float64.
            asyn_bn_layers.append(
                torch.nn.BatchNorm1d(nChannels[i_layer],
                                     eps=1e-4,
                                     momentum=0.9))
            asyn_bn_layers[i_layer - 1].weight.data = torch.tensor(
                batch_norm_param[i_layer - 1][0], dtype=torch.float64)
            asyn_bn_layers[i_layer - 1].bias.data = torch.tensor(
                batch_norm_param[i_layer - 1][1], dtype=torch.float64)
            asyn_bn_layers[i_layer - 1].running_mean.data = torch.tensor(
                batch_norm_param[i_layer - 1][2], dtype=torch.float64)
            asyn_bn_layers[i_layer - 1].running_var.data = torch.tensor(
                batch_norm_param[i_layer - 1][3], dtype=torch.float64)
            asyn_bn_layers[i_layer - 1].eval()

    if facebook_layer:
        sparse_conv_layers = []
        sparse_bn_layers = []
        for i_layer in range(1, nLayers + 1):
            sparse_conv_layers.append(
                scn.SubmanifoldConvolution(dimension=dimension,
                                           nIn=nChannels[i_layer - 1],
                                           nOut=nChannels[i_layer],
                                           filter_size=kernel_size,
                                           bias=use_bias))
            sparse_conv_layers[i_layer - 1].weight.data = torch.tensor(
                kernels[i_layer - 1], dtype=torch.float32)
            if use_bias:
                sparse_conv_layers[i_layer - 1].bias.data = torch.tensor(
                    bias[i_layer - 1], dtype=torch.float32)
            if use_batch_norm:
                sparse_bn_layers.append(
                    scn.BatchNormalization(nChannels[i_layer]))
                sparse_bn_layers[i_layer - 1].weight.data = torch.tensor(
                    batch_norm_param[i_layer - 1][0], dtype=torch.float32)
                sparse_bn_layers[i_layer - 1].bias.data = torch.tensor(
                    batch_norm_param[i_layer - 1][1], dtype=torch.float32)
                sparse_bn_layers[i_layer - 1].running_mean.data = torch.tensor(
                    batch_norm_param[i_layer - 1][2], dtype=torch.float32)
                sparse_bn_layers[i_layer - 1].running_var.data = torch.tensor(
                    batch_norm_param[i_layer - 1][3], dtype=torch.float32)
                sparse_bn_layers[i_layer - 1].eval()
        return sparse_conv_layers, sparse_bn_layers, asyn_conv_layers, asyn_bn_layers
    else:
        batch_asyn_conv_layers = []
        batch_asyn_bn_layers = []
        for i_layer in range(1, nLayers + 1):
            batch_asyn_conv_layers.append(
                asyn_conv_layer(dimension=dimension,
                                nIn=nChannels[i_layer - 1],
                                nOut=nChannels[i_layer],
                                filter_size=kernel_size,
                                first_layer=(i_layer == 1),
                                use_bias=use_bias))
            batch_asyn_conv_layers[i_layer - 1].weight.data = torch.squeeze(
                torch.tensor(kernels[i_layer - 1], dtype=torch.float32), dim=1)
            if use_bias:
                batch_asyn_conv_layers[i_layer - 1].bias.data = torch.tensor(
                    bias[i_layer - 1], dtype=torch.float32)
            if use_batch_norm:
                # Same output-channel fix as above.
                batch_asyn_bn_layers.append(
                    torch.nn.BatchNorm1d(nChannels[i_layer],
                                         eps=1e-4,
                                         momentum=0.9))
                batch_asyn_bn_layers[i_layer - 1].weight.data = torch.tensor(
                    batch_norm_param[i_layer - 1][0], dtype=torch.float64)
                batch_asyn_bn_layers[i_layer - 1].bias.data = torch.tensor(
                    batch_norm_param[i_layer - 1][1], dtype=torch.float64)
                batch_asyn_bn_layers[i_layer - 1].running_mean.data = torch.tensor(
                    batch_norm_param[i_layer - 1][2], dtype=torch.float64)
                batch_asyn_bn_layers[i_layer - 1].running_var.data = torch.tensor(
                    batch_norm_param[i_layer - 1][3], dtype=torch.float64)
                batch_asyn_bn_layers[i_layer - 1].eval()
        return batch_asyn_conv_layers, batch_asyn_bn_layers, asyn_conv_layers, asyn_bn_layers
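# A hedged usage sketch (values are made up). nChannels must have
# nLayers + 1 entries, with the input channel count first:

# Two layers, 1 -> 4 -> 8 channels, in 2D, with bias and batch norm.
sparse_convs, sparse_bns, asyn_convs, asyn_bns = createConvBatchNormLayers(
    nLayers=2, nChannels=[1, 4, 8], use_bias=True, use_batch_norm=True,
    dimension=2, facebook_layer=True, kernel_size=3)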