def __init__(self, in_channels, pretrained_path=None):
    """Symmetric 3-D encoder/decoder built from conv_block_3d units.

    Args:
        in_channels: base channel count; all widths are multiples of it.
        pretrained_path: optional checkpoint path, stored for later loading.
    """
    super().__init__()
    self.pretrained_path = pretrained_path
    # Encoder: each stage doubles channels; pools keep indices for MaxUnpool3d.
    self.down_1 = conv_block_3d(in_channels, in_channels*2)
    self.pool_1 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
    self.down_2 = conv_block_3d(in_channels*2, in_channels*4)
    self.pool_2 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
    self.down_3 = conv_block_3d(in_channels*4, in_channels*8)
    self.pool_3 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
    self.bridge = conv_block_3d(in_channels*8, in_channels*8)
    # Decoder: index-based unpool, GroupNorm, then conv block. The up_* input
    # widths are twice the unpool output — presumably a skip-connection concat
    # in forward; TODO confirm.
    # self.trans_1 = conv_trans_block_3d(in_channels*16, in_channels*16)
    self.unpool_1 = nn.MaxUnpool3d(kernel_size=2, stride=2)
    # GroupNorm(num_groups, num_channels): channels must divide evenly by groups.
    self.gn_1 = nn.GroupNorm(in_channels*2, in_channels*8)
    self.up_1 = conv_block_3d(in_channels*16, in_channels*4)
    # self.trans_2 = conv_trans_block_3d(in_channels*8, in_channels*8)
    self.unpool_2 = nn.MaxUnpool3d(kernel_size=2, stride=2)
    self.gn_2 = nn.GroupNorm(in_channels*1, in_channels*4)
    self.up_2 = conv_block_3d(in_channels*8, in_channels*2)
    # self.trans_3 = conv_trans_block_3d(in_channels*4, in_channels*4)
    self.unpool_3 = nn.MaxUnpool3d(kernel_size=2, stride=2)
    self.gn_3 = nn.GroupNorm(in_channels, in_channels*2)
    self.up_3 = conv_block_3d(in_channels*4, in_channels*4)
    # NOTE(review): the head *expands* channels (in*4 -> in*8), unusual for an
    # output layer — confirm downstream expectation.
    self.out = nn.Conv3d(in_channels*4, in_channels*8, kernel_size=3, stride=1, padding=1)
def __init__(self): super().__init__() # define encoder self.encoder = nn.Sequential( nn.Conv3d(in_channels=72, out_channels=72, kernel_size=(3, 3, 3), stride=stride, padding=padding), nn.Tanh(), nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(3, 2, 2), padding=padding), nn.Tanh(), nn.Conv3d(in_channels=32, out_channels=32, kernel_size=(3, 3, 3), stride=stride, padding=padding), nn.Tanh(), nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(3, 3, 2), padding=padding), nn.Tanh(), nn.Conv3d(in_channels=96, out_channels=96, kernel_size=(3, 3, 3), stride=stride, padding=padding), nn.Tanh(), nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(1, 1, 2), padding=padding), nn.Tanh()) # define decoder self.decoder = nn.Sequential( nn.MaxUnpool3d(kernel_size=(2, 2, 2), stride=(1, 1, 2), padding=padding), nn.Tanh(), nn.ConvTranspose3d(in_channels=96, out_channels=96, kernel_size=(3, 3, 3), stride=stride, padding=padding), nn.Tanh(), nn.MaxUnpool3d(kernel_size=(2, 2, 2), stride=(3, 3, 2), padding=padding), nn.Tanh(), nn.ConvTranspose3d(in_channels=32, out_channels=32, kernel_size=(3, 3, 3), stride=stride, padding=padding), nn.Tanh(), nn.MaxUnpool3d(kernel_size=(2, 2, 2), stride=(3, 2, 2), padding=padding), nn.Tanh(), nn.ConvTranspose3d(in_channels=16, out_channels=16, kernel_size=(3, 3, 3), stride=stride, padding=padding), nn.Sigmoid())
def __init__(self):
    """Decoder32: stacked 3-D convs with matching BatchNorms, index-based
    max-unpooling, and a log-softmax over the 2 output classes.

    The deconvN_* layers are plain Conv3d (not transposed convolutions);
    spatial upsampling is done by self.maxunpool, which expects pooling
    indices at forward time. deconv_cat fuses a 64-channel concatenation
    down to 32; deconv0_0 maps to the 2 class channels.
    """
    super(Decoder32, self).__init__()
    self.deconv3 = nn.Conv3d(128, 64, kernel_size=3, stride=1, padding=1)
    self.deconv2_1 = nn.Conv3d(128, 128, kernel_size=3, stride=1, padding=1)
    self.deconv2_0 = nn.Conv3d(128, 32, kernel_size=3, stride=1, padding=1)
    self.deconv1_1 = nn.Conv3d(64, 64, kernel_size=3, stride=1, padding=1)
    self.deconv1_0 = nn.Conv3d(64, 16, kernel_size=3, stride=1, padding=1)
    self.deconv0_1 = nn.Conv3d(32, 32, kernel_size=3, stride=1, padding=1)
    self.deconv_cat = nn.Conv3d(64, 32, kernel_size=3, stride=1, padding=1)
    self.deconv0_0 = nn.Conv3d(32, 2, kernel_size=3, stride=1, padding=1)
    # One BatchNorm per conv above, width matching that conv's output.
    self.deconv3_bn = nn.BatchNorm3d(64)
    self.deconv2_1_bn = nn.BatchNorm3d(128)
    self.deconv2_0_bn = nn.BatchNorm3d(32)
    self.deconv1_1_bn = nn.BatchNorm3d(64)
    self.deconv1_0_bn = nn.BatchNorm3d(16)
    self.deconv0_1_bn = nn.BatchNorm3d(32)
    self.deconv_cat_bn = nn.BatchNorm3d(32)
    self.maxunpool = nn.MaxUnpool3d(2)
    # Fix: nn.LogSoftmax() with implicit dim is deprecated; for the 5-D
    # (N, C, D, H, W) tensors produced here the legacy fallback resolves to
    # dim=1 (the class axis), so dim=1 preserves behavior and silences the
    # deprecation warning.
    self.log_softmax = nn.LogSoftmax(dim=1)
def construct_inv_layers(self, model):
    """Implements the decoder part from the CNN.

    The decoder is the reversed list of the encoder in which some layers are
    replaced by their transpose counterpart; ConvTranspose and ReLU layers are
    inverted at the end (via replace_relu + reverse).

    :param model: (Module) a CNN. The convolutional part must be comprised in
        a 'features' class variable.
    :return: (Module) decoder part of the Autoencoder
    """
    inv_layers = []
    for i, layer in enumerate(self.encoder):
        if isinstance(layer, nn.Conv3d):
            # Mirror a conv with a transposed conv of swapped channel counts
            # but identical kernel/stride/padding.
            inv_layers.append(nn.ConvTranspose3d(layer.out_channels, layer.in_channels, layer.kernel_size, stride=layer.stride, padding=layer.padding))
            self.level += 1  # counts convolutional stages seen so far
        elif isinstance(layer, PadMaxPool3d):
            # Project-specific padded pool gets the matching cropping unpool.
            # NOTE(review): this branch precedes the nn.MaxPool3d check — the
            # order matters if PadMaxPool3d subclasses nn.MaxPool3d; confirm.
            inv_layers.append(CropMaxUnpool3d(layer.kernel_size, stride=layer.stride))
        elif isinstance(layer, nn.MaxPool3d):
            inv_layers.append(nn.MaxUnpool3d(layer.kernel_size, stride=layer.stride))
        elif isinstance(layer, nn.Linear):
            # Invert a fully connected layer by swapping feature counts.
            inv_layers.append(nn.Linear(layer.out_features, layer.in_features))
        elif isinstance(layer, Flatten):
            inv_layers.append(Reshape(model.flattened_shape))
        elif isinstance(layer, nn.LeakyReLU):
            # The inverse of LeakyReLU(s) on its own range is LeakyReLU(1/s).
            inv_layers.append(nn.LeakyReLU(negative_slope=1 / layer.negative_slope))
        else:
            # Anything unrecognized is copied verbatim (e.g. norms, dropout).
            inv_layers.append(deepcopy(layer))
    inv_layers = self.replace_relu(inv_layers)
    inv_layers.reverse()
    return nn.Sequential(*inv_layers)
def __init__(self, batch_norm=True):
    """Transposed counterpart of an Inception-style Reduction-A block.

    The input is split into two 96-channel branches (Split([96, 96])), each
    branch is upsampled, and the results are merged by addition. Note that
    ConvTranspose3d here is a project wrapper (it accepts a ``batch_norm``
    flag), not torch.nn.ConvTranspose3d.

    Args:
        batch_norm: forwarded to every ConvTranspose3d wrapper.
    """
    super(ReductionATranspose, self).__init__()
    self.split = Split([96, 96])
    # Branch built from three transposed convs; numbering suggests they are
    # applied in reverse order (convT3_3 last) — TODO confirm in forward.
    self.convT3_3 = ConvTranspose3d(96, 64, 3, stride=2, batch_norm=batch_norm)
    self.convT3_2 = ConvTranspose3d(64, 64, 3, padding=1, batch_norm=batch_norm)
    self.convT3_1 = ConvTranspose3d(64, 96, 1, batch_norm=batch_norm)
    # Single stride-2 transposed conv branch.
    self.convT2 = ConvTranspose3d(96, 96, 3, stride=2, batch_norm=batch_norm)
    # Max-unpool path; requires indices from the forward pooling stage.
    self.mup1 = nn.MaxUnpool3d(3, stride=2, padding=0)
    self.add = Merge(mode='add')
def __init__(self, skip_connection=True): super(SurfaceDecoder, self).__init__() # decoder self.deconv4 = nn.Conv3d(128, 64, 3, padding=1) self.deconv3_1 = nn.ConvTranspose3d(128, 128, 3, padding=1) self.deconv3_2 = nn.ConvTranspose3d(128, 32, 3, padding=1) self.deconv2_off_1 = nn.ConvTranspose3d(64, 64, 3, padding=1) self.deconv2_off_2 = nn.ConvTranspose3d(64, 16, 3, padding=1) self.deconv2_occ_1 = nn.ConvTranspose3d(64, 64, 3, padding=1) self.deconv2_occ_2 = nn.ConvTranspose3d(64, 16, 3, padding=1) self.deconv1_off_1 = nn.ConvTranspose3d(32, 32, 3, padding=1) self.deconv1_off_2 = nn.ConvTranspose3d(32, 3, 3, padding=3) self.deconv1_occ_1 = nn.ConvTranspose3d(32, 32, 3, padding=1) self.deconv1_occ_2 = nn.ConvTranspose3d(32, 1, 3, padding=3) # batchnorm self.deconv4_bn = nn.BatchNorm3d(64) self.deconv3_1_bn = nn.BatchNorm3d(128) self.deconv3_2_bn = nn.BatchNorm3d(32) self.deconv2_off_1_bn = nn.BatchNorm3d(64) self.deconv2_off_2_bn = nn.BatchNorm3d(16) self.deconv2_occ_1_bn = nn.BatchNorm3d(64) self.deconv2_occ_2_bn = nn.BatchNorm3d(16) self.deconv1_off_1_bn = nn.BatchNorm3d(32) self.deconv1_occ_1_bn = nn.BatchNorm3d(32) self.sigmoid = nn.Sigmoid() self.maxunpool = nn.MaxUnpool3d(2) self.skip_connection = skip_connection
def __init__(self):
    """PiaNet: a stem (forward1) plus four residual groups (forward2-5),
    shared pooling layers, and two decoder heads with dropout."""
    super(PiaNet, self).__init__()
    self.nff = [1, 8, 16, 32, 64, 128]  # NumFeature_Forw[0-5]
    self.num_blocks_forw = [2, 2, 3, 3]  # [2-5]
    # forward1: stem of two 3x3x3 convs with InstanceNorm + ReLU.
    self.forward1 = nn.Sequential(
        nn.Conv3d(self.nff[0], self.nff[1], kernel_size=3, padding=1),
        nn.InstanceNorm3d(self.nff[1]),
        nn.ReLU(inplace=True),
        nn.Conv3d(self.nff[1], self.nff[1], kernel_size=3, padding=1),
        nn.InstanceNorm3d(self.nff[1]),
        nn.ReLU(inplace=True))
    # forward2-5: residual groups. The first block of each group widens the
    # channels and takes one extra input channel (the appended source image —
    # "plus source connection").
    for i in range(len(self.num_blocks_forw)):  # 4 groups
        blocks = []
        for j in range(self.num_blocks_forw[i]):  # {2, 2, 3, 3} blocks each
            if j == 0:
                blocks.append(PostRes(self.nff[i + 1] + 1, self.nff[i + 2]))
            else:
                blocks.append(PostRes(self.nff[i + 2], self.nff[i + 2]))
        setattr(self, 'forward' + str(i + 2), nn.Sequential(*blocks))
    self.avgpool = nn.AvgPool3d(kernel_size=2, stride=2)
    # NOTE(review): maxpool is built WITHOUT return_indices=True, yet
    # unmaxpool (below) requires pooling indices — confirm how forward
    # obtains them.
    self.maxpool = nn.MaxPool3d(kernel_size=2, stride=2)
    self.unmaxpool = nn.MaxUnpool3d(kernel_size=2, stride=2)
    self.decoder1 = Decoder1()
    self.decoder2 = Decoder2()
    self.drop = nn.Dropout3d(p=0.5, inplace=False)
def __init__(self):
    """Three-stage decoder built from unpool + project Conv3d blocks, plus a
    stem that upsamples back to a single output channel.

    NOTE(review): nn.MaxUnpool3d placed inside nn.Sequential cannot receive
    the pooling indices it requires as a second argument — confirm that the
    forward pass unwraps these containers (or that a custom Sequential is in
    use), otherwise these stages fail at runtime.
    """
    super(_Decoder, self).__init__()
    self.layer3 = nn.Sequential(
        nn.MaxUnpool3d(2, 2),
        Conv3d(64, 32, 3, 1, 1, bias=False, batch_norm=True))
    self.layer2 = nn.Sequential(
        nn.MaxUnpool3d(2, 2),
        Conv3d(32, 16, 3, 1, 1, bias=False, batch_norm=True))
    self.layer1 = nn.Sequential(
        Conv3d(16, 16, 3, 1, 1, bias=False, batch_norm=True))
    # Stem: unpool(k=3, s=2, p=1) then a stride-2 transposed conv down to 1
    # channel (output_padding=1 to hit the exact doubled size).
    self.stem = nn.Sequential(
        nn.MaxUnpool3d(3, 2, 1),
        ConvTranspose3d(16, 1, 5, 2, 2, bias=False, batch_norm=True, output_padding=1))
def __init__(self, in_channels, out_channels): super(YLNet3D, self).__init__() #Conv3d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True) #nn.Sequential is a container for Module.Module will work in order during forward self.encoder_1 = nn.Sequential( nn.Conv3d(in_channels, 25, 3, padding=2, dilation=2), nn.BatchNorm3d(25), nn.ReLU()) #first encoder self.encoder_2 = nn.Sequential( nn.Conv3d(25, 25, 3, padding=2, dilation=2), nn.BatchNorm3d(25), nn.ReLU()) #second encoder self.encoder_3 = nn.Sequential( nn.Conv3d(25, 25, 3, padding=2, dilation=2), nn.BatchNorm3d(25), nn.ReLU()) #third encoder self.maxpool_1 = nn.MaxPool3d(2, stride=2, return_indices=True) #create masks self.maxpool_2 = nn.MaxPool3d(2, stride=2, return_indices=True) self.maxpool_3 = nn.MaxPool3d(2, stride=2, return_indices=True) self.unpool_1 = nn.MaxUnpool3d(2, stride=2) self.unpool_2 = nn.MaxUnpool3d(2, stride=2) self.unpool_3 = nn.MaxUnpool3d(2, stride=2) self.decoder_1 = nn.Sequential( nn.Conv3d( 25, 25, 3, padding=1), #the number of kernels can be easily changed here nn.BatchNorm3d(25), nn.ReLU()) # first decoder self.decoder_2 = nn.Sequential(nn.Conv3d(50, 25, 3, padding=1), nn.BatchNorm3d(25), nn.ReLU()) # second decoder self.decoder_3 = nn.Sequential(nn.Conv3d(50, 25, 3, padding=1), nn.BatchNorm3d(25), nn.ReLU()) # third decoder self.conv_4 = nn.Sequential( nn.Conv3d( 50, out_channels, 1, padding=0 ) #the number of output columns depend on the number of classes ) # last conv layer
def add_unpool(self, pool_type, kernel_size, padding, stride):
    """Build and return an unpooling layer for the given pool type.

    NOTE(review): the "avg" branch constructs nn.MaxPool3d — a *pooling*
    (downsampling) layer, not an unpooling one. PyTorch has no AvgUnpool3d,
    so callers requesting "avg" currently get a max-pool here; confirm the
    intended behavior (e.g. nn.Upsample) before relying on this branch.
    The raised message also contains a stray "\\ " escape sequence.

    :param pool_type: "max" or "avg".
    :param kernel_size: kernel size forwarded to the layer.
    :param padding: padding forwarded to the layer.
    :param stride: stride forwarded to the layer.
    :return: the constructed nn layer.
    :raises TypeError: for any other pool_type value.
    """
    if (pool_type == "max"):
        node = nn.MaxUnpool3d(kernel_size, padding=padding, stride=stride)
    elif (pool_type == "avg"):
        # NOTE(review): this is a pool, not an unpool — see docstring.
        node = nn.MaxPool3d(kernel_size, padding=padding, stride=stride)
    else:
        raise TypeError("Invalid value provided for `pool_type`.\ Allowed values are `max`, `avg`.")
    return node
def __init__(self): super(Autoencoder, self).__init__() # first layer self.ec1 = nn.Conv3d( 1, 16, (5, 3, 3), stride=1, padding=(2, 1, 1), ) self.em1 = nn.MaxPool3d((1, 2, 2), return_indices=True) #self.ed1 = nn.Dropout3d(p=0.25) # second layer self.ec2 = nn.Conv3d(16, 8, (5, 3, 3), stride=1, padding=(2, 1, 1)) self.em2 = nn.MaxPool3d((2, 2, 2), return_indices=True) #self.ed2 = nn.Dropout3d(p=0.25) # third layer self.ec3 = nn.Conv3d(8, 8, (5, 3, 3), stride=1, padding=(2, 1, 1)) self.em3 = nn.MaxPool3d((2, 2, 2), return_indices=True) # encoding done, time to decode self.dc1 = nn.ConvTranspose3d(8, 8, (5, 3, 3), stride=1, padding=(2, 1, 1)) self.dm1 = nn.MaxUnpool3d((2, 2, 2)) # inverse of 2nd Conv self.dc2 = nn.ConvTranspose3d(8, 8, (5, 3, 3), stride=1, padding=(2, 1, 1)) self.dm2 = nn.MaxUnpool3d((2, 2, 2)) # inverse of 1st Conv self.dc3 = nn.ConvTranspose3d(8, 16, (5, 3, 3), stride=1, padding=(2, 1, 1)) self.dm3 = nn.MaxUnpool3d((1, 2, 2)) # final inverse self.dc4 = nn.ConvTranspose3d(16, 1, (5, 3, 3), stride=1, padding=(2, 1, 1))
def __init__(self, in_f, out_f):
    """Decoding block: (1, 4, 4) max-unpool, then a size-preserving 3x3x3
    transposed conv mapping in_f -> out_f channels, then ReLU.

    Args:
        in_f: input feature channels.
        out_f: output feature channels.
    """
    super(deconv_block, self).__init__()
    # Inverts a (1, 4, 4) max-pool; indices must be supplied at forward time.
    self.unpool1 = nn.MaxUnpool3d(kernel_size=(1, 4, 4))
    self.deconv1 = nn.ConvTranspose3d(in_channels=in_f,
                                      out_channels=out_f,
                                      kernel_size=3,
                                      padding=1,
                                      dilation=(1, 1, 1))
    self.relu1 = nn.ReLU()
def __init__(self): super(AutoEncoder, self).__init__() # Encoder self.conv1 = nn.Conv3d(4, 16, 3) self.conv2 = nn.Conv3d(16, 32, 3) self.conv3 = nn.Conv3d(32, 96, 2) self.pool1 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True) self.pool2 = nn.MaxPool3d(kernel_size=3, stride=3, return_indices=True) self.pool3 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True) self.enc_linear = nn.Linear(381216, 512) # Decoder self.deconv1 = nn.ConvTranspose3d(96, 32, 2) self.deconv2 = nn.ConvTranspose3d(32, 16, 3) self.deconv3 = nn.ConvTranspose3d(16, 4, 3) self.unpool1 = nn.MaxUnpool3d(kernel_size=2, stride=2) self.unpool2 = nn.MaxUnpool3d(kernel_size=3, stride=3) self.unpool3 = nn.MaxUnpool3d(kernel_size=2, stride=2) self.dec_linear = nn.Linear(512, 381216)
def __init__(self, in_f, out_f):
    """One decoding step: (1, 4, 4) max-unpool, a size-preserving 3x3x3
    transposed conv mapping in_f -> out_f channels, then ReLU.

    Args:
        in_f: input feature channels.
        out_f: output feature channels.
    """
    super(ConvTranspose3d_Block_1step, self).__init__()
    # Plain attribute assignment on an nn.Module registers the child under
    # the same name as add_module('unpool1', ...) would.
    self.unpool1 = nn.MaxUnpool3d(kernel_size=(1, 4, 4))
    self.deconv1 = nn.ConvTranspose3d(in_channels=in_f,
                                      out_channels=out_f,
                                      kernel_size=3,
                                      padding=1,
                                      dilation=(1, 1, 1))
    self.relu1 = nn.ReLU()
def __init__(self, image_channels=IMG, h_dim=HDIM, z_dim=ZDIMS, n_classes=CLASSES):
    """Convolutional VAE: four k=2 conv stages with shared index-returning
    pooling, a flatten -> (mu, logvar) bottleneck, and a mirrored
    unflatten/unpool/transposed-conv decoder ending in Sigmoid.

    Args:
        image_channels: channels of the input volume.
        h_dim: flattened hidden size feeding the bottleneck.
        z_dim: latent dimensionality.
        n_classes: accepted but not used in construction here.
    """
    super(VAE, self).__init__()
    channels = (16, 32, 96)
    print("VAE")
    # encoder layers
    self.conv1 = nn.Conv3d(image_channels, channels[0], kernel_size=2)
    self.conv2 = nn.Conv3d(channels[0], channels[1], kernel_size=2)
    self.conv3 = nn.Conv3d(channels[1], channels[2], kernel_size=2)
    self.conv4 = nn.Conv3d(channels[2], channels[2], kernel_size=2)
    self.maxpool = nn.MaxPool3d(
        kernel_size=2, return_indices=True)  # pooling layers return indices
    self.flatten = Flatten()  # flattens dims into tensor
    self.mu = nn.Linear(h_dim, z_dim)  # mu layer
    self.logvar = nn.Linear(h_dim, z_dim)  # logvariance layer
    # decoder layers
    self.linear = nn.Linear(z_dim, h_dim)  # pulls from bottleneck to hidden
    self.unflatten = UnFlatten()  # unflattens tensor to dims
    self.maxunpool = nn.MaxUnpool3d(
        kernel_size=2
    )  # unpooling layers require indices from pooling layers
    # Asymmetric transposed-conv kernels — presumably tuned to recover the
    # exact encoder output shapes; TODO confirm against the input size.
    self.conv_tran4 = nn.ConvTranspose3d(h_dim, channels[2], kernel_size=(2, 3, 2))
    self.conv_tran3 = nn.ConvTranspose3d(channels[2], channels[2], kernel_size=(2, 2, 2))
    self.conv_tran2 = nn.ConvTranspose3d(channels[2], channels[1], kernel_size=(3, 2, 3))
    self.conv_tran1 = nn.ConvTranspose3d(channels[1], channels[0], kernel_size=(2, 2, 3))
    self.conv_tran0 = nn.ConvTranspose3d(channels[0], image_channels, kernel_size=(3, 3, 2))
    self.sigmoid = nn.Sigmoid()
def __init__(self, model_config): super().__init__() # Encoder specification self.enc_cnn_1 = nn.Conv3d(53, 32, kernel_size=3) self.enc_cnn_2 = nn.Conv3d(32, 16, kernel_size=3) self.enc_cnn_3 = nn.Conv3d(16, 8, kernel_size=3) self.enc_pool = nn.MaxPool3d(2, stride=2, return_indices=True) self.enc_linear_1 = nn.Linear(480, 256) self.enc_linear_2 = nn.Linear(256, 256) self.dec_cnn_1 = nn.Conv3d(8, 16, kernel_size=3, padding=[2, 2, 2]) self.dec_cnn_2 = nn.Conv3d(16, 32, kernel_size=3, padding=[2, 2, 2]) self.dec_cnn_3 = nn.Conv3d(32, 53, kernel_size=3, padding=[2, 2, 2]) self.dec_unpool1 = nn.MaxUnpool3d(kernel_size=[2, 2, 3], stride=2) self.dec_unpool2 = nn.MaxUnpool3d(kernel_size=[3, 3, 3], stride=2) self.dec_linear_1 = nn.Linear(256, 256) self.dec_linear_2 = nn.Linear(256, 480) self.debug_model = True
def __init__(self, h_dim=40, z_dim=4):
    """VAE-style U-Net: three stride-2 conv blocks down to an 8^3 x 12
    feature map, a (mu, std) linear bottleneck, and three unconv blocks up.

    Args:
        h_dim: hidden width of the bottleneck FC layers.
        z_dim: latent dimensionality.
    """
    super(MyUNet, self).__init__()
    self.fc1 = nn.Linear(8**3 * 12, h_dim)  # FC internal enc
    self.fc2 = nn.Linear(h_dim, z_dim)  # FC mu
    self.fc3 = nn.Linear(h_dim, z_dim)  # FC std
    self.fc4 = nn.Linear(z_dim, h_dim)  # FC internal dec
    self.fc5 = nn.Linear(h_dim, 8**3 * 12)  # FC internal dec
    self.relu = nn.ReLU()
    self.pool = nn.MaxPool3d(2, return_indices=True)
    self.unpool = nn.MaxUnpool3d(2)  # indices are passed in later, at forward time
    # Encoder blocks: channels 1 -> 4 -> 8 -> 12, each halving spatial size.
    self.conv_block1 = conv_block(
        1,
        4,
        kernel_size=2,
        # padding=1,
        stride=2)
    self.conv_block2 = conv_block(
        4,
        8,
        kernel_size=2,
        # padding=1,
        stride=2)
    self.conv_block3 = conv_block(
        8,
        12,
        kernel_size=2,
        # padding=1,
        stride=2)
    # Decoder blocks: mirror of the encoder, 12 -> 8 -> 4 -> 1.
    self.unconv_block1 = unconv_block(
        12,
        8,
        kernel_size=2,
        # padding=1,
        stride=2)
    self.unconv_block2 = unconv_block(
        8,
        4,
        kernel_size=2,
        # padding=1,
        stride=2)
    self.unconv_block3 = unconv_block(
        4,
        1,
        kernel_size=2,
        # padding=1,
        stride=2)
def conv_up_from_spec(conv_layers, image_size, out_channels):
    """Build the decoder layer list that mirrors an encoder spec.

    Walks ``conv_layers`` backwards, emitting for each encoder stage an
    optional MaxUnpool3d (when that stage pooled), a ReLU, and a
    ConvTranspose3d that undoes the stage's channel growth. The first
    (deepest-in-decoder) stage maps down to ``out_channels`` instead.

    Args:
        conv_layers: sequence of (conv_kernel_size, channel_growth,
            pool_stride) tuples describing the encoder.
        image_size: encoder input shape; image_size[1] is its channel count.
        out_channels: channels the final transposed conv must produce.
    Returns:
        list of nn.Module layers, ordered for the decoder.
    """
    # Channels entering the decoder: input channels times every growth factor.
    growths = [growth for _, growth, _ in conv_layers]
    nextchans = image_size[1] * np.prod(growths)
    layers = []
    for idx in reversed(range(len(conv_layers))):
        conv_kernel_size, channel_growth, pool_stride = conv_layers[idx]
        chans = nextchans
        # The last decoder stage (encoder stage 0) emits out_channels;
        # every other stage just undoes its growth factor.
        nextchans = out_channels if idx == 0 else nextchans // channel_growth
        if pool_stride > 1:
            # The unpool kernel equals the pool stride, mirroring the encoder.
            layers.append(
                nn.MaxUnpool3d(kernel_size=pool_stride, stride=pool_stride))
        layers.append(nn.ReLU())
        layers.append(
            nn.ConvTranspose3d(chans, nextchans, kernel_size=conv_kernel_size))
    return layers
def __init__(self, T=256, W=32, H=32, D=32, skip_connection=True):
    """Two-headed surface decoder (occupancy "occ" and offset "off" heads)
    with an occupancy-to-topology conversion module.

    Args:
        T: topology-related size, stored for forward use.
        W, H, D: target grid dimensions, stored for forward use.
        skip_connection: when True, forward presumably concatenates encoder
            features (the 128-in widths at deconv3_* suggest this) — confirm.
    """
    super(SurfaceDecoder, self).__init__()
    self.W = W
    self.H = H
    self.D = D
    self.T = T
    self.actvn = nn.ReLU()
    self.Occ2Top = OccupancyToTopology()
    # decoder
    self.deconv4 = nn.Conv3d(128, 64, 3, padding=1)
    self.deconv3_1 = nn.ConvTranspose3d(128, 128, 3, padding=1)
    self.deconv3_2 = nn.ConvTranspose3d(128, 32, 3, padding=1)
    self.deconv2_off_1 = nn.ConvTranspose3d(64, 64, 3, padding=1)
    self.deconv2_off_2 = nn.ConvTranspose3d(64, 16, 3, padding=1)
    self.deconv2_occ_1 = nn.ConvTranspose3d(64, 64, 3, padding=1)
    self.deconv2_occ_2 = nn.ConvTranspose3d(64, 16, 3, padding=1)
    self.deconv1_off_1 = nn.ConvTranspose3d(32, 32, 3, padding=1)
    # padding=3 on a k=3 transposed conv SHRINKS each spatial dim by 4 —
    # presumably an intentional crop of the final heads; TODO confirm.
    self.deconv1_off_2 = nn.ConvTranspose3d(32, 3, 3, padding=3)
    self.deconv1_occ_1 = nn.ConvTranspose3d(32, 32, 3, padding=1)
    self.deconv1_occ_2 = nn.ConvTranspose3d(32, 1, 3, padding=3)
    # batchnorm (one per intermediate layer; the two final heads have none)
    self.deconv4_bn = nn.BatchNorm3d(64)
    self.deconv3_1_bn = nn.BatchNorm3d(128)
    self.deconv3_2_bn = nn.BatchNorm3d(32)
    self.deconv2_off_1_bn = nn.BatchNorm3d(64)
    self.deconv2_off_2_bn = nn.BatchNorm3d(16)
    self.deconv2_occ_1_bn = nn.BatchNorm3d(64)
    self.deconv2_occ_2_bn = nn.BatchNorm3d(16)
    self.deconv1_off_1_bn = nn.BatchNorm3d(32)
    self.deconv1_occ_1_bn = nn.BatchNorm3d(32)
    self.sigmoid = nn.Sigmoid()
    self.maxunpool = nn.MaxUnpool3d(2)  # needs pooling indices at forward time
    self.skip_connection = skip_connection
def __init__(self):
    """TASED_v2: S3D-style encoder (SepConv/Mixed blocks) with three kinds of
    pooling per stage — maxp* (spatial, for the main path), maxm* (temporal
    collapse), maxt* (spatial, return_indices=True for the decoder unpools) —
    followed by a transposed-conv decoder producing a single Sigmoid saliency
    channel."""
    super(TASED_v2, self).__init__()
    self.base1 = nn.Sequential(
        SepConv3d(3, 64, kernel_size=7, stride=2, padding=3),
        nn.MaxPool3d(kernel_size=(1,3,3), stride=(1,2,2), padding=(0,1,1)),
        BasicConv3d(64, 64, kernel_size=1, stride=1),
        SepConv3d(64, 192, kernel_size=3, stride=1, padding=1),
    )
    self.maxp2 = nn.MaxPool3d(kernel_size=(1,3,3), stride=(1,2,2), padding=(0,1,1))
    self.maxm2 = nn.MaxPool3d(kernel_size=(4,1,1), stride=(4,1,1), padding=(0,0,0))
    self.maxt2 = nn.MaxPool3d(kernel_size=(1,3,3), stride=(1,2,2), padding=(0,1,1), return_indices=True)
    self.base2 = nn.Sequential(
        Mixed_3b(),
        Mixed_3c(),
    )
    self.maxp3 = nn.MaxPool3d(kernel_size=(3,3,3), stride=(2,2,2), padding=(1,1,1))
    self.maxm3 = nn.MaxPool3d(kernel_size=(4,1,1), stride=(4,1,1), padding=(0,0,0))
    self.maxt3 = nn.MaxPool3d(kernel_size=(1,3,3), stride=(1,2,2), padding=(0,1,1), return_indices=True)
    self.base3 = nn.Sequential(
        Mixed_4b(),
        Mixed_4c(),
        Mixed_4d(),
        Mixed_4e(),
        Mixed_4f(),
    )
    self.maxt4 = nn.MaxPool3d(kernel_size=(2,1,1), stride=(2,1,1), padding=(0,0,0))
    self.maxp4 = nn.MaxPool3d(kernel_size=(1,2,2), stride=(1,2,2), padding=(0,0,0), return_indices=True)
    self.base4 = nn.Sequential(
        Mixed_5b(),
        Mixed_5c(),
    )
    # Decoder: convtsp1-4 interleaved with unpool1-3; unpool kernels/strides
    # mirror the index-returning pools above.
    self.convtsp1 = nn.Sequential(
        nn.Conv3d(1024, 1024, kernel_size=1, stride=1, bias=False),
        nn.BatchNorm3d(1024, eps=1e-3, momentum=0.001, affine=True),
        nn.ReLU(),
        nn.ConvTranspose3d(1024, 832, kernel_size=(1,3,3), stride=1, padding=(0,1,1), bias=False),
        nn.BatchNorm3d(832, eps=1e-3, momentum=0.001, affine=True),
        nn.ReLU(),
    )
    self.unpool1 = nn.MaxUnpool3d(kernel_size=(1,2,2), stride=(1,2,2), padding=(0,0,0))
    self.convtsp2 = nn.Sequential(
        nn.ConvTranspose3d(832, 480, kernel_size=(1,3,3), stride=1, padding=(0,1,1), bias=False),
        nn.BatchNorm3d(480, eps=1e-3, momentum=0.001, affine=True),
        nn.ReLU(),
    )
    self.unpool2 = nn.MaxUnpool3d(kernel_size=(1,3,3), stride=(1,2,2), padding=(0,1,1))
    self.convtsp3 = nn.Sequential(
        nn.ConvTranspose3d(480, 192, kernel_size=(1,3,3), stride=1, padding=(0,1,1), bias=False),
        nn.BatchNorm3d(192, eps=1e-3, momentum=0.001, affine=True),
        nn.ReLU(),
    )
    self.unpool3 = nn.MaxUnpool3d(kernel_size=(1,3,3), stride=(1,2,2), padding=(0,1,1))
    # Final tower: alternates spatial upsampling with (2,1,1) temporal
    # reductions, then projects 4 -> 1 channel and applies Sigmoid.
    self.convtsp4 = nn.Sequential(
        nn.ConvTranspose3d(192, 64, kernel_size=(1,4,4), stride=(1,2,2), padding=(0,1,1), bias=False),
        nn.BatchNorm3d(64, eps=1e-3, momentum=0.001, affine=True),
        nn.ReLU(),
        nn.Conv3d(64, 64, kernel_size=(2,1,1), stride=(2,1,1), bias=False),
        nn.BatchNorm3d(64, eps=1e-3, momentum=0.001, affine=True),
        nn.ReLU(),
        nn.ConvTranspose3d(64, 4, kernel_size=1, stride=1, bias=False),
        nn.BatchNorm3d(4, eps=1e-3, momentum=0.001, affine=True),
        nn.ReLU(),
        nn.Conv3d(4, 4, kernel_size=(2,1,1), stride=(2,1,1), bias=False),
        nn.BatchNorm3d(4, eps=1e-3, momentum=0.001, affine=True),
        nn.ReLU(),
        nn.ConvTranspose3d(4, 4, kernel_size=(1,4,4), stride=(1,2,2), padding=(0,1,1), bias=False),
        nn.Conv3d(4, 1, kernel_size=1, stride=1, bias=True),
        nn.Sigmoid(),
    )
def __init__(self, in_ch, out_ch):
    """Upsampling stage: k=3/s=2 max-unpool (indices supplied at forward
    time) followed by a DoubleBlock mapping in_ch -> out_ch channels."""
    super(Up, self).__init__()
    self.unpool = nn.MaxUnpool3d(3, stride=2)
    self.block = DoubleBlock(in_ch, out_ch)
def __init__(self, kernel_size, stride):
    """Wraps nn.MaxUnpool3d; the enclosing class presumably crops the unpool
    output (to invert a padded pool) in its forward — confirm there.

    Args:
        kernel_size: unpooling kernel size.
        stride: unpooling stride.
    """
    super(CropMaxUnpool3d, self).__init__()
    self.unpool = nn.MaxUnpool3d(kernel_size, stride)
def __init__(self, batch_norm=True):
    """Transposed counterpart of an Inception-style stem block: three
    split/merge stages (6, 5, 4) followed by three plain transposed convs
    back to a single channel. ConvTranspose3d here is a project wrapper
    (it accepts a ``batch_norm`` flag), not torch.nn.ConvTranspose3d.

    Args:
        batch_norm: forwarded to every ConvTranspose3d wrapper.
    """
    super(StemBlockTranspose, self).__init__()
    # Stage 6: conv-transpose branch vs. max-unpool branch, merged by add.
    self.split6 = Split([48])
    self.convT6_l = ConvTranspose3d(48, 48, 3, stride=2, output_padding=1, batch_norm=batch_norm)
    self.maxunpool6_r = nn.MaxUnpool3d(3, stride=2, padding=0)
    self.add6 = Merge(mode='add')
    # Stage 5: short branch vs. factorized (1x1x5 / 1x5x1 / 5x1x1) branch.
    self.split5 = Split([24])
    self.convT5_l_2 = ConvTranspose3d(24, 16, 3, padding=1, batch_norm=batch_norm)
    self.convT5_l_1 = ConvTranspose3d(16, 40, 1, batch_norm=batch_norm)
    self.convT5_r_5 = ConvTranspose3d(24, 16, 3, padding=1, batch_norm=batch_norm)
    self.convT5_r_4 = ConvTranspose3d(16, 16, (1, 1, 5), padding=(0, 0, 2), batch_norm=batch_norm)
    self.convT5_r_3 = ConvTranspose3d(16, 16, (1, 5, 1), padding=(0, 2, 0), batch_norm=batch_norm)
    self.convT5_r_2 = ConvTranspose3d(16, 16, (5, 1, 1), padding=(2, 0, 0), batch_norm=batch_norm)
    self.convT5_r_1 = ConvTranspose3d(16, 40, 1, batch_norm=batch_norm)
    self.add5 = Merge(mode='add')
    # Stage 4: max-unpool branch vs. stride-2 conv-transpose branch.
    self.split4 = Split([16])
    self.maxunpool4_l = nn.MaxUnpool3d(3, stride=2, padding=0)
    self.convT4_r = ConvTranspose3d(24, 16, 3, stride=2, batch_norm=batch_norm)
    self.add4 = Merge(mode='add')
    # Tail: 16 -> 8 -> 8 -> 1 channels, final layer stride-2.
    self.convT3 = ConvTranspose3d(16, 8, 3, padding=1, batch_norm=batch_norm)
    self.convT2 = ConvTranspose3d(8, 8, 3, padding=1, batch_norm=batch_norm)
    self.convT1 = ConvTranspose3d(8, 1, 3, stride=2, padding=1, batch_norm=batch_norm)
def __init__(self, nlabel, mindepth):
    """YunNet: SPP feature extractor plus a 3-D CNN over the cost volume.
    Four depth-axis (2,1,1) max-pool stages (with indices) and four matching
    unpool stages form an hourglass; `todepth` regresses a 1-channel volume.

    Args:
        nlabel: number of depth hypotheses (stored for forward use).
        mindepth: minimum depth value (stored for forward use).
    """
    super(YunNet, self).__init__()
    self.nlabel = nlabel
    self.mindepth = mindepth
    # spp
    self.feature_extraction = feature_extraction()
    # 3DCNN
    self.conv3d0 = nn.Sequential(
        convbn_3d(64, 32, 3, 1, 1),
        nn.ReLU(inplace=True),
        convbn_3d(32, 32, 3, 1, 1),
        nn.ReLU(inplace=True),
    )
    # Encoder: each maxN halves the depth axis (indices kept for unpooling),
    # each maxN_1 is a 1x1x1 conv + BN + ReLU refinement.
    self.max1 = nn.MaxPool3d((2, 1, 1), return_indices=True)
    self.max1_1 = nn.Sequential(nn.Conv3d(32, 32, 1, 1, 0, 1),
                                nn.BatchNorm3d(32), nn.ReLU(inplace=True))
    self.max2 = nn.MaxPool3d((2, 1, 1), return_indices=True)
    self.max2_1 = nn.Sequential(nn.Conv3d(32, 32, 1, 1, 0, 1),
                                nn.BatchNorm3d(32), nn.ReLU(inplace=True))
    self.max3 = nn.MaxPool3d((2, 1, 1), return_indices=True)
    self.max3_1 = nn.Sequential(nn.Conv3d(32, 32, 1, 1, 0, 1),
                                nn.BatchNorm3d(32), nn.ReLU(inplace=True))
    self.max4 = nn.MaxPool3d((2, 1, 1), return_indices=True)
    self.max4_1 = nn.Sequential(nn.Conv3d(32, 32, 1, 1, 0, 1),
                                nn.BatchNorm3d(32), nn.ReLU(inplace=True))
    # Decoder: unpoolN restores the depth axis; unpoolN_1 refinements have
    # no ReLU (BN only) — presumably activated after a residual add; confirm.
    self.unpool1 = nn.MaxUnpool3d((2, 1, 1), stride=(2, 1, 1))
    self.unpool1_1 = nn.Sequential(nn.Conv3d(32, 32, 1, 1, 0, 1),
                                   nn.BatchNorm3d(32))
    self.unpool2 = nn.MaxUnpool3d((2, 1, 1), stride=(2, 1, 1))
    self.unpool2_1 = nn.Sequential(nn.Conv3d(32, 32, 1, 1, 0, 1),
                                   nn.BatchNorm3d(32))
    self.unpool3 = nn.MaxUnpool3d((2, 1, 1), stride=(2, 1, 1))
    self.unpool3_1 = nn.Sequential(nn.Conv3d(32, 32, 1, 1, 0, 1),
                                   nn.BatchNorm3d(32))
    self.unpool4 = nn.MaxUnpool3d((2, 1, 1), stride=(2, 1, 1))
    self.unpool4_1 = nn.Sequential(
        nn.Conv3d(32, 32, 1, 1, 0, 1),
        nn.BatchNorm3d(32),
    )
    self.todepth = nn.Sequential(
        convbn_3d(32, 32, 3, 1, 1),
        nn.ReLU(inplace=True),
        convbn_3d(32, 1, 3, 1, 1),
    )
    # He-style initialization for convs; BN to identity; Linear bias to zero.
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.Conv3d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[
                2] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
        elif isinstance(m, nn.BatchNorm3d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
        elif isinstance(m, nn.Linear):
            m.bias.data.zero_()
def __init__(self):
    """Detection network: residual forward path (forw1-4) with index-tracked
    pooling, residual backward path (back2-3) with unpooling/transposed
    convs, DAC + SPP context blocks, and a 5-per-anchor regression head.
    Normalization is GroupNorm or BatchNorm3d per config['group_norm']."""
    super(Net, self).__init__()
    self.preBlock = nn.Sequential(
        nn.Conv3d(1, 24, kernel_size=3, padding=1),
        nn.GroupNorm(3, 24) if config['group_norm'] else nn.BatchNorm3d(24),
        nn.ReLU(inplace=True),
        nn.Conv3d(24, 24, kernel_size=3, padding=1),
        nn.GroupNorm(3, 24) if config['group_norm'] else nn.BatchNorm3d(24),
        nn.ReLU(inplace=True))
    num_blocks_forw = [2, 2, 3, 3]
    num_blocks_back = [3, 3]
    self.featureNum_forw = [24, 32, 64, 64, 64]
    self.featureNum_back = [128, 64, 64]
    # Forward residual groups: first block of each group changes width.
    for i in range(len(num_blocks_forw)):
        blocks = []
        for j in range(num_blocks_forw[i]):
            if j == 0:
                blocks.append(
                    PostRes(self.featureNum_forw[i],
                            self.featureNum_forw[i + 1],
                            group_norm=config['group_norm']))
            else:
                blocks.append(
                    PostRes(self.featureNum_forw[i + 1],
                            self.featureNum_forw[i + 1],
                            group_norm=config['group_norm']))
        setattr(self, 'forw' + str(i + 1), nn.Sequential(*blocks))
    # Backward residual groups: first block consumes decoder + skip features;
    # the deepest group gets +3 channels (extra coordinate input — confirm
    # against forward).
    for i in range(len(num_blocks_back)):
        blocks = []
        for j in range(num_blocks_back[i]):
            if j == 0:
                if i == 0:
                    addition = 3
                else:
                    addition = 0
                blocks.append(
                    PostRes(self.featureNum_back[i + 1] +
                            self.featureNum_forw[i + 2] + addition,
                            self.featureNum_back[i],
                            group_norm=config['group_norm']))
            else:
                blocks.append(
                    PostRes(self.featureNum_back[i],
                            self.featureNum_back[i],
                            group_norm=config['group_norm']))
        setattr(self, 'back' + str(i + 2), nn.Sequential(*blocks))
    self.maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
    self.maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
    self.maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
    self.maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
    self.unmaxpool1 = nn.MaxUnpool3d(kernel_size=2, stride=2)
    self.unmaxpool2 = nn.MaxUnpool3d(kernel_size=2, stride=2)
    # path1 input is 68 = 64 features + 4 extra channels — confirm in forward.
    self.path1 = nn.Sequential(
        nn.ConvTranspose3d(68, 64, kernel_size=2, stride=2),
        nn.GroupNorm(4, 64) if config['group_norm'] else nn.BatchNorm3d(64),
        nn.ReLU(inplace=True))
    self.path2 = nn.Sequential(
        nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2),
        nn.GroupNorm(4, 64) if config['group_norm'] else nn.BatchNorm3d(64),
        nn.ReLU(inplace=True))
    self.drop = nn.Dropout3d(p=0.5, inplace=False)
    # Head: 5 values (score + 4 box params) per anchor.
    self.output = nn.Sequential(
        nn.Conv3d(self.featureNum_back[0], 64, kernel_size=1),
        nn.ReLU(),
        nn.Conv3d(64, 5 * len(config['anchors']), kernel_size=1))
    self.dblock = DACblock(64)
    self.spp = SPPblock(64)
def __init__(self):
    """Detection network variant with separate combined, classification
    ("nodule") and regression heads; the classification head's final bias is
    initialized for focal loss (prior probability 0.01)."""
    super(Net, self).__init__()
    self.preBlock = nn.Sequential(
        nn.Conv3d(1, 24, kernel_size=3, padding=1), nn.BatchNorm3d(24),
        nn.ReLU(), nn.Conv3d(24, 24, kernel_size=3, padding=1),
        nn.BatchNorm3d(24), nn.ReLU())
    # 3 poolings, each pooling downsamples the feature map by a factor 2.
    # 3 groups of blocks. The first block of each group has one pooling.
    num_blocks_forw = [2, 2, 3, 3]
    num_blocks_back = [3, 3]
    self.featureNum_forw = [24, 32, 64, 64, 64]
    self.featureNum_back = [128, 64, 64]
    # Forward residual groups: first block of each group changes width.
    for i in range(len(num_blocks_forw)):
        blocks = []
        for j in range(num_blocks_forw[i]):
            if j == 0:
                blocks.append(
                    PostRes(self.featureNum_forw[i],
                            self.featureNum_forw[i + 1]))
            else:
                blocks.append(
                    PostRes(self.featureNum_forw[i + 1],
                            self.featureNum_forw[i + 1]))
        setattr(self, 'forw' + str(i + 1), nn.Sequential(*blocks))
    # Backward residual groups: first block consumes decoder + skip features;
    # the deepest group gets +3 channels (extra coordinate input — confirm
    # against forward).
    for i in range(len(num_blocks_back)):
        blocks = []
        for j in range(num_blocks_back[i]):
            if j == 0:
                if i == 0:
                    addition = 3
                else:
                    addition = 0
                blocks.append(
                    PostRes(
                        self.featureNum_back[i + 1] +
                        self.featureNum_forw[i + 2] + addition,
                        self.featureNum_back[i]))
            else:
                blocks.append(
                    PostRes(self.featureNum_back[i],
                            self.featureNum_back[i]))
        setattr(self, 'back' + str(i + 2), nn.Sequential(*blocks))
    self.maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
    self.maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
    self.maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
    self.maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
    self.unmaxpool1 = nn.MaxUnpool3d(kernel_size=2, stride=2)
    self.unmaxpool2 = nn.MaxUnpool3d(kernel_size=2, stride=2)
    self.path1 = nn.Sequential(
        nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2),
        nn.BatchNorm3d(64), nn.ReLU(inplace=True))
    self.path2 = nn.Sequential(
        nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2),
        nn.BatchNorm3d(64), nn.ReLU(inplace=True))
    self.drop = nn.Dropout3d(p=0.5, inplace=False)
    # Heads: 5 (score + box), 1 (nodule score), 4 (box regression) per anchor.
    self.output = nn.Sequential(
        nn.Conv3d(self.featureNum_back[0], 64, kernel_size=1),
        nn.ReLU(),
        #nn.Dropout3d(p = 0.3),
        nn.Conv3d(64, 5 * len(config['anchors']), kernel_size=1))
    self.nodule_output = nn.Sequential(
        nn.Conv3d(self.featureNum_back[0], 64, kernel_size=1),
        nn.ReLU(),
        #nn.Dropout3d(p = 0.3),
        nn.Conv3d(64, len(config['anchors']), kernel_size=1))
    self.regress_output = nn.Sequential(
        nn.Conv3d(self.featureNum_back[0], 64, kernel_size=1),
        nn.ReLU(),
        #nn.Dropout3d(p = 0.3),
        nn.Conv3d(64, 4 * len(config['anchors']), kernel_size=1))
    # Focal-loss bias init: sigmoid(bias) == 0.01 at the start of training,
    # applied to the final conv (index 2) of the nodule head.
    focal_bias = -math.log((1.0 - 0.01) / 0.01)
    self._modules['nodule_output'][2].bias.data.fill_(focal_bias)
def __init__(self):
    """3-D autoencoder over 1x48x56x48 volumes: three conv+pool encoder
    stages, a 1x1x1 bottleneck conv pair, and a decoder mixing Upsample and
    MaxUnpool3d for the inverse pooling steps.

    NOTE(review): unpool2/unpool1 are MaxUnpool3d and need the indices that
    pool2/pool1 return, while unpool3 is index-free Upsample — confirm
    forward wires these correctly. Also, the attribute named ``tanh`` is
    actually nn.Softsign, and conv4 outputs 32 channels although the original
    comment claimed 1.
    """
    super(Autoencoder3D, self).__init__()
    # Architecture adopted from:
    # https://www.sciencedirect.com/science/article/pii/S1361841517301287
    # Create memoryless blocks for reuse.
    self.relu = nn.ReLU(True)
    self.tanh = nn.Softsign()  # NOTE(review): name says tanh, layer is Softsign
    ########
    #
    # Encoder layers:
    #
    ########
    # 1x48x56x48 -> 32x48x56x48 (1x1x1 conv widens channels only).
    self.conv1 = nn.Conv3d(1, 32, kernel_size=1, padding=0)
    # 32x48x56x48 -> 32x24x28x24 (k=2 pool, indices kept).
    self.pool1 = nn.MaxPool3d(2, return_indices=True)
    # 32x24x28x24 -> 32x24x28x24 (k=5, pad=2 preserves size).
    self.conv2 = nn.Conv3d(32, 32, kernel_size=5, padding=2)
    # 32x24x28x24 -> 32x12x14x12.
    self.pool2 = nn.MaxPool3d(2, return_indices=True)
    # 32x12x14x12 -> 32x12x14x12.
    self.conv3 = nn.Conv3d(32, 32, kernel_size=5, padding=2)
    # 32x12x14x12 -> 32x6x7x6.
    self.pool3 = nn.MaxPool3d(2, return_indices=True)
    # Bottleneck 1x1x1 conv, 32 -> 32 channels.
    self.conv4 = nn.Conv3d(32, 32, kernel_size=1, padding=0)
    ########
    #
    # Decoder layers:
    #
    ########
    # self.deconv4 = nn.ConvTranspose3d(32, 32, kernel_size=1, padding=0)
    # TODO: rename these layers since they're no longer deconvs.
    self.deconv4 = nn.Conv3d(32, 32, kernel_size=1, padding=0)
    # 32x6x7x6 -> 32x12x14x12 (trilinear upsampling, no indices needed).
    self.unpool3 = nn.Upsample(scale_factor=2, mode='trilinear')
    # self.deconv3 = nn.ConvTranspose3d(32, 32, kernel_size=5, padding=2)
    self.deconv3 = nn.Conv3d(32, 32, kernel_size=5, padding=2)
    # 32x12x14x12 -> 32x24x28x24 (index-based unpool).
    self.unpool2 = nn.MaxUnpool3d(2, stride=2)
    # self.unpool2 = nn.Upsample(scale_factor=2, mode='trilinear')
    # self.deconv2 = nn.ConvTranspose3d(32, 32, kernel_size=5, padding=2)
    self.deconv2 = nn.Conv3d(32, 32, kernel_size=5, padding=2)
    # 32x24x28x24 -> 32x48x56x48 (index-based unpool).
    self.unpool1 = nn.MaxUnpool3d(2, stride=2)
    # self.unpool1 = nn.Upsample(scale_factor=2, mode='trilinear')
    # 32x48x56x48 -> 1x48x56x48 (k=3, pad=1 preserves size).
    # self.deconv1 = nn.Conv3d(32, 1, kernel_size=1, padding=0)
    self.deconv1 = nn.ConvTranspose3d(32, 1, kernel_size=3, stride=1, padding=1)
def __init__(self):
    """Build the "deconv" (reconstruction) counterpart of UNet3D_ef.

    The layer stack mirrors the forward network: each stage pairs
    ConvTranspose3d layers with a MaxUnpool3d. The commented-out
    nn.Conv3d lines document the corresponding forward-network layers
    being mirrored.

    NOTE(review): MaxUnpool3d cannot run through a plain nn.Sequential
    forward pass (it needs pooling indices); presumably forward()
    indexes self.features layer-by-layer using the maps below — confirm
    against the forward implementation.
    """
    super(UNet3D_ef_deconv, self).__init__()
    features = 30
    in_channels = 3
    # Per-stage input/output channel widths, widest (features*16) first.
    fin = [
        features * 16, features * 8, features * 4, features * 2, features
    ]
    fout = [
        features * 8, features * 4, features * 2, features, in_channels
    ]
    self.features = nn.Sequential(
        # encoder 1
        nn.ConvTranspose3d(fin[0], fin[0], 3, padding=1, bias=False),
        # nn.Conv3d(in_channels=fin[0],out_channels=fin[0],kernel_size=3,padding=1,stride=1,bias=False),
        nn.LeakyReLU(negative_slope=0.01, inplace=True),
        nn.ConvTranspose3d(fin[0], fout[0], 3, padding=1, bias=False),
        # nn.Conv3d(in_channels=fin[0],out_channels=fout[1],kernel_size=3,padding=1,stride=1,bias=False),
        nn.LeakyReLU(negative_slope=0.01, inplace=True),
        # nn.ConvTranspose3d(fout[0], fout[0], kernel_size=2, stride=2),
        nn.MaxUnpool3d(2, stride=2),
        #encoder 2
        nn.ConvTranspose3d(fin[1], fin[1], 3, padding=1, bias=False),
        # nn.Conv3d(in_channels=fin[1],out_channels=fin[1],kernel_size=3,padding=1,stride=1,bias=False),
        nn.LeakyReLU(negative_slope=0.01, inplace=True),
        nn.ConvTranspose3d(fin[1], fout[1], 3, padding=1, bias=False),
        # nn.Conv3d(in_channels=fin[1],out_channels=fout[1],kernel_size=3,padding=1,stride=1,bias=False),
        nn.LeakyReLU(negative_slope=0.01, inplace=True),
        # nn.ConvTranspose3d(fout[1], fout[1], kernel_size=2, stride=2),
        nn.MaxUnpool3d(2, stride=2),
        #encoder 3
        nn.ConvTranspose3d(fin[2], fin[2], 3, padding=1, bias=False),
        # nn.Conv3d(in_channels=fin[2],out_channels=fin[2],kernel_size=3,padding=1,stride=1,bias=False),
        nn.LeakyReLU(negative_slope=0.01, inplace=True),
        nn.ConvTranspose3d(fin[2], fout[2], 3, padding=1, bias=False),
        # nn.Conv3d(in_channels=fin[2],out_channels=fout[2],kernel_size=3,padding=1,stride=1,bias=False),
        nn.LeakyReLU(negative_slope=0.01, inplace=True),
        # nn.ConvTranspose3d(fout[2], fout[2], kernel_size=2, stride=2),
        nn.MaxUnpool3d(2, stride=2),
        #encoder 4
        nn.ConvTranspose3d(fin[3], fin[3], 3, padding=1, bias=False),
        # nn.Conv3d(in_channels=fin[3],out_channels=fin[3],kernel_size=3,padding=1,stride=1,bias=False),
        nn.LeakyReLU(negative_slope=0.01, inplace=True),
        nn.ConvTranspose3d(fin[3], fout[3], 3, padding=1, bias=False),
        # nn.Conv3d(in_channels=fin[3],out_channels=fout[3],kernel_size=3,padding=1,stride=1,bias=False),
        nn.LeakyReLU(negative_slope=0.01, inplace=True),
        # nn.ConvTranspose3d(fout[3], fout[3], kernel_size=2, stride=2),
        nn.MaxUnpool3d(2, stride=2),
        #encoder 5
        nn.ConvTranspose3d(fin[4], fin[4], 3, padding=1, bias=False),
        # nn.Conv3d(in_channels=fin[4],out_channels=fin[4],kernel_size=3,padding=1,stride=1,bias=False),
        nn.LeakyReLU(negative_slope=0.01, inplace=True),
        nn.ConvTranspose3d(fin[4], fout[4], 3, padding=1, bias=False),
        # nn.Conv3d(in_channels=fin[4],out_channels=fout[4],kernel_size=3,padding=1,stride=1,bias=False),
        nn.LeakyReLU(negative_slope=0.01, inplace=True))
    # Maps a forward-network conv layer index to the index of its mirror
    # transposed-conv layer in self.features (and vice versa by lookup).
    # These values are position-sensitive: do not reorder self.features.
    self.conv2deconv_indices = {
        0: 22,
        3: 20,
        7: 17,
        10: 15,
        14: 12,
        17: 10,
        21: 7,
        24: 5,
        28: 2,
        31: 0
    }
    # Maps a forward-network pooling layer index to the index of the
    # MaxUnpool3d layer that undoes it in self.features.
    self.unpool2pool_indices = {6: 19, 13: 14, 20: 9, 27: 4}
    # Weight initialization is defined elsewhere on this class.
    self.init_weight()
def __init__(self):
    """Construct the detection network: a plain-convolution stem, four
    residual encoder stages, two decoder stages, and a 1x1x1 output head.
    """
    super(Net, self).__init__()
    # The first few layers consume the most memory, so simple
    # convolutions are used here ("preBlock") instead of residual blocks.
    self.preBlock = nn.Sequential(
        nn.Conv3d(1, 24, kernel_size=3, padding=1),
        nn.BatchNorm3d(24),
        nn.ReLU(inplace=True),
        nn.Conv3d(24, 24, kernel_size=3, padding=1),
        nn.BatchNorm3d(24),
        nn.ReLU(inplace=True))

    # Stage depths and channel widths. Each forward stage is preceded by
    # a 2x pooling in forward(); 3 poolings overall halve the map thrice.
    num_blocks_forw = [2, 2, 3, 3]
    num_blocks_back = [3, 3]
    self.featureNum_forw = [24, 32, 64, 64, 64]
    self.featureNum_back = [128, 64, 64]

    # Encoder stages forw1..forw4: the first block of a stage changes the
    # channel count, the remaining blocks keep it fixed.
    for i, depth in enumerate(num_blocks_forw):
        width = self.featureNum_forw[i + 1]
        stage = [
            PostRes(self.featureNum_forw[i] if j == 0 else width, width)
            for j in range(depth)
        ]
        setattr(self, 'forw' + str(i + 1), nn.Sequential(*stage))

    # Decoder stages back2..back3: the first block consumes upsampled
    # features concatenated with the matching encoder skip, plus 3 extra
    # channels for the first decoder stage only.
    for i, depth in enumerate(num_blocks_back):
        width = self.featureNum_back[i]
        stage = []
        for j in range(depth):
            if j == 0:
                extra = 3 if i == 0 else 0
                in_ch = (self.featureNum_back[i + 1]
                         + self.featureNum_forw[i + 2] + extra)
            else:
                in_ch = width
            stage.append(PostRes(in_ch, width))
        setattr(self, 'back' + str(i + 2), nn.Sequential(*stage))

    # Index-returning poolings; the MaxUnpool3d layers presumably consume
    # these indices in forward() — confirm against the forward pass.
    self.maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
    self.maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
    self.maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
    self.maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
    self.unmaxpool1 = nn.MaxUnpool3d(kernel_size=2, stride=2)
    self.unmaxpool2 = nn.MaxUnpool3d(kernel_size=2, stride=2)

    # Learned 2x upsampling paths for the decoder.
    self.path1 = nn.Sequential(
        nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2),
        nn.BatchNorm3d(64),
        nn.ReLU(inplace=True))
    self.path2 = nn.Sequential(
        nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2),
        nn.BatchNorm3d(64),
        nn.ReLU(inplace=True))
    self.drop = nn.Dropout3d(p=0.2, inplace=False)
    # Output head: 5 values per anchor at every voxel of the final map.
    self.output = nn.Sequential(
        nn.Conv3d(self.featureNum_back[0], 64, kernel_size=1),
        nn.ReLU(),
        nn.Conv3d(64, 5 * len(config['anchors']), kernel_size=1))
def __init__(self):
    """Configure the base network with paired 3-D max pool/unpool layers.

    The pooling layer returns indices so the matching MaxUnpool3d can
    restore max locations on the way back up.
    """
    pooling = nn.MaxPool3d(2, return_indices=True)
    unpooling = nn.MaxUnpool3d(2)
    super(UnpoolingNet3d, self).__init__(pool=pooling, unpool=unpooling)