def test_res_unet_3d(self):
    from inferno.extensions.model import ResBlockUNet
    tester = ModelTester((1, 1, 16, 64, 64), (1, 1, 16, 64, 64))
    if cuda.is_available():
        tester.cuda()
    # test default unet 3d
    tester(ResBlockUNet(in_channels=1, out_channels=1, dim=3))
def test_2d_side_out_up(self):
    from inferno.extensions.model import ResBlockUNet
    depth = 3
    in_channels = 3
    x = torch.rand(1, in_channels, 64, 32)
    model = ResBlockUNet(in_channels=in_channels, out_channels=8, dim=2,
                         side_out_parts=['up'],
                         unet_kwargs=dict(depth=depth))
    out_list = model(x)
    self.assertEqual(len(out_list), depth)
    self.assertEqual(list(out_list[0].size()), [1, 12, 16, 8])
    self.assertEqual(list(out_list[1].size()), [1, 6, 32, 16])
    self.assertEqual(list(out_list[2].size()), [1, 8, 64, 32])
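# The sizes asserted above follow from the UNet's default gain of 2:
# starting from in_channels=3, the side outputs of the up branch carry
# 12 and 6 channels at the two coarser resolutions, and the final up
# output uses out_channels=8 at full resolution. The
# `n_channels_per_output` attribute (used by `MySideLossUNet` below)
# exposes these counts. A minimal sketch, assuming the same
# ResBlockUNet API as in the test:
from inferno.extensions.model import ResBlockUNet

model = ResBlockUNet(in_channels=3, out_channels=8, dim=2,
                     side_out_parts=['up'], unet_kwargs=dict(depth=3))
# channel count per side output, coarsest first; the test above
# asserts these are 12, 6 and 8
print(model.n_channels_per_output)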
def __init__(self, in_channels, out_channels, depth=3):
    super(MySideLossUNet, self).__init__()

    self.depth = depth
    self.unet = ResBlockUNet(in_channels=in_channels,
                             out_channels=in_channels * 2,
                             dim=2, unet_kwargs=dict(depth=depth),
                             side_out_parts=['bottom', 'up'])

    # number of out channels
    self.n_channels_per_output = self.unet.n_channels_per_output

    # 1x1 conv to give the side outs of the unet
    # the right number of channels,
    # and an upsampling to give the right shape
    upscale_factor = 2**self.depth
    conv_and_scale = []
    for n_channels in self.n_channels_per_output:
        # conv blocks
        conv = Conv2D(in_channels=n_channels, out_channels=out_channels,
                      kernel_size=1)
        if upscale_factor > 1:
            upsample = nn.Upsample(scale_factor=upscale_factor)
            conv_and_scale.append(nn.Sequential(conv, upsample))
        else:
            conv_and_scale.append(conv)
        upscale_factor //= 2
    self.conv_and_scale = nn.ModuleList(conv_and_scale)

    # combined number of channels after concat:
    # concat side output predictions with main output of unet
    self.n_channels_combined = (self.depth + 1) * out_channels + in_channels * 2

    self.final_block = nn.Sequential(
        ResBlock(dim=2, in_channels=self.n_channels_combined,
                 out_channels=self.n_channels_combined),
        ResBlock(dim=2, in_channels=self.n_channels_combined,
                 out_channels=out_channels, activated=False)
    )
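# Only the constructor is shown above. A plausible forward pass is
# sketched below (an assumption reconstructed from
# `n_channels_combined`, not the example's verbatim code): each side
# output goes through its conv-and-scale module, the resulting
# predictions are concatenated with the main UNet output, and the
# final res blocks fuse them. Returning all predictions lets a side
# loss be attached to each level.
def forward(self, input):
    # the unet returns depth + 1 tensors: the bottom output plus one
    # output per up level (side_out_parts=['bottom', 'up'])
    outs = self.unet(input)
    assert len(outs) == self.depth + 1

    # map each side output to `out_channels` channels and
    # upsample it to the input resolution
    preds = [cs(out) for cs, out in zip(self.conv_and_scale, outs)]

    # concatenate the per-level predictions with the main unet
    # output; this yields `n_channels_combined` channels
    combined = torch.cat(preds + [outs[-1]], dim=1)
    final = self.final_block(combined)

    # return the side predictions together with the fused one so a
    # loss can be applied to each (hypothetical convention)
    return tuple(preds) + (final,)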
plt.show()

##############################################################################
# Simple UNet
# ----------------------------
# We start with a very simple predefined res block UNet. By default, this
# UNet uses ReLUs (in conjunction with batchnorm) as nonlinearities.
# With :code:`activated=False` we make sure that the last layer
# is not activated, since we chain the UNet with a sigmoid
# activation function.
from inferno.extensions.model import ResBlockUNet
from inferno.extensions.layers import RemoveSingletonDimension

model = torch.nn.Sequential(
    ResBlockUNet(dim=2, in_channels=image_channels,
                 out_channels=pred_channels, activated=False),
    RemoveSingletonDimension(dim=1),
    torch.nn.Sigmoid()
)

##############################################################################
# While the model above will work in principle, it has some drawbacks.
# Within the UNet, the number of features is increased by a multiplicative
# factor while going down, the so-called gain. The default value for the
# gain is 2. Since we start with only a single channel, we could either
# increase the gain, or use some convolutions to increase the number of
# channels before the UNet.
from inferno.extensions.layers import ConvReLU2D

model_a = torch.nn.Sequential(
    ConvReLU2D(in_channels=image_channels, out_channels=5, kernel_size=3),
    ResBlockUNet(dim=2, in_channels=5, out_channels=pred_channels,
                 activated=False),
    RemoveSingletonDimension(dim=1),
    torch.nn.Sigmoid()
)
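##############################################################################
# To sanity-check :code:`model_a` (its tail is completed here under the
# assumption that it mirrors :code:`model` above), we can run it on a
# random batch. :code:`image_channels` and :code:`pred_channels` are
# defined earlier in this example; with :code:`pred_channels == 1` the
# singleton channel dimension is removed at the end.
x = torch.rand(1, image_channels, 64, 64)
with torch.no_grad():
    y = model_a(x)
# expected: (1, 64, 64) after RemoveSingletonDimension,
# assuming pred_channels == 1 as in the sigmoid setup
print(y.shape)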
def test_res_unet_2d(self):
    from inferno.extensions.model import ResBlockUNet
    tester = ModelTester((1, 1, 256, 256), (1, 1, 256, 256))
    if cuda.is_available():
        tester.cuda()
    tester(ResBlockUNet(in_channels=1, out_channels=1, dim=2))