Example 1
 def test_get_block_B(self, block_type='B'):
     """
     Test correct behavior for B blocks: H & W remain unchanged.
     """
     self.tg.cfg_var['H'] += [
         3
     ]  # unlike D & U blocks, B blocks can also handle odd H & W
     self.tg.cfg_var['W'] += [27]
     cnt = 0
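     # each generated test i is a config dict carrying the sampled values from self.tg.cfg_var (block_number, in_channels, out_channels, H, W, ...)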
     for i in self.tg.generate_tests:
         sampled_layers = [(random.choice('OHCV'), random.randint(1, 10))
                           for _ in range(random.randint(1, 4))]
         with self.subTest('Testing:',
                           block_type=block_type,
                           i=i,
                           sampled_layers=sampled_layers):
             in_tensor = EncDecTestGenerator.get_ingoing_tensor(i)
             net, arch = get_block(block_type, i['block_number'],
                                   i['in_channels'], i['out_channels'],
                                   sampled_layers, i)
             out_tensor = net(in_tensor)
             self.assertEqual(in_tensor.size()[2],
                              out_tensor.size()[2])  # same H
             self.assertEqual(in_tensor.size()[3],
                              out_tensor.size()[3])  # same W
             cnt += 1
     print(f'Tested {cnt} combinations.')  # 216
Example 2
 def test_get_block_D_empty_layers(self, block_type='D'):
     """
     Test correct behaviour for D blocks after the first when no layers are specified: resolution gets halved, channels doubled. Should work for every even input resolution.
     """
     self.tg.cfg_var['in_channels'] = [5, 64]
     self.tg.cfg_var['block_number'] = [1, 7]
     self.tg.cfg_var['H'] += [
         28, 300
     ]  # D blocks halve the resolution, so only even H & W values are added here
     self.tg.cfg_var['W'] += [30]
     cnt = 0
     for i in self.tg.generate_tests:
         sampled_layers = []  # no additional layers for this test (empty-layers case)
         with self.subTest('Testing:',
                           block_type=block_type,
                           i=i,
                           sampled_layers=sampled_layers):
             in_tensor = EncDecTestGenerator.get_ingoing_tensor(i)
             i['out_channels'] = i['in_channels'] * 2
             net, arch = get_block(block_type, i['block_number'],
                                   i['in_channels'], i['out_channels'],
                                   sampled_layers, i)
             out_tensor = net(in_tensor)
             self.assertEqual(in_tensor.size()[2], out_tensor.size()[2] * 2)  # H halved
             self.assertEqual(in_tensor.size()[3], out_tensor.size()[3] * 2)  # W halved
             self.assertEqual(in_tensor.size()[1] * 2, out_tensor.size()[1])  # channels doubled
             cnt += 1
     print(f'Tested {cnt} combinations.')
Example 3
 def test_get_block_D0(self, block_type='D'):
     """
     Test correct behavior for the first D block: channels_in and channels_out are overwritten with config['classes'] and config['base_channels'].
     """
     self.tg.cfg_var['block_number'] = [0]
     cnt = 0
     for i in self.tg.generate_tests:
         for j in [
                 True, False
         ]:  # D & U blocks should work without additionally sampling layers
             if j:
                 sampled_layers = [(random.choice('OHCV'),
                                    random.randint(1, 10))
                                   for _ in range(random.randint(1, 4))]
             else:
                 sampled_layers = []
             with self.subTest('Testing:',
                               block_type=block_type,
                               i=i,
                               sampled_layers=sampled_layers):
                 in_tensor = EncDecTestGenerator.get_ingoing_tensor(i)
                 net, arch = get_block(block_type, i['block_number'],
                                       i['in_channels'], i['out_channels'],
                                       sampled_layers, i)
                 out_tensor = net(in_tensor)
                 self.assertEqual(in_tensor.size()[1], i['classes'])  # channels_in overwritten with config['classes']
                 self.assertEqual(out_tensor.size()[1], i['base_channels'])  # channels_out overwritten with config['base_channels']
                 self.assertEqual(in_tensor.size()[2],
                                  out_tensor.size()[2] * 2)
                 self.assertEqual(in_tensor.size()[3],
                                  out_tensor.size()[3] * 2)
                 cnt += 1
     print(f'Tested {cnt} combinations.')  # 96
Example 4
 def test_get_block_B_misspecified(self, block_type='B'):
     """
     Layers must follow a predefined layout in the config: List[Tuple[str, int]].
     """
     cnt = 0
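     # each entry below is a misspecified layer list: no layers, an empty tuple, a 3-tuple, and tuples of mixed length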
     for error in [[], [()], [('V', 2, 3)], [('V', 1), ('H', 1, 2)]]:
         for i in self.tg.generate_tests:
             with self.subTest('Testing:',
                               block_type=block_type,
                               i=i,
                               error=error):
                 with self.assertRaises(
                         AssertionError
              ):  # change to ValueError to see that the test is actually working
                     cnt += 1
                     net, arch = get_block(block_type, i['block_number'],
                                           i['in_channels'],
                                           i['out_channels'], error, i)
     print(f'Tested {cnt} combinations.')  # 384
Example 5
 def test_get_block_U(self, block_type='U'):
     """
     Test correct behaviour for U blocks: resolution gets doubled and the requested number of output channels is produced. Should work for all input resolutions and input channel counts.
     """
     self.tg.cfg_var['in_channels'] = [1, 7]
     self.tg.cfg_var['block_number'] = [0, 2]
     self.tg.cfg_var['H'] += [
         301
     ]  # U blocks upsample, so odd H & W inputs are fine here as well
     self.tg.cfg_var['W'] += [7]
     cnt = 0
     for i in self.tg.generate_tests:
         for j in [
                 True, False
         ]:  # D & U blocks should work without additionally sampling layers
             if j:
                 sampled_layers = [(random.choice('OHCV'),
                                    random.randint(1, 10))
                                   for _ in range(random.randint(1, 4))]
             else:
                 sampled_layers = []
             with self.subTest('Testing:',
                               block_type=block_type,
                               i=i,
                               sampled_layers=sampled_layers):
                 in_tensor = EncDecTestGenerator.get_ingoing_tensor(i)
                 i['out_channels'] = i['in_channels'] * 2
                 net, arch = get_block(block_type, i['block_number'],
                                       i['in_channels'], i['out_channels'],
                                       sampled_layers, i)
                 out_tensor = net(in_tensor)
                 self.assertEqual(in_tensor.size()[2] * 2,
                                  out_tensor.size()[2])
                 self.assertEqual(in_tensor.size()[3] * 2,
                                  out_tensor.size()[3])
                 self.assertEqual(in_tensor.size()[1] * 2,
                                  out_tensor.size()[1])
                 cnt += 1
     print(f'Tested {cnt} combinations.')  # 432
    def __init__(self, config: dict = None, from_path: str = None):
        super().__init__()
        self.arch_string = []
        self.nn_down = nn.ModuleList()
        self.nn_bottle = nn.ModuleList()
        self.nn_up = nn.ModuleList()
        self.nn_logits = nn.ModuleList()

        if from_path and config is None:  # path should point to root directory containing model-related information
            self.deserialize_config(from_path)
            self.arch_string = self.config['arch_string']
            self.load_weights_best_mIoU(from_path)

        else:  # sampling
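            # no serialized model given: sample a concrete configuration to build the network from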
            self.config = sample_config(config)

            if self.config['debug'] >= 1:
                for i in ['D_blocks', 'U_blocks', 'B_blocks']:
                    assert len(
                        self.config[i]
                    ) >= 1, "Networks must have at least one downsampling, bottleneck, and decoder block each."  # TODO: drop requirement for bottleneck block
                assert len(self.config['D_blocks']) == len(
                    self.config['U_blocks']
                ), "You must specify an equal number of encoder and decoder blocks."

            exp_f = 2  # expansion factor: channels are doubled/halved in each encoder/decoder block
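            # e.g. with base_channels = 64 the encoder widths become 64, 128, 256, ...
            # (channels_out = base_channels * exp_f**i for downsampling block i)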

            # Downsampling blocks
            for i, block in enumerate(self.config['D_blocks']):
                block = deepcopy(block)
                channels_in = int(
                    self.config['base_channels'] * exp_f
                    **(i - 1))  # overwritten with config['classes'] in the first downsampling block
                channels_out = int(
                    self.config['base_channels'] *
                    exp_f**i)  # overwritten with config['base_channels'] in the first downsampling block
                b, a = get_block('D', i, channels_in, channels_out, block,
                                 self.config)
                self.nn_down += [b]
                self.arch_string += [a]
                if self.config['debug'] > 1:
                    print(
                        f'Appended this downsampling block to overall network topology: {a,b}:'
                    )

            # Bottlenecks blocks
            for i, block in enumerate(self.config['B_blocks']):
                block = deepcopy(block)
                channels_in = channels_out  # B blocks keep the spatial resolution and channel count unchanged
                b, a = get_block('B', i, channels_in, channels_out, block,
                                 self.config)
                self.nn_bottle += [b]
                self.arch_string += [a]
                if self.config['debug'] > 1:
                    print(
                        f'Appended this bottleneck block to overall network topology: {a,b}:'
                    )

            # Upsampling blocks
            for i, block in enumerate(self.config['U_blocks']):
                block = deepcopy(block)
                channels_in = int(
                    channels_out * 2
                )  # incoming channels: twice the previous block's output channels
                channels_out = int(channels_in / 4)
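                # channels_in / 4 equals half of the previous block's channels_out, i.e. channels are halved per decoder stage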
                if i == len(self.config['U_blocks']) - 1:
                    channels_out = self.config['base_channels']
                b, a = get_block('U', i, channels_in, channels_out, block,
                                 self.config)
                self.nn_up += [b]
                self.arch_string += [a]
                if self.config['debug'] > 1:
                    print(
                        f'Appended this upsampling block to overall network topology: {a,b}:'
                    )

            # here we could place an upsampling operation if we take a downsampled input but want to predict full resolution

            # last block is currently fixed: out-conv, returning logits
            self.nn_logits += [
                get_layer('C',
                          self.config['base_channels'],
                          self.config['classes'],
                          self.config,
                          out_conv=True)
            ]

            # turn arch string list into arch string: blocks are separated by '*'
            self.arch_string = '*'.join(self.arch_string)
            self.config['arch_string'] = self.arch_string

            if self.config['debug'] > 1:
                print(f'Training this architecture: {self.arch_string}')