Example #1
 def test_linsoftmax_test(self):
     """
     Test function of linear layer in eval mode (softmax)
     """
     lin = layers.LinSoftmax(20, 10).eval()
     o = lin(torch.randn(1, 20, 12, 24))
     self.assertGreaterEqual(o[0].min(), 0)
Example #2
 def test_linsoftmax_aug(self):
     """
     Test basic function of linear layer with 1-augmentation.
     """
     lin = layers.LinSoftmax(20, 10, True)
     o = lin(torch.randn(1, 20, 12, 24))
     self.assertEqual(o[0].shape, (1, 10, 12, 24))
Example #3
 def test_linsoftmax(self):
     """
     Test basic function of linear layer.
     """
     lin = layers.LinSoftmax(20, 10)
     o = lin(torch.randn(1, 20, 12, 24))
     self.assertEqual(o[0].shape, (1, 10, 12, 24))
Example #4
 def test_linsoftmax_train(self):
     """
     Test function of linear layer in training mode (log_softmax)
     """
     lin = layers.LinSoftmax(20, 10).train()
     o = lin(torch.randn(1, 20, 12, 24))
     self.assertLess(o[0].max(), 0)
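Examples #1 to #4 pin down the contract of LinSoftmax: eval mode applies a softmax (all outputs non-negative), train mode applies a log_softmax (all outputs negative), and the spatial dimensions are preserved while the channel dimension is projected from 20 to 10. Below is a minimal sketch of a module with that behavior; it illustrates the tested contract and is not kraken's actual implementation.

 import torch
 import torch.nn.functional as F
 from torch import nn

 class MiniLinSoftmax(nn.Module):
     """Illustrative stand-in for the behavior tested above."""
     def __init__(self, in_channels: int, out_channels: int, augmentation: bool = False):
         super().__init__()
         self.augmentation = augmentation
         # 1-augmentation assumed here as an extra constant-1 input channel
         self.lin = nn.Linear(in_channels + 1 if augmentation else in_channels, out_channels)

     def forward(self, x: torch.Tensor) -> torch.Tensor:
         x = x.permute(0, 2, 3, 1)  # NCHW -> NHWC so nn.Linear acts on channels
         if self.augmentation:
             x = torch.cat([torch.ones_like(x[..., :1]), x], dim=-1)
         x = self.lin(x)
         # log-probabilities while training, probabilities at inference
         x = F.log_softmax(x, dim=-1) if self.training else F.softmax(x, dim=-1)
         return x.permute(0, 3, 1, 2)

 o = MiniLinSoftmax(20, 10).eval()(torch.randn(1, 20, 12, 24))
 assert o.shape == (1, 10, 12, 24) and o.min() >= 0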
Example #5
 def build_output(
     self, input: Tuple[int, int, int, int], block: str
 ) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str,
                                           Callable]]:
     """
     Builds an output layer.
     """
     pattern = re.compile(
         r'(O)(?P<name>{\w+})?(?P<dim>2|1|0)(?P<type>l|s|c)(?P<aug>a)?(?P<out>\d+)'
     )
     m = pattern.match(block)
     if not m:
         return None, None, None
     if int(m.group('dim')) != 1:
         raise ValueError('non-1d output not supported, yet')
     nl = m.group('type')
     if nl not in ['s', 'c']:
         raise ValueError('only softmax and ctc supported in output')
     if nl == 'c':
         self.criterion = nn.CTCLoss(reduction='none')
     aug = bool(m.group('aug'))
     lin = layers.LinSoftmax(input[1], int(m.group('out')), aug)
     logger.debug('{}\t\tlinear\taugmented {} out {}'.format(
         self.idx + 1, aug, m.group('out')))
     return lin.get_shape(input), self.get_layer_name(
         m.group(1), m.group('name')), lin
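The spec string consumed by build_output is easiest to read against a concrete match. A quick check of the pattern with two illustrative VGSL output blocks (the spec strings here are made up for the demonstration):

 import re

 pattern = re.compile(
     r'(O)(?P<name>{\w+})?(?P<dim>2|1|0)(?P<type>l|s|c)(?P<aug>a)?(?P<out>\d+)'
 )
 m = pattern.match('O1ca103')  # 1d augmented CTC output with 103 classes
 print(m.group('dim'), m.group('type'), m.group('aug'), m.group('out'))
 # 1 c a 103
 m = pattern.match('O{prediction}1s10')  # named 1d softmax output, 10 classes
 print(m.group('name'), m.group('dim'), m.group('type'), m.group('out'))
 # {prediction} 1 s 10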
Example #6
 def test_linsoftmax_aug_test(self):
     """
     Test function of linear layer in eval mode (softmax) with 1-augmentation
     """
     lin = layers.LinSoftmax(20, 10, True).eval()
     o = lin(torch.randn(1, 20, 12, 24))
     self.assertGreaterEqual(o.min(), 0)
Example #7
 def test_linsoftmax_aug_train(self):
     """
     Test function of linear layer in training mode (log_softmax) with 1-augmentation
     """
     lin = layers.LinSoftmax(20, 10, True).train()
     o = lin(torch.randn(1, 20, 12, 24))
     self.assertLess(o.max(), 0)
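In the augmented variants the extra flag only changes the input side of the projection: 1-augmentation is typically implemented by concatenating a constant channel of ones to the input before the linear layer, so the output shape and the softmax/log_softmax behavior stay identical to the plain layer. A tiny demonstration of the idea, independent of kraken:

 import torch

 x = torch.randn(1, 20, 12, 24)
 x_aug = torch.cat([torch.ones(1, 1, 12, 24), x], dim=1)  # 20 -> 21 channels
 print(x_aug.shape)  # torch.Size([1, 21, 12, 24])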
Example #8
 def test_linsoftmax_resize_remove(self):
     """
     Tests resizing of a fully connected layer.
     """
     lin = layers.LinSoftmax(20, 10)
     w_cp = lin.lin.weight.clone()
     b_cp = lin.lin.bias.clone()
     lin.resize(5, (1, 5, 6, 7, 9))
     self.assertTrue(w_cp[(0, 2, 3, 4, 8), :].eq(lin.lin.weight).all())
     self.assertTrue(b_cp[(0, 2, 3, 4, 8), ].eq(lin.lin.bias).all())
Example #9
 def test_linsoftmax_resize_add(self):
     """
     Tests resizing of a fully connected layer.
     """
     lin = layers.LinSoftmax(20, 10)
     w_cp = lin.lin.weight.clone()
     b_cp = lin.lin.bias.clone()
     lin.resize(25)
     self.assertTrue(w_cp.eq(lin.lin.weight[:10, :]).all())
     self.assertTrue(b_cp.eq(lin.lin.bias[:10]).all())
     self.assertTrue(lin.lin.weight.shape[0] == 25)
     self.assertTrue(lin.lin.bias.shape[0] == 25)
Example #10
 def test_linsoftmax_resize_both(self):
     """
     Tests resizing of a fully connected layer.
     """
     lin = layers.LinSoftmax(20, 10)
     w_cp = lin.lin.weight.clone()
     b_cp = lin.lin.bias.clone()
     lin.resize(25, (1, 5, 6, 7, 9))
     self.assertTrue(w_cp[(0, 2, 3, 4,
                           8), :].eq(lin.lin.weight[:5, :]).all())
     self.assertTrue(b_cp[(0, 2, 3, 4, 8), ].eq(lin.lin.bias[:5]).all())
     self.assertTrue(lin.lin.weight.shape[0] == 25)
     self.assertTrue(lin.lin.bias.shape[0] == 25)
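Taken together, the three resize tests fix the semantics of LinSoftmax.resize(output_size, del_indices): output rows listed in del_indices are dropped, the surviving rows keep their trained weights at the front of the new weight matrix, and any remaining rows up to output_size are freshly initialized. A sketch of those semantics on a plain nn.Linear (an illustration of the tested behavior, not kraken's implementation):

 import torch
 from torch import nn

 def resize_linear(lin: nn.Linear, output_size: int, del_indices=()) -> nn.Linear:
     # output rows that survive the deletion, in their original order
     keep = [i for i in range(lin.out_features) if i not in set(del_indices)]
     new = nn.Linear(lin.in_features, output_size)
     with torch.no_grad():
         new.weight[:len(keep)] = lin.weight[keep]
         new.bias[:len(keep)] = lin.bias[keep]
     return new

 lin = nn.Linear(20, 10)
 resized = resize_linear(lin, 25, (1, 5, 6, 7, 9))
 assert resized.weight.shape == (25, 20)
 assert torch.equal(resized.weight[:5], lin.weight[(0, 2, 3, 4, 8), :])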
Example #11
 def build_output(
     self, input: Tuple[int, int, int, int], blocks: List[str], idx: int
 ) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str,
                                           Callable]]:
     """
     Builds an output layer.
     """
     pattern = re.compile(
         r'(O)(?P<name>{\w+})?(?P<dim>2|1|0)(?P<type>l|s|c)(?P<aug>a)?(?P<out>\d+)'
     )
     m = pattern.match(blocks[idx])
     if not m:
         return None, None, None
     dim = int(m.group('dim'))
     nl = m.group('type')
     outdim = int(m.group('out'))
     if dim == 0:
         raise ValueError('categorical output not supported, yet.')
     if nl == 'c' and dim == 2:
         raise ValueError('CTC not supported for heatmap output')
     if nl in ['l', 's'] and outdim >= 1:
         self.criterion = nn.BCELoss()
     elif nl == 'c':
         self.criterion = nn.CTCLoss(reduction='sum', zero_infinity=True)
     else:
         raise ValueError('unsupported output specification')
     # heatmap output
     if dim == 2:
         act = 's' if nl == 'l' else 'm'
         fn = layers.ActConv2D(input[1], outdim, (1, 1), (1, 1), act)
         self.idx += 1
         logger.debug(
             '{}\t\tconv\tkernel 1 x 1 filters {} stride 1 activation {}'.
             format(self.idx, outdim, act))
         return fn.get_shape(input), [
             VGSLBlock(blocks[idx], m.group('type'), m.group('name'),
                       self.idx)
         ], fn
     else:
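         # 1d sequence output: linear projection into (log-)softmax classes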
         aug = bool(m.group('aug'))
         lin = layers.LinSoftmax(input[1], outdim, aug)
         self.idx += 1
         logger.debug('{}\t\tlinear\taugmented {} out {}'.format(
             self.idx, aug, outdim))
         return lin.get_shape(input), [
             VGSLBlock(blocks[idx], m.group(1), m.group('name'), self.idx)
         ], lin
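For the CTC branch, nn.CTCLoss(reduction='sum', zero_infinity=True) expects log-probabilities of shape (T, N, C) plus per-sample input and target lengths; zero_infinity=True zeroes out the infinite losses that arise when a target is longer than the input. A minimal call with made-up sizes:

 import torch
 from torch import nn

 criterion = nn.CTCLoss(reduction='sum', zero_infinity=True)
 log_probs = torch.randn(24, 1, 10).log_softmax(2)  # (T, N, C); class 0 is blank
 targets = torch.randint(1, 10, (1, 5))             # (N, S), labels avoid blank
 loss = criterion(log_probs, targets,
                  torch.tensor([24]),  # input lengths
                  torch.tensor([5]))   # target lengths
 print(loss)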