Example #1
 def build_output(self, input: Tuple[int, int, int, int], block: str) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]:
     """
     Builds an output layer.
     """
     pattern = re.compile(r'(O)(?P<name>{\w+})?(?P<dim>2|1|0)(?P<type>l|s|c)(?P<aug>a)?(?P<out>\d+)')
     m = pattern.match(block)
     if not m:
         return None, None, None
     dim = int(m.group('dim'))
     nl = m.group('type')
     outdim = int(m.group('out'))
     if dim == 0:
         raise ValueError('categorical output not supported, yet.')
     if nl == 'c' and dim == 2:
         raise ValueError('CTC not supported for heatmap output')
     if nl in ['l', 's'] and outdim >= 1:
         self.criterion = nn.BCELoss()
     elif nl == 'c':
         self.criterion = nn.CTCLoss(reduction='sum', zero_infinity=True)
     else:
         raise ValueError('unsupported output specification')
     # heatmap output
     if dim == 2:
         act = 's' if nl == 'l' else 'm'
         fn = layers.ActConv2D(input[1], outdim, (1, 1), (1, 1), act)
         logger.debug('{}\t\tconv\tkernel 1 x 1 filters {} stride 1 activation {}'.format(self.idx+1, outdim, nl))
         return fn.get_shape(input), self.get_layer_name(m.group(1), m.group('name')), fn
     else:
         aug = bool(m.group('aug'))
         lin = layers.LinSoftmax(input[1], outdim, aug)
         logger.debug('{}\t\tlinear\taugmented {} out {}'.format(self.idx+1, aug, outdim))
         return lin.get_shape(input), self.get_layer_name(m.group(1), m.group('name')), lin
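
To make the grammar encoded in the regex concrete, here is a minimal standalone sketch that parses two plausible output specs; the spec strings 'O1c10' and 'O2l1' are illustrative assumptions, while the pattern itself is copied from the function above:

 import re

 pattern = re.compile(r'(O)(?P<name>{\w+})?(?P<dim>2|1|0)'
                      r'(?P<type>l|s|c)(?P<aug>a)?(?P<out>\d+)')

 # Assumed spec: 1D CTC output with 10 classes -> hits the CTCLoss branch.
 m = pattern.match('O1c10')
 print(m.group('dim'), m.group('type'), m.group('out'))  # 1 c 10

 # Assumed spec: 2D sigmoid heatmap, one channel -> hits the ActConv2D branch.
 m = pattern.match('O2l1')
 print(m.group('dim'), m.group('type'), m.group('out'))  # 2 l 1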
Example #2
 def build_conv(
     self, input: Tuple[int, int, int, int], blocks: List[str], idx: int
 ) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str,
                                           Callable]]:
     """
     Builds a 2D convolution layer.
     """
     pattern = re.compile(
         r'(?P<type>C)(?P<nl>s|t|r|l|m)(?P<name>{\w+})?(\d+),'
         r'(\d+),(?P<out>\d+)(,(?P<stride_y>\d+),(?P<stride_x>\d+))?')
     m = pattern.match(blocks[idx])
     if not m:
         return None, None, None
     kernel_size = (int(m.group(4)), int(m.group(5)))
     filters = int(m.group('out'))
     stride = (int(m.group('stride_y')),
               int(m.group('stride_x'))) if m.group('stride_x') else (1, 1)
     nl = m.group('nl')
     fn = layers.ActConv2D(input[1], filters, kernel_size, stride, nl)
     self.idx += 1
     logger.debug(
         f'{self.idx}\t\tconv\tkernel {kernel_size[0]} x {kernel_size[1]} '
         f'filters {filters} stride {stride} activation {nl}')
     return fn.get_shape(input), [
         VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)
     ], fn
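
The spec parsing can be exercised on its own. A minimal sketch, assuming illustrative spec strings such as 'Cr3,3,16' (a 3 x 3 ReLU convolution with 16 filters) with an optional trailing stride pair; the pattern is copied from the method above:

 import re

 pattern = re.compile(
     r'(?P<type>C)(?P<nl>s|t|r|l|m)(?P<name>{\w+})?(\d+),'
     r'(\d+),(?P<out>\d+)(,(?P<stride_y>\d+),(?P<stride_x>\d+))?')

 # Assumed spec: 3x3 ReLU convolution, 16 filters, default stride.
 m = pattern.match('Cr3,3,16')
 print((int(m.group(4)), int(m.group(5))), int(m.group('out')))  # (3, 3) 16
 print(m.group('stride_x'))  # None -> build_conv falls back to (1, 1)

 # Assumed spec: the same layer with an explicit 2x2 stride.
 m = pattern.match('Cr3,3,16,2,2')
 print(int(m.group('stride_y')), int(m.group('stride_x')))  # 2 2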
Example #3
 def test_actconv2d_lin(self):
     """
     Test convolutional layer without activation.
     """
     conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 'l')
     o = conv(torch.randn(1, 5, 24, 12))
     self.assertEqual(o.shape, (1, 12, 24, 12))
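
Note that the asserted output shape keeps the spatial dimensions (24, 12) of the input despite the 3 x 3 kernel, which implies ActConv2D pads its input to produce a 'same'-sized output.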
Example #4
 def build_conv(
     self, input: Tuple[int, int, int, int], block: str
 ) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str,
                                           Callable]]:
     """
     Builds a 2D convolution layer.
     """
     pattern = re.compile(
         r'(?P<type>C)(?P<nl>s|t|r|l|m)(?P<name>{\w+})?(\d+),(\d+),(?P<out>\d+)(,(?P<stride_y>\d+),(?P<stride_x>\d+))?'
     )
     m = pattern.match(block)
     if not m:
         return None, None, None
     kernel_size = (int(m.group(4)), int(m.group(5)))
     filters = int(m.group('out'))
     stride = (int(m.group('stride_y')),
               int(m.group('stride_x'))) if m.group('stride_x') else (1, 1)
     nl = m.group('nl')
     fn = layers.ActConv2D(input[1], filters, kernel_size, stride, nl)
     logger.debug(
         '{}\t\tconv\tkernel {} x {} filters {} stride {} activation {}'.
         format(self.idx + 1, kernel_size[0], kernel_size[1], filters,
                stride, nl))
     return fn.get_shape(input), self.get_layer_name(
         m.group('type'), m.group('name')), fn
Example #5
 def test_actconv2d_relu(self):
     """
     Test convolutional layer with relu activation.
     """
     conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 'r')
     o = conv(torch.randn(1, 5, 24, 12))
     self.assertLessEqual(0, o.min())
     self.assertLessEqual(0, o.max())
Example #6
 def test_actconv2d_softmax(self):
     """
     Test convolutional layer with softmax activation.
     """
     conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 'm')
     o = conv(torch.randn(1, 5, 24, 12))
     self.assertTrue(0 <= o.min() <= 1)
     self.assertTrue(0 <= o.max() <= 1)
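
If 'm' denotes a softmax over the channel dimension (a plausible reading of the VGSL convention, not something this test verifies), a stronger check would assert that each spatial position sums to one. A sketch under that assumption, reusing the layers import of the tests:

 import torch

 conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 'm')
 o = conv(torch.randn(1, 5, 24, 12))
 # Assumption: softmax is applied over dim 1 (the 12 output channels),
 # so per-pixel class probabilities sum to ~1.
 assert torch.allclose(o.sum(dim=1), torch.ones(1, 24, 12), atol=1e-5)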
Example #7
 def test_actconv2d_tanh(self):
     """
     Test convolutional layer with tanh activation.
     """
     conv = layers.ActConv2D(5, 12, (3, 3), (1, 1), 't')
     o = conv(torch.randn(1, 5, 24, 12))
     self.assertTrue(-1 <= o[0].min() <= 1)
     self.assertTrue(-1 <= o[0].max() <= 1)
Example #8
 def test_conv_resize_remove(self):
     """
     Tests shrinking a convolutional output layer by deleting filters.
     """
     conv = layers.ActConv2D(20, 10, (1, 1), (1, 1))
     w_cp = conv.co.weight.clone()
     b_cp = conv.co.bias.clone()
     conv.resize(5, (1, 5, 6, 7, 9))
     self.assertTrue(w_cp[(0, 2, 3, 4, 8), :].eq(conv.co.weight).all())
     self.assertTrue(b_cp[(0, 2, 3, 4, 8), ].eq(conv.co.bias).all())
Example #9
 def test_conv_resize_add(self):
     """
     Tests growing a convolutional output layer by appending filters.
     """
     conv = layers.ActConv2D(20, 10, (1, 1), (1, 1))
     w_cp = conv.co.weight.clone()
     b_cp = conv.co.bias.clone()
     conv.resize(25)
     self.assertTrue(w_cp.eq(conv.co.weight[:10, :]).all())
     self.assertTrue(b_cp.eq(conv.co.bias[:10]).all())
     self.assertTrue(conv.co.weight.shape[0] == 25)
     self.assertTrue(conv.co.bias.shape[0] == 25)
Example #10
 def test_conv_resize_both(self):
     """
     Tests deleting and appending filters in a single resize of a convolutional output layer.
     """
     conv = layers.ActConv2D(20, 10, (1, 1), (1, 1))
     w_cp = conv.co.weight.clone()
     b_cp = conv.co.bias.clone()
     conv.resize(25, (1, 5, 6, 7, 9))
     self.assertTrue(w_cp[(0, 2, 3, 4, 8), :].eq(conv.co.weight[:5, :]).all())
     self.assertTrue(b_cp[(0, 2, 3, 4, 8), ].eq(conv.co.bias[:5]).all())
     self.assertTrue(conv.co.weight.shape[0] == 25)
     self.assertTrue(conv.co.bias.shape[0] == 25)
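
Read together, Examples #8, #9 and #10 pin down the resize contract: filters at the given indices are dropped first, surviving weights are kept bit-identical at the front, and freshly initialized filters are appended until the requested output size is reached. A standalone sketch of that contract on a bare weight matrix (resize_rows is a hypothetical helper, not kraken's implementation):

 import torch

 def resize_rows(w: torch.Tensor, n: int, deleted=()) -> torch.Tensor:
     # Drop the rows listed in `deleted`, then append freshly
     # initialized rows until the tensor has `n` rows, mirroring
     # what the tests above assert about ActConv2D.resize().
     keep = [i for i in range(w.shape[0]) if i not in set(deleted)]
     w = w[keep]
     if w.shape[0] < n:
         w = torch.cat([w, torch.randn(n - w.shape[0], *w.shape[1:])])
     return w

 w = torch.randn(10, 20)
 out = resize_rows(w, 25, deleted=(1, 5, 6, 7, 9))
 assert out.shape == (25, 20)
 assert torch.equal(out[:5], w[[0, 2, 3, 4, 8]])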