def __init__(self, x, filters, filter_size, step, bias=True, activation=None, padding='VALID', *activation_args, **activation_kwargs):
    """Build a 2-D convolution layer node in the graph.

    Args:
        x: Input layer or graph node; if a ``Layer``, its ``output`` node is used.
        filters: Number of output channels (filter count).
        filter_size: Spatial side length of the square kernel.
        step: Convolution stride passed to ``graph.Convolution``.
        bias: When True, a per-filter bias variable is created and added.
        activation: Optional activation constructor applied to the result.
        padding: Padding mode forwarded to ``graph.Convolution`` (default 'VALID').
        *activation_args, **activation_kwargs: Extra arguments for ``activation``.
    """
    if isinstance(x, Layer):
        x = x.output
    # Kernel shape: (height, width, in_channels, out_channels);
    # assumes x.shape is (H, W, C) without a batch dimension — TODO confirm.
    self.kernel = graph.Variable(
        (filter_size, filter_size, x.shape[2], filters))
    y = graph.Convolution(x, self.kernel, step, padding)
    self.bias = graph.Variable((filters, )) if bias else None
    if bias:
        y = graph.Sum(y, self.bias)
    if activation:
        y = activation(y, *activation_args, **activation_kwargs)
    # Bug fix: previously the variables list always contained self.bias,
    # yielding a stray None entry when bias=False. Only include it if present.
    variables = [self.kernel] if self.bias is None else [self.kernel, self.bias]
    super(Convolution, self).__init__((x, ), y, variables)
def test_convolution_with_step(self):
    """A strided (step=2) convolution matches the precomputed fixture."""
    inputs = graph.Constant(self.x)
    kernel = graph.Constant(self.filters)
    result = graph.run(graph.Convolution(inputs, kernel, step=2))
    np.testing.assert_array_equal(self.expected_step_2, result)
def test_convolution(self):
    """A plain (default-stride) convolution matches the precomputed fixture."""
    inputs = graph.Constant(self.x)
    kernel = graph.Constant(self.filters)
    node = graph.Convolution(inputs, kernel)
    np.testing.assert_array_equal(graph.run(node), self.expected)
def test_convolution_batched(self):
    """Feeding a batch through a batched placeholder convolves each item independently."""
    placeholder = graph.Placeholder(self.x.shape, batched=True)
    kernel = graph.Constant(self.filters)
    node = graph.Convolution(placeholder, kernel)
    # Batch of three scaled copies of the fixture input.
    batch = np.stack([self.x, 2 * self.x, 3 * self.x])
    want = np.stack(
        [self.expected, 2 * self.expected, 3 * self.expected])
    np.testing.assert_array_equal(graph.run(node, {placeholder: batch}), want)
def test_convolution_grad_with_step(self):
    """Gradients of a strided convolution w.r.t. input and filters match fixtures."""
    inputs = graph.Constant(self.x)
    kernel = graph.Constant(self.filters)
    node = graph.Convolution(inputs, kernel, step=2)
    d_inputs, d_kernel = graph.run(graph.Grad(node, [inputs, kernel]))
    np.testing.assert_array_equal(d_inputs, self.grad_x_step_2)
    np.testing.assert_array_equal(d_kernel, self.grad_f_step_2)
def test_convolution_grad_batched(self):
    """Batched gradients: per-item input grads stack; filter grads sum over the batch."""
    placeholder = graph.Placeholder(self.x.shape, batched=True)
    kernel = graph.Constant(self.filters)
    batch = np.stack([self.x, 2 * self.x, 3 * self.x])
    node = graph.Convolution(placeholder, kernel)
    d_inputs, d_kernel = graph.run(
        graph.Grad(node, [placeholder, kernel]), {placeholder: batch})
    # Input gradient is identical for every batch item (linear op, same structure).
    np.testing.assert_array_equal(d_inputs, np.stack([self.grad_x] * 3))
    # Filter gradient accumulates across the batch: (1 + 2 + 3) * grad_f.
    np.testing.assert_array_equal(d_kernel, self.grad_f * 6)