def test_forward_consistency(self, nobias=False):
    """Check that the CPU and GPU forward passes produce matching outputs.

    Runs ``dilated_convolution_2d`` on host arrays and on their GPU
    copies with identical hyper-parameters, then compares the results
    within ``self.check_forward_options`` tolerances.
    """
    bias_cpu = None if nobias else chainer.Variable(self.b)
    y_cpu = functions.dilated_convolution_2d(
        chainer.Variable(self.x),
        chainer.Variable(self.W),
        bias_cpu,
        stride=self.stride, pad=self.pad, dilate=self.dilate,
        use_cudnn=self.use_cudnn, cover_all=self.cover_all)

    bias_gpu = None if nobias else chainer.Variable(cuda.to_gpu(self.b))
    y_gpu = functions.dilated_convolution_2d(
        chainer.Variable(cuda.to_gpu(self.x)),
        chainer.Variable(cuda.to_gpu(self.W)),
        bias_gpu,
        stride=self.stride, pad=self.pad, dilate=self.dilate,
        use_cudnn=self.use_cudnn, cover_all=self.cover_all)

    # GPU result is pulled back to host memory before comparison.
    testing.assert_allclose(y_cpu.data, y_gpu.data.get(),
                            **self.check_forward_options)
    def test_forward_consistency(self, nobias=False):
        x_cpu = chainer.Variable(self.x)
        W_cpu = chainer.Variable(self.W)
        b_cpu = None if nobias else chainer.Variable(self.b)
        y_cpu = functions.dilated_convolution_2d(
            x_cpu, W_cpu, b_cpu, stride=self.stride, pad=self.pad,
            dilate=self.dilate, cover_all=self.cover_all)

        x_gpu = chainer.Variable(cuda.to_gpu(self.x))
        W_gpu = chainer.Variable(cuda.to_gpu(self.W))
        b_gpu = None if nobias else chainer.Variable(cuda.to_gpu(self.b))
        with chainer.using_config('use_cudnn', self.use_cudnn):
            y_gpu = functions.dilated_convolution_2d(
                x_gpu, W_gpu, b_gpu, stride=self.stride, pad=self.pad,
                dilate=self.dilate, cover_all=self.cover_all)

        testing.assert_allclose(
            y_cpu.data, y_gpu.data.get(), **self.check_forward_options)
# Example 3
 def forward(self):
     """Run one bias-free dilated convolution over the stored arrays."""
     inputs = chainer.Variable(self.x)
     weights = chainer.Variable(self.W)
     return functions.dilated_convolution_2d(
         inputs, weights, None,
         stride=self.stride, pad=self.pad, dilate=self.dilate)
# Example 4
    def forward(self, x):
        """Apply an explicitly padded dilated 2-D convolution to ``x``.

        The input is first padded with ``F.pad`` using ``self.pad_mode``
        (so non-zero padding modes are honoured), then convolved with
        ``pad=0`` to avoid the convolution's implicit zero padding being
        applied on top.

        Args:
            x: Input variable; ``x.shape[1]`` is taken as the channel
               count for lazy weight initialization.

        Returns:
            The convolution output variable.
        """
        # Lazily initialize W from the input's channel dimension.
        if self.W.array is None:
            self._initialize_params(x.shape[1])

        # Pad only the spatial axes; batch and channel axes get (0, 0).
        # A comprehension with a distinct name replaces the original
        # ``lambda x: (x, x)``, which shadowed the method's ``x`` parameter.
        pad_width = [(0, 0), (0, 0)] + [(p, p) for p in self.pad]
        x = F.pad(x, pad_width, self.pad_mode)

        return F.dilated_convolution_2d(x, self.W, self.b, self.stride, 0,
                                        self.dilate)
 def forward(self):
     """Run one bias-free dilated convolution over the stored arrays."""
     var_x = chainer.Variable(self.x)
     var_W = chainer.Variable(self.W)
     return functions.dilated_convolution_2d(var_x, var_W, None,
                                             stride=self.stride,
                                             pad=self.pad,
                                             dilate=self.dilate)
# Example 6
 def f(*args):
     """Closure wrapper: convolve *args* with the enclosing test's settings."""
     return F.dilated_convolution_2d(
         *args,
         stride=self.stride, pad=self.pad,
         dilate=self.dilate, cover_all=self.cover_all)