def test_valid_insize(self):
        N = self.N
        c = self.c
        ksize = self.ksize
        stride = self.stride
        pad = self.pad
        outs = self.outsize
        cover_all = self.cover_all

        # Make input.
        dims = tuple(
            conv.get_conv_outsize(out, k, s, p, cover_all=cover_all)
            for (out, k, s, p) in zip(outs, ksize, stride, pad))
        x_shape = (N, c) + dims
        x_data = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
        x = chainer.Variable(x_data)

        # Compute unpooling.
        y = functions.unpooling_nd(x,
                                   ksize,
                                   stride,
                                   pad,
                                   outsize=outs,
                                   cover_all=cover_all)

        # Test output's value.
        y_expected = expected_unpooling_nd(x_data, outs, ksize, stride, pad)
        testing.assert_allclose(y_expected, y.data)
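
A note on the value being checked: when stride equals ksize and pad is 0, the
pooling windows do not overlap, so unpooling_nd simply tiles each input
element over its window. A minimal self-contained sketch (my data, not part
of the test suite) comparing it against numpy.kron:

import numpy
from chainer import functions

x = numpy.arange(4, dtype=numpy.float32).reshape(1, 1, 2, 2)
y = functions.unpooling_nd(x, 2, stride=2, pad=0, cover_all=False)
# Tiling each element over its 2x2 block is exactly numpy.kron with ones.
expected = numpy.kron(x, numpy.ones((2, 2), dtype=numpy.float32))
assert (y.array == expected).all()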
Example No. 2
    def check_backward_consistency_regression(self, backend_config):
        # Regression test against the two-dimensional unpooling layer.

        x_data, = self.generate_inputs()
        gy_data = numpy.random.uniform(-1, 1, self.gy_shape).astype(self.dtype)

        ksize = self.ksize
        stride = self.stride
        pad = self.pad
        xp = backend.get_array_module(x_data)

        # Backward computation for N-dimensional unpooling layer.
        x_nd = chainer.Variable(xp.array(x_data))
        y_nd = functions.unpooling_nd(
            x_nd, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
        y_nd.grad = gy_data
        y_nd.backward()

        # Backward computation for two-dimensional unpooling layer.
        x_2d = chainer.Variable(xp.array(x_data))
        y_2d = functions.unpooling_2d(
            x_2d, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
        y_2d.grad = gy_data
        y_2d.backward()

        # Test that the two resulting gradients are close enough.
        opt = self.check_backward_options
        testing.assert_allclose(
            x_nd.grad, x_2d.grad, atol=opt['atol'], rtol=opt['rtol'])
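
The same check, condensed into a standalone script with assumed concrete
shapes (x of shape (2, 3, 4, 4), ksize = stride = 2, pad = 0), in case the
test-harness context is unclear:

import numpy
import chainer
from chainer import functions, testing

x_data = numpy.random.uniform(-1, 1, (2, 3, 4, 4)).astype(numpy.float32)
gy = numpy.random.uniform(-1, 1, (2, 3, 8, 8)).astype(numpy.float32)

# N-dimensional unpooling, backward pass.
x_nd = chainer.Variable(x_data.copy())
y_nd = functions.unpooling_nd(x_nd, 2, stride=2, pad=0, cover_all=False)
y_nd.grad = gy
y_nd.backward()

# Two-dimensional unpooling, backward pass.
x_2d = chainer.Variable(x_data.copy())
y_2d = functions.unpooling_2d(x_2d, 2, stride=2, pad=0, cover_all=False)
y_2d.grad = gy
y_2d.backward()

testing.assert_allclose(x_nd.grad, x_2d.grad)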
Example No. 3
    def check_backward_consistency_regression(self, x_data, gy_data):
        # Regression test against the two-dimensional unpooling layer.

        ndim = len(self.dims)
        if ndim != 2:
            return

        ksize = self.ksize
        stride = self.stride
        pad = self.pad
        xp = cuda.get_array_module(x_data)

        # Backward computation for N-dimensional unpooling layer.
        x_nd = chainer.Variable(xp.array(x_data))
        y_nd = functions.unpooling_nd(
            x_nd, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
        y_nd.grad = gy_data
        y_nd.backward()

        # Backward computation for two-dimensional unpooling layer.
        x_2d = chainer.Variable(xp.array(x_data))
        y_2d = functions.unpooling_2d(
            x_2d, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
        y_2d.grad = gy_data
        y_2d.backward()

        # Test that the two resulting gradients are close enough.
        opt = self.check_backward_options
        testing.assert_allclose(
            x_nd.grad, x_2d.grad, atol=opt['atol'], rtol=opt['rtol'])
Example No. 5
    def check_backward_consistency_regression(self, x_data, gy_data):
        # Regression test against the two-dimensional unpooling layer.

        ndim = len(self.dims)
        if ndim != 2:
            return

        ksize = self.ksize
        stride = self.stride
        pad = self.pad
        xp = backend.get_array_module(x_data)

        # Backward computation for N-dimensional unpooling layer.
        x_nd = chainer.Variable(xp.array(x_data))
        y_nd = functions.unpooling_nd(
            x_nd, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
        y_nd.grad = gy_data
        y_nd.backward()

        # Backward computation for two-dimensional unpooling layer.
        x_2d = chainer.Variable(xp.array(x_data))
        y_2d = functions.unpooling_2d(
            x_2d, ksize, stride=stride, pad=pad, cover_all=self.cover_all)
        y_2d.grad = gy_data
        y_2d.backward()

        # Test that the two resulting gradients are close enough.
        opt = self.check_backward_options
        testing.assert_allclose(
            x_nd.grad, x_2d.grad, atol=opt['atol'], rtol=opt['rtol'])
Example No. 6
import numpy as np
import chainer.functions as F


def unpooling(in_size: int, kernel: int, stride: int, padding: int):
    batch = 2
    in_channel = 5
    x = np.arange(batch * in_channel * in_size**3, dtype=np.float32).reshape(
        (batch, in_channel, in_size, in_size, in_size))
    y = F.unpooling_nd(x, kernel, stride, padding)
    return y.shape
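
When outsize is omitted, as above, each output dimension comes from
chainer.utils.conv.get_deconv_outsize. Note that unpooling_nd defaults to
cover_all=True (matching unpooling_2d), which gives
stride * (in - 1) + kernel - stride + 1 - 2 * padding rather than the
cover_all=False formula stride * (in - 1) + kernel - 2 * padding. A quick
sanity check with assumed arguments:

from chainer.utils import conv

# in_size=4, kernel=2, stride=2, padding=0 -> each spatial dim becomes 7.
expected = conv.get_deconv_outsize(4, 2, 2, 0, cover_all=True)
assert unpooling(4, 2, 2, 0) == (2, 5, expected, expected, expected)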
Example No. 7
 def f(x):
     outs = self.gy.shape[2:]
     return functions.unpooling_nd(x,
                                   self.ksize,
                                   stride=self.stride,
                                   pad=self.pad,
                                   outsize=outs,
                                   cover_all=self.cover_all)
Example No. 8
 def forward(self, inputs, device):
     x, = inputs
     y = functions.unpooling_nd(x,
                                self.ksize,
                                self.stride,
                                self.pad,
                                cover_all=self.cover_all)
     return y,
Example No. 9
    def test_invalid_insize(self):
        ksize = self.ksize
        stride = self.stride
        pad = self.pad
        outs = self.outsize
        cover_all = self.cover_all

        # Make input with invalid shape.
        dims = tuple(conv.get_conv_outsize(out, k, s, p, cover_all=cover_all)
                     for (out, k, s, p) in zip(outs, ksize, stride, pad))
        dims = tuple(d + 1 for d in dims)  # Make invalid input shape.
        x_shape = (self.N, self.c) + dims
        x_data = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
        x = chainer.Variable(x_data)

        # Computing the unpooling raises an exception.
        with self.assertRaises(type_check.InvalidType):
            functions.unpooling_nd(
                x, ksize, stride, pad, outsize=outs, cover_all=cover_all)
Example No. 11
    def __call__(self, x):
        if self.up:
            h = F.unpooling_nd(x, 2, 2, 0, cover_all=False)
            h = self.activation(self.bn0(self.cpara(h)))

        elif self.down:
            h = self.activation(self.bn0(self.cdown(x)))

        else:
            h = self.activation(self.bn0(self.cpara(x)))

        return h
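
A quick shape check for the `up` branch (input shape assumed for
illustration): with ksize = stride = 2, pad = 0 and cover_all=False,
unpooling_nd doubles every spatial dimension before the convolution runs.

import numpy as np
import chainer.functions as F

x = np.zeros((1, 8, 16, 16), dtype=np.float32)
h = F.unpooling_nd(x, 2, 2, 0, cover_all=False)
assert h.shape == (1, 8, 32, 32)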
Example No. 12
    def check_forward_consistency_regression(self, backend_config):
        # Regression test against the two-dimensional unpooling layer.
        inputs, = self.generate_inputs()
        x = chainer.Variable(backend_config.get_array(inputs))

        ksize = self.ksize
        stride = self.stride
        pad = self.pad

        y_nd = functions.unpooling_nd(x, ksize, stride=stride, pad=pad,
                                      cover_all=self.cover_all)
        y_2d = functions.unpooling_2d(x, ksize, stride=stride, pad=pad,
                                      cover_all=self.cover_all)
        testing.assert_allclose(
            y_nd.array, y_2d.array, **self.check_forward_options)
Example No. 14
    def check_forward_consistency_regression(self, x_data):
        # Regression test against the two-dimensional unpooling layer.

        if len(self.dims) != 2:
            return

        ksize = self.ksize
        stride = self.stride
        pad = self.pad

        y_nd = functions.unpooling_nd(x_data, ksize, stride=stride, pad=pad,
                                      cover_all=self.cover_all)
        y_2d = functions.unpooling_2d(x_data, ksize, stride=stride, pad=pad,
                                      cover_all=self.cover_all)
        testing.assert_allclose(
            y_nd.data, y_2d.data, **self.check_forward_options)
Example No. 16
    def check_forward(self, x_data):
        ksize = self.ksize
        stride = self.stride
        pad = self.pad

        # Compute unpooling.
        x = chainer.Variable(x_data)
        y = functions.unpooling_nd(
            x, ksize, stride, pad, cover_all=self.cover_all)

        # Test output's dtype and shape.
        self.assertEqual(y.data.dtype, self.dtype)
        self.assertEqual(y.data.shape, self.gy.shape)

        # Test output's value.
        outs = self.gy.shape[2:]
        y_expected = expected_unpooling_nd(self.x, outs, ksize, stride, pad)
        testing.assert_allclose(
            y_expected, y.data, **self.check_forward_options)
Example No. 18
 def reconstruct(self, f):
     batch_size = f.shape[0]
     h = self.linear_reconstruct(f)
     h = F.reshape(
         h,
         self.chain((batch_size, self.decoder_channels[-1]),
                    self.shapes[-1]))
     for layer_idx, num_layers in reversed(
             tuple(enumerate(self.decoder_layers))):
         for rep_idx in range(num_layers):
             dcnv = getattr(self, "dcnv_{}_{}".format(layer_idx, rep_idx))
             h = dcnv(h)
         if layer_idx != 0:
             h = F.unpooling_nd(h,
                                1,
                                2,
                                outsize=self.shapes[layer_idx - 1],
                                cover_all=False)
     h = F.reshape(h, self.chain((batch_size, ), self.shapes[0]))
     return h
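
The ksize=1, stride=2 unpooling above performs zero insertion: each input
element lands at index 2 * i and the positions in between are filled with
zeros, which is why outsize is needed to pin down the exact (odd or even)
output length. A standalone check with made-up data:

import numpy as np
import chainer.functions as F

x = np.array([[[1., 2., 3.]]], dtype=np.float32)  # shape (1, 1, 3)
y = F.unpooling_nd(x, 1, 2, outsize=(5,), cover_all=False)
assert (y.array == [[[1., 0., 2., 0., 3.]]]).all()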
Example No. 19
    def test_valid_insize(self):
        N = self.N
        c = self.c
        ksize = self.ksize
        stride = self.stride
        pad = self.pad
        outs = self.outsize
        cover_all = self.cover_all

        # Make input.
        dims = tuple(conv.get_conv_outsize(out, k, s, p, cover_all=cover_all)
                     for (out, k, s, p) in zip(outs, ksize, stride, pad))
        x_shape = (N, c) + dims
        x_data = numpy.random.uniform(-1, 1, x_shape).astype(numpy.float32)
        x = chainer.Variable(x_data)

        # Compute unpooling.
        y = functions.unpooling_nd(
            x, ksize, stride, pad, outsize=outs, cover_all=cover_all)

        # Test output's value.
        y_expected = expected_unpooling_nd(x_data, outs, ksize, stride, pad)
        testing.assert_allclose(y_expected, y.data)
Example No. 20
 def test_unpooling_3d(self):
     (x, ksize) = self._get_data(3)
     testing.assert_allclose(
         functions.unpooling_nd(x, ksize).data,
         functions.unpooling_3d(x, ksize).data)
Example No. 21
import collections.abc

import chainer.functions as F


def _upsample(x, upsample):
    # `upsample` is either one scalar factor or one factor per spatial dim.
    if isinstance(upsample, collections.abc.Iterable):
        outsize = tuple(d * s for d, s in zip(x.shape[2:], upsample))
    else:
        outsize = tuple(d * upsample for d in x.shape[2:])
    return F.unpooling_nd(x, upsample, outsize=outsize)
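
Usage sketch (hypothetical input, not from the original file): a uniform 2x
upsampling of a (1, 3, 4, 4) feature map.

import numpy as np

x = np.arange(48, dtype=np.float32).reshape(1, 3, 4, 4)
y = _upsample(x, 2)
assert y.shape == (1, 3, 8, 8)  # outsize pins the result to d * upsample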
Example No. 22
 def f(x):
     outs = self.gy.shape[2:]
     return functions.unpooling_nd(
         x, self.ksize, stride=self.stride, pad=self.pad,
         outsize=outs, cover_all=self.cover_all)
Example No. 23
 def f(x):
     return functions.unpooling_nd(
         x, self.ksize, stride=self.stride, pad=self.pad,
         cover_all=self.cover_all)
Example No. 24
 def f(x):
     return functions.unpooling_nd(x,
                                   self.ksize,
                                   self.stride,
                                   self.pad,
                                   cover_all=self.cover_all)
Example No. 25
    def __call__(self, left, right, disp_true):

        refimg_fea = self.feature_extraction(left)
        targetimg_fea = self.feature_extraction(right)
        # matching
        # with chainer.no_backprop_mode():
        cost = None

        for i in range(int(self.maxdisp / 4)):
            if i > 0:
                # limit size i
                cost_i = F.concat(
                    (refimg_fea[:, :, :, i:], targetimg_fea[:, :, :, :-i]),
                    axis=1).reshape(refimg_fea.shape[0],
                                    refimg_fea.shape[1] * 2, 1,
                                    refimg_fea.shape[2],
                                    refimg_fea.shape[3] - i)
                cost_zero = Variable(
                    cuda.cupy.zeros(
                        (refimg_fea.shape[0], int(refimg_fea.shape[1] * 2), 1,
                         refimg_fea.shape[2], i),
                        dtype=cuda.cupy.float32))
                cost_i = F.concat((cost_zero, cost_i), axis=4)
                cost = F.concat((cost, cost_i), axis=2)
            else:
                cost = F.concat(
                    (refimg_fea, targetimg_fea),
                    axis=1).reshape(refimg_fea.shape[0],
                                    refimg_fea.shape[1] * 2, 1,
                                    refimg_fea.shape[2], refimg_fea.shape[3])

        # gpu0 to gpu1
        cost = F.copy(cost, self.gpu1)

        cost0 = self.dres0(cost)
        cost0 = self.dres1(cost0) + cost0
        cost0 = self.dres2(cost0) + cost0
        cost0 = self.dres3(cost0) + cost0
        cost0 = self.dres4(cost0) + cost0
        cost = self.classify(cost0)

        # gpu1 to gpu0
        cost = F.copy(cost, self.gpu0)

        cost = F.unpooling_nd(cost,
                              4,
                              outsize=(self.maxdisp, left.shape[2],
                                       left.shape[3]))
        cost = F.average_pooling_nd(cost, 3, 1, 1)
        # The 3x3x3 average pooling (stride 1, pad 1) smooths the unpooled
        # volume into an approximate trilinear upsampling.
        cost = F.squeeze(cost, 1)
        pred = F.softmax(cost)  # softmax over the disparity axis (axis=1)
        pred = disparityregression(self.maxdisp)(pred)

        # calculate loss
        pred = F.clip(pred.reshape(pred.shape[0], -1), 0., float(self.maxdisp))
        disp_true = disp_true.reshape(disp_true.shape[0], -1)

        # mask
        if self.train_type == "kitti":
            pred_mask = F.where(disp_true > 0., pred, disp_true)
        elif self.train_type == "sceneflow":
            pred_mask = F.where(disp_true < float(self.maxdisp), pred,
                                disp_true)
        else:
            pred_mask = pred

        #mask = Variable(disp_true).array < self.maxdisp
        loss = F.huber_loss(pred_mask, disp_true, delta=1)
        loss = F.average(loss / pred_mask.shape[1])

        chainer.reporter.report({'loss': loss}, self)

        if self.training:
            return loss
        else:
            return pred.reshape(1, 1, left.shape[2], right.shape[3])
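
The unpooling_nd / average_pooling_nd pair above is the usual
nearest-neighbour-then-smooth approximation of trilinear upsampling. In
isolation, with small assumed shapes:

import numpy as np
import chainer.functions as F

cost = np.random.rand(1, 1, 4, 8, 8).astype(np.float32)
# Nearest-neighbour upsample to (16, 32, 32), then a 3x3x3 mean filter
# (stride 1, pad 1) to smooth the block edges.
up = F.unpooling_nd(cost, 4, outsize=(16, 32, 32))
smooth = F.average_pooling_nd(up, 3, 1, 1)
assert smooth.shape == (1, 1, 16, 32, 32)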
Example No. 26
 def forward(self, inputs, device):
     x, = inputs
     y = functions.unpooling_nd(
         x, self.ksize, self.stride, self.pad, cover_all=self.cover_all)
     return y,
Example No. 27
    def __call__(self, left, right, disp_true):
        # gpu0 to gpu1
        left = F.copy(left, self.gpu1)
        right = F.copy(right, self.gpu1)

        refimg_fea = self.feature_extraction(left)
        targetimg_fea = self.feature_extraction(right)

        refimg_fea = F.copy(refimg_fea, self.gpu0)
        targetimg_fea = F.copy(targetimg_fea, self.gpu0)

        # matching
        # with chainer.no_backprop_mode():
        cost = None

        for i in range(int(self.maxdisp / 4)):
            if i > 0:
                # limit size i
                cost_i = F.concat(
                    (refimg_fea[:, :, :, i:], targetimg_fea[:, :, :, :-i]),
                    axis=1).reshape(refimg_fea.shape[0],
                                    refimg_fea.shape[1] * 2, 1,
                                    refimg_fea.shape[2],
                                    refimg_fea.shape[3] - i)
                cost_zero = Variable(
                    cuda.cupy.zeros(
                        (refimg_fea.shape[0], int(refimg_fea.shape[1] * 2), 1,
                         refimg_fea.shape[2], i),
                        dtype=cuda.cupy.float32))
                cost_i = F.concat((cost_zero, cost_i), axis=4)
                cost = F.concat((cost, cost_i), axis=2)
            else:
                cost = F.concat(
                    (refimg_fea, targetimg_fea),
                    axis=1).reshape(refimg_fea.shape[0],
                                    refimg_fea.shape[1] * 2, 1,
                                    refimg_fea.shape[2], refimg_fea.shape[3])

        cost = F.copy(cost, self.gpu2)

        cost0 = self.dres0(cost)
        cost0 = self.dres1(cost0) + cost0

        out1, pre1, post1 = self.dres2(cost0, None, None)
        out1 = out1 + cost0

        out2, pre2, post2 = self.dres3(out1, pre1, post1)
        out2 = out2 + cost0

        out3, pre3, post3 = self.dres4(out2, pre1, post2)
        out3 = out3 + cost0

        cost1 = self.classify1(out1)
        cost2 = self.classify2(out2) + cost1
        cost3 = self.classify3(out3) + cost2

        # gpu1 to gpu0
        left = F.copy(left, self.gpu0)
        right = F.copy(right, self.gpu0)
        #disp_true = F.copy(disp_true, self.gpu0)
        cost1 = F.copy(cost1, self.gpu0)
        cost2 = F.copy(cost2, self.gpu0)
        cost3 = F.copy(cost3, self.gpu0)

        if self.training:
            # trilinear upsample
            cost1 = F.unpooling_nd(cost1,
                                   4,
                                   outsize=(self.maxdisp, left.shape[2],
                                            left.shape[3]))
            cost1 = F.average_pooling_nd(cost1, 3, 1, 1)

            cost2 = F.unpooling_nd(cost2,
                                   4,
                                   outsize=(self.maxdisp, left.shape[2],
                                            left.shape[3]))
            cost2 = F.average_pooling_nd(cost2, 3, 1, 1)

            # for cost1
            cost1 = F.squeeze(cost1, 1)
            pred1 = F.softmax(cost1)  # softmax over the disparity axis
            pred1 = disparityregression(self.maxdisp)(pred1)

            # for cost2
            cost2 = F.squeeze(cost2, 1)
            pred2 = F.softmax(cost2)  # softmax over the disparity axis
            pred2 = disparityregression(self.maxdisp)(pred2)

        # for cost3
        cost3 = F.unpooling_nd(cost3,
                               4,
                               outsize=(self.maxdisp, left.shape[2],
                                        left.shape[3]))
        cost3 = F.average_pooling_nd(cost3, 3, 1, 1)

        cost3 = F.squeeze(cost3, 1)
        pred3 = F.softmax(cost3)  # softmax over the disparity axis
        pred3 = disparityregression(self.maxdisp)(pred3)

        def calculate_disp_loss(pred, disp_true, train_type):
            # calculate loss
            pred = F.clip(pred.reshape(pred.shape[0], -1), 0.,
                          float(self.maxdisp))
            disp_true = disp_true.reshape(disp_true.shape[0], -1)

            # mask
            if train_type == "kitti":
                pred_mask = F.where(disp_true > 0., pred, disp_true)
            elif train_type == "sceneflow":
                pred_mask = F.where(disp_true < float(self.maxdisp), pred,
                                    disp_true)
            else:
                pred_mask = pred

            #mask = Variable(disp_true).array < self.maxdisp
            loss = F.huber_loss(pred_mask, disp_true, delta=1)
            loss = F.average(loss / pred_mask.shape[1])
            return loss

        if self.training:
            loss1 = calculate_disp_loss(pred1, disp_true, self.train_type)
            loss2 = calculate_disp_loss(pred2, disp_true, self.train_type)
            loss3 = calculate_disp_loss(pred3, disp_true, self.train_type)
            loss = loss1 + loss2 + loss3

            chainer.reporter.report(
                {
                    'loss1': loss1,
                    'loss2': loss2,
                    'loss3': loss3,
                    'loss': loss
                }, self)

            return loss
        else:
            return pred3.reshape(1, 1, left.shape[2], right.shape[3])
Example No. 28
from chainer.functions import unpooling_nd
import numpy as np

B = 3
C = 2
H = 11
W = 11
D = 11

# ksize=2, stride=2, pad=0, outsize=(22, 22, 22), cover_all=False.
y = unpooling_nd(
    np.arange(B * C * H * W * D, dtype=np.float32).reshape((B, C, H, W, D)),
    2, 2, 0, (22, 22, 22), False)
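
With an 11-voxel input, ksize = stride = 2 and pad 0, the requested outsize
of 22 equals the cover_all=False default of stride * (in - 1) + ksize, so
omitting outsize here would produce the same shape:

assert y.shape == (B, C, 22, 22, 22)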
Example No. 29
    def calc(self, x, target):
        """
        :param xp.ndarray x:
        :param xp.ndarray target:
        :return: Variable
        """
        assert self.reconstruction_loss_attached or self.pca_loss_attached
        assert self.pca_attached or not self.pca_loss_attached

        original_shape = list(x.shape)  # [batch, dim1, dim2, dim3]
        new_shape = copy(original_shape)
        new_shape.insert(1, 1)  # [batch, 1, dim1, dim2, dim3]
        x_masked = F.reshape(F.scale(x, self.mask), new_shape)
        padding_history = []
        shape_history = []

        h = x_masked
        for downsample_degree in range(self.tmp_n_blocks):
            for conv_idx in range(self.n_conv_per_block):
                conv = self.__getattribute__("conv_{}_{}".format(
                    downsample_degree, conv_idx))
                if self.debug:
                    print("conv_{}_{}".format(downsample_degree, conv_idx),
                          conv.W.shape)
                h = conv(h)
                if self.debug:
                    print("\t{}".format(h.shape))
                if not (downsample_degree == self.tmp_n_blocks - 1
                        and conv_idx == self.n_conv_per_block - 1):
                    if self.debug:  # the feature output stays raw (no ReLU)
                        print("relu")
                    h = F.relu(h)
            if downsample_degree != self.tmp_n_blocks - 1:
                shape = h.shape[2:]
                shape_history.append(shape)
                padding = tuple([x % 2 for x in shape])
                padding_history.append(padding)
                if self.debug:
                    print("average_pooling_nd")
                h = F.average_pooling_nd(
                    h, 2, 2, padding)  # pads the leading (0) side of each dim
                if self.debug:
                    print("\t{}".format(h.shape))
        # At this point, h is the feature.
        pca_loss = None
        if self.pca_attached:
            if self.debug:
                print("pca")
            feature = self.pca(h)
            if self.pca_loss_attached:
                pca_loss = F.mean_absolute_error(feature, h)
                report({'pca_loss': pca_loss}, self)
                if not self.reconstruction_loss_attached:
                    return pca_loss
            h = feature
            if self.debug:
                print("\t{}".format(h.shape))

        h = F.relu(h)
        if self.debug:
            print("relu")

        for downsample_degree in reversed(range(self.tmp_n_blocks)):
            for conv_idx in range(self.n_conv_per_block):
                conv = self.__getattribute__("dcnv_{}_{}".format(
                    downsample_degree, conv_idx))
                if self.debug:
                    print("dcnv_{}_{}".format(downsample_degree, conv_idx),
                          conv.W.shape)
                h = conv(h)
                if self.debug:
                    print("\t{}".format(h.shape))
                # No ReLU on the final output layer.
                if not (downsample_degree == 0
                        and conv_idx == self.n_conv_per_block - 1):
                    if self.debug:
                        print("relu")
                    h = F.relu(h)
            if downsample_degree != 0:
                shape = shape_history.pop()
                padding = padding_history.pop()
                if self.debug:
                    print("unpooling_nd")
                h = F.unpooling_nd(h, 2, 2, padding, shape, cover_all=False)
                if self.debug:
                    print("\t{}".format(h.shape))
        out = F.reshape(h, tuple(original_shape))
        out_masked = F.scale(out, self.mask)

        target_masked = F.scale(target, self.mask)

        reconstruction_loss = F.mean_absolute_error(
            out_masked, target_masked) * self.loss_const
        report({'reconstruction_loss': reconstruction_loss}, self)
        if self.pca_loss_attached:
            return reconstruction_loss + pca_loss
        else:
            return reconstruction_loss
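
The shape/padding bookkeeping above exists because average pooling halves
odd dimensions inexactly; recording each block's input shape lets the
decoder's unpooling_nd restore it. A self-contained round-trip check with an
assumed odd size:

import numpy as np
import chainer.functions as F

h = np.zeros((1, 1, 5, 5, 5), dtype=np.float32)
shape = h.shape[2:]                    # recorded by the encoder: (5, 5, 5)
padding = tuple(d % 2 for d in shape)  # (1, 1, 1) for odd dims
down = F.average_pooling_nd(h, 2, 2, padding)  # -> (1, 1, 3, 3, 3)
up = F.unpooling_nd(down, 2, 2, padding, shape, cover_all=False)
assert up.shape[2:] == shape           # the odd size round-trips exactly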
Example No. 30
    def calc(self, x, target):
        """
        :param xp.ndarray x:
        :param xp.ndarray target:
        :return: Variable
        """
        original_shape = list(x.shape)  # [batch, dim1, dim2, dim3]
        new_shape = copy(original_shape)
        new_shape.insert(1, 1)  # [batch, 1, dim1, dim2, dim3]
        x_masked = F.reshape(F.scale(x, self.mask), new_shape)

        padding_history = []
        shape_history = []

        h = x_masked
        for downsample_degree in range(self.n_downsamples + 1):
            for conv_idx in range(self.n_conv_per_downsample):
                conv = self.__getattribute__("conv_{}_{}".format(
                    downsample_degree, conv_idx))
                if self.debug:
                    print("conv_{}_{}".format(downsample_degree, conv_idx),
                          conv.W.shape)
                h = conv(h)
                if self.debug:
                    print("\t{}".format(h.shape))
                # No ReLU right before pooling or feature extraction.
                if conv_idx != self.n_conv_per_downsample - 1:
                    if self.debug:
                        print("relu")
                    h = F.relu(h)
            if downsample_degree != self.n_downsamples:
                shape = h.shape[2:]
                shape_history.append(shape)
                padding = tuple([x % 2 for x in shape])
                padding_history.append(padding)
                if self.debug:
                    print("average_pooling_nd")
                h = F.average_pooling_nd(
                    h, 2, 2, padding)  # pads the leading (0) side of each dim
                if self.debug:
                    print("\t{}".format(h.shape))
        # At this point, h is the feature.
        for downsample_degree in reversed(range(self.n_downsamples + 1)):
            for conv_idx in range(self.n_conv_per_downsample):
                conv = self.__getattribute__("dcnv_{}_{}".format(
                    downsample_degree, conv_idx))
                if self.debug:
                    print("dcnv_{}_{}".format(downsample_degree, conv_idx),
                          conv.W.shape)
                h = conv(h)
                if self.debug:
                    print("\t{}".format(h.shape))
                # No ReLU right before unpooling.
                if conv_idx != self.n_conv_per_downsample - 1:
                    if self.debug:
                        print("relu")
                    h = F.relu(h)
            if downsample_degree != 0:
                shape = shape_history.pop()
                padding = padding_history.pop()
                if self.debug:
                    print("unpooling_nd")
                h = F.unpooling_nd(h, 2, 2, padding, shape, cover_all=False)
                if self.debug:
                    print("\t{}".format(h.shape))
        out = F.reshape(h, tuple(original_shape))
        out_masked = F.scale(out, self.mask)

        target_masked = F.scale(target, self.mask)
        loss = F.mean_absolute_error(out_masked,
                                     target_masked) * self.loss_const

        return loss