def __call__(self, x):
        xp = self.xp
        info_total = 0

        # Split the latent tensor into four scale groups and rearrange each
        # back to its spatial resolution via pixel shuffle.
        z1, z2, z3, z4 = xp.split(x.data, indices_or_sections=self.idx, axis=1)
        z3 = F.depth2space(z3, 2).data
        z2 = F.depth2space(z2, 4).data
        z1 = F.depth2space(z1, 8).data

        if self.ct1 != 0:
            h = z1
            h_small = xp.concatenate(
                (z2, M.unpooling_2d(z3, 2).data, M.unpooling_2d(z4, 4).data),
                axis=1)
            info = self.single_scale([self.b4_0, self.b4_1],
                                     '4',
                                     h,
                                     x_small=h_small)
            info_total += info

        if self.ct2 != 0:
            h = xp.concatenate((z1[:, :, ::2, ::2], z2), axis=1)
            h_small = xp.concatenate((z3, M.unpooling_2d(z4, 2).data), axis=1)
            info = self.single_scale([self.b8_0, self.b8_1],
                                     '8',
                                     h,
                                     x_small=h_small)
            info_total += info

        if self.ct3 != 0:
            h = xp.concatenate((z1[:, :, ::4, ::4], z2[:, :, ::2, ::2], z3),
                               axis=1)
            h_small = z4
            info = self.single_scale([self.b16_0, self.b16_1],
                                     '16',
                                     h,
                                     x_small=h_small)
            info_total += info

        h = xp.concatenate(
            (z1[:, :, ::8, ::8], z2[:, :, ::4, ::4], z3[:, :, ::2, ::2], z4),
            axis=1)

        for i, name in zip(range(3), 'abc'):
            info = self.single_scale([self.b32_0[i], self.b32_1[i]],
                                     '32{}'.format(name), h)
            info_total += info
            h = h[:, :, ::2, ::2]

        info_total += self.final(h)

        mean_entropy = info_total / x[0].data.size
        return mean_entropy
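
Every example on this page is built around F.depth2space (pixel shuffle), which moves channel blocks into space: an (N, C*r*r, H, W) input becomes (N, C, H*r, W*r). A minimal sketch of the mapping, assuming nothing beyond numpy and chainer:

import numpy as np
import chainer.functions as F

x = np.arange(16, dtype=np.float32).reshape(1, 4, 2, 2)
y = F.depth2space(x, 2)
print(y.shape)  # (1, 1, 4, 4)
# For r=2 and a single output channel, input channel c lands at spatial
# offset (c // 2, c % 2) inside each 2x2 output block.
print(y.data[0, 0])
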
Example #2
    def __call__(self, x):
        z1, z2, z3, z4 = F.split_axis(x, indices_or_sections=self.idx, axis=1)
        z3 = F.depth2space(z3, 2)
        z2 = F.depth2space(z2, 4)
        z1 = F.depth2space(z1, 8)

        h = self.b5(z4)

        h = F.depth2space(h, 2)
        h = F.concat([z3, h], axis=1)
        h = self.b4(h)

        h = F.depth2space(h, 2)
        h = F.concat([z2, h], axis=1)
        h = self.b3(h)

        h = F.depth2space(h, 2)
        h = F.concat([z1, h], axis=1)
        h = self.b2(h)

        h = F.depth2space(h, 2)
        h = self.b1(h)

        h = self.conv(h)
        h = F.depth2space(h, 2)
        return h
Example #3
    def __call__(self, color, spec):
        color_res1 = self.color_head(color)
        color = self.color_body_pre(color_res1)

        spec_res1 = self.spec_head(spec)
        spec = self.spec_body_pre(spec_res1)

        if self.fix_params_before_bridges:
            spec.unchain_backward()
            color.unchain_backward()

        if self.connected:
            color_tmp = self.spec_to_color_bridge(F.depth2space(spec, 2))
            spec_tmp = self.color_to_spec_bridge(F.space2depth(color, 2))
            color += color_tmp
            spec += spec_tmp

        color = self.color_body_post(color)
        spec = self.spec_body_post(spec)

        color += color_res1
        spec += spec_res1

        color = self.color_tail(color)
        spec = self.spec_tail(spec)
        return color, spec
Example #4
 def __call__(self, x):
     b, c, h, w = x.shape
     # Treat every channel as an independent single-channel image.
     h1 = x.reshape(b * c, 1, h, w)
     # Unpadded ("valid") convolution with the fixed kernel self.w; the final
     # reshape implies a 2x2 kernel producing four output channels.
     h2 = convolution_2d(h1, self.xp.array(self.w))
     # Pixel shuffle turns the four channels into a 2x upsampled image.
     h3 = depth2space(h2, 2)
     return h3.reshape(b, c, (h - 1) * 2, (w - 1) * 2)
Example #5
 def __call__(self, x):
     batch, channels, height, width = x.shape
     # Treat every channel as an independent single-channel image.
     h1 = x.reshape(batch * channels, 1, height, width)
     # Symmetric padding of 2 on each side offsets the kernel's support,
     # so the convolved output keeps the input's spatial size.
     h2 = pad(h1, ((0, 0), (0, 0), (2, 2), (2, 2)), mode="symmetric")
     h3 = convolution_2d(h2, self.xp.asarray(self.w))
     # Pixel shuffle: (batch*channels, 4, H, W) -> (batch*channels, 1, 2H, 2W).
     h4 = depth2space(h3, 2)
     return h4.reshape(batch, channels, height * 2, width * 2)
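
Examples #4 and #5 implement the same fixed-kernel sub-pixel upsampler; #5 pads symmetrically so the output is exactly twice the input size, while #4 convolves without padding and therefore loses one pixel per spatial dimension. A self-contained, forward-only sketch of the pattern, with a hypothetical random 5x5 kernel standing in for the fixed filter self.w:

import numpy as np
import chainer.functions as F

# Hypothetical kernel: four output channels, one per sub-pixel phase.
# A real model would use a fixed interpolation filter here.
w = np.random.randn(4, 1, 5, 5).astype(np.float32)

def upsample2x(x):
    b, c, h, w_ = x.shape
    h1 = x.reshape(b * c, 1, h, w_)
    h2 = F.pad(h1, ((0, 0), (0, 0), (2, 2), (2, 2)), mode='symmetric')
    h3 = F.convolution_2d(h2, w)   # (b*c, 4, h, w_)
    h4 = F.depth2space(h3, 2)      # (b*c, 1, 2h, 2w_)
    return h4.reshape(b, c, h * 2, w_ * 2)

x = np.random.randn(1, 3, 8, 8).astype(np.float32)
print(upsample2x(x).shape)  # (1, 3, 16, 16)
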
 def setup(self):
     xp = self.xp
     x = xp.random.randn(128, 80, 30, 20).astype(xp.float32)
     gy = xp.random.randn(128, 20, 60, 40).astype(xp.float32)
     r = 2
     func = lambda x: F.depth2space(x, r)
     self.setup_benchmark(func, (x, ), gy)
 def __call__(self, x):
     _, _, in_h, in_w = x.shape
     self._compute_outsize(in_h, in_w)
     self._compute_padsize(in_h, in_w, self.out_h, self.out_w)
     x = F.pad(x, ((0, 0), (0, 0), (self.ph_mid, self.ph - self.ph_mid),
                   (self.pw_mid, self.pw - self.pw_mid)),
               mode='constant')
     h = self.conv(x)
     return F.depth2space(h, self.r)
    def __init__(self, opt, input_ch, output_ch, rate=2):
        super().__init__()
        he_w = HeNormal()
        output_ch = output_ch * rate**2

        with self.init_scope():
            self.c = define_conv(opt)(input_ch, output_ch, ksize=3, stride=1, pad=1, initialW=he_w)

        self.ps_func = lambda x: F.depth2space(x, rate)
Example #9
    def check_backward(self, random_array, random_grad_array):
        x = chainer.Variable(random_array)
        y = functions.depth2space(x, 2)
        y.grad = random_grad_array
        y.backward()

        def func():
            return (functions.depth2space(x, 2).data,)
        gx, = gradient_check.numerical_grad(func, (x.data,), (y.grad,))

        testing.assert_allclose(x.grad, gx, rtol=0.0001)
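
For comparison, chainer ships a one-call helper that performs the same numerical-versus-analytical comparison; a sketch assuming the same random arrays as above:

from chainer import functions, gradient_check

def check_backward_helper(random_array, random_grad_array):
    # Compares the analytical gradient of depth2space against a numerically
    # estimated one, as the manual test above does step by step.
    gradient_check.check_backward(
        lambda v: functions.depth2space(v, 2),
        random_array, random_grad_array, rtol=1e-4)
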
Example #10
    def check_forward(self, depth_data, space_data):
        depth = chainer.Variable(depth_data)
        d2s = functions.depth2space(depth, self.r)
        d2s_value = cuda.to_cpu(d2s.data)

        self.assertEqual(d2s_value.dtype, self.dtype)
        self.assertEqual(d2s_value.shape, (2, 2, 6, 4))

        d2s_expect = space_data

        testing.assert_allclose(d2s_value, d2s_expect)
Example #12
    def _logits(self, x):
        # Batch normalization is applied after the ReLU activation in every layer except the final one.
        h = self.bn1(F.relu(self.conv1(x)))
        h = self.bn2(F.relu(self.conv2(h)))
        h = self.bn3(F.relu(self.conv3(h)))
        h = self.bn4(F.relu(self.conv4(h)))
        h = self.conv5(h)
        # Upscale to the target width self.DSTX with the pixel shuffler
        # (depth2space), inferring the ratio from the current feature width.
        h = F.depth2space(h, self.DSTX // h.shape[3])

        return h
    def single_scale_decode(self,
                            func_list,
                            decoder,
                            x,
                            out_shape,
                            x_small=None):
        xp = self.xp
        # Shapes of the four sub-pixel phases of a 2x pixel shuffle of
        # out_shape; the ceil/floor pairs handle odd spatial sizes.
        shape00 = (1, out_shape[1], math.ceil(out_shape[2] / 2),
                   math.ceil(out_shape[3] / 2))
        shape01 = (1, out_shape[1], math.ceil(out_shape[2] / 2),
                   math.floor(out_shape[3] / 2))
        shape10 = (1, out_shape[1], math.floor(out_shape[2] / 2),
                   math.ceil(out_shape[3] / 2))
        shape11 = (1, out_shape[1], math.floor(out_shape[2] / 2),
                   math.floor(out_shape[3] / 2))

        h = x
        p11 = func_list[0](h).data
        p11 = p11[:, :, :shape11[2], :shape11[3]]
        p11 = p11.transpose((0, 2, 3, 1)).reshape((np.prod(shape11), -1))
        p11 = F.softmax(p11, axis=1).data
        t11 = xp.array(decoder.call(cuda.to_cpu(p11)))
        t11 = t11.reshape((1, shape11[2], shape11[3], shape11[1])).transpose(
            (0, 3, 1, 2)).astype(np.float32)
        t11p = xp.pad(t11, [(0, i - j) for i, j in zip(shape00, shape11)],
                      mode='constant')

        h = xp.concatenate((x, t11p), axis=1)
        p01, p10 = xp.split(func_list[1](h).data,
                            axis=1,
                            indices_or_sections=2)
        p01 = p01[:, :, :shape01[2], :shape01[3]]
        p01 = p01.transpose((0, 2, 3, 1)).reshape((np.prod(shape01), -1))
        p01 = F.softmax(p01, axis=1).data
        t01 = xp.array(decoder.call(cuda.to_cpu(p01)))
        t01 = t01.reshape((1, shape01[2], shape01[3], shape01[1])).transpose(
            (0, 3, 1, 2)).astype(np.float32)
        t01p = xp.pad(t01, [(0, i - j) for i, j in zip(shape00, shape01)],
                      mode='constant')
        p10 = p10[:, :, :shape10[2], :shape10[3]]
        p10 = p10.transpose((0, 2, 3, 1)).reshape((np.prod(shape10), -1))
        p10 = F.softmax(p10, axis=1).data
        t10 = xp.array(decoder.call(cuda.to_cpu(p10)))
        t10 = t10.reshape((1, shape10[2], shape10[3], shape10[1])).transpose(
            (0, 3, 1, 2)).astype(np.float32)
        t10p = xp.pad(t10, [(0, i - j) for i, j in zip(shape00, shape10)],
                      mode='constant')

        t = xp.concatenate((x[:, :out_shape[1]], t01p, t10p, t11p), axis=1)
        t = F.depth2space(t, 2).data
        t = t[:, :out_shape[1], :out_shape[2], :out_shape[3]]
        return t
 def __call__(self, x):
     if self.sample in ['maxpool_res', 'avgpool_res']:
         h = self.activation(self.norm(self.c(x)))
         h = self.normr(self.cr(h))
         if self.sample == 'maxpool_res':
             h = F.max_pooling_2d(h, 2, 2, 0)
             h = h + F.max_pooling_2d(self.cskip(x), 2, 2, 0)
         elif self.sample == 'avgpool_res':
             h = F.average_pooling_2d(h, 2, 2, 0)
             h = h + F.average_pooling_2d(self.cskip(x), 2, 2, 0)
     elif 'resize' in self.sample:
         H, W = x.data.shape[2:]
         h0 = F.resize_images(x, (2 * H, 2 * W))
         h = self.norm(self.c(h0))
         if self.sample == 'resize_res':
             h = self.activation(h)
             h = self.cskip(h0) + self.normr(self.cr(h))
     elif 'pixsh' in self.sample:
         h0 = F.depth2space(x, 2)
         h = self.norm(self.c(h0))
         if self.sample == 'pixsh_res':
             h = self.activation(h)
             h = self.cskip(h0) + self.normr(self.cr(h))
     elif 'unpool' in self.sample:
         h0 = F.unpooling_2d(x, 2, 2, 0, cover_all=False)
         h = self.norm(self.c(h0))
         if self.sample == 'unpool_res':
             h = self.activation(h)
             h = self.cskip(h0) + self.normr(self.cr(h))
     else:
         if self.sample == 'maxpool':
             h = self.c(x)
             h = F.max_pooling_2d(h, 2, 2, 0)
         elif self.sample == 'avgpool':
             h = self.c(x)
             h = F.average_pooling_2d(h, 2, 2, 0)
         else:
             h = self.c(x)
         h = self.norm(h)
     if self.dropout:
         h = F.dropout(h, ratio=self.dropout)
     if self.activation is not None:
         h = self.activation(h)
     return h
Example #15
 def __call__(self, z):
     h = F.reshape(F.leaky_relu(self.bn0(self.l0(z))),
                   (len(z), self.ch, self.bottom_height, self.bottom_width))
     h = F.dropout(h, 0.5)
     h = F.leaky_relu(F.depth2space(self.bn1_0(self.c1_0(h)), 2))
     h = F.dropout(h, 0.4)
     h = F.leaky_relu(F.depth2space(self.bn1_1(self.c1_1(h)), 2))
     h = F.dropout(h, 0.2)
     h = F.leaky_relu(F.depth2space(self.bn2_0(self.c2_0(h)), 2))
     h = F.leaky_relu(F.depth2space(self.bn2_1(self.c2_1(h)), 2))
     h = F.leaky_relu(F.depth2space(self.bn3_0(self.c3_0(h)), 2))
     h = F.leaky_relu(F.depth2space(self.bn3_1(self.c3_1(h)), 2))
     x = F.sigmoid(self.c4_0(h))
     return x
 def func(x):
     return F.depth2space(x, r)
Example #17
 def __call__(self, x):
     return cf.depth2space(self.conv(x), r=self.scale)
Example #18
 def __call__(self, x):
     x = self.convs(x)
     x = F.depth2space(x, 2)
     return x
Example #19
 def __call__(self, x: chainer.Variable):
     h = self.conv(x)
     h = depth2space(h, r=2)
     h = chainer.functions.relu(h)
     return h
Example #20
def train_cnn():
    # model initialization
    if RESTART_POS == 0:
        model = SRNet()
    else:
        param_path = RES_DIR + "p_{0:08d}.pickle".format(RESTART_POS)
        try:
            with open(param_path, 'rb') as fp:
                model = pickle.load(fp)
        except IOError:
            error_exit("train_cnn: cannot load a parameter file " + param_path)
    if 0 <= GPU_ID:
        cuda.check_cuda_available()
        cuda.get_device(GPU_ID).use()
        model.to_gpu()
    optim = optimizers.Adam().setup(model)
    while True:
        msg, data = data_q.get()
        if msg == "end":
            eval_q.put("end")
            break
        elif msg == "prepare":
            # prepares evaluation dataset
            res_q.put("disp", "preparing evaluation dataset")
            res_q.put("prog_set", EVAL_NUM * 2)
            ev_sv = pool_map(load_img, EVAL_NUM, True, DIGIT_LEN, EVAL_DIR)
            ev_in = pool_map(prepare_eval_img, EVAL_NUM, False, ev_sv)
        # evaluation
        elif msg == "eval":
            eval_q.put("eval_start", data)
            res_q.put("disp", "evaluating @ batch {:d}".format(data))
            res_q.put("prog_set", EVAL_NUM * 2)
            # performs forward propagation and checks value distribution
            for i, nw in enumerate(ev_in):
                lr = uint8_to_float(nw)
                if 0 <= GPU_ID:
                    lr = cuda.to_gpu(lr)
                with chainer.using_config("train", False):
                    with chainer.no_backprop_mode():
                        sr = model(lr, None)
                        if   SR_METHOD == "sp":
                            sr = F.depth2space(sr, 2).data
                        elif SR_METHOD == "fp":
                            sr[1] = sr[1, :, ::  , ::-1]
                            sr[2] = sr[2, :, ::-1, ::  ]
                            sr[3] = sr[3, :, ::-1, ::-1]
                            sr = sr.reshape((1, 12, sr.shape[2], sr.shape[3]))
                            sr = F.depth2space(sr, 2).data
                        sr = float_to_uint8(sr[0]).transpose(1, 2, 0)
                        eval_q.put("eval_res", (sr, ev_sv[i]))
                        res_q.put("prog")
            eval_q.put("eval_finish", model.copy().to_cpu())
        # training
        else:
            # receives batch data
            count, train_batch, sv_batch = data
            if 0 <= GPU_ID:
                train_batch = cuda.to_gpu(train_batch)
                sv_batch    = cuda.to_gpu(sv_batch)
            # update
            loss = model(train_batch, sv_batch)
            model.cleargrads()
            loss.backward()
            optim.update()
            eval_q.put("train", (count, float(cuda.to_cpu(loss.data))))
    def fixed_encode(self, x, im_shape, loop=3, filename='test.bin'):
        if x.shape[0] != 1:
            print('batch size has to be 1')
            exit(-1)
        xp = self.xp
        pt_list = []
        # print('init:\n{}'.format(x.data[0, 0, :5, :5]))

        z1, z2, z3, z4 = xp.split(x.data, indices_or_sections=self.idx, axis=1)
        z3 = F.depth2space(z3, 2).data
        z2 = F.depth2space(z2, 4).data
        z1 = F.depth2space(z1, 8).data

        z_concat = z1
        if self.ct1 != 0:
            h = _crop(z_concat, im_shape=im_shape, shrink=3)
            # print('d4:\n{}'.format(h[0, 0, :5, :5]))
            h_small = xp.concatenate(
                (z2, M.unpooling_2d(z3, 2).data, M.unpooling_2d(z4, 4).data),
                axis=1)
            self.single_scale_encode([self.b4_0, self.b4_1],
                                     pt_list,
                                     h,
                                     x_small=h_small)

        z_concat = xp.concatenate((z_concat[:, :, ::2, ::2], z2), axis=1)
        if self.ct2 != 0:
            h = _crop(z_concat, im_shape=im_shape, shrink=4)
            # print('d8:\n{}'.format(h[0, 0, :5, :5]))
            h_small = xp.concatenate((z3, M.unpooling_2d(z4, 2).data), axis=1)
            self.single_scale_encode([self.b8_0, self.b8_1],
                                     pt_list,
                                     h,
                                     x_small=h_small)

        z_concat = xp.concatenate((z_concat[:, :, ::2, ::2], z3), axis=1)
        if self.ct3 != 0:
            h = _crop(z_concat, im_shape=im_shape, shrink=5)
            # print('d16:\n{}'.format(h[0, 0, :5, :5]))
            h_small = z4
            self.single_scale_encode([self.b16_0, self.b16_1],
                                     pt_list,
                                     h,
                                     x_small=h_small)

        h = xp.concatenate((z_concat[:, :, ::2, ::2], z4), axis=1)
        for i in range(loop):
            # print('d32_{}:\n{}'.format(i, h[0, 0, :5, :5]))
            h = self.single_scale_encode([self.b32_0[i], self.b32_1[i]],
                                         pt_list, h)

        # print('df:\n{}'.format(h[0, 0, :5, :5]))
        t_init = h.astype(np.int32).transpose(0, 2, 3, 1).flatten()
        p_init = xp.tile(
            F.softmax(self.d_pred, axis=1).data, (h.shape[2] * h.shape[3], 1))
        pt_list.append((cuda.to_cpu(p_init), cuda.to_cpu(t_init)))

        encoder = RC.Encoder(np.uint16(im_shape[2]), np.uint16(im_shape[3]),
                             filename)
        for p, t in reversed(pt_list):
            encoder.call(p, t)
        encoder.finish()
        del encoder
Example #22
 def _do_after_cal_0(self, x):
     if self.nn == 'up_subpixel':
         x = F.depth2space(x, 2)
     return x
Example #23
 def f(x):
     y = functions.depth2space(x, self.r)
     return y * y
Example #24
 def __call__(self, x):
     h = F.depth2space(self.conv(x), 2)
     return h
 def f(x):
     return functions.depth2space(x, self.r)
Example #26
 def func():
     return (functions.depth2space(x, 2).data,)
 def f(x):
     y = functions.depth2space(x, self.r)
     return y * y
Example #28
 def f(x):
     return functions.depth2space(x, self.r)