    def test_backward(self):
        x = chainer.Variable(numpy.array([1]))
        y1 = F.identity(x)
        y2 = F.identity(x)
        z = y1 + y2

        z.grad = numpy.array([1])
        z.backward(retain_grad=True)

        self.assertEqual(y1.grad[0], 1)
        self.assertEqual(y2.grad[0], 1)
        self.assertEqual(x.grad[0], 2)
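
# A minimal sketch (not from the test above) of the default behaviour:
# without retain_grad=True, Chainer releases the gradients of
# intermediate variables after backward.
import numpy
import chainer
import chainer.functions as F

x = chainer.Variable(numpy.array([1.0]))
y = F.identity(x)
z = y + y
z.grad = numpy.array([1.0])
z.backward()             # retain_grad defaults to False
assert y.grad is None    # the intermediate gradient was released
assert x.grad[0] == 2.0  # the leaf gradient is still accumulated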
Example #2
    def __call__(self, x, t):

        self.clear()
        h = F.relu(self.bn1(self.conv1(x), test=not self.train))
        h = self.res2(h, self.train)
        h = F.max_pooling_2d(h, 2, stride=2)
        h = self.res3(h, self.train)
        h = self.res4(h, self.train)
        inter = F.identity(h)
        h = self.hg1(h, self.train)

        # Residual layers at output resolution
        h = self.res5(h, self.train)
        h = F.relu(self.bn6(self.conv6(h), test=not self.train))
        ll_ = self.conv7(h)

        # Predicted heatmaps
        tmpOut = self.inter_conv1(h)
        tmpOut_ = self.inter_conv2(tmpOut)

        h = add(ll_, tmpOut_, inter)
        h = self.hg2(h, self.train)
        h = self.res8(h, self.train)
        h = F.relu(self.bn9(self.conv9(h), test=not self.train))
        h = self.final_conv1(h)
        h = F.concat((tmpOut, h))

        t = F.concat((t, t))

        self.loss = F.mean_squared_error(h, t)
        if self.train:
            return self.loss
        else:
            self.pred = h
            return self.pred
Example #3
 def test_raise(self):
     x = np.array([1], np.float32)
     x = chainer.Variable(x)
     y = F.identity(x)
     y.grad = np.array([np.nan], np.float32)
     with self.assertRaises(RuntimeError):
         y.backward()
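
# A hedged sketch of the same check outside the test harness: the
# RuntimeError comes from Chainer's debug-mode NaN detection, so the
# sketch enables it explicitly through the 'debug' config flag.
import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.array([1], np.float32))
y = F.identity(x)
y.grad = np.array([np.nan], np.float32)
with chainer.using_config('debug', True):
    try:
        y.backward()
    except RuntimeError:
        print('NaN gradient detected')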
Example #4
 def test_raise_double_backprop_2(self):
     x = chainer.Variable(np.empty(1, np.float32))
     z = F.identity(x)  # new style
     y = IdentityFunction()(z)  # old style
     y.backward()
     with self.assertRaises(RuntimeError):
         x.grad_var.backward()
Example #5
 def test_default_backward(self):
     x = chainer.Variable(np.empty(1, np.float32))
     y = F.identity(x)
     y.backward()
     self.assertIsNone(x.grad_var.creator)
     x.grad_var.backward()
     self.assertIsNone(y.grad_var.grad_var)
Example #6
 def __call__(self, x):
     if self.resize_identity:
         # project the input when its shape must change along the shortcut
         identity = self.identity_conv(x)
     else:
         identity = F.identity(x)
     x = self.body(x)
     x = x + identity
     return x
Example #7
 def house_transform(self, z):
     vec_t = self.qh_vec_0

     for i in range(self.num_trans):
         vec_t = F.identity(self.qlin_h_vec_t(vec_t))
         # outer product v v^T and squared norm ||v||^2 of the current vector
         vec_t_product = F.matmul(vec_t, vec_t, transb=True)
         vec_t_norm_sqr = F.tile(F.sum(F.square(vec_t)), (z.shape[0], z.shape[1]))
         # Householder-style reflection: z <- z - 2 (v v^T) z / ||v||^2
         z = z - 2 * F.matmul(vec_t_product, z) / vec_t_norm_sqr
     return z
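
# A hedged NumPy rendering of one loop iteration above, assuming vec_t is
# a column vector v: the update is the Householder-style reflection
# z <- z - 2 (v v^T) z / ||v||^2.
import numpy as np

v = np.array([[1.0], [2.0]], dtype=np.float32)             # column vector
z = np.array([[0.5, -1.0], [2.0, 0.0]], dtype=np.float32)
z_new = z - 2 * (v @ v.T) @ z / np.sum(v ** 2)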
Example #8
 def crop(inputs, outsize, offset):
     x = F.identity(inputs)
     # axes whose size differs from the requested output size get cropped
     crop_axis = [i != j for i, j in zip(inputs.data.shape, outsize)]
     i = 0
     for index, tf in enumerate(crop_axis):
         if tf:
             # keep the slice [offset, offset + outsize) along this axis
             _, x, _ = F.split_axis(x, [offset[i], offset[i] + outsize[index]], index)
             i += 1
     return x
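
# A minimal usage sketch for the crop helper above (shapes illustrative;
# assumes numpy and chainer are imported as in the snippet): crop a
# (1, 3, 8, 8) map down to (1, 3, 5, 5) at spatial offsets (2, 1).
import numpy as np
import chainer

x = chainer.Variable(np.arange(192, dtype=np.float32).reshape(1, 3, 8, 8))
y = crop(x, outsize=(1, 3, 5, 5), offset=(2, 1))
assert y.shape == (1, 3, 5, 5)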
Example #9
    def test_forward(self):
        xs = (Variable(numpy.array([0])), Variable(numpy.array([0])),
              Variable(numpy.array([0])))
        xs[0].rank = 1
        xs[1].rank = 3
        xs[2].rank = 2
        ys = identity(*xs)

        self.assertEqual(len(ys), len(xs))
        for y in ys:
            # rank is (maximum rank in xs) + 2, since Function call
            # automatically inserts Split function.
            self.assertEqual(y.rank, 5)
Example #10
    def __call__(self, x):
        x, t, l = x

        reshape = (1, x.shape[1]) + (1,) * (x.ndim - 2)

        if chainer.config.train:
            # batch norm
            mean = F.mean(x, axis=(0,) + tuple(range(2, x.ndim)))
            x = x - F.broadcast_to(
                F.reshape(mean, reshape),
                x.shape)
            var = F.mean(x ** 2, axis=(0,) + tuple(range(2, x.ndim)))
            m = x.size // self.gamma.size
            adjust = m / max(m - 1., 1.)  # unbiased estimation
            self.avg_mean *= self.decay
            self.avg_mean += (1 - self.decay) * mean.array
            self.avg_var *= self.decay
            self.avg_var += (1 - self.decay) * adjust * var.array
        else:
            mean = self.avg_mean
            var = self.avg_var
            x = x - F.broadcast_to(F.reshape(mean, reshape), x.shape)

        z0 = F.identity(self.gamma) / F.sqrt(var + self.eps)
        z = F.reshape(z0, reshape)
        x = x * F.broadcast_to(z, x.shape) + F.broadcast_to(
            F.reshape(self.beta, reshape), x.shape)

        # calculate Lipschitz constant
        if getattr(chainer.config, 'lmt', False):
            if getattr(chainer.config, 'exact', False):
                l = l * F.reshape(F.max(F.absolute(z0)), (1,))
            else:
                normalize(self.u.array)
                perturb(self.u.array, 1e-2, self.xp)
                u = self.u * z0
                l = l * l2_norm(u)

        return x, t, l
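
# In eval mode the block above reduces to a per-channel affine map
# x -> z0 * x + beta, so the 'exact' branch takes max |z0| as its
# Lipschitz constant. A small NumPy sanity check under that assumption:
import numpy as np

gamma = np.array([1.5, -0.5, 2.0], np.float32)
var = np.array([0.25, 1.0, 4.0], np.float32)
z0 = gamma / np.sqrt(var + 1e-5)
x1, x2 = np.random.randn(3), np.random.randn(3)
lip = np.max(np.abs(z0))
assert np.all(np.abs(z0 * x1 - z0 * x2) <= lip * np.abs(x1 - x2) + 1e-6)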
Example #11
    def __call__(self, x):

        if not self._residual:

            h = x

            for i in range(self._ninner):
                h = self['conv_%d' % i](h)

                if self._norm_param is not None \
                        and _n_spatial_unit(h) != 1:
                    # NOTE: if the spatial size is 1, the activations could
                    # always be zeroed by the batch normalization
                    h = self['conv_norm_%d' % i](h)

                h = self._activation(h)

            return h

        else:

            h = x

            for i in range(self._ninner):
                h = self['conv_%d' % i](h)

                if self._norm_param is not None \
                        and _n_spatial_unit(h) != 1:
                    h = self['conv_norm_%d' % i](h)

                if i == 0:
                    g = F.identity(h)  # TODO: order should be checked

                if i != (self._ninner - 1):
                    h = self._activation(h)

            return self._activation(g + h)
Example #12
 def forward(self, x, x_org, feed_previous=False):
     bsize = len(x)
     x_emb = self.embedding(x)
     x_emb = normalizing(x_emb, 1)
     x_emb = F.expand_dims(x_emb, 1)
     H = self.conv_encoder(x_emb)
     H_mean, H_log_sigma_sq = self.vae_classifier(H)
     mu = self.xp.zeros((bsize, self.ef_dim), np.float32)
     ln_sigma = self.xp.ones((bsize, self.ef_dim), np.float32)
     eps = F.gaussian(mu, ln_sigma)  # noise; F.gaussian takes (mean, ln_var)
     H_dec = H_mean + eps * F.sqrt(F.exp(H_log_sigma_sq))
     H_dec2 = F.identity(H_dec)
     # model: cnn_rnn
     loss, rec_sent_1, _ = self.lstm_decoder(H_dec2,
                                             x_org,
                                             feed_previous=feed_previous)
     _, rec_sent_2, _ = self.lstm_decoder(H_dec2, x_org, feed_previous=True)
     # KL loss
     kl_loss = F.mean(-0.5 * F.mean(1 + H_log_sigma_sq \
                                 - F.square(H_mean) \
                                 - F.exp(H_log_sigma_sq), axis=1))
     loss += kl_loss
     chainer.report({'loss': loss.data, 'kl_loss': kl_loss.data}, self)
     return loss, rec_sent_1, rec_sent_2
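
# The kl_loss above is the closed-form KL divergence between
# N(H_mean, exp(H_log_sigma_sq)) and the standard normal,
# KL = -0.5 * (1 + log(sigma^2) - mu^2 - sigma^2) per latent dimension;
# a scalar check in plain NumPy:
import numpy as np

mu, log_var = 0.3, -0.2
kl = -0.5 * (1 + log_var - mu ** 2 - np.exp(log_var))
print(kl)  # non-negative for any mu and log_var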
Example #13
 def test_int(self):
     x = np.array([1], int)
     x = chainer.Variable(x)
     y = F.identity(x)
     y.grad = np.array([0], int)
     y.backward()
Example #14
 def __call__(self, x):
     x = F.identity(x)
     return self.ops(*([x] + self.args))
Example #15
 def predict(self, x):
     h1 = F.identity(self.l1(x))
     h2 = F.identity(self.l2(h1))
     return self.l3(h2)
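
# A hedged sketch of a complete chain around a predict method like the
# one above (layer sizes are made up); F.identity acts as a linear,
# i.e. no-op, activation between the links.
import chainer
import chainer.functions as F
import chainer.links as L

class MLP(chainer.Chain):
    def __init__(self):
        super().__init__()
        with self.init_scope():
            self.l1 = L.Linear(None, 100)  # input size inferred at first call
            self.l2 = L.Linear(100, 100)
            self.l3 = L.Linear(100, 10)

    def predict(self, x):
        h1 = F.identity(self.l1(x))
        h2 = F.identity(self.l2(h1))
        return self.l3(h2)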
Example #16
 def __call__(self, x_data):
     with using_config("train", False):
         h = self.forward(x_data)
     return F.identity(h)
Example #17
 def Q_func(self, x):
     h1 = F.leaky_relu(self.L1(x))
     h2 = F.leaky_relu(self.L2(h1))
     h3 = F.leaky_relu(self.L3(h2))
     return F.identity(self.Q_value(h3))
Example #18
def empty(x):
    xp = backend.get_array_module(x)
    s0, _, s2, s3 = x.shape
    return F.identity(xp.empty((s0, 0, s2, s3), dtype=np.float32))
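
# A quick check of the helper above (shapes illustrative; assumes the
# imports used by the snippet): the result keeps the batch and spatial
# dimensions but has zero channels, so it is a neutral element for
# F.concat along the channel axis.
import numpy as np

x = np.zeros((2, 16, 4, 4), np.float32)
y = empty(x)
assert y.shape == (2, 0, 4, 4)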
Example #19
    def __call__(self, x):

        store_activations = {}

        # down convolution
        for i in range(1, self.n_layers + 1):

            if i == 1:
                h = F.identity(x)
            else:
                h = F.max_pooling_nd(h, 2, stride=2)

            h = self['down_unet_block_%d' % (i)](h)
            h = self.down_conv_dropout(h)
            store_activations['down_unet_block_%d' % (i)] = h

        del h  # clear hidden layer

        # up convolution
        for i in range(self.n_layers - 1, 0, -1):

            if i == self.n_layers - 1:
                h = store_activations['down_unet_block_%d' % (i + 1)]
                del store_activations['down_unet_block_%d' % (i + 1)]  # clear

            h = self['deconv_%d' % i](h)
            if self.batch_norm:
                h = self['bn_deconv_%d' % i](h)
            h = self.up_conv_activate_function(h)
            down_conv = store_activations['down_unet_block_%d' % (i)]
            del store_activations['down_unet_block_%d' % (i)]  # clear

            if self.n_dims == 2:
                h = F.concat([
                    h[:, :, 0:down_conv.shape[2], 0:down_conv.shape[3]],
                    down_conv
                ])  # fuse layer
            elif self.n_dims == 3:
                h = F.concat([
                    h[:, :, 0:down_conv.shape[2], 0:down_conv.shape[3],
                      0:down_conv.shape[4]], down_conv
                ])  # fuse layer
            del down_conv

            h = self['up_unet_block_%d' % i](h)
            h = self.up_conv_dropout(h)

            if i == 1:
                o = self['up_conv%d_3' % i](h)
                if self.n_dims == 2:
                    score = o[:, :, 0:x.shape[2], 0:x.shape[3]]
                elif self.n_dims == 3:
                    score = o[:, :, 0:x.shape[2], 0:x.shape[3], 0:x.shape[4]]

                self.score = score

        del h, o  # clear hidden layer

        return self.score
Example #20
 def __call__(self, x):
     x = F.identity(x)
     return self.bn(x)
Example #21
 def __call__(self, x):
     x = F.identity(x)
     self.args[self.input_argname] = x
     return self.ops(**self.args)