Example #1
    def forward(self, f, corr_feature, pos):
        p0 = F.pad(f[0], ((0, 0), (0, 0), (16, 16), (16, 16)), 'constant')
        p0 = p0[:, :, 4 * pos[0]:4 * pos[0] + 61, 4 * pos[1]:4 * pos[1] + 61]

        p1 = F.pad(f[1], ((0, 0), (0, 0), (8, 8), (8, 8)), 'constant')
        p1 = p1[:, :, 2 * pos[0]:2 * pos[0] + 31, 2 * pos[1]:2 * pos[1] + 31]

        p2 = F.pad(f[2], ((0, 0), (0, 0), (4, 4), (4, 4)), 'constant')
        p2 = p2[:, :, pos[0]:pos[0] + 15, pos[1]:pos[1] + 15]

        p3 = corr_feature[:, :, pos[0], pos[1]].reshape((-1, 256, 1, 1))

        out = self.deconv(p3)
        # NOTE: in the original PyTorch implementation the resizes below use 'nearest' interpolation
        out = self.h2(out) + self.v2(p2)
        out = self.post0(
            resize_images(out, (31, 31), align_corners=False, mode='nearest'))
        out = self.h1(out) + self.v1(p1)
        out = self.post1(
            resize_images(out, (61, 61), align_corners=False, mode='nearest'))
        out = self.h0(out) + self.v0(p0)
        out = self.post2(
            resize_images(out, (127, 127), align_corners=False,
                          mode='nearest'))

        return out.reshape((-1, 127**2))
Example #2
    def __init__(self):
        super(ResnetBlock, self).__init__()

        initialW = chainer.initializers.Normal(scale=0.02)
        with self.init_scope():
            self.l0 = lambda x: F.pad(x, [(0, 0), (0, 0), (1, 1), (1, 1)],
                                      mode='reflect')
            self.l1 = L.Convolution2D(256,
                                      256,
                                      ksize=3,
                                      stride=1,
                                      initialW=initialW)
            self.l2 = InstanceNormalization(256, decay=0.9, eps=1e-05)
            self.l3 = lambda x: F.relu(x)
            self.l4 = lambda x: F.pad(x, [(0, 0), (0, 0), (1, 1), (1, 1)],
                                      mode='reflect')
            self.l5 = L.Convolution2D(256,
                                      256,
                                      ksize=3,
                                      stride=1,
                                      initialW=initialW)
            self.l6 = InstanceNormalization(256, decay=0.9, eps=1e-05)

        self.functions = []
        for i in range(0, 7):
            self.functions.append(getattr(self, 'l{:d}'.format(i)))
Example #3
def align_speaker(ys, ts):
    """Align shapes, as the reported number of speakers can be more or less.

    Both predictions and references are zero-padded along the speaker axis to
    the larger speaker count of each pair.

    Args:
     ys: B-length list of predictions
     ts: B-length list of reference labels

    Returns:
     ys: Aligned B-length list of predictions
     ts: Aligned B-length list of reference labels
    """
    num_speakers = [max(y.shape[1], t.shape[1]) for y, t in zip(ys, ts)]
    ys = [
        F.pad(y, ((0, 0), (0, n_spk - y.shape[1])),
              'constant',
              constant_values=0) for y, n_spk in zip(ys, num_speakers)
    ]
    ts = [
        F.cast(
            F.pad(F.cast(t, 'f'), ((0, 0), (0, n_spk - t.shape[1])),
                  'constant',
                  constant_values=0), 'i').array
        for t, n_spk in zip(ts, num_speakers)
    ]
    return ys, ts
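
A minimal usage sketch (assuming numpy is available and chainer.functions is imported as F, as in the snippet): a prediction with 2 speakers and a reference with 3 speakers are both zero-padded to 3 columns.

import numpy as np

y = np.random.rand(5, 2).astype(np.float32)   # 5 frames, 2 predicted speakers
t = np.zeros((5, 3), dtype=np.int32)          # 5 frames, 3 reference speakers
ys, ts = align_speaker([y], [t])
print(ys[0].shape, ts[0].shape)               # (5, 3) (5, 3)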
Example #4
 def __call__(self, *_x, **kwargs):
     """
         Run the forward computation.
     """
     links = self.children()
     _y = F.transpose(_x[0], (0, 2, 1, 3))
     _y = F.leaky_relu(self.e(_y))
     _y = F.reshape(_y, (_y.shape[0], _y.shape[1], 5, 10))
     # 1
     _h = F.pad(_y, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
     _h = F.leaky_relu(self.c11(_h))
     _h = F.pad(_h, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
     _y = F.leaky_relu(self.c12(_h)) + _y  # residual add feeds the next stage
     _y = F.reshape(_y, (_y.shape[0], _y.shape[1], 10, 5))
     # 2
     _h = F.pad(_y, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
     _h = F.leaky_relu(self.c21(_h))
     _h = F.pad(_h, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
     _y = F.leaky_relu(self.c22(_h)) + _y  # residual add feeds the next stage
     _y = F.reshape(_y, (_y.shape[0], _y.shape[1], 25, 2))
     # 3
     _h = F.pad(_y, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
     _h = F.leaky_relu(self.c31(_h))
     _h = F.pad(_h, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
     _y = F.leaky_relu(self.c32(_h)) + _y  # residual add feeds the next stage
     _y = F.reshape(_y, (_y.shape[0], _y.shape[1], 50, 1))
     # 4 
     _h = F.pad(_y, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
     _h = F.leaky_relu(self.c41(_h))
     _h = F.pad(_h, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
     _y = F.leaky_relu(self.c42(_h)) + _y  # residual add feeds the final projection
     _y = self.d(_y)
     _y = F.transpose(_y, (0, 2, 1, 3))
     return _y
Example #5
    def __call__(self, encs, hiddens, batch_size, prev_image, num_masks, color_channels):
        """
            Learn through StatelessDNA.
            Args:
                encs: An array of computed transformation
                hiddens: An array of hidden layers
                batch_size: Size of mini batches
                prev_image: The image to transform
                num_masks: Number of masks to apply
                color_channels: Output color channels
            Returns:
                transformed: A list of masks to apply on the previous image
        """
        logger = logging.getLogger(__name__)
        
        enc0, enc1, enc2, enc3, enc4, enc5, enc6 = encs
        hidden1, hidden2, hidden3, hidden4, hidden5, hidden6, hidden7 = hiddens

        # DNA specific
        enc7 = self.enc7(enc6)
        enc7 = F.relu(enc7)
        if num_masks != 1:
            raise ValueError('Only one mask is supported for DNA model.')

        # Construct translated images
        img_height = prev_image.shape[2]
        img_width = prev_image.shape[3]
        prev_image_pad = F.pad(prev_image, pad_width=[[0,0], [0,0], [2,2], [2,2]], mode='constant', constant_values=0)
        kernel_inputs = []
        for xkern in range(DNA_KERN_SIZE):
            for ykern in range(DNA_KERN_SIZE):
                #tmp = F.get_item(prev_image_pad, list([slice(0,prev_image_pad.shape[0]), slice(0,prev_image_pad.shape[1]), slice(xkern,img_height), slice(ykern,img_width)]))
                tmp = prev_image_pad[:,:,xkern:img_height, ykern:img_width]
                # ** Added this operation to make sure the size was still the original one!
                tmp = F.pad(tmp, [[0,0], [0,0], [0, xkern], [0, ykern]], mode='constant', constant_values=0)
                tmp = F.expand_dims(tmp, axis=1)  # channels are on axis 1 here (NCHW), not axis 3 as in the original NHWC code
                kernel_inputs.append(tmp.data)
        kernel_inputs = F.concat(kernel_inputs, axis=1)  # channels are on axis 1 here, not axis 3 as in the original NHWC code

        # Normalize channels to 1
        kernel_normalized = F.relu(enc7 - RELU_SHIFT) + RELU_SHIFT
        kernel_normalized_sum = F.sum(kernel_normalized, axis=1, keepdims=True)  # channels are on axis 1 here, not axis 3 as in the original NHWC code
        kernel_normalized = broadcasted_division(kernel_normalized, kernel_normalized_sum)
        kernel_normalized = F.expand_dims(kernel_normalized, axis=2)
        #kernel_normalized = F.scale(kernel_inputs, kernel_normalized, axis=0)
        kernel_normalized = broadcast_scale(kernel_inputs, kernel_normalized)
        kernel_normalized = F.sum(kernel_normalized, axis=1, keepdims=False)
        transformed = [kernel_normalized]

        return transformed, enc7
Example #6
    def __init__(self):
        super(ResnetGenerator, self).__init__()

        initialW = chainer.initializers.Normal(scale=0.02)
        with self.init_scope():
            self.l0 = lambda x: F.pad(x, [(0, 0), (0, 0), (3, 3), (3, 3)],
                                      mode='reflect')
            self.l1 = L.Convolution2D(3, 64, ksize=7, stride=1,
                                      initialW=initialW)
            # Chainer <-> PyTorch
            # * decay=0.9 <-> momentum=0.1
            #   (FIXME: https://github.com/keras-team/keras/issues/6839)
            # * use_gamma=False, use_beta=False <-> affine=False
            self.l2 = InstanceNormalization(64, decay=0.9, eps=1e-05)
            self.l3 = lambda x: F.relu(x)
            self.l4 = L.Convolution2D(64, 128, ksize=3, stride=2, pad=1,
                                      initialW=initialW)
            self.l5 = InstanceNormalization(128, decay=0.9, eps=1e-05)
            self.l6 = lambda x: F.relu(x)
            self.l7 = L.Convolution2D(128, 256, ksize=3, stride=2, pad=1,
                                      initialW=initialW)
            self.l8 = InstanceNormalization(256, decay=0.9, eps=1e-05)
            self.l9 = lambda x: F.relu(x)
            self.l10 = ResnetBlock()
            self.l11 = ResnetBlock()
            self.l12 = ResnetBlock()
            self.l13 = ResnetBlock()
            self.l14 = ResnetBlock()
            self.l15 = ResnetBlock()
            self.l16 = ResnetBlock()
            self.l17 = ResnetBlock()
            self.l18 = ResnetBlock()
            self.l19 = L.Deconvolution2D(256, 128, ksize=3, stride=2, pad=1,
                                         initialW=initialW)
            self.l20 = InstanceNormalization(128, decay=0.9, eps=1e-05)
            self.l21 = lambda x: F.relu(x)
            self.l22 = L.Deconvolution2D(128, 64, ksize=3, stride=2, pad=1,
                                         initialW=initialW)
            self.l23 = InstanceNormalization(64, decay=0.9, eps=1e-05)
            self.l24 = lambda x: F.relu(x)
            self.l25 = lambda x: F.pad(x, [(0, 0), (0, 0), (3, 3), (3, 3)],
                                       mode='reflect')
            self.l26 = L.Convolution2D(64, 3, ksize=7, stride=1,
                                       initialW=initialW)
            self.l27 = lambda x: F.tanh(x)

        self.functions = []
        for i in range(0, 28):
            self.functions.append(getattr(self, 'l{:d}'.format(i)))
Example #7
 def __call__(self, x):
     batch, channels, height, width = x.shape
     h1 = x.reshape(batch * channels, 1, height, width)
     h2 = pad(h1, ((0, 0), (0, 0), (2, 2), (2, 2)), mode="symmetric")
     h3 = convolution_2d(h2, self.xp.asarray(self.w))
     h4 = depth2space(h3, 2)
     return h4.reshape(batch, channels, height * 2, width * 2)
Example #8
 def forward(self, inputs, device):
     x, = inputs
     y = functions.pad(x,
                       self.pad_width,
                       mode=self.mode,
                       constant_values=self.constant_values)
     return y,
Example #9
 def _fixed_padding(self, inputs, kernel_size, rate=1):
     '''
     ""Pads the input along the spatial dimensions independently of input size.
       Pads the input such that if it was used in a convolution with 'VALID' padding,
       the output would have the same dimensions as if the unpadded input was used
       in a convolution with 'SAME' padding.
       Args:
         inputs: A tensor of size [batch, height_in, width_in, channels].
         kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
         rate: An integer, rate for atrous convolution.
       Returns:
         output: A tensor of size [batch, height_out, width_out, channels] with the
           input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
     '''
     kernel_size_effective = [
         kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
         kernel_size[1] + (kernel_size[1] - 1) * (rate - 1)
     ]
     pad_total = [
         kernel_size_effective[0] - 1, kernel_size_effective[1] - 1
     ]
     pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
     pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
     padded_inputs = F.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]],
                                    [pad_beg[1], pad_end[1]], [0, 0]],
                           mode='constant')
     return padded_inputs
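
A standalone sketch of the same padding arithmetic (a hypothetical rewrite outside the class, assuming NHWC layout as in the snippet): padding a 10x10 input for a 3x3 kernel gives 12x12, so a 'VALID' convolution returns the original 10x10, emulating 'SAME'.

import numpy as np
import chainer.functions as F

x = np.zeros((1, 10, 10, 3), dtype=np.float32)
ksize, rate = 3, 1
k_eff = ksize + (ksize - 1) * (rate - 1)
pad_total = k_eff - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded = F.pad(x, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]],
               mode='constant')
print(padded.shape)  # (1, 12, 12, 3); a VALID 3x3 conv on this yields 10x10 again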
Example #10
 def __init__(self, function, inputs, outputs):
     super(ConvertConvolution2D, self).__init__()
     [kh, kw] = function.params['W'].shape[2:]
     [sy, sx] = function.params['stride']
     [in_h, in_w] = inputs[0].shape[2:]
     [out_h, out_w] = outputs[0].shape[2:]
     [ph_pre, ph_post] = function.params['pad_h']
     [pw_pre, pw_post] = function.params['pad_w']
     ph_post = max(
         ph_post - ((ph_pre + in_h + ph_post) - ((out_h - 1) * sy + kh)), 0)
     pw_post = max(
         pw_post - ((pw_pre + in_w + pw_post) - ((out_w - 1) * sx + kw)), 0)
     padding = [(0, 0), (0, 0), (ph_pre, ph_post), (pw_pre, pw_post)]
     self.pad = lambda x: F.pad(
         x, padding, mode='constant', constant_values=0.0)
     with self.init_scope():
         self.conv = L.Convolution2D(
             in_channels=function.params['W'].shape[1] *
             function.params['groups'],
             out_channels=function.params['W'].shape[0],
             ksize=tuple(function.params['W'].shape[2:]),
             stride=tuple(function.params['stride']),
             pad=0,
             nobias=(function.params['b'] is None),
             initialW=function.params['W'],
             initial_bias=function.params['b'],
             dilate=tuple(function.params['dilate']),
             groups=function.params['groups'])
Example #11
File: scae.py  Project: elda27/deepnet
    def apply(self, layer_name, h, stores=None):
        if self.use_skip_connection and layer_name in self.combine_layers:
            assert stores is not None

            source = stores[layer_name]

            assert h.shape[1] == source.shape[1], \
                'Unmatched num units\nDecoding unit:{}, Encoded unit:{}'.format(
                    h.shape, source.shape)

            if all(
                [hs > fhs for hs, fhs in zip(h.shape[2:], source.shape[2:])]):
                # Decoding image is larger than encoder image
                padding_pix = (np.array(h.shape[2:]) -
                               np.array(source.shape[2:])) / 2
                pads = [(0, 0),
                        (0, 0)] + [(int(math.floor(pix)), int(math.ceil(pix)))
                                   for pix in padding_pix]
                source = F.pad(source, pads, 'constant')
            else:
                source = utils.crop(source, h.shape, self.n_dim)

            h = F.concat((h, source), axis=1)

        h = self.layers[layer_name](h)

        if layer_name in self.store_params:
            self.stores[layer_name] = h
        return h
Example #12
    def case_for_relation(self, neighborWeight, neighbor, assign, Rs,
                          relationsT):

        neighborR = list()
        #print len(neighbor)
        for i, t in enumerate(neighbor):
            #print t.shape
            t = F.reshape(t, (2, -1))
            t = F.pad(t, ((0, 0), (0, 1)), 'constant')
            t = F.reshape(t, (1, 1, 2, -1))
            #print t.shape
            t = getattr(self, self.forwardR[0][0])(t)
            #print t.shape

            t = F.reshape(t, (1, -1))
            #print t.shape
            neighborR.append(t)

        resultT = list()
        for i, r in enumerate(Rs):
            Rlist = assign[i]
            templist = list()
            sumWeight = 0
            for x in Rlist:
                templist.append(neighborR[x] * neighborWeight[x])
                sumWeight = sumWeight + neighborWeight[x]
            resultT.append(sum(templist) / sumWeight)

        result = F.concat(resultT, axis=0)
        return result
Example #13
def distort_points(coef, x):
    """Apply distortion to given points.

    Args:
        coef (:class `~chainer.Variable` or :ref:`ndarray`):
            Distortion coefficients.
            A 2-D array of shape `(B, K)`
            K is 4, 5, or 8. The elements correspond to
            (k1, k2, p1, p2, [k3, [k4, k5, k6]])
            respectively.

        x (:class `~chainer.Variable` or :ref:`ndarray`):
            A 3-D array of shape `(B, 2, N)`
            
    Returns:
        ~chainer.Variable:
            A 3-D array of shape `(B, 2, N)`
    """
    xp = backend.get_array_module(x)
    _, K = coef.shape
    if K < 8:
        coef = F.pad(coef, ((0, 0), (0, 8 - K)), 'constant')
    coef = coef[:, :, None]

    # Compute
    # f = (1 + k1r^2 + k2r^4 + k3r^6) / (1 + k4r^2 + k5r^4 + k6r^6)
    r2 = F.sum(x * x, 1, keepdims=True)  # r^2
    f = (1 + r2 * (coef[:, 0:1] + r2 * (coef[:, 1:2] + r2 * coef[:, 4:5]))) / \
        (1 + r2 * (coef[:, 5:6] + r2 * (coef[:, 6:7] + r2 * coef[:, 7:8])))

    xy = F.prod(x, 1, keepdims=True)

    return x * f + 2 * xy * coef[:, 2:4] + coef[:, 3:1:-1] * (r2 + 2 * x * x)
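
A quick sanity check (assuming the snippet's imports, chainer.functions as F and chainer.backend, are in scope): with all-zero coefficients the radial and tangential terms vanish and the function returns the points unchanged.

import numpy as np

coef = np.zeros((1, 5), dtype=np.float32)          # k1 = k2 = p1 = p2 = k3 = 0
x = np.random.randn(1, 2, 10).astype(np.float32)   # 10 points per batch element
y = distort_points(coef, x)
np.testing.assert_allclose(y.array, x)             # identity mapping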
Example #14
    def __call__(self, X):
        h0 = F.pad(X, ((0, 0), (0, 0), (0, 0), (37, 37)),
                   'constant')  # (1, 96, 1366) -> (1, 96, 1440)
        h1 = F.transpose(self.norm0(F.transpose(h0, axes=(0, 3, 1, 2))),
                         axes=(0, 2, 3, 1))  # is normalizing along the time axis OK?
        h1 = F.max_pooling_2d(F.elu(self.norm1(self.conv1(h1))), (2, 2),
                              stride=(2, 2))
        h1 = F.dropout(h1, ratio=0.1)
        h2 = F.max_pooling_2d(F.elu(self.norm2(self.conv2(h1))), (3, 3),
                              stride=(3, 3))
        h2 = F.dropout(h2, ratio=0.1)
        h3 = F.max_pooling_2d(F.elu(self.norm3(self.conv3(h2))), (4, 4),
                              stride=(4, 4))
        h3 = F.dropout(h3, ratio=0.1)
        h4 = F.max_pooling_2d(F.elu(self.norm4(self.conv4(h3))), (4, 4),
                              stride=(4, 4))
        h4 = F.dropout(h4, ratio=0.1)
        h4 = F.transpose(h4, axes=(0, 3, 1, 2))
        h4 = F.reshape(h4, (h4.shape[0], 15, 128))

        self.gru1.reset_state()  # reset hidden states per track; is this OK?
        self.gru2.reset_state()  # reset hidden states per track; is this OK?
        for i in range(h4.shape[1]):
            h5 = self.gru1(h4[:, i, :])
            h6 = self.gru2(h5)

        h6 = F.dropout(h6, ratio=0.3)
        h7 = F.sigmoid(self.fc1(h6))

        return h7
Example #15
    def __call__(self, x):
        y = F.split_axis(
            self[0](F.pad(x, ((0, 0), (0, 0), (0, 0), (self[0].dilate[1], 0)),
                          'constant')), 2, 1)
        y = F.split_axis(self[1](F.sigmoid(y[0]) * F.tanh(y[1])), (61, ), 1)

        return x + y[0], y[1]
Example #16
 def check_forward(self, x_data):
     y = functions.pad(x_data, self.pad_width, mode=self.mode,
                       constant_values=self.constant_values)
     y_expected = numpy.pad(self.x, self.pad_width, mode=self.mode,
                            constant_values=self.constant_values)
     self.assertEqual(y.dtype, y_expected.dtype)
     testing.assert_allclose(y.data, y_expected)
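
The test above relies on chainer.functions.pad following numpy.pad semantics; a minimal illustration of that equivalence:

import numpy as np
import chainer.functions as F

x = np.arange(6, dtype=np.float32).reshape(2, 3)
pad_width = ((1, 0), (0, 2))
y = F.pad(x, pad_width, mode='constant', constant_values=-1)
expected = np.pad(x, pad_width, mode='constant', constant_values=-1)
np.testing.assert_allclose(y.array, expected)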
Example #17
def batch_global_rigid_transformation(Rs, Js, parent, rotate_base=False):
    """
    Computes absolute joint locations given pose.

    rotate_base: if True, rotates the global rotation by 90 deg in x axis.
    if False, this is the original SMPL coordinate.

    Args:
      Rs: N x 24 x 3 x 3 rotation vector of K joints
      Js: N x 24 x 3, joint locations before posing
      parent: 24 holding the parent id for each index

    Returns
      new_J : `Tensor`: N x 24 x 3 location of absolute joints
      A     : `Tensor`: N x 24 x 4 x 4 relative joint transformations for LBS.
    """
    xp = Rs.xp
    N = Rs.shape[0]
    if rotate_base:
        print('Flipping the SMPL coordinate frame!!!!')
        rot_x = Variable(xp.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]], dtype=Rs.dtype))
        rot_x = F.reshape(F.tile(rot_x, [N, 1]), [N, 3, 3])
        root_rotation = F.matmul(Rs[:, 0, :, :], rot_x)
    else:
        root_rotation = Rs[:, 0, :, :]

    # Now Js is N x 24 x 3 x 1
    Js = F.expand_dims(Js, -1)

    def make_A(R, t, name=None):
        # Rs is N x 3 x 3, ts is N x 3 x 1
        R_homo = F.pad(R, [[0, 0], [0, 1], [0, 0]], 'constant')
        t_homo = F.concat([t, xp.ones([N, 1, 1], 'f')], 1)
        return F.concat([R_homo, t_homo], 2)

    A0 = make_A(root_rotation, Js[:, 0])
    results = [A0]
    for i in range(1, parent.shape[0]):
        j_here = Js[:, i] - Js[:, parent[i]]
        A_here = make_A(Rs[:, i], j_here)
        res_here = F.matmul(results[parent[i]], A_here)
        results.append(res_here)

    # 10 x 24 x 4 x 4
    results = F.stack(results, axis=1)

    new_J = results[:, :, :3, 3]

    # --- Compute relative A: Skinning is based on
    # how much the bone moved (not the final location of the bone)
    # but (final_bone - init_bone)
    # ---
    Js_w0 = F.concat([Js, xp.zeros([N, 24, 1, 1], 'f')], 2)
    init_bone = F.matmul(results, Js_w0)
    # Append empty 4 x 3:
    init_bone = F.pad(init_bone, [[0, 0], [0, 0], [0, 0], [3, 0]], 'constant')
    A = results - init_bone

    return new_J, A
Example #18
    def __call__(self, x):
        h = F.pad(x, self.PAD_WIDTH_3, mode='reflect')
        h = F.relu(self.c7s1_32_inorm(self.c7s1_32_conv(h)))
        h = F.relu(self.d64_inorm(self.d64_conv(h)))
        h = F.relu(self.d128_inorm(self.d128_conv(h)))
        h = self.r_blocks(h)

        h = self.u64_dconv(h)
        h = F.pad(h, self.U64_PAD_WIDTH, 'constant', constant_values=0)
        h = F.relu(self.u64_inorm(h))
        h = self.u32_dconv(h)
        h = F.pad(h, self.U32_PAD_WIDTH, 'constant', constant_values=0)
        h = F.relu(self.u32_inorm(h))

        h = F.pad(h, self.PAD_WIDTH_3, mode='reflect')
        h = F.relu(self.c7s1_3_inorm(self.c7s1_3_conv(h)))
        return h
Example #19
    def forward(self, x):
        if self.W.array is None:
            self._initialize_params(x.shape[1])

        pad_width = [(0, 0), (0, 0)] + list(map(lambda x: (x, x), self.pad))
        x = F.pad(x, pad_width, self.pad_mode)

        return F.depthwise_convolution_2d(x, self.W, self.b, self.stride, 0)
Example #20
 def __call__(self, x):
     if self.tf_mode:
         x = F.pad(x,
                   pad_width=calc_tf_padding(x, kernel_size=3, stride=2),
                   mode="constant",
                   constant_values=0)
     x = self.conv(x)
     return x
Example #21
 def __call__(self, x):
     if self.reflect and self.pad > 0:
         return self.conv(
             pad(self.c * x, ((0, 0), (0, 0), (self.pad, self.pad),
                              (self.pad, self.pad)),
                 mode="symmetric"))
     else:
         return self.conv(self.c * x)
Example #22
 def __call__(self, x):
     identity = x
     x = self.body(x)
     if self.resize_identity:
         identity = self.identity_pool(identity)
         channels = identity.shape[1]
         identity = F.pad(identity, pad_width=((0, 0), (0, channels), (0, 0), (0, 0)), mode="constant", constant_values=0)
     x = x + identity
     return x
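
The identity branch here is widened by zero-padding the channel axis so it can be added to the doubled-channel body output; a minimal sketch of that padding on its own:

import numpy as np
import chainer.functions as F

identity = np.zeros((1, 16, 8, 8), dtype=np.float32)
channels = identity.shape[1]
widened = F.pad(identity, pad_width=((0, 0), (0, channels), (0, 0), (0, 0)),
                mode="constant", constant_values=0)
print(widened.shape)  # (1, 32, 8, 8): the extra channels are all zeros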
Example #23
    def _predict_labels(self, sent_states, pred_heads, gold_heads, batch_stats,
                        sorted_labels=None):
        """Predict the label for each of the arcs predicted in _predict_heads."""
        batch_size, max_sent_len, col_lengths = batch_stats

        calc_loss = sorted_labels is not None
        if calc_loss:
            labels = self.encoder.transpose_batch(sorted_labels)

        u_lbl = self.U_lbl(sent_states)
        u_lbl = F.reshape(u_lbl, (-1, batch_size, self.mlp_lbl_units))

        w_lbl = self.W_lbl(sent_states)
        w_lbl = F.reshape(w_lbl, (-1, batch_size, self.mlp_lbl_units))

        sent_lbls = []
        # we start from 1 because we don't consider root
        for i in range(1, max_sent_len):
            # num_active
            num_active = col_lengths[i]
            # if we are calculating loss create truth variables
            if calc_loss:
                # i-1 because sentence has root appended to beginning
                gold_labels = labels[i-1]

                true_heads = gold_heads[i-1]
            arc_pred = pred_heads[i-1]

            # ================== LABEL PREDICTION ======================
            # TODO: maybe we should use arc_pred sometimes in training??
            # NOTE: gold_heads values after num_active gets mutated here
            # make sure you don't use ignore_label in softmax - even if ok in forward
            # it will be wrong in backprop (we limit to :self.num_active)
            # gh_copy = self.xp.copy(gold_heads.data)
            head_indices = true_heads.data if chainer.config.train else arc_pred
            head_indices = head_indices[:num_active]

            l_heads = u_lbl[head_indices, self.xp.arange(len(head_indices)), :]
            l_w = w_lbl[i][:num_active]
            UWl = F.reshape(F.tanh(l_heads + l_w), (-1, self.mlp_lbl_units))

            if self.lbl_dropout > 0.:
                UWl = F.dropout(UWl, ratio=self.lbl_dropout)

            lbls = self.V_lblT(UWl)

            # Calculate losses
            if calc_loss:
                label_loss = F.sum(F.softmax_cross_entropy(lbls[:num_active], gold_labels[:num_active], reduce='no'))
                self.loss += label_loss

            reshaped_lbls = F.reshape(lbls, (num_active, -1, 1))
            reshaped_lbls = F.pad(reshaped_lbls,
                    ((0, batch_size - num_active),(0,0), (0,0)), 'constant')
            sent_lbls.append(reshaped_lbls)
        lbls = F.concat(sent_lbls, axis=2)
        return lbls
Example #24
 def __call__(self, x):
     _, _, in_h, in_w = x.shape
     self._compute_outsize(in_h, in_w)
     self._compute_padsize(in_h, in_w, self.out_h, self.out_w)
     x = F.pad(x, ((0, 0), (0, 0), (self.ph_mid, self.ph - self.ph_mid),
                   (self.pw_mid, self.pw - self.pw_mid)),
               mode='constant')
     h = self.conv(x)
     return F.depth2space(h, self.r)
Example #25
    def __call__(self, *_x, **kwargs):
        """
            Graph implementation of the model.
            Parameter
            ---------
            x: ndarray(tuple)
                Input features (before conversion)
                shape: [N,1025,200,1]
            Returns
            -------
            _y: ndarray
                Output features (after conversion)
                shape: [N,1025,200,1]
        """
        links = self.children()
        _y = F.transpose(_x[0], (0, 2, 1, 3))
        _y = self.e(_y)
        _y = F.leaky_relu(_y)
        _y = F.reshape(_y, (_y.shape[0], _y.shape[1], 5, 10))
        # 1
        _h = F.pad(_y, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
        _h = self.c11(_h)
        _h = F.leaky_relu(_h)
        _h = F.pad(_h, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
        _h = self.c12(_h)
        _y = F.leaky_relu(_h) + _y
        _y = F.reshape(_y, (_y.shape[0], _y.shape[1], 10, 5))
        # 2
        _h = F.pad(_y, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
        _h = self.c21(_h)
        _h = F.leaky_relu(_h)
        _h = F.pad(_h, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
        _h = self.c22(_h)
        _y = F.leaky_relu(_h) + _y
        _y = F.reshape(_y, (_y.shape[0], _y.shape[1], 25, 2))
        # 3
        _h = F.pad(_y, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
        _h = self.c31(_h)
        _h = F.leaky_relu(_h)
        _h = F.pad(_h, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
        _h = self.c32(_h)
        _y = F.leaky_relu(_h) + _y
        _y = F.reshape(_y, (_y.shape[0], _y.shape[1], 50, 1))
        # 4 
        _h = F.pad(_y, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
        _h = self.c41(_h)
        _h = F.leaky_relu(_h)
        _h = F.pad(_h, pad_width=((0, 0), (0, 0), (4, 0), (0, 0)), mode="constant") 
        _h = self.c42(_h)
        _y = F.leaky_relu(_h) + _y

        _y = self.d(_y)
        _y = F.transpose(_y, (0, 2, 1, 3))
        return _y
Example #26
 def __call__(self, x):
     identity = x
     x = self.body(x)
     x = self.bn(x)
     if self.resize_identity:
         identity = self.identity_pool(identity)
     if self.identity_pad_width is not None:
         identity = F.pad(identity, pad_width=self.identity_pad_width, mode="constant", constant_values=0)
     x = x + identity
     return x
Example #27
 def __call__(self, x):
     x1 = self.pool(x)
     x1 = self.conv1(x1)
     x2 = x[:, :, :-1, :-1]
     x2 = F.pad(x2, pad_width=((0, 0), (0, 0), (1, 0), (1, 0)), mode="constant", constant_values=0)
     x2 = self.pool(x2)
     x2 = self.conv2(x2)
     x = F.concat((x1, x2), axis=1)
     x = self.bn(x)
     return x
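
Cropping the last row and column and re-padding on the top/left shifts the feature map by one pixel before the second pooling branch; a small numpy/Chainer sketch of the effect:

import numpy as np
import chainer.functions as F

x = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
x2 = x[:, :, :-1, :-1]
x2 = F.pad(x2, pad_width=((0, 0), (0, 0), (1, 0), (1, 0)),
           mode="constant", constant_values=0)
print(x2.shape)        # (1, 1, 4, 4): same size as x
print(x2.array[0, 0])  # contents shifted down/right by one pixel, zero border on top/left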
Example #28
 def __call__(self, x):
     if self.pad_type == 'reflect':
         h = F.pad(
             x,
             [[0, 0], [0, 0], [self.pad, self.pad], [self.pad, self.pad]],
             mode='reflect')
     else:
         h = F.pad(
             x,
             [[0, 0], [0, 0], [self.pad, self.pad], [self.pad, self.pad]],
             mode='constant',
             constant_values=0)
     if self.equalised:
         b, c, _, _ = h.shape
         inv_c = np.sqrt(2.0 / c) / self.ksize
         h = inv_c * h
     if self.separable:
         return self.pointwise(self.depthwise(h))
     return self.c(h)
Example #29
def _tf_padding(x, ksize, stride, tf_padding):
    pad_2 = _get_pad(x.shape[2], ksize[0], stride[0], tf_padding)
    pad_3 = _get_pad(x.shape[3], ksize[1], stride[1], tf_padding)
    if pad_2 or pad_3:
        return pad(x, ((0, 0), (0, 0),
                       (pad_2 // 2, int(np.ceil(float(pad_2) / 2))),
                       (pad_3 // 2, int(np.ceil(float(pad_3) / 2)))),
                   mode='constant')
    else:
        return x
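
The helper _get_pad is not shown in this snippet; a plausible implementation, assuming standard TensorFlow 'SAME' semantics (the project's actual helper may differ):

import numpy as np

def _get_pad(in_size, ksize, stride, tf_padding):
    if tf_padding == 'SAME':
        out_size = int(np.ceil(float(in_size) / stride))
        return max((out_size - 1) * stride + ksize - in_size, 0)
    return 0  # 'VALID' adds no padding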
Example #30
    def __call__(self, x):
        pd = _get_same_padding(x.shape[2:4], self.ksize, self.stride)
        pd = ((0, 0), (0, 0), pd[0], pd[1])
        h = F.pad(x, pd, mode='constant', constant_values=0)
        h = self.conv(h)
        h = self.bn(h)
        if self.activation_fn is not None:
            h = self.activation_fn(h)

        return h
Example #31
 def __call__(self, x):
     if self.lanczos:
         p = self.n - 1
         batch, channels, height, width = x.shape
         h1 = x.reshape(batch * channels, 1, height, width)
         h2 = pad(h1, ((0, 0), (0, 0), (p, p), (p, p)), mode="symmetric")
         h3 = convolution_2d(h2, self.xp.asarray(self.w), stride=2)
         return h3.reshape(batch, channels, height // 2, width // 2)
     else:
         return average_pooling_2d(x, ksize=2, stride=2)
Example #32
 def f(x):
     return functions.pad(x, pad_width=self.pad_width, mode=self.mode)
Example #33
def batch_global_rigid_transformation(Rs, Js, parent, rotate_base=False):
    """
    Computes absolute joint locations given pose.

    rotate_base: if True, rotates the global rotation by 90 deg in x axis.
    if False, this is the original SMPL coordinate.

    Args:
      Rs: N x 24 x 3 x 3 rotation vector of K joints
      Js: N x 24 x 3, joint locations before posing
      parent: 24 holding the parent id for each index

    Returns
      new_J : `Tensor`: N x 24 x 3 location of absolute joints
      A     : `Tensor`: N x 24 x 4 x 4 relative joint transformations for LBS.
    """
    xp = Rs.xp
    N = Rs.shape[0]
    if rotate_base:
        print('Flipping the SMPL coordinate frame!!!!')
        rot_x = Variable(
            xp.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]], dtype=Rs.dtype))
        rot_x = F.reshape(F.tile(rot_x, [N, 1]), [N, 3, 3])
        root_rotation = F.matmul(Rs[:, 0, :, :], rot_x)
    else:
        root_rotation = Rs[:, 0, :, :]

    # Now Js is N x 24 x 3 x 1
    Js = F.expand_dims(Js, -1)

    def make_A(R, t, name=None):
        # Rs is N x 3 x 3, ts is N x 3 x 1
        R_homo = F.pad(R, [[0, 0], [0, 1], [0, 0]], 'constant')
        t_homo = F.concat([t, xp.ones([N, 1, 1], 'f')], 1)
        return F.concat([R_homo, t_homo], 2)

    A0 = make_A(root_rotation, Js[:, 0])
    results = [A0]
    for i in range(1, parent.shape[0]):
        j_here = Js[:, i] - Js[:, parent[i]]
        A_here = make_A(Rs[:, i], j_here)
        res_here = F.matmul(
            results[parent[i]], A_here)
        results.append(res_here)

    # 10 x 24 x 4 x 4
    results = F.stack(results, axis=1)

    new_J = results[:, :, :3, 3]

    # --- Compute relative A: Skinning is based on
    # how much the bone moved (not the final location of the bone)
    # but (final_bone - init_bone)
    # ---
    Js_w0 = F.concat([Js, xp.zeros([N, 24, 1, 1], 'f')], 2)
    init_bone = F.matmul(results, Js_w0)
    # Append empty 4 x 3:
    init_bone = F.pad(init_bone, [[0, 0], [0, 0], [0, 0], [3, 0]], 'constant')
    A = results - init_bone

    return new_J, A
Example #34
 def make_A(R, t, name=None):
     # Rs is N x 3 x 3, ts is N x 3 x 1
     R_homo = F.pad(R, [[0, 0], [0, 1], [0, 0]], 'constant')
     t_homo = F.concat([t, xp.ones([N, 1, 1], 'f')], 1)
     return F.concat([R_homo, t_homo], 2)
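
A standalone sketch of what make_A builds (the closure variables xp and N are bound explicitly here for illustration): the rotation is padded with a zero row and the translation gets a homogeneous 1, yielding N 4x4 transforms.

import numpy as np
import chainer.functions as F

xp, N = np, 2
R = np.tile(np.eye(3, dtype='f'), (N, 1, 1))              # N x 3 x 3
t = np.zeros((N, 3, 1), dtype='f')                        # N x 3 x 1

R_homo = F.pad(R, [[0, 0], [0, 1], [0, 0]], 'constant')   # N x 4 x 3, zero row appended
t_homo = F.concat([t, xp.ones([N, 1, 1], 'f')], 1)        # N x 4 x 1, homogeneous 1 appended
A = F.concat([R_homo, t_homo], 2)
print(A.shape)  # (2, 4, 4)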
Example #35
 def f(x):
     return functions.pad(
         x, pad_width=self.pad_width, mode=self.mode,
         constant_values=self.constant_values)
Example #36
 def forward(self, inputs, device):
     x, = inputs
     y = functions.pad(x, self.pad_width, self.mode)
     return y,
Example #37
 def forward(self, inputs, device):
     x, = inputs
     y = functions.pad(x, self.pad_width, mode=self.mode,
                       constant_values=self.constant_values)
     return y,
Example #38
 def f(x):
     y = functions.pad(x, pad_width=self.pad_width, mode=self.mode)
     return y * y