Code Example #1
def deconvolution_filter_grad_backward(inputs,
                                       base_axis=1,
                                       pad=None,
                                       stride=None,
                                       dilation=None,
                                       group=1,
                                       channel_last=False,
                                       output_padding=None):
    """
    Args:
      inputs (list of nn.Variable): Incoming grads/inputs to/of the forward function.
      kwargs (dict of arguments): Dictionary of the corresponding function arguments.

    Returns:
      list of Variable: Return the gradients wrt inputs of the corresponding function.
    """
    gdw = inputs[0]
    dy = inputs[1]
    x0 = inputs[2]

    ctx = nn.get_current_context()
    dfx = DeconvolutionDataGrad(ctx, base_axis, pad, stride, dilation, group,
                                channel_last, output_padding)
    dfx.xshape = x0.shape

    gdy = F.deconvolution(x0, gdw, None, base_axis, pad, stride, dilation,
                          group, channel_last, output_padding)
    gx0 = dfx(dy, gdw)
    return gdy, gx0
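
This filter-grad backward is what makes second derivatives through F.deconvolution possible. Below is a minimal sketch (not part of the original source) of such a double-backward computation via nn.grad; the shapes and the scalar loss are arbitrary illustrations, assuming nnabla's double-backward support for deconvolution.

import numpy as np
import nnabla as nn
import nnabla.functions as F

x = nn.Variable.from_numpy_array(
    np.random.randn(1, 2, 5, 5).astype(np.float32)).apply(need_grad=True)
w = nn.Variable.from_numpy_array(
    np.random.randn(2, 3, 3, 3).astype(np.float32)).apply(need_grad=True)
y = F.deconvolution(x, w)

# Build the backward graph symbolically; gx and gw are Variables.
gx, gw = nn.grad([F.sum(y)], [x, w])

# Differentiating a function of the filter gradient gw exercises the
# filter-grad backward defined above.
loss = F.sum(gw * gw)
loss.forward()
loss.backward()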
Code Example #2
def deconvolution(inp,
                  outmaps,
                  kernel,
                  pad=None,
                  stride=None,
                  dilation=None,
                  group=1,
                  w_init=None,
                  b_init=None,
                  base_axis=1,
                  fix_parameters=False,
                  rng=None,
                  with_bias=True):
    """
    Deconvolution layer.

    Args:
        inp (~nnabla.Variable): N-D array.
        outmaps (int): Number of deconvolution kernels (which is equal to the number of output channels). For example, to apply deconvolution on an input with 16 types of filters, specify 16.
        kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply deconvolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
        pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
        stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
        dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
        group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction.
        w_init (~nnabla.initializer.BaseInitializer): Initializer for weight.
        b_init (~nnabla.initializer.BaseInitializer): Initializer for bias.
        base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
        fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
        rng (numpy.random.RandomState): Random generator for Initializer.
        with_bias (bool): Specify whether to include the bias term.

    Returns:
        :class:`~nnabla.Variable`: N-D array.

    """
    if w_init is None:
        w_init = UniformInitializer(calc_uniform_lim_glorot(
            outmaps, inp.shape[base_axis], tuple(kernel)),
                                    rng=rng)
    if with_bias and b_init is None:
        b_init = ConstantInitializer()
    w = get_parameter_or_create("W", (inp.shape[base_axis], outmaps / group) +
                                tuple(kernel), w_init, not fix_parameters)
    b = None
    if with_bias:
        b = get_parameter_or_create("b", (outmaps, ), b_init,
                                    not fix_parameters)
    return F.deconvolution(inp, w, b, base_axis, pad, stride, dilation, group)
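
For reference, a minimal usage sketch (not part of the original source), assuming the function above is exposed as nnabla.parametric_functions.deconvolution:

import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((8, 16, 14, 14))  # (batch, channels, height, width)
with nn.parameter_scope("deconv1"):
    # Creates parameters "W" (and "b") under the scope "deconv1" and
    # roughly doubles the spatial resolution with stride 2.
    y = PF.deconvolution(x, outmaps=8, kernel=(4, 4), pad=(1, 1), stride=(2, 2))
print(y.shape)  # (8, 8, 28, 28): (14 - 1) * 2 - 2 * 1 + 4 = 28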
Code Example #3
File: test_stft.py  Project: sony/nnabla
def create_inv_window_func(window_type, window_size, stride, fft_size, length):
    w = create_window_func(window_type, window_size)

    # Padding with zero
    if window_size < fft_size:
        diff = fft_size - window_size
        w = np.pad(w, (diff // 2, diff - diff // 2), mode='constant')

    w = nn.Variable.from_numpy_array(w)
    w = w.reshape((1, 1, w.shape[0]))

    # Overlap-add for window
    ones = F.constant(1, (1, 1, (length - fft_size) // stride + 1))
    iw = F.deconvolution(ones, w * w, stride=(stride, ))
    iw.forward()

    # Flatten
    iw = np.reshape(iw.d, (iw.shape[2], ))

    return iw
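
The strided deconvolution of a constant-one input with kernel w * w is an overlap-add of the squared window at every hop position. A small NumPy cross-check of that equivalence (not part of the original test, parameters chosen arbitrarily):

import numpy as np

window_size = fft_size = 16
stride, length = 8, 64
w = np.hanning(window_size + 1)[:-1]  # periodic Hann window

expected = np.zeros(length)
n_frames = (length - fft_size) // stride + 1
for i in range(n_frames):
    # Each frame adds the squared window at its hop position, which is
    # exactly what the strided deconvolution above computes.
    expected[i * stride:i * stride + fft_size] += w * w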
Code Example #4
File: ops.py  Project: shikisawamura/nnabla-examples
def upsample_conv_2d(x, w, k=None, factor=2, gain=1):
    assert isinstance(factor, int) and factor >= 1

    # Check weight shape.
    assert w.ndim == 4
    convH = w.shape[2]
    convW = w.shape[3]
    assert convW == convH

    # Setup filter kernel.
    if k is None:
        k = [1] * factor
    k = _setup_kernel(k) * (gain * (factor**2))
    p = (k.shape[0] - factor) - (convW - 1)

    # Execute.
    w = w[:, :, ::-1, ::-1]
    x = F.deconvolution(x, w, stride=(factor, factor))
    return _simple_upfirdn_2d(x,
                              k,
                              pad0=(p + 1) // 2 + factor - 1,
                              pad1=p // 2 + 1)
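
The helpers _setup_kernel and _simple_upfirdn_2d are not shown in this snippet. As an assumption based on the StyleGAN2 upfirdn code that this port follows, _setup_kernel would expand a 1-D FIR kernel into a normalized 2-D separable filter, roughly:

import numpy as np

def _setup_kernel(k):
    # Hypothetical sketch: make the FIR kernel 2-D (separable outer product)
    # and normalize its taps to sum to one; the gain is applied by the caller.
    k = np.asarray(k, dtype=np.float32)
    if k.ndim == 1:
        k = np.outer(k, k)
    k /= np.sum(k)
    return k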
Code Example #5
File: parametric_functions.py  Project: zwsong/nnabla
def deconvolution(inp, outmaps, kernel,
                  pad=None, stride=None, dilation=None, group=1,
                  w_init=None, b_init=None,
                  base_axis=1, fix_parameters=False, rng=None, with_bias=True):
    """
    Deconvolution layer.

    Args:
        inp (~nnabla.Variable): N-D array.
        outmaps (int): Number of deconvolution kernels (which is equal to the number of output channels). For example, to apply deconvolution on an input with 16 types of filters, specify 16.
        kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply deconvolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
        pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
        stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
        dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
        group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction.
        w_init (~nnabla.initializer.BaseInitializer): Initializer for weight.
        b_init (~nnabla.initializer.BaseInitializer): Initializer for bias.
        base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
        fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
        rng (numpy.random.RandomState): Random generator for Initializer.
        with_bias (bool): Specify whether to include the bias term.

    Returns:
        :class:`~nnabla.Variable`: N-D array.

    """
    if w_init is None:
        w_init = UniformInitializer(
            calc_uniform_lim_glorot(outmaps, inp.shape[base_axis], tuple(kernel)), rng=rng)
    if with_bias and b_init is None:
        b_init = ConstantInitializer()
    w = get_parameter_or_create(
        "W", (inp.shape[base_axis], outmaps / group) + tuple(kernel),
        w_init, not fix_parameters)
    b = None
    if with_bias:
        b = get_parameter_or_create(
            "b", (outmaps,), b_init, not fix_parameters)
    return F.deconvolution(inp, w, b, base_axis, pad, stride, dilation, group)
Code Example #6
File: deconvolution.py  Project: zge/nnabla
    def __call__(self, inp):
        return F.deconvolution(inp, self.W, self.b, self.base_axis, self.pad,
                               self.stride, self.dilation, self.group)
Code Example #7
def istft(y_r,
          y_i,
          window_size,
          stride,
          fft_size,
          window_type='hanning',
          center=True):
    '''Wrapper of ISTFT that works around a bug in nnabla<=1.15.0.
    '''
    from utils import get_nnabla_version_integer
    if get_nnabla_version_integer() > 11500:
        return F.istft(**locals())
    import numpy as np
    from nnabla.parameter import get_parameter, get_parameter_or_create
    conv_cos = get_parameter('conv_cos')
    conv_sin = get_parameter('conv_sin')

    if conv_cos is None or conv_sin is None:
        if window_type == 'hanning':
            window_func = np.hanning(window_size + 1)[:-1]
        elif window_type == 'hamming':
            window_func = np.hamming(window_size + 1)[:-1]
        elif window_type == 'rectangular' or window_type is None:
            window_func = np.ones(window_size)
        else:
            raise ValueError("Unknown window type {}.".format(window_type))

        # pad window if `fft_size > window_size`
        if fft_size > window_size:
            diff = fft_size - window_size
            window_func = np.pad(window_func, (diff // 2, diff - diff // 2),
                                 mode='constant')
        elif fft_size < window_size:
            raise ValueError(
                "FFT size has to be at least as large as window size.")

        # compute inverse STFT filter coefficients
        if fft_size % stride != 0:
            raise ValueError("FFT size needs to be a multiple of stride.")

        inv_window_func = np.zeros_like(window_func)
        for s in range(0, fft_size, stride):
            inv_window_func += np.roll(np.square(window_func), s)

        mat_cos = np.zeros((fft_size // 2 + 1, 1, fft_size))
        mat_sin = np.zeros((fft_size // 2 + 1, 1, fft_size))

        for w in range(fft_size // 2 + 1):
            alpha = 1.0 if w == 0 or w == fft_size // 2 else 2.0
            alpha /= fft_size
            for t in range(fft_size):
                mat_cos[w, 0, t] = alpha * \
                    np.cos(2. * np.pi * w * t / fft_size)
                mat_sin[w, 0, t] = alpha * \
                    np.sin(2. * np.pi * w * t / fft_size)
        mat_cos = mat_cos * window_func / inv_window_func
        mat_sin = mat_sin * window_func / inv_window_func

        conv_cos = get_parameter_or_create('conv_cos',
                                           initializer=mat_cos,
                                           need_grad=False)
        conv_sin = get_parameter_or_create('conv_sin',
                                           initializer=mat_sin,
                                           need_grad=False)

    # compute inverse STFT
    x_cos = F.deconvolution(y_r, conv_cos, stride=(stride, ))
    x_sin = F.deconvolution(y_i, conv_sin, stride=(stride, ))

    x = F.reshape(x_cos - x_sin, (x_cos.shape[0], x_cos.shape[2]))

    if center:
        x = x[:, fft_size // 2:-fft_size // 2]

    return x
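
For reference, a round-trip sketch (not part of the original source) using the built-in F.stft / F.istft pair that this wrapper falls back to on newer nnabla versions:

import numpy as np
import nnabla as nn
import nnabla.functions as F

x = nn.Variable.from_numpy_array(np.random.randn(1, 4096).astype(np.float32))

# Analysis followed by synthesis with matching parameters reconstructs the
# signal up to boundary effects.
y_r, y_i = F.stft(x, window_size=256, stride=128, fft_size=256)
z = F.istft(y_r, y_i, window_size=256, stride=128, fft_size=256)
z.forward()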
Code Example #8
    def backward_impl(self, inputs, outputs, prop_down, accum):
        # inputs: [inputs_fwd_graph] + [inputs_bwd_graph] or
        # [inputs_fwd_graph] + [outputs_fwd_graph] + [inputs_bwd_graph]

        # Args
        with_bias = len(inputs) == 4
        base_axis = self.forward_func.info.args["base_axis"]
        pad = self.forward_func.info.args["pad"]
        stride = self.forward_func.info.args["stride"]
        dilation = self.forward_func.info.args["dilation"]
        group = self.forward_func.info.args["group"]
        channel_last = self.forward_func.info.args["channel_last"]
        output_padding = self.forward_func.info.args["output_padding"]

        # Inputs
        x0 = inputs[0].data
        w0 = inputs[1].data
        b0 = inputs[2].data if with_bias else None
        dy = inputs[3].data if with_bias else inputs[2].data
        # Outputs
        dx0 = outputs[0].data
        dw0 = outputs[1].data
        db0 = outputs[2].data if with_bias else None
        # Grads of inputs
        g_x0 = inputs[0].grad
        g_w0 = inputs[1].grad
        g_b0 = inputs[2].grad if with_bias else None
        g_dy = inputs[3].grad if with_bias else inputs[2].grad
        # Grads of outputs
        g_dx0 = outputs[0].grad
        g_dw0 = outputs[1].grad
        g_db0 = outputs[2].grad if with_bias else None

        # Computation
        ## w.r.t. x or w.r.t. w
        if prop_down[0] or prop_down[1]:
            # we can re-use the backward of the forward with different inputs
            inp_x = nn.Variable(x0.shape).apply(data=g_dx0,
                                                grad=g_x0,
                                                need_grad=prop_down[0])
            inp_w = nn.Variable(w0.shape).apply(data=g_dw0,
                                                grad=g_w0,
                                                need_grad=prop_down[1])
            out_y = nn.Variable(dy.shape).apply(grad=dy)
            inputs = [inp_x, inp_w]
            outputs = [out_y]
            if with_bias:
                inp_b = nn.Variable(b0.shape).apply(need_grad=False)
                inputs += [inp_b]
            self.forward_func.backward(inputs, outputs, accum)
        ## w.r.t. b
        if with_bias and prop_down[2] and not accum[2]:
            zeros = F.constant(0, b0.shape)
            if not nn.get_auto_forward():
                zeros.forward()
            g_b0.copy_from(zeros.data)
        ## w.r.t. dy
        if (not with_bias and prop_down[2]) or (with_bias and prop_down[3]):
            accum_dy = accum[3] if with_bias else accum[2]
            params = {
                'base_axis': base_axis,
                'pad': pad,
                'stride': stride,
                'dilation': dilation,
                'output_padding': output_padding,
                'group': group,
                'channel_last': channel_last
            }
            g_dy_ = (F.deconvolution(g_dx0, w0, None, **params) +
                     F.deconvolution(x0, g_dw0, None, **params))
            if with_bias:
                if not channel_last:
                    g_db0 = F.reshape(g_db0, [
                        1 if i != base_axis else g_db0.shape[0]
                        for i in range(g_dy.ndim)
                    ])
                else:
                    g_db0 = F.reshape(g_db0, [
                        1 if i != (g_dy.ndim - 1) else g_db0.shape[0]
                        for i in range(g_dy.ndim)
                    ])
                g_dy_ += g_db0
            if accum_dy:
                g_dy += g_dy_
            else:
                g_dy.copy_from(g_dy_)
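
A minimal sketch (not from the original source) of a call pattern that reaches the with_bias branches above, again via nn.grad:

import numpy as np
import nnabla as nn
import nnabla.functions as F

x = nn.Variable.from_numpy_array(
    np.random.randn(1, 2, 5, 5).astype(np.float32)).apply(need_grad=True)
w = nn.Variable.from_numpy_array(
    np.random.randn(2, 3, 3, 3).astype(np.float32)).apply(need_grad=True)
b = nn.Variable.from_numpy_array(
    np.random.randn(3).astype(np.float32)).apply(need_grad=True)
y = F.deconvolution(x, w, b)

gx, gw, gb = nn.grad([F.sum(y)], [x, w, b])

# Backpropagating through the first-order gradients invokes backward_impl
# with with_bias == True (len(inputs) == 4).
loss = F.sum(gx * gx) + F.sum(gw * gw)
loss.forward()
loss.backward()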