Example 1
def test_graph_more_than_2_outputs(seed, clear_buffer):
    count = 0

    def func_hook(f):
        nonlocal count
        if f.name == 'Split':
            count += 1

    nn.clear_parameters()

    a = nn.Variable.from_numpy_array(np.ones((10, )))
    b = nn.Variable.from_numpy_array(np.ones((10, )))
    c = F.add2(a, b, inplace=True, outputs=[a.data])
    y = F.split(c, axis=0)
    # forward_all executes the shared graph once, so the Split function
    # (and the in-place add2 feeding it) run exactly one time.
    nn.forward_all(y, function_pre_hook=func_hook)

    assert count == 1

    res = [x.d for x in y]
    assert_allclose(res, [2.0] * 10)

    a = nn.Variable.from_numpy_array(np.ones((10, )))
    b = nn.Variable.from_numpy_array(np.ones((10, )))
    c = F.add2(a, b, inplace=True, outputs=[a.data])
    y = F.split(c, axis=0)
    # Forwarding each of the 10 split outputs separately re-executes the
    # in-place add2 on every call, so a.data accumulates b ten times: 1 + 10 = 11.
    for yy in y:
        yy.forward()
    res = [x.d for x in y]
    assert_allclose(res, [11.0] * 10)
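The snippets on this page are excerpted without their import lines. Under nnabla's standard aliases (plus the test helper used above), a minimal prelude would be:

import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
from nnabla.testing import assert_allclose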
Example 2
def ref_fused_batch_normalization(x, beta, gamma, rmean, rvar, z, axes,
                                  decay_rate, eps, batch_stat, nonlinearity,
                                  output_stat):
    with nn.context_scope(cpu_context):
        xvar = nn.Variable.from_numpy_array(x)
        betavar = nn.Variable.from_numpy_array(beta)
        gammavar = nn.Variable.from_numpy_array(gamma)
        rmeanvar = nn.Variable.from_numpy_array(rmean)
        rvarvar = nn.Variable.from_numpy_array(rvar)
        if z is not None:
            zvar = nn.Variable.from_numpy_array(z)
        with nn.auto_forward():
            bn = F.batch_normalization(xvar, betavar, gammavar, rmeanvar,
                                       rvarvar, axes, decay_rate, eps,
                                       batch_stat, output_stat)
            if z is None:
                if output_stat:
                    y = bn[0]
                else:
                    y = bn
            else:
                if output_stat:
                    y = F.add2(bn[0], zvar)
                else:
                    y = F.add2(bn, zvar)
            y = F.relu(y)
        rmean[:] = rmeanvar.d
        rvar[:] = rvarvar.d
        if output_stat:
            return y.d, bn[1].d, bn[2].d
        else:
            return y.d
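A minimal sketch of driving this reference function. The shapes, the seed, and obtaining cpu_context via get_extension_context are assumptions for illustration, not part of the original snippet:

from nnabla.ext_utils import get_extension_context

cpu_context = get_extension_context('cpu')

rng = np.random.RandomState(313)
x = rng.randn(2, 4, 3, 3).astype(np.float32)
stat_shape = (1, 4, 1, 1)  # stats broadcast over axis 1 (channels)
beta = rng.randn(*stat_shape).astype(np.float32)
gamma = rng.randn(*stat_shape).astype(np.float32)
rmean = np.zeros(stat_shape, dtype=np.float32)
rvar = np.ones(stat_shape, dtype=np.float32)
y = ref_fused_batch_normalization(x, beta, gamma, rmean, rvar, None,
                                  [1], 0.9, 1e-5, True, 'relu', False)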
Example 3
def construct_cell(prev_layer, architecture, num_nodes, output_filter, scope,
                   test):
    """
    Constructing the cell, which consists of multiple nodes.
    The cell made by this function is either Conv cell or Reduction cell.
    layer_id is somewhere between [0, num_cells)
    the name of the incoming scope should be "w{}".format(layer_id), like "w2" at layer 2 
    so in the end scope name will be like w1_<node>_<ops>
    """
    assert len(architecture) // 4 == num_nodes
    used_indices = set()
    local_used_weights = set()

    ops = {
        0: depthwise_separable_conv3x3,
        1: depthwise_separable_conv5x5,
        2: average_pool,
        3: max_pool,
        4: identity
    }

    # 2 previous outputs to be fed as inputs
    layers = [prev_layer[-2], prev_layer[-1]]

    for node in range(num_nodes):
        ind = node
        # get ops id and input index
        one_node = architecture[4 * ind:4 * (ind + 1)]
        idx_1, ops_1, idx_2, ops_2 = one_node
        # store the node's index used as input
        used_indices.update({idx_1, idx_2})
        scope_1 = "{0}_{1}_{2}".format(scope, node, ops_1)
        scope_2 = "{0}_{1}_{2}".format(scope, node, ops_2)

        # for each node, apply its two operations and record the weight names used
        h1 = ops[ops_1](layers[idx_1], output_filter, scope_1, test)
        local_used_weights.update(get_weights_name(scope_1, ops_1))

        h2 = ops[ops_2](layers[idx_2], output_filter, scope_2, test)
        local_used_weights.update(get_weights_name(scope_2, ops_2))

        # add them as output of that node
        h_add = F.add2(h1, h2)
        layers.append(h_add)  # store each output temporarily

    all_indices = set(range(num_nodes + 2))  # all nodes in the cell
    # exclude nodes not used as others' input
    candidates = all_indices - used_indices
    h_out = layers[candidates.pop()]  # pop an arbitrary unused output (set.pop is arbitrary, not random)

    for j in candidates:
        h_out = F.add2(h_out, layers[j])  # repeatedly sum up outputs

    return h_out, local_used_weights
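For illustration, a hypothetical architecture for num_nodes=2; each node is encoded by four integers, (input index, op id) twice, with the op ids taken from the ops table above:

architecture = [0, 0, 1, 4,   # node 0: sep_conv3x3(layers[0]) + identity(layers[1])
                2, 3, 0, 2]   # node 1: max_pool(layers[2]) + average_pool(layers[0])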
Example 4
def propagate(h,
              edges,
              state_size=None,
              w_initializer=None,
              u_initializer1=None,
              u_initializer2=None,
              bias_initializer=None,
              edge_initializers=None):
    """
    Propagate vertex representations

    Arguments:

    h                 -- the input vertex representations (nnabla.Variable with shape (|V|, D))
    edges             -- the dictionary representing the graph edges ({label: [in, out]})
    state_size        -- (optional) the size of hidden state (h.shape[1] is used if this argument is None)
    w_initializer     -- (optional)
    u_initializer1    -- (optional)
    u_initializer2    -- (optional)
    bias_initializer  -- (optional)
    edge_initializers -- (optional)

    Return value

    - Return a variable with shape (|V|, D)
    """
    if state_size is None:
        state_size = h.shape[1]
    h_size = h.shape[1]
    with nn.parameter_scope("activate"):
        a = activate(h,
                     edges,
                     state_size,
                     bias_initializer=bias_initializer,
                     edge_initializers=edge_initializers)
    with nn.parameter_scope("W_zr"):
        ws = PF.affine(a, (3, h_size), with_bias=False, w_init=w_initializer)
    (z1, r1, h_hat1) = split(ws, axis=1)
    with nn.parameter_scope("U_zr"):
        us = PF.affine(h, (2, state_size),
                       with_bias=False,
                       w_init=u_initializer1)
    (z2, r2) = split(us, axis=1)
    z = F.sigmoid(F.add2(z1, z2))
    r = F.sigmoid(F.add2(r1, r2))
    with nn.parameter_scope("U"):
        h_hat2 = PF.affine(F.mul2(r, h),
                           state_size,
                           with_bias=False,
                           w_init=u_initializer2)
    h_hat = F.tanh(F.add2(h_hat1, h_hat2))
    return F.add2(F.sub2(h, F.mul2(z, h)), F.mul2(z, h_hat))
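The return value is the standard GRU state update: F.add2(F.sub2(h, F.mul2(z, h)), F.mul2(z, h_hat)) computes (1 - z) * h + z * h_hat elementwise, with z acting as the update gate and r as the reset gate used to form h_hat.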
Example 5
def test_all_reduce_callback(seed, pack_size, division, comm_nccl_opts):
    if comm_nccl_opts is None:
        pytest.skip(
            "Communicator test is disabled. You can turn it on by an option `--test-communicator`."
        )
    if len(comm_nccl_opts.devices) < 2:
        pytest.skip("Communicator test is disabled. Use more than 1 gpus.")

    comm = comm_nccl_opts.comm
    device_id = int(comm_nccl_opts.device_id)
    nn.set_default_context(comm_nccl_opts.ctx)

    nn.clear_parameters()
    x_data_list = []
    num_layers = 20
    rng = np.random.RandomState(seed)
    for l in range(num_layers):
        x_data = rng.rand(3, 4)
        x_data_list.append(x_data)

    # all_reduce_callback
    x_list1 = []
    n1 = nn.Variable([3, 4])
    n1.d = 0
    for l in range(num_layers):
        x = nn.Variable([3, 4], need_grad=True)
        n1 = F.add2(n1, x)
        x.d = x_data_list[l] * (device_id + 1)
        x.g = 0
        x_list1.append(x)
    n1.backward(clear_buffer=True,
                communicator_callbacks=comm.all_reduce_callback(
                    [v.grad for v in x_list1], pack_size, division))

    # Ref AllReduce
    x_list2 = []
    n2 = nn.Variable([3, 4])
    n2.d = 0
    for l in range(num_layers):
        x = nn.Variable([3, 4], need_grad=True)
        n2 = F.add2(n2, x)
        x.d = x_data_list[l] * (device_id + 1)
        x.g = 0
        x_list2.append(x)
    n2.backward(clear_buffer=True)
    comm.all_reduce([v.grad for v in x_list2],
                    inplace=False,
                    division=division)

    # Check
    for x, ref in zip(x_list1, x_list2):
        assert np.allclose(x.g, ref.g)
Example 6
def bottleneck(x, ochannels, shortcut_type, stride, test, channel_last=False):
    def bn(h):
        axes = [3 if channel_last else 1]
        return PF.batch_normalization(h, axes=axes, batch_stat=not test)

    assert ochannels % 4 == 0
    hchannels = ochannels // 4  # integer channel count; true division would give a float
    with nn.parameter_scope("bottleneck1"):
        h = F.relu(
            bn(
                PF.convolution(x,
                               hchannels, (1, 1),
                               with_bias=False,
                               channel_last=channel_last)))
    with nn.parameter_scope("bottleneck2"):
        h = F.relu(
            bn(
                PF.convolution(h,
                               hchannels, (3, 3),
                               pad=(1, 1),
                               stride=stride,
                               with_bias=False,
                               channel_last=channel_last)))
    with nn.parameter_scope("bottleneck3"):
        h = bn(
            PF.convolution(h,
                           ochannels, (1, 1),
                           with_bias=False,
                           channel_last=channel_last))
    with nn.parameter_scope("bottleneck_s"):
        s = shortcut(x, ochannels, stride, shortcut_type, test, channel_last)
    return F.relu(F.add2(h, s))
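A hypothetical call site for this block. The shortcut helper is not defined in the snippet, so a minimal stand-in is sketched here as an assumption:

def shortcut(x, ochannels, stride, shortcut_type, test, channel_last=False):
    # stand-in: identity when shapes match, else a 1x1 projection
    if x.shape[1] == ochannels and stride == (1, 1):
        return x
    return PF.convolution(x, ochannels, (1, 1), stride=stride,
                          with_bias=False, channel_last=channel_last)

x = nn.Variable((1, 64, 56, 56))
with nn.parameter_scope("layer1"):
    y = bottleneck(x, ochannels=256, shortcut_type='b',
                   stride=(1, 1), test=False)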
Example 7
    def __call__(self, x, ochannels, stride):

        hchannels = self.get_bottleneck_channels(ochannels)
        cardinality = 32 if self.resnext else 1
        stride1 = None if self.resnext else stride
        stride2 = stride if self.resnext else None

        with nn.parameter_scope("bottleneck1"):
            h = self.bn(
                pf_convolution(x,
                               hchannels, (1, 1),
                               stride=stride1,
                               **self.conv_opts))
        with nn.parameter_scope("bottleneck2"):
            h = self.bn(
                pf_convolution(h,
                               hchannels, (3, 3),
                               stride=stride2,
                               group=cardinality,
                               **self.conv_opts))
        with nn.parameter_scope("bottleneck3"):
            h = pf_convolution(h, ochannels, (1, 1), **self.conv_opts)
            h = self.bn(h, no_relu=True)
        with nn.parameter_scope("se"):
            h = self.seblock(h)
        with nn.parameter_scope("bottleneck_s"):
            s = shortcut(x,
                         ochannels,
                         stride,
                         self.shortcut_type,
                         self.test,
                         channel_last=self.channel_last)
        return F.relu(F.add2(h, s))
Example 8
def res_block(res_input, res, inmaps, outmaps, block_scope='res_block'):
    """
    Residual block for Discriminator
    """

    name_scope = f'Discriminator/{block_scope}_{res}x{res}'

    out = conv_layer(res_input,
                     inmaps,
                     inmaps,
                     kernel_size=3,
                     name_scope=f'{name_scope}/Conv1')
    out = conv_layer(out,
                     inmaps,
                     outmaps,
                     kernel_size=3,
                     downsample=True,
                     name_scope=f'{name_scope}/Conv2')

    skip = conv_layer(res_input,
                      inmaps,
                      outmaps,
                      kernel_size=1,
                      downsample=True,
                      bias=False,
                      act=F.identity,
                      name_scope=f'{name_scope}/ConvSkip')

    out = F.mul_scalar(F.add2(out, skip),
                       1 / np.sqrt(2).astype(np.float32),
                       inplace=False)

    return out
Example 9
 def _lstm_cell(self, name, n_hidden, x_in, h=None, c=None):
     if h is None:
         h = nn.Variable.from_numpy_array(
             np.zeros((self._batch_size, self._cols_size)))
     if c is None:
         c = nn.Variable.from_numpy_array(
             np.zeros((self._batch_size, n_hidden)))
     h = F.concatenate(h, x_in, axis=1)  # LSTM_Concatenate -> cols_size * 2
     with nn.parameter_scope(name + '_Affine'):  # LSTM_Affine -> n_hidden
         h1 = PF.affine(h, (n_hidden, ), base_axis=1)
     with nn.parameter_scope(name + '_IGate'):  # LSTM_IGate -> n_hidden
         h2 = PF.affine(h, (n_hidden, ), base_axis=1)
     with nn.parameter_scope(name + '_FGate'):  # LSTM_FGate -> n_hidden
         h3 = PF.affine(h, (n_hidden, ), base_axis=1)
     with nn.parameter_scope(name + '_OGate'):  # LSTM_OGate -> n_hidden
         h4 = PF.affine(h, (n_hidden, ), base_axis=1)
     h1 = F.tanh(h1)  # LSTM_Tanh
     h2 = F.sigmoid(h2)  # LSTM_Sigmoid
     h3 = F.sigmoid(h3)  # LSTM_Sigmoid_2
     h4 = F.sigmoid(h4)  # LSTM_Sigmoid_3
     h5 = F.mul2(h2, h1)  # LSTM_Mul2 -> n_hidden
     h6 = F.mul2(h3, c)  # LSTM_Mul2_2 -> n_hidden
     h7 = F.add2(h5, h6, inplace=True)  # LSTM_Add2 -> n_hidden
     h8 = F.tanh(h7)  # LSTM_Tanh_2 -> n_hidden
     h9 = F.mul2(h4, h8)  # LSTM_Mul2_3 -> n_hidden
     c = h7  # LSTM_C
     h = h9  # LSTM_H
     return (h, c)
Example 10
def LSTMCell(x, h2, h1):
    # first stack: h2 = hidden state, h1 = cell state
    units = h1.shape[1]

    h2 = F.concatenate(h2, x, axis=1)

    h3 = PF.affine(h2, units, name='Affine')
    h4 = PF.affine(h2, units, name='InputGate')
    h5 = PF.affine(h2, units, name='ForgetGate')
    h6 = PF.affine(h2, units, name='OutputGate')

    h3 = F.tanh(h3)
    h4 = F.sigmoid(h4)
    h5 = F.sigmoid(h5)
    h6 = F.sigmoid(h6)

    h4 = F.mul2(h4, h3)  # input gate * candidate
    h5 = F.mul2(h5, h1)  # forget gate * previous cell
    h4 = F.add2(h4, h5, inplace=True)

    h7 = F.tanh(h4)
    h6 = F.mul2(h6, h7)  # output gate * tanh(cell)

    return h6, h4  # hidden, cell
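A minimal, hypothetical unrolling of this cell over T steps with zero initial states; a fixed parameter scope shares the gate weights across steps:

batch, in_dim, units, T = 8, 16, 32, 5
hidden = nn.Variable.from_numpy_array(np.zeros((batch, units), dtype=np.float32))
cell = nn.Variable.from_numpy_array(np.zeros((batch, units), dtype=np.float32))
for t in range(T):
    x_t = nn.Variable((batch, in_dim))
    with nn.parameter_scope('lstm'):
        hidden, cell = LSTMCell(x_t, hidden, cell)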
Example 11
def stochastic_res_unit(x, scope_name, act=F.relu, dn=False, test=False):
    if not test:
        flag = np.random.randint(2)
        if flag:
            h = res_block(x, scope_name, act=act, dn=dn, test=test)
            h = F.add2(h, x)
        else:
            h = x
    else:
        h = res_block(x, scope_name, act=act, dn=dn, test=test)
        h = F.add2(h, x)
    h = act(h)
    # Maxpooling
    if dn:
        h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
    return h
Example 13
def resblock_d(h, y, scopename,
               n_classes, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
               downsample=True, test=False, sn=True):
    """Residual block for discriminator"""
    s = h
    _, c, _, _ = h.shape
    assert maps // 2 == c or maps == c
    maps1 = c if maps // 2 == c else maps
    maps2 = maps
    with nn.parameter_scope(scopename):
        # LeakyRelu -> Conv
        with nn.parameter_scope("conv1"):
            #h = F.leaky_relu(h, 0.2, False)
            h = F.relu(h, False)
            h = convolution(h, maps1, kernel=kernel, pad=pad, stride=stride,
                            with_bias=True, sn=sn, test=test, init_scale=np.sqrt(2))

        # LeakyRelu -> Conv -> Downsample
        with nn.parameter_scope("conv2"):
            #h = F.leaky_relu(h, 0.2, True)
            h = F.relu(h, True)
            h = convolution(h, maps2, kernel=kernel, pad=pad, stride=stride,
                            with_bias=True, sn=sn, test=test, init_scale=np.sqrt(2))
            if downsample:
                h = F.average_pooling(h, kernel=(2, 2))

        # Shortcut: Conv -> Downsample
        if c != maps2 or downsample:
            with nn.parameter_scope("shortcut"):
                s = convolution(s, maps2, kernel=(1, 1), pad=(0, 0), stride=(1, 1),
                                with_bias=True, sn=sn, test=test)
        if downsample:
            s = F.average_pooling(s, kernel=(2, 2))
    return F.add2(h, s, True)
Example 14
def conv_lstm_cell(input_tensor, cur_state, n_filt, kernel_size):
    """
    conv lstm cell definition
    """

    def split(inp):
        _, channels, _, _ = inp.shape
        channels = channels // 4  # integer division: slice indices must be ints
        return inp[:, :channels, :, :], inp[:, channels:2 * channels, :, :], \
            inp[:, 2 * channels:3 * channels, :, :], \
            inp[:, 3 * channels:4 * channels, :, :]

    h_cur, c_cur = cur_state
    # concatenate along channel axis
    combined = F.concatenate(input_tensor, h_cur, axis=1)
    combined_conv = conv2d(combined, 4 * n_filt, kernel_size, 1, kernel_size // 2,
                           name='conv_lstm_cell')
    cc_i, cc_f, cc_o, cc_g = split(combined_conv)
    act_i = F.sigmoid(cc_i)
    act_f = F.sigmoid(cc_f)
    act_o = F.sigmoid(cc_o)
    act_g = F.tanh(cc_g)
    c_next = F.add2(act_f * c_cur, act_i * act_g)
    h_next = act_o * F.tanh(c_next)
    return h_next, c_next
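A hypothetical one-step call. conv2d is the snippet's local helper, not a public nnabla function, so a minimal stand-in is assumed here:

def conv2d(x, out_ch, kernel, stride, pad, name=None):
    # stand-in for the snippet's conv2d helper (assumption)
    return PF.convolution(x, out_ch, (kernel, kernel), stride=(stride, stride),
                          pad=(pad, pad), name=name)

b, c_in, hh, ww, n_filt = 2, 3, 16, 16, 8
x = nn.Variable((b, c_in, hh, ww))
h0 = nn.Variable.from_numpy_array(np.zeros((b, n_filt, hh, ww), dtype=np.float32))
c0 = nn.Variable.from_numpy_array(np.zeros((b, n_filt, hh, ww), dtype=np.float32))
h_next, c_next = conv_lstm_cell(x, (h0, c0), n_filt, kernel_size=3)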
Example 15
def optblock_d(h, y, scopename,
               n_classes, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
               downsample=True, test=False, sn=True):
    """Optimized block for discriminator"""
    s = h
    _, c, _, _ = h.shape
    with nn.parameter_scope(scopename):
        # Conv
        with nn.parameter_scope("conv1"):
            h = convolution(h, maps, kernel=kernel, pad=pad, stride=stride,
                            with_bias=True, sn=sn, test=test, init_scale=np.sqrt(2))

        # ReLU -> Conv
        with nn.parameter_scope("conv2"):
            #h = F.leaky_relu(h, 0.2, True)
            h = F.relu(h, True)
            h = convolution(h, maps, kernel=kernel, pad=pad, stride=stride,
                            with_bias=True, sn=sn, test=test, init_scale=np.sqrt(2))
            if downsample:
                h = F.average_pooling(h, kernel=(2, 2))

        # Shortcut: Conv -> Downsample
        with nn.parameter_scope("shortcut"):
            if downsample:
                s = F.average_pooling(s, kernel=(2, 2))
            s = convolution(s, maps, kernel=(1, 1), pad=(0, 0), stride=(1, 1),
                            with_bias=True, sn=sn, test=test)
    return F.add2(h, s, True)
Example 16
def apply_ops_and_connect(x, prev_layers, connect_pattern, ops, elem,
                          output_filter, scope, test):
    """
        execute the operation at the current layer.
        and if there is a skip connection with the previous layers,
        sum the whole values up. 
    """

    assert len(prev_layers) == len(connect_pattern) + 1

    is_skip_connected = False  # set as False initially.
    local_used_weights = set()
    # feeding previous output to the current layer.
    x = ops[elem](x, output_filter, scope, test)
    local_used_weights.update(get_weights_name(scope, elem))

    for flag, prev_output in zip(connect_pattern, prev_layers[:-1]):
        # ignore the last variable stored in prev_layers[-1]
        # since that is the previous layer's output (which is already used as the input above)
        if flag == 1:
            # skip connection exists.
            is_skip_connected = True
            x = F.add2(x, prev_output)

    if is_skip_connected:
        with nn.parameter_scope(scope + "/skip"):
            x = PF.batch_normalization(x, batch_stat=not test)
        local_used_weights.update({
            "{}/skip/bn/gamma".format(scope), "{}/skip/bn/beta".format(scope)
        })

    return x, local_used_weights
Example 17
def res_unit(x, scope_name, act=F.relu, dn=False, test=False):
    C = x.shape[1]

    with nn.parameter_scope(scope_name):
        # Conv -> BN -> Relu
        with nn.parameter_scope("conv1"):
            h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0), with_bias=False)
            h = PF.batch_normalization(h, decay_rate=0.9, batch_stat=not test)
            h = act(h)
        # Conv -> BN -> Relu
        with nn.parameter_scope("conv2"):
            h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1), with_bias=False)
            h = PF.batch_normalization(h, decay_rate=0.9, batch_stat=not test)
            h = act(h)
        # Conv -> BN
        with nn.parameter_scope("conv3"): 
            h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0), with_bias=False)
            h = PF.batch_normalization(h, decay_rate=0.9, batch_stat=not test)
    # Residual -> Relu
    h = F.add2(h, x)
    h = act(h)
    
    # Maxpooling
    if dn:
        h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
    
    return h
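Stacking the unit is straightforward; an illustrative sketch with assumed shapes:

x = nn.Variable((4, 16, 32, 32))
h = res_unit(x, "unit1")            # same resolution: (4, 16, 32, 32)
h = res_unit(h, "unit2", dn=True)   # max-pooled down to (4, 16, 16, 16)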
Example 18
 def res_unit(x, scope_name, rng, dn=False):
     C = x.shape[1]
     with nn.parameter_scope(scope_name):
         # Conv -> BN -> Nonlinear
         with nn.parameter_scope("conv1"):
             h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                                with_bias=False)
             h = PF.batch_normalization(h, batch_stat=not test)
             h = act(h)
         # Conv -> BN -> Nonlinear
         with nn.parameter_scope("conv2"):
             h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                                with_bias=False)
             h = PF.batch_normalization(h, batch_stat=not test)
             h = act(h)
         # Conv -> BN
         with nn.parameter_scope("conv3"):
             h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
                                with_bias=False)
             h = PF.batch_normalization(h, batch_stat=not test)
         # Residual -> Nonlinear
         h = act(F.add2(h, x, inplace=True))
         # Maxpooling
         if dn:
             h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
         return h
Example 19
def attn_block(x, name, num_heads=4, fix_parameters=False):
    """Multihead attention block"""
    B, C, H, W = x.shape

    with nn.parameter_scope(name):
        # Get query, key, value
        h = normalize(x, name="norm")
        # nin(3 * C) -> split is faster?
        q = nin(h, C, name="q")
        k = nin(h, C, name="k")
        v = nin(h, C, name="v")

        # Attention
        w = F.batch_matmul(F.reshape(q, (B * num_heads, -1, H * W)),
                           F.reshape(k, (B * num_heads, -1, H * W)),
                           transpose_a=True)
        w = F.mul_scalar(w, int(C)**(-0.5), inplace=True)

        assert w.shape == (B * num_heads, H * W, H * W)
        w = F.softmax(w, axis=-1)

        h = F.reshape(v, (B * num_heads, -1, H * W))
        h = F.batch_matmul(h, w)
        h = F.reshape(h, (B, C, H, W))

        # output projection
        h = nin(h, C, name='proj_out', zeroing_w=True)

    assert h.shape == x.shape
    return F.add2(h, x, inplace=True)
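An illustrative shape walk-through of the attention above, assuming B=2, C=64, H=W=8, num_heads=4:

# q, k, v                                  : (2, 64, 8, 8)
# reshaped q, k, v                         : (8, 16, 64)  -- (B*heads, C/heads, H*W)
# w = batch_matmul(q, k, transpose_a=True) : (8, 64, 64)  -- attention over H*W positions
# h = batch_matmul(v, w)                   : (8, 16, 64)
# reshaped back and projected              : (2, 64, 8, 8)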
Example 20
def resblock_g(h, y, scopename,
               n_classes, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1),
               upsample=True, test=False, sn=True, coefs=[1.0]):
    """Residual block for generator"""

    s = h
    _, c, _, _ = h.shape
    with nn.parameter_scope(scopename):
        # BN -> Relu -> Upsample -> Conv
        with nn.parameter_scope("conv1"):
            h = CCBN(h, y, n_classes, test=test, coefs=coefs)
            h = F.relu(h, inplace=True)
            if upsample:
                h = F.unpooling(h, kernel=(2, 2))
            h = convolution(h, maps, kernel=kernel, pad=pad, stride=stride,
                            with_bias=True, sn=sn, test=test, init_scale=np.sqrt(2))

        # BN -> Relu -> Conv
        with nn.parameter_scope("conv2"):
            h = CCBN(h, y, n_classes, test=test, coefs=coefs)
            h = F.relu(h, inplace=True)
            h = convolution(h, maps, kernel=kernel, pad=pad, stride=stride,
                            with_bias=True, sn=sn, test=test, init_scale=np.sqrt(2))

        # Shortcut: Upsample -> Conv
        if upsample:
            s = F.unpooling(s, kernel=(2, 2))
        if c != maps or upsample:
            with nn.parameter_scope("shortcut"):
                s = convolution(s, maps, kernel=(1, 1), pad=(0, 0), stride=(1, 1),
                                with_bias=True, sn=sn, test=test)
    return F.add2(h, s, True)
Example 21
 def unit(i, prefix):
     c1 = PF.convolution(i, 4, (3, 3), pad=(1, 1), name=prefix + '-c1')
     c2 = PF.convolution(F.relu(c1),
                         4, (3, 3),
                         pad=(1, 1),
                         name=prefix + '-c2')
     c = F.add2(c2, c1, inplace=True)
     return c
Example 22
def model(img, sf):
    """
    Define JSInet model
    """
    with nn.parameter_scope('Network'):
        with nn.parameter_scope('local_contrast_enhancement'):
            ## ================= Local Contrast Enhancement Subnet ============================ ##
            ch = 64
            b = guided_filter(img, 5, 0.01)
            n1 = conv_2d(b, ch, kernel=(3, 3), name='conv/0')
            for i in range(4):
                n1 = res_block(n1, ch, 'res_block/%d' % i)
            n1 = F.relu(n1, inplace=True)
            local_filter_2d = conv_2d(
                n1, (9**2) * (sf**2), kernel=(3, 3),
                name='conv_k')  # [B, H, W, (9x9)*(sfxsf)]
            # dynamic 2D upsampling with 2D local filters
            pred_C = dyn_2d_up_operation(b, local_filter_2d, (9, 9), sf)
            # local contrast mask
            pred_C = 2 * F.sigmoid(pred_C)
            ## ================= Detail Restoration Subnet ============================ ##
            ch = 64
            d = F.div2(img, b + 1e-15)
        with nn.parameter_scope('detail_restoration'):
            n3 = conv_2d(d, ch, kernel=(3, 3), name='conv/0')
            for i in range(4):
                n3 = res_block(n3, ch, 'res_block/%d' % i)
                if i == 0:
                    d_feature = n3
            n3 = F.relu(n3, inplace=True)
            # separable 1D filters
            dr_k_h = conv_2d(n3, 41 * sf**2, kernel=(3, 3), name='conv_k_h')
            dr_k_v = conv_2d(n3, 41 * sf**2, kernel=(3, 3), name='conv_k_v')
            # dynamic separable upsampling with separable 1D local filters
            pred_D = dyn_sep_up_operation(d, dr_k_v, dr_k_h, 41, sf)
        ## ================= Image Reconstruction Subnet ============================ ##
        with nn.parameter_scope('image_reconstruction'):
            n4 = conv_2d(img, ch, kernel=(3, 3), name='conv/0')
            for i in range(4):
                if i == 1:
                    n4 = F.concatenate(n4, d_feature, axis=3)
                    n4 = res_block_concat(n4, ch, 'res_block/%d' % i)
                else:
                    n4 = res_block(n4, ch, 'res_block/%d' % i)
            n4 = F.relu(n4, inplace=True)

            n4 = F.relu(conv_2d(n4, ch * sf * sf, kernel=(3, 3),
                                name='conv/1'),
                        inplace=True)
            # (1,100,170,1024) -> (1,100,170,4,4,64) -> (1,100,4,170,4,64)
            # pixel shuffle
            n4 = depth_to_space(n4, sf)
            pred_I = conv_2d(n4, 3, kernel=(3, 3), name='conv/2')

    pred = F.add2(pred_I, pred_D, inplace=True) * pred_C
    jsinet = namedtuple('jsinet', ['pred'])
    return jsinet(pred)
Example 23
def network_LSTM(x, D, C, InputShape, HiddenSize, test=False):
    # Input_2:x -> 687
    # Delay_in:D -> 100
    # Cell_in:C -> 100

    # Concatenate -> 787
    h = F.concatenate(D, x, axis=1)

    # Affine -> 100
    h1 = PF.affine(h, HiddenSize, name='Affine')

    # InputGate -> 100
    h2 = PF.affine(h, HiddenSize, name='InputGate')

    # OutputGate -> 100
    h3 = PF.affine(h, HiddenSize, name='OutputGate')

    # ForgetGate -> 100
    h4 = PF.affine(h, HiddenSize, name='ForgetGate')
    # Sigmoid
    h1 = F.sigmoid(h1)
    # Sigmoid_2
    h2 = F.sigmoid(h2)

    # Sigmoid_3
    h3 = F.sigmoid(h3)
    # Sigmoid_4
    h4 = F.sigmoid(h4)

    # Mul2 -> 100
    h1 = F.mul2(h1, h2)

    # Mul2_3 -> 100
    h4 = F.mul2(h4, C)

    # Add2 -> 100
    h1 = F.add2(h1, h4, True)

    # Tanh
    h5 = F.tanh(h1)

    # Cell_out
    h6 = F.identity(h1)

    # Mul2_2 -> 100
    h5 = F.mul2(h5, h3)
    # Dropout
    if not test:
        h5 = F.dropout(h5)

    # Output
    h5 = F.identity(h5)

    # Concatenate_2 -> 200
    h5 = F.concatenate(h5, h6, axis=1)
    return h5
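The 200-dimensional output packs [Output, Cell_out] along axis 1; a caller could split it back with F.slice. A hypothetical sketch using the shapes from the comments above:

x = nn.Variable((1, 687))
D = nn.Variable((1, 100))
C = nn.Variable((1, 100))
out = network_LSTM(x, D, C, 687, 100)                # -> shape (1, 200)
h_out = F.slice(out, start=(0, 0), stop=(1, 100))    # Output
c_out = F.slice(out, start=(0, 100), stop=(1, 200))  # Cell_out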
Example 24
 def res_unit(x, scope):
     C = x.shape[1]
     with nn.parameter_scope(scope):
         with nn.parameter_scope('conv1'):
             h = F.elu(bn(PF.convolution(x, C // 2, (1, 1), with_bias=False)))
         with nn.parameter_scope('conv2'):
             h = F.elu(
                 bn(PF.convolution(h, C // 2, (3, 3), pad=(1, 1), with_bias=False)))
         with nn.parameter_scope('conv3'):
             h = bn(PF.convolution(h, C, (1, 1), with_bias=False))
     return F.elu(F.add2(h, x, inplace=True))
Example 26
    def call(self, x, training=True):

        h = self.conv1(x)
        h = self.conv2(h)
        h = self.conv3(h)

        s = x
        if self.skip_by_conv:
            s = self.skip(s)
        h = F.relu(F.add2(h, s, inplace=True), inplace=True)
        return h
Example 27
def loss_function(pred, aux_logits, label):
    """
        Compute loss.
    """
    if aux_logits is None:
        loss = F.mean(F.softmax_cross_entropy(pred, label))
    else:
        loss = F.softmax_cross_entropy(pred, label)
        loss_from_aux = F.softmax_cross_entropy(aux_logits, label)
        loss = F.mean(F.add2(loss, loss_from_aux))
    return loss
Example 28
 def bn(self, h, z=None, no_relu=False):
     axes = [get_channel_axis(self.channel_last)]
     if no_relu:
         h = PF.batch_normalization(h, axes=axes, batch_stat=not self.test)
         if z is None:
             return h
         return F.add2(z, h)
     return PF.fused_batch_normalization(h,
                                         z,
                                         axes=axes,
                                         batch_stat=not self.test)
Example 29
 def compute_mel(self, wave):
     hp = self.hparams
     reals, imags = F.stft(wave,
                           window_size=hp.win_length,
                           stride=hp.hop_length,
                           fft_size=hp.n_fft)
     linear = F.pow_scalar(
         F.add2(F.pow_scalar(reals, 2), F.pow_scalar(imags, 2)), 0.5)
     mels = F.batch_matmul(self.basis, linear)
     mels = F.log(F.clip_by_value(mels, 1e-5,
                                  np.inf)).apply(need_grad=False)
     return mels
Example 30
 def residual_block(res_blk_input, output_channels=64, scope='res_block'):
     """
     define a residual block here with conv + relu + conv
     """
     with nn.parameter_scope(scope):
         feats = conv2d(res_blk_input, output_channels, 3, 1, 1,
                        name='conv1', init_method='kaiming_normal', scale=0.1)
         feats = F.relu(feats)
         feats = conv2d(feats, output_channels, 3, 1, 1,
                        name='conv2', init_method='kaiming_normal', scale=0.1)
         feats = F.add2(feats, res_blk_input)
     return feats
Example 31
def loss_function(pred, label, aux_logits=None, aux_weights=1.0):
    """
        Compute loss.
    """
    if aux_logits is None:
        loss = F.mean(F.softmax_cross_entropy(pred, label))
    else:
        loss = F.softmax_cross_entropy(pred, label)
        loss_from_aux = F.mul_scalar(
            F.softmax_cross_entropy(aux_logits, label), aux_weights)
        loss = F.mean(F.add2(loss, loss_from_aux))
    return loss
Example 32
    def __init__(self, x, weight, bias, beta, gamma, rmean, rvar, z,
                 base_axis, pad, stride, dilation, group, channel_last,
                 decay_rate, eps, batch_stat,
                 nonlinearity, nonlinearity_args, pad_mode, constant_value):

        from collections import OrderedDict
        inputs = OrderedDict()
        xvar = nn.Variable.from_numpy_array(x)
        weightvar = nn.Variable.from_numpy_array(weight)
        inputs['x'] = xvar
        inputs['weight'] = weightvar
        biasvar = None
        betavar = None
        gammavar = None
        rmeanvar = None
        rvarvar = None
        zvar = None
        if bias is not None:
            biasvar = nn.Variable.from_numpy_array(bias)
            inputs['bias'] = biasvar
        if beta is not None:
            betavar = nn.Variable.from_numpy_array(beta)
            gammavar = nn.Variable.from_numpy_array(gamma)
            rmeanvar = nn.Variable.from_numpy_array(rmean)
            rvarvar = nn.Variable.from_numpy_array(rvar)
            inputs['beta'] = betavar
            inputs['gamma'] = gammavar
            inputs['rmean'] = rmeanvar
            inputs['rvar'] = rvarvar
        if z is not None:
            zvar = nn.Variable.from_numpy_array(z)
            inputs['z'] = zvar

        spatial_dims = xvar.ndim - (base_axis + 1)
        assert (len(pad) == spatial_dims or len(pad) == 2 * spatial_dims)
        if len(pad) == spatial_dims:
            pad_width = tuple(p for _ in range(2) for p in pad)
        else:  # if len(pad) == 2 * spatial_dims:
            pad_width = pad
        h = F.pad(xvar, pad_width, pad_mode, constant_value)
        conv_pad = (0,) * spatial_dims
        h = F.convolution(h, weightvar, biasvar, base_axis,
                          conv_pad, stride, dilation, group, channel_last)
        if beta is not None:
            h = F.batch_normalization(h, betavar, gammavar, rmeanvar, rvarvar,
                                      [h.ndim - 1 if channel_last else base_axis],
                                      decay_rate, eps, batch_stat)
        if z is not None:
            h = F.add2(h, zvar)
        h = ref_activation(h, nonlinearity, nonlinearity_args)
        self.input_dict = inputs
        self.output = h
Example 33
    def test_clear_input_if_no_need_grad_branch1(self):
        x1 = nn.Variable([1, 5], need_grad=True)
        x2 = nn.Variable([1, 5], need_grad=True)
        x3 = nn.Variable([1, 5], need_grad=True)

        xx1 = F.identity(x1)
        xx2 = F.identity(x2)
        y1 = F.mul2(xx1, xx2)  # (1)
        xx3 = F.identity(x3)
        y2 = F.add2(xx2, xx3)  # (2)
        y3 = F.add2(y1, y2)  # (3)

        answer = []
        answer.append([False])
        answer.append([False])
        answer.append([False, False])  # (1)
        answer.append([False])
        answer.append([False, True])  # (2) use xx2 in backward
        answer.append([True, True])  # (3)

        y3.forward(clear_no_need_grad=True)
        self.check_input_data_clear_called_flags(answer)
Example 34
    def __add__(self, other):
        """
        Element-wise addition.
        Implements the addition operator expression ``A + B``, together with :func:`~nnabla.variable.__radd__` .
        When a scalar is specified for ``other``, this function performs an
        element-wise operation for all elements in ``self``.

        Args:
            other (float or ~nnabla.Variable): Internally calling
                :func:`~nnabla.functions.add2` or
                :func:`~nnabla.functions.add_scalar` according to the
                type.

        Returns: :class:`nnabla.Variable`

        """
        import nnabla.functions as F
        if isinstance(other, Variable):
            return F.add2(self, other)
        return F.add_scalar(self, other)
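In practice the overload dispatches exactly as the docstring describes; a short sketch:

import nnabla as nn

a = nn.Variable((2, 3))
b = nn.Variable((2, 3))
y1 = a + b     # Variable + Variable -> F.add2(a, b)
y2 = a + 1.5   # Variable + scalar   -> F.add_scalar(a, 1.5)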