Example #1
def mnist_lenet_prediction(image, test=False):
    """
    Construct LeNet for MNIST.
    """
    image /= 255.0
    c1 = PF.convolution(image, 16, (5, 5), name='conv1')
    c1 = F.relu(F.max_pooling(c1, (2, 2)), inplace=True)
    c2 = PF.convolution(c1, 16, (5, 5), name='conv2')
    c2 = F.relu(F.max_pooling(c2, (2, 2)), inplace=True)
    c3 = F.relu(PF.affine(c2, 50, name='fc3'), inplace=True)
    c4 = PF.affine(c3, 10, name='fc4')
    return c4
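A minimal usage sketch for the function above (the usual nnabla imports are shown for completeness): build the graph on a dummy MNIST batch and run a forward pass.

import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF

x = nn.Variable((32, 1, 28, 28))            # B x C x H x W
pred = mnist_lenet_prediction(x)            # logits, shape (32, 10)
x.d = np.random.rand(32, 1, 28, 28) * 255   # dummy pixel data in [0, 255]
pred.forward()
print(pred.d.shape)                         # (32, 10)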
Example #2
def _lstm_cell(self, name, n_hidden, x_in, h=None, c=None):
    if h is None:
        h = nn.Variable.from_numpy_array(
            np.zeros((self._batch_size, self._cols_size)))
    if c is None:
        c = nn.Variable.from_numpy_array(
            np.zeros((self._batch_size, n_hidden)))
    h = F.concatenate(h, x_in, axis=1)  # LSTM_Concatenate -> cols_size * 2
    with nn.parameter_scope(name + '_Affine'):  # LSTM_Affine -> n_hidden
        h1 = PF.affine(h, (n_hidden, ), base_axis=1)
    with nn.parameter_scope(name + '_IGate'):  # LSTM_IGate -> n_hidden
        h2 = PF.affine(h, (n_hidden, ), base_axis=1)
    with nn.parameter_scope(name + '_FGate'):  # LSTM_FGate -> n_hidden
        h3 = PF.affine(h, (n_hidden, ), base_axis=1)
    with nn.parameter_scope(name + '_OGate'):  # LSTM_OGate -> n_hidden
        h4 = PF.affine(h, (n_hidden, ), base_axis=1)
    h1 = F.tanh(h1)  # LSTM_Tanh
    h2 = F.sigmoid(h2)  # LSTM_Sigmoid
    h3 = F.sigmoid(h3)  # LSTM_Sigmoid_2
    h4 = F.sigmoid(h4)  # LSTM_Sigmoid_3
    h5 = F.mul2(h2, h1)  # LSTM_Mul2 -> n_hidden
    h6 = F.mul2(h3, c)  # LSTM_Mul2_2 -> n_hidden
    h7 = F.add2(h5, h6, inplace=True)  # LSTM_Add2 -> n_hidden
    h8 = F.tanh(h7)  # LSTM_Tanh_2 -> n_hidden
    h9 = F.mul2(h4, h8)  # LSTM_Mul2_3 -> n_hidden
    c = h7  # LSTM_C
    h = h9  # LSTM_H
    return (h, c)
Example #3
def LSTMCell(x, h2, h1):
    # h2 = previous hidden state, h1 = previous cell state
    units = h1.shape[1]

    # Stack the previous hidden state with the current input
    h2 = F.concatenate(h2, x, axis=1)

    h3 = PF.affine(h2, units, name='Affine')
    h4 = PF.affine(h2, units, name='InputGate')
    h5 = PF.affine(h2, units, name='ForgetGate')
    h6 = PF.affine(h2, units, name='OutputGate')

    h3 = F.tanh(h3)
    h4 = F.sigmoid(h4)
    h5 = F.sigmoid(h5)
    h6 = F.sigmoid(h6)

    h4 = F.mul2(h4, h3)
    h5 = F.mul2(h5, h1)
    h4 = F.add2(h4, h5, inplace=True)

    h7 = F.tanh(h4)
    h6 = F.mul2(h6, h7)

    return h6, h4  # hidden, cell
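A sketch of unrolling the cell above over a short sequence (shapes and step count are illustrative). Because every step reuses the same parameter names ('Affine', 'InputGate', ...), the weights are shared across time steps.

import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF

batch, units, steps = 8, 100, 5
h = nn.Variable.from_numpy_array(np.zeros((batch, units), dtype=np.float32))
c = nn.Variable.from_numpy_array(np.zeros((batch, units), dtype=np.float32))
for _ in range(steps):
    x_t = nn.Variable((batch, 20))   # one input frame per time step
    h, c = LSTMCell(x_t, h, c)       # returns (hidden, cell)
# h is the final hidden state, shape (batch, units)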
Example #4
def network(x, test=False):
    # Input:x -> 1,128,128
    # ImageAugmentation
    h = F.image_augmentation(x, (1,128,128), (0,0), 1, 1, 0, 1, 0, False, False, 0, False, 1, 0.5, False, 0)
    # Convolution -> 16,124,124
    h = PF.convolution(h, 16, (5,5), (0,0), name='Convolution')
    # ReLU
    h = F.relu(h, True)
    # MaxPooling -> 16,62,62
    h = F.max_pooling(h, (2,2), (2,2))
    # Convolution_2 -> 30,60,60
    h = PF.convolution(h, 30, (3,3), (0,0), name='Convolution_2')
    # MaxPooling_2 -> 30,30,30
    h = F.max_pooling(h, (2,2), (2,2))
    # Tanh_2
    h = F.tanh(h)
    # Affine -> 150
    h = PF.affine(h, (150,), name='Affine')
    # ReLU_2
    h = F.relu(h, True)
    # Affine_2 -> 2
    h = PF.affine(h, (2,), name='Affine_2')
    # Softmax
    h = F.softmax(h)
    return h
Example #5
def Bahdanau_attention(query, values, out_features, scope):
    r"""Return the Bahdanau attention mechanism.

    Args:
        query (nn.Variable): A query of size (B, 1, C).
        values (nn.Variable): Values of size (B, T, C).
        out_features (int): The projected dimensionality.
        scope (str): Parameter scope.

    Returns:
        nn.Variable: The context vector.
        nn.Variable: The attention weight vector.
    """
    with nn.parameter_scope(scope):
        x = PF.affine(query, out_features, base_axis=2,
                      with_bias=False, name='query')
        y = PF.affine(values, out_features, base_axis=2,
                      with_bias=False, name='values')
        # scores of shape (B, T, 1)
        scores = PF.affine(F.tanh(x + y), 1, base_axis=2,
                           with_bias=False, name='scores')
        # attention_weights of shape (B, 1, T)
        attention_weights = F.softmax(
            scores, axis=1).reshape((query.shape[0], 1, -1))
        # context_vector shape after sum == (B, 1, C)
        context_vector = F.batch_matmul(attention_weights, values)

    return context_vector, attention_weights
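A usage sketch with dummy sizes (B=4, T=7, C=16), assuming the usual imports:

import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF

B, T, C = 4, 7, 16
query = nn.Variable((B, 1, C))
values = nn.Variable((B, T, C))
context, weights = Bahdanau_attention(query, values, out_features=32, scope='attention')
# context: (B, 1, C), weights: (B, 1, T)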
Example #6
def mlp_net(x, n_h, n_y, test=False):
    """
    Function for building multi-layer-perceptron with batch_normalization

    Args:
        x(`~nnabla.Variable`): N-D array 
        n_h(int): number of units in an intermediate layer
        n_y(int): number of classes
        test: operation type train=True, test=False

    Returns:
        ~nnabla.Variable: log(p(y|x))
    """

    h = x
    with nn.parameter_scope("fc1"):
        h = F.relu(PF.batch_normalization(PF.affine(h, n_h),
                                          batch_stat=not test),
                   inplace=True)
    with nn.parameter_scope("fc2"):
        h = F.relu(PF.batch_normalization(PF.affine(h, n_h),
                                          batch_stat=not test),
                   inplace=True)
    with nn.parameter_scope("fc3"):
        h = PF.affine(h, n_y)
    return h
Example #7
def fpq_relu_lenet(image,
                   test=False,
                   n=8,
                   delta=2e-4,
                   name="fixed-point-relu-graph-ref"):
    with nn.parameter_scope(name):
        h = PF.convolution(image,
                           16, (5, 5), (1, 1),
                           with_bias=False,
                           name='conv1')
        h = PF.batch_normalization(h, batch_stat=not test, name='conv1-bn')
        h = F.max_pooling(h, (2, 2))
        h = F.fixed_point_quantize(h, n=n, delta=delta, sign=False)

        h = PF.convolution(h, 16, (5, 5), (1, 1), with_bias=True, name='conv2')
        h = PF.batch_normalization(h, batch_stat=not test, name='conv2-bn')
        h = F.max_pooling(h, (2, 2))
        h = F.fixed_point_quantize(h, n=n, delta=delta, sign=False)

        h = PF.affine(h, 10, with_bias=False, name='fc1')
        h = PF.batch_normalization(h, batch_stat=not test, name='fc1-bn')
        h = F.fixed_point_quantize(h, n=n, delta=delta, sign=False)

        pred = PF.affine(h, 10, with_bias=True, name='fc2')
    return pred
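To see what F.fixed_point_quantize contributes here, a small standalone check (a sketch; the exact grid follows from n and delta): with sign=False and n=8, activations are rounded to multiples of delta and clipped to the unsigned 8-bit range.

import numpy as np
import nnabla as nn
import nnabla.functions as F

with nn.auto_forward():
    x = nn.Variable.from_numpy_array(
        np.array([0.00007, 0.0003, 1.0], dtype=np.float32))
    q = F.fixed_point_quantize(x, n=8, delta=2e-4, sign=False)
print(q.d)  # values snapped to {0, 2e-4, ..., 255 * 2e-4}; 1.0 is clipped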
Example #8
def discriminator(x, nf=64):
    '''
    :param x: input image to the discriminator
    :param nf: number of feature maps in the first convolution
    :return: discriminator logits of shape (B, 1)
    '''
    # [3,128, 128]
    h = F.leaky_relu(PF.convolution(x, nf, kernel=(3, 3), stride=(1, 1), pad=(1, 1), name='conv0_0', with_bias=True),
                     alpha=0.2)
    # [64, 128, 128]
    h = conv_bn_4(h, nf, "conv0_1", False)
    h = conv_bn_3(h, 2 * nf, "conv1_0", False)
    h = conv_bn_4(h, 2 * nf, "conv1_1", False)
    h = conv_bn_3(h, 4 * nf, "conv2_0", False)
    h = conv_bn_4(h, 4 * nf, "conv2_1", False)
    h = conv_bn_3(h, 8 * nf, "conv3_0", False)
    h = conv_bn_4(h, 8 * nf, "conv3_1", False)
    h = conv_bn_3(h, 8 * nf, "conv4_0", False)
    h = conv_bn_4(h, 8 * nf, "conv4_1", False)
    # [512, 4, 4]
    h = F.leaky_relu((PF.affine(h, 100, name="affine1")),
                     alpha=0.2)
    h = PF.affine(h, 1, name="affine2")
    return h
Example #9
def q_function(obs, num_actions, scope):
    with nn.parameter_scope(scope):
        out = nature_head(obs)
        advantages = PF.affine(out, num_actions, name='advantage')
        value = PF.affine(out, 1, name='value')
        baseline = F.mean(advantages, axis=1, keepdims=True)
        return value + advantages - baseline
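This is the dueling-network head: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a), which makes the state value and the advantages identifiable. A quick NumPy sanity check of that identity (shapes as in the function):

import numpy as np

advantages = np.random.randn(2, 4)   # (batch, num_actions)
value = np.random.randn(2, 1)        # (batch, 1)
q = value + advantages - advantages.mean(axis=1, keepdims=True)
assert np.allclose(q.mean(axis=1, keepdims=True), value)  # mean over actions recovers V(s)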
Example #10
def q_mlp(s, num_actions, test=False):
    h = s
    for i, n in enumerate([64]):
        with nn.parameter_scope('fc%d' % (i + 1)):
            h = PF.affine(h, n, fix_parameters=test)
            h = F.relu(h)
    return PF.affine(h, num_actions, name='fc_fin', fix_parameters=test)
Example #11
def get_loss(l1,
             l2,
             x,
             t,
             w_init,
             b_init,
             num_words,
             batch_size,
             state_size,
             dropout=False,
             dropout_rate=0.5,
             embed_name='embed',
             pred_name='pred'):
    e_list = [
        PF.embed(x_elm, num_words, state_size, name=embed_name)
        for x_elm in F.split(x, axis=1)
    ]
    t_list = F.split(t, axis=1)
    loss = 0
    for i, (e_t, t_t) in enumerate(zip(e_list, t_list)):
        if dropout:
            h1 = l1(F.dropout(e_t, dropout_rate), w_init, b_init)
            h2 = l2(F.dropout(h1, dropout_rate), w_init, b_init)
            y = PF.affine(F.dropout(h2, dropout_rate),
                          num_words,
                          name=pred_name)
        else:
            h1 = l1(e_t, w_init, b_init)
            h2 = l2(h1, w_init, b_init)
            y = PF.affine(h2, num_words, name=pred_name)
        t_t = F.reshape(t_t, [batch_size, 1])
        loss += F.mean(F.softmax_cross_entropy(y, t_t))
    loss /= float(i + 1)  # average over the unrolled time steps

    return loss
Example #12
def small_multiple_inputs_outputs_resnet(images, test=False, w_bias=False):
    # Branches
    outputs = []
    for i, image in enumerate(images):
        h = image
        h /= 255.0
        h = PF.convolution(h, 16, kernel=(3, 3), pad=(1, 1),
                           with_bias=w_bias, name='first-mo-conv-{}'.format(i))
        h = PF.batch_normalization(
            h, axes=[1], batch_stat=not test, name='first-mo-bn-{}'.format(i))
        h = F.relu(h)
        h = F.max_pooling(h, (2, 2))
        outputs.append(h)
    # Merge branches
    z = sum(outputs)

    h = multiple_inputs_outputs_resblock(
        z, maps=16, w_bias=w_bias, test=test, name='mo-cb1')
    h = F.average_pooling(h, (2, 2))
    pred1 = PF.affine(h, 10, name='mo-fc1')

    h = multiple_inputs_outputs_resblock(
        z, maps=16, w_bias=w_bias, test=test, name='mo-cb2')
    h = F.average_pooling(h, (2, 2))
    pred2 = PF.affine(h, 10, name='mo-fc2')
    return [pred1, pred2]
Example #13
def test_graph_unlink_backward(seed):
    rng = np.random.RandomState(seed)
    x0 = nn.Variable([2, 4], need_grad=True)
    x1 = nn.Variable([2, 4], need_grad=True)
    x0.d = rng.randn(*x0.shape)
    x1.d = rng.randn(*x1.shape)
    x0.grad.zero()
    x1.grad.zero()
    with nn.parameter_scope("fc0"):
        h0 = PF.affine(x0, 2)
        h0.need_grad = False
    with nn.parameter_scope("fc1"):
        h1 = PF.affine(x1, 2)
    h = h0 + h1
    with nn.parameter_scope("fc"):
        y1 = PF.affine(h, 1)
        y2 = PF.affine(h, 1)
    nn.forward_all([y1, y2])

    y1.backward(clear_buffer=True)
    assert np.all(x0.g == 0)
    assert not np.all(x1.g == 0)

    y2.backward(clear_buffer=True)
    assert np.all(x0.g == 0)
    assert not np.all(x1.g == 0)
Example #14
def convolution(x):
    x = x.reshape([BATCH_SIZE, IMAGE_DEPTH, IMAGE_HEIGHT, IMAGE_WIDTH])
    with nn.parameter_scope("conv1"):
        output = PF.convolution(x, 16, (5, 5), stride=(2, 2), pad=(1, 1))
        output = F.relu(output)

    with nn.parameter_scope("conv2"):
        output = PF.convolution(output, 32, (3, 3), stride=(1, 1), pad=(1, 1))
        output = F.relu(output)

    with nn.parameter_scope("conv3"):
        output = PF.convolution(output, 64, (3, 3), stride=(1, 1), pad=(1, 1))
        output = F.relu(output)

    output = output.reshape([BATCH_SIZE, int(output.size / BATCH_SIZE)])

    with nn.parameter_scope("fc1"):
        output = PF.affine(output, 1024)
        output = F.relu(output)

    with nn.parameter_scope("fc2"):
        output = PF.affine(output, 256)
        output = F.relu(output)

    with nn.parameter_scope("softmax"):
        output = PF.affine(output, 10)
        output = F.softmax(output)

    return output
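The function relies on module-level constants; a usage sketch with assumed values (a CIFAR-10-like input, flattened as the reshape at the top expects):

import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF

BATCH_SIZE, IMAGE_DEPTH, IMAGE_HEIGHT, IMAGE_WIDTH = 32, 3, 32, 32

x = nn.Variable((BATCH_SIZE, IMAGE_DEPTH * IMAGE_HEIGHT * IMAGE_WIDTH))
probs = convolution(x)  # (32, 10); rows sum to 1 after F.softmax
x.d = np.random.rand(*x.shape)
probs.forward()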
Example #15
def construct_networks(args, images, model, num_class, test):
    try:
        pooled = model(images,
                       force_global_pooling=1,
                       use_up_to="pool",
                       training=not test)
    except Exception:  # some base models do not accept force_global_pooling
        pooled = model(images, use_up_to="pool", training=not test)

    with nn.parameter_scope("finetuning"):
        if args.model == "VGG":
            pooled = F.relu(pooled)

            with nn.parameter_scope("additional_fc_1"):
                pooled = PF.affine(pooled, 4096)
            pooled = F.relu(pooled)
            if not test:
                pooled = F.dropout(pooled, 0.5)

            with nn.parameter_scope("additional_fc_2"):
                pooled = PF.affine(pooled, 4096)
            pooled = F.relu(pooled)
            if not test:
                pooled = F.dropout(pooled, 0.5)

        with nn.parameter_scope("last_fc"):
            pred = PF.affine(pooled, num_class)

    return pred
Example #16
def bn_folding_lenet(image,
                     test=False,
                     channel_last=False,
                     name="bn-folding-graph-ref"):
    h = PF.convolution(image,
                       16, (5, 5), (1, 1),
                       with_bias=True,
                       channel_last=channel_last,
                       name='conv1')
    h = F.max_pooling(h, (2, 2), channel_last=channel_last)
    h = F.relu(h)

    h = PF.convolution(h,
                       16, (5, 5), (1, 1),
                       with_bias=True,
                       channel_last=channel_last,
                       name='conv2')
    h = F.max_pooling(h, (2, 2), channel_last=channel_last)
    h = F.relu(h)

    h = PF.affine(h, 10, with_bias=True, name='fc1')
    h = F.relu(h)

    pred = PF.affine(h, 10, with_bias=True, name='fc2')
    return pred
Example #17
def bn_lenet(image, test=False, channel_last=False, w_bias=False):
    axes = get_channel_axes(channel_last)
    h = PF.convolution(image,
                       16, (5, 5), (1, 1),
                       with_bias=w_bias,
                       channel_last=channel_last,
                       name='conv1')
    h = PF.batch_normalization(h,
                               axes=axes,
                               batch_stat=not test,
                               name='conv1-bn')
    h = F.max_pooling(h, (2, 2), channel_last=channel_last)
    h = F.relu(h)

    h = PF.convolution(h,
                       16, (5, 5), (1, 1),
                       with_bias=True,
                       channel_last=channel_last,
                       name='conv2')
    h = PF.batch_normalization(h,
                               axes=axes,
                               batch_stat=not test,
                               name='conv2-bn')
    h = F.max_pooling(h, (2, 2), channel_last=channel_last)
    h = F.relu(h)

    h = PF.affine(h, 10, with_bias=True, name='fc1')
    h = PF.batch_normalization(h,
                               axes=axes,
                               batch_stat=not test,
                               name='fc1-bn')
    h = F.relu(h)

    pred = PF.affine(h, 10, with_bias=True, name='fc2')
    return pred
Example #18
def cnn(x):
    """Unnecessarily Deep CNN.

    Args:
        x : Variable, shape (B, 1, 8, 8)

    Returns:
        y : Variable, shape (B, 10)
    """
    with nn.parameter_scope("cnn"):  # Parameter scope can be nested
        with nn.parameter_scope("conv1"):
            h = F.tanh(
                PF.batch_normalization(PF.convolution(x, 64, (3, 3), pad=(1, 1)))
            )
        for i in range(10):  # unnecessarily deep
            with nn.parameter_scope("conv{}".format(i + 2)):
                h = F.tanh(
                    PF.batch_normalization(PF.convolution(h, 128, (3, 3), pad=(1, 1)))
                )
        with nn.parameter_scope("conv_last"):
            h = F.tanh(
                PF.batch_normalization(PF.convolution(h, 512, (3, 3), pad=(1, 1)))
            )
            h = F.average_pooling(h, (2, 2))
        with nn.parameter_scope("fc"):
            h = F.tanh(PF.affine(h, 1024))
        with nn.parameter_scope("classifier"):
            y = PF.affine(h, 10)
    return y
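A usage sketch matching the docstring shapes, assuming the usual imports:

import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF

x = nn.Variable((16, 1, 8, 8))
y = cnn(x)                       # (16, 10)
x.d = np.random.randn(16, 1, 8, 8)
y.forward()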
Example #19
def test_function_references():
    import nnabla as nn
    import nnabla.parametric_functions as PF

    v = nn.Variable.from_numpy_array(np.random.randn(2, 4))

    assert len(v.function_references) == 0

    h1 = PF.affine(v, 10, name="affine1")

    assert len(v.function_references) == 1
    assert h1.parent in v.function_references

    h2 = PF.affine(v, 10, name="affine2")

    assert len(v.function_references) == 2
    assert h1.parent in v.function_references
    assert h2.parent in v.function_references

    del h1

    assert len(v.function_references) == 1
    assert h2.parent in v.function_references

    del h2

    assert len(v.function_references) == 0
Example #20
def v_network(obs, name):
    with nn.parameter_scope(name):
        out = PF.affine(obs, 256, name='fc1')
        out = F.relu(out)
        out = PF.affine(out, 256, name='fc2')
        out = F.relu(out)
        return PF.affine(out, 1, name='fc3')
Example #21
def test_generate_tmp_nnp():
    nn.clear_parameters()
    batch_size = 16
    x0 = nn.Variable([batch_size, 100])
    x1 = nn.Variable([batch_size, 100])
    h1_0 = PF.affine(x0, 100, name='affine1_0')
    h1_1 = PF.affine(x1, 100, name='affine1_0')
    h1 = F.tanh(h1_0 + h1_1)
    h2 = F.tanh(PF.affine(h1, 50, name='affine2'))
    y0 = PF.affine(h2, 10, name='affiney_0')
    y1 = PF.affine(h2, 10, name='affiney_1')

    contents = {
        'networks': [{
            'name': 'net1',
            'batch_size': batch_size,
            'outputs': {
                'y0': y0,
                'y1': y1
            },
            'names': {
                'x0': x0,
                'x1': x1
            }
        }],
        'executors': [{
            'name': 'runtime',
            'network': 'net1',
            'data': ['x0', 'x1'],
            'output': ['y0', 'y1']
        }]
    }
    nnabla.utils.save.save('tmp.nnp', contents)
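The saved file can be read back with nnabla's NnpLoader utility; a sketch (the network and variable names must match the contents dict above):

from nnabla.utils.nnp_graph import NnpLoader

nnp = NnpLoader('tmp.nnp')
net = nnp.get_network('net1', batch_size=16)
x0, x1 = net.inputs['x0'], net.inputs['x1']
y0, y1 = net.outputs['y0'], net.outputs['y1']
y0.forward()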
Example #22
    def gated_conv(self,
                   x,
                   kernel_shape,
                   h=None,
                   mask_type='',
                   gated=True,
                   payload=None,
                   return_payload=False,
                   scope_name='gated_conv'):
        pad_dim_0 = (kernel_shape[0] - 1) // 2  # integer division: pad must be an int
        pad_dim_1 = (kernel_shape[1] - 1) // 2
        if mask_type == '':
            mask_type = self.mask_type_B
        with nn.parameter_scope(scope_name):
            if gated:
                out_f = PF.convolution(x,
                                       self.num_features,
                                       kernel_shape,
                                       pad=(pad_dim_0, pad_dim_1),
                                       apply_w=mask_type,
                                       name='conv_f')
                out_g = PF.convolution(x,
                                       self.num_features,
                                       kernel_shape,
                                       pad=(pad_dim_0, pad_dim_1),
                                       apply_w=mask_type,
                                       name='conv_g')
                if isinstance(payload, nn.Variable):
                    out_f += payload[:, :self.num_features, :, :]
                    out_g += payload[:, self.num_features:, :, :]
                if self.conditional:
                    h_out_f = PF.affine(h, self.num_features, name='h_out_f')
                    h_out_f = h_out_f.reshape(
                        (h_out_f.shape[0], h_out_f.shape[1], 1, 1))
                    h_out_g = PF.affine(h, self.num_features, name='h_out_g')
                    h_out_g = h_out_g.reshape(
                        (h_out_g.shape[0], h_out_g.shape[1], 1, 1))
                    out = F.tanh(out_f + h_out_f) * F.sigmoid(out_g + h_out_g)
                else:
                    out = F.tanh(out_f) * F.sigmoid(out_g)
                if return_payload:
                    payload = PF.convolution(F.concatenate(out_f,
                                                           out_g,
                                                           axis=1),
                                             2 * self.num_features, (1, 1),
                                             name='conv_1x1')
                    payload = F.relu(payload)
                    return out, payload

            else:
                out = PF.convolution(x,
                                     self.num_features,
                                     kernel_shape,
                                     stride=(1, 1),
                                     pad=(pad_dim_0, pad_dim_1),
                                     apply_w=mask_type)
                out = F.relu(out)

        return out
Example #23
def mlp_module(x0, x1):
    h1_0 = PF.affine(x0, 100, name='affine1_0')
    h1_1 = PF.affine(x1, 100, name='affine1_0')
    h1 = F.tanh(h1_0 + h1_1)
    h2 = F.tanh(PF.affine(h1, 50, name='affine2'))
    y0 = PF.affine(h2, 10, name='affiney_0')
    y1 = PF.affine(h2, 10, name='affiney_1')
    return y0, y1
Example #24
def q_network(obs, action, name):
    with nn.parameter_scope(name):
        out = F.concatenate(obs, action, axis=1)
        out = PF.affine(out, 256, name='fc1')
        out = F.relu(out)
        out = PF.affine(out, 256, name='fc2')
        out = F.relu(out)
        return PF.affine(out, 1, name='fc3')
Example #25
def mlp(image, test=False):
    image /= 255.0
    c1 = F.relu(PF.convolution(image, 32, (3, 3), name='conv1'), inplace=True)
    c2 = F.relu(PF.convolution(c1, 128, (3, 3), name='conv2'), inplace=True)
    c3 = F.relu(PF.convolution(c2, 256, (3, 3), name='conv3'), inplace=True)
    c4 = F.relu(PF.affine(c3, 512, name='fc3'), inplace=True)
    c5 = PF.affine(c4, 10, name='fc4')  # use the fc3 features (c4), not c3
    return F.softmax(c5)
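A minimal training-step sketch around this classifier (a sketch, not the original training code). Since mlp already applies F.softmax, the loss below uses F.categorical_cross_entropy, which expects probabilities rather than logits.

import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.solvers as S

x = nn.Variable((64, 1, 28, 28))
t = nn.Variable((64, 1))
probs = mlp(x)
loss = F.mean(F.categorical_cross_entropy(probs, t))

solver = S.Adam(1e-3)
solver.set_parameters(nn.get_parameters())

x.d = np.random.rand(*x.shape) * 255
t.d = np.random.randint(0, 10, size=t.shape)
loss.forward()
solver.zero_grad()
loss.backward()
solver.update()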
Example #26
def policy_network(obs, action_size, name):
    with nn.parameter_scope(name):
        out = PF.affine(obs, 256, name='fc1')
        out = F.relu(out)
        out = PF.affine(out, 256, name='fc2')
        out = F.relu(out)
        out = PF.affine(out, action_size, name='fc3')
    return F.tanh(out)
Example #27
def cnn(x, n_class):
    c1 = PF.convolution(x, 16, (5, 5), name='conv1')
    c1 = F.relu(F.max_pooling(c1, (2, 2)), inplace=True)
    c2 = PF.convolution(c1, 16, (5, 5), name='conv2')
    c2 = F.relu(F.max_pooling(c2, (2, 2)), inplace=True)
    c3 = F.relu(PF.affine(c2, 50, name='fc3'), inplace=True)
    c4 = PF.affine(c3, n_class, name='fc4')

    return c4
Example #28
def mlp(x, maps, num_res=4, num_layers=2, name="mlp"):
    h = x
    with nn.parameter_scope(name):
        h = PF.affine(h, maps, name="affine-first")
        h = F.relu(h, True)
        h = PF.affine(h, maps, name="affine-mid")
        h = F.relu(h, True)
        h = PF.affine(h, 2 * maps * num_res * num_layers, name="affine-last")
    return h
Example #29
def q_network(obs, action, name):
    with nn.parameter_scope(name):
        out = PF.affine(obs, 64, name='fc1')
        out = F.tanh(out)
        out = F.concatenate(out, action, axis=1)
        out = PF.affine(out, 64, name='fc2')
        out = F.tanh(out)
        out = PF.affine(out, 1, name='fc3')
    return out
Example #30
def network_LSTM(x, D, C, InputShape, HiddenSize, test=False):
    # Input_2:x -> 687
    # Delay_in:D -> 100
    # Cell_in:C -> 100

    # Concatenate -> 787
    h = F.concatenate(D, x, axis=1)

    # Affine -> 100
    h1 = PF.affine(h, HiddenSize, name='Affine')

    # InputGate -> 100
    h2 = PF.affine(h, HiddenSize, name='InputGate')

    # OutputGate -> 100
    h3 = PF.affine(h, HiddenSize, name='OutputGate')

    # ForgetGate -> 100
    h4 = PF.affine(h, HiddenSize, name='ForgetGate')
    # Sigmoid
    h1 = F.sigmoid(h1)
    # Sigmoid_2
    h2 = F.sigmoid(h2)

    # Sigmoid_3
    h3 = F.sigmoid(h3)
    # Sigmoid_4
    h4 = F.sigmoid(h4)

    # Mul2 -> 100
    h1 = F.mul2(h1, h2)

    # Mul2_3 -> 100
    h4 = F.mul2(h4, C)

    # Add2 -> 100
    h1 = F.add2(h1, h4, inplace=True)

    # Tanh
    h5 = F.tanh(h1)

    # Cell_out
    h6 = F.identity(h1)

    # Mul2_2 -> 100
    h5 = F.mul2(h5, h3)
    # Dropout
    if not test:
        h5 = F.dropout(h5)

    # Output
    h5 = F.identity(h5)

    # Concatenate_2 -> 200
    h5 = F.concatenate(h5, h6, axis=1)
    return h5
Example #31
def policy_network(obs, action_size, name):
    with nn.parameter_scope(name):
        out = PF.affine(obs, 256, name='fc1')
        out = F.relu(out)
        out = PF.affine(out, 256, name='fc2')
        out = F.relu(out)
        mean = PF.affine(out, action_size, name='mean')
        logstd = PF.affine(out, action_size, name='logstd')
        clipped_logstd = F.clip_by_value(logstd, -20, 2)
    return Normal(mean, F.exp(clipped_logstd))
Example #32
def mnist_lenet_feature(image, test=False):
    """
    Construct LeNet for MNIST.
    """
    c1 = F.elu(PF.convolution(image, 20, (5, 5), name='conv1'))
    c1 = F.average_pooling(c1, (2, 2))
    c2 = F.elu(PF.convolution(c1, 50, (5, 5), name='conv2'))
    c2 = F.average_pooling(c2, (2, 2))
    c3 = F.elu(PF.affine(c2, 500, name='fc3'))
    c4 = PF.affine(c3, 10, name='fc4')
    c5 = PF.affine(c4, 2, name='fc_embed')
    return c5
Example #33
def discriminator(x, maxh=256, test=False, output_hidden=False):
    """
    Building discriminator network which maps a (B, 1, 28, 28) input to
    a (B, 1).
    """
    # Define shortcut functions
    def bn(xx):
        # Batch normalization
        return PF.batch_normalization(xx, batch_stat=not test)

    def downsample2(xx, c):
        return PF.convolution(xx, c, (3, 3), pad=(1, 1), stride=(2, 2), with_bias=False)

    assert maxh // 8 > 0  # integer channel counts (Python 3)
    with nn.parameter_scope("dis"):
        # (1, 28, 28) --> (32, 16, 16)
        with nn.parameter_scope("conv1"):
            c1 = F.elu(bn(PF.convolution(x, maxh // 8,
                                         (3, 3), pad=(3, 3), stride=(2, 2), with_bias=False)))
        # (32, 16, 16) --> (64, 8, 8)
        with nn.parameter_scope("conv2"):
            c2 = F.elu(bn(downsample2(c1, maxh // 4)))
        # (64, 8, 8) --> (128, 4, 4)
        with nn.parameter_scope("conv3"):
            c3 = F.elu(bn(downsample2(c2, maxh // 2)))
        # (128, 4, 4) --> (256, 4, 4)
        with nn.parameter_scope("conv4"):
            c4 = bn(PF.convolution(c3, maxh, (3, 3),
                                   pad=(1, 1), with_bias=False))
        # (256, 4, 4) --> (1,)
        with nn.parameter_scope("fc1"):
            f = PF.affine(c4, 1)
    if output_hidden:
        return f, [c1, c2, c3, c4]
    return f
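A sketch of how such a discriminator is typically driven in GAN training, using sigmoid cross-entropy on the (B, 1) logits (real samples labeled 1; fake samples would be labeled 0):

import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF

x_real = nn.Variable((16, 1, 28, 28))
d_real = discriminator(x_real)
loss_real = F.mean(F.sigmoid_cross_entropy(d_real, F.constant(1, d_real.shape)))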
Example #34
def cifar10_resnet23_prediction(ctx, image, test=False):
    """
    Construct ResNet 23
    """
    # Residual Unit
    def res_unit(x, scope_name, dn=False, test=False):
        C = x.shape[1]
        with nn.parameter_scope(scope_name):

            # Conv -> BN -> Relu
            with nn.parameter_scope("conv1"):
                h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN -> Relu
            with nn.parameter_scope("conv2"):
                h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN
            with nn.parameter_scope("conv3"):
                h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
                                   with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
            # Residual -> Relu
            h = F.relu(h + x)

            # Maxpooling
            if dn:
                h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))
            return h

    # Network sizes
    nmaps = 64
    ncls = 10

    # Conv -> BN -> Relu
    with nn.context_scope(ctx):
        with nn.parameter_scope("conv1"):
            h = PF.convolution(image, nmaps, kernel=(3, 3), pad=(1, 1),
                               with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = F.relu(h)

        h = res_unit(h, "conv2", False)    # -> 32x32
        h = res_unit(h, "conv3", True)     # -> 16x16
        h = bn_dropout(h, "bn_dropout1", test)
        h = res_unit(h, "conv4", False)    # -> 16x16
        h = res_unit(h, "conv5", True)     # -> 8x8
        h = bn_dropout(h, "bn_dropout2", test)
        h = res_unit(h, "conv6", False)    # -> 8x8
        h = res_unit(h, "conv7", True)     # -> 4x4
        h = bn_dropout(h, "bn_dropout3",  test)
        h = res_unit(h, "conv8", False)    # -> 4x4
        h = F.average_pooling(h, kernel=(4, 4))  # -> 1x1
        pred = PF.affine(h, ncls)

    return pred
Example #35
def resnet_model(ctx, x, inmaps=64, act=F.relu, test=False):
    # Conv -> BN -> Relu
    with nn.context_scope(ctx):
        with nn.parameter_scope("conv1"):
            h = PF.convolution(x, inmaps, kernel=(3, 3), pad=(1, 1), with_bias=False)
            h = PF.batch_normalization(h, decay_rate=0.9, batch_stat=not test)
            h = act(h)
        
        h = res_unit(h, "conv2", act, False) # -> 32x32
        h = res_unit(h, "conv3", act, True)  # -> 16x16
        with nn.parameter_scope("bn0"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test:
            h = F.dropout(h)
        h = res_unit(h, "conv4", act, False) # -> 16x16
        h = res_unit(h, "conv5", act, True)  # -> 8x8
        with nn.parameter_scope("bn1"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test:
            h = F.dropout(h)
        h = res_unit(h, "conv6", act, False) # -> 8x8
        h = res_unit(h, "conv7", act, True)  # -> 4x4
        with nn.parameter_scope("bn2"):
            h = PF.batch_normalization(h, batch_stat=not test)
        if not test:
            h = F.dropout(h)
        h = res_unit(h, "conv8", act, False) # -> 4x4
        h = F.average_pooling(h, kernel=(4, 4))  # -> 1x1
        
        pred = PF.affine(h, 10)
    return pred
Example #36
def test_graph_unlink_backward(seed):
    rng = np.random.RandomState(seed)
    x0 = nn.Variable([2, 4], need_grad=True)
    x1 = nn.Variable([2, 4], need_grad=True)
    x0.d = rng.randn(*x0.shape)
    x1.d = rng.randn(*x1.shape)
    x0.grad.zero()
    x1.grad.zero()
    with nn.auto_forward():
        with nn.parameter_scope("fc0"):
            h0 = PF.affine(x0, 2)
        with nn.parameter_scope("fc1"):
            h1 = PF.affine(x1, 2)
        h0.need_grad = False
        h = h0 + h1
        with nn.parameter_scope("fc"):
            y = PF.affine(h, 1)
    y.backward(clear_buffer=True)
    assert np.all(x0.g == 0)
    assert not np.all(x1.g == 0)
Example #37
def test_graph_model(model, seed):
    np.random.seed(313)
    rng = np.random.RandomState(seed)
    x = nn.Variable([2, 3, 4, 4], need_grad=True)
    t = nn.Variable([2, 1])
    x.d = rng.randn(*x.shape)
    t.d = rng.randint(0, 5, size=t.shape)

    nn.set_default_context(nn.Context())

    # Forwardprop by definition
    nn.clear_parameters()
    if model == "mlp":
        with nn.parameter_scope('fc1'):
            z = PF.affine(x, 3)
        z2 = F.relu(z, inplace=True)
        with nn.parameter_scope('fc2'):
            z3 = PF.affine(z2, 5)
    elif model == "recurrent":
        with nn.parameter_scope('fc1'):
            z = PF.affine(x, 3)
            z2 = F.relu(z, inplace=True)
        h = z2
        for _ in range(2):
            with nn.parameter_scope('fc2'):
                h = PF.affine(h, 3)
                h = F.relu(h, inplace=True)
        with nn.parameter_scope('fc3'):
            z3 = PF.affine(h, 5)
    elif model == "convolution":
        with nn.parameter_scope('conv1'):
            z = PF.convolution(x, 3, (2, 2))
            z2 = F.relu(z, inplace=True)
        with nn.parameter_scope('fc2'):
            z3 = PF.affine(z2, 5)
    else:
        raise ValueError()
    l = F.softmax_cross_entropy(z3, t, 1)
    L = F.mean(l)

    # Forwardprop
    L.forward(clear_no_need_grad=True)

    # Backprop
    # Diff should be initialized since they are always accumulated
    x.grad.zero()
    L.backward(clear_buffer=True)
    x.g = rng.randn(*x.shape)
    parameters = nn.get_parameters()
    for param in parameters.values():
        param.grad.zero()
    inputs = [x] + list(parameters.values())

    from nbla_test_utils import \
        compute_analytical_and_numerical_grad_graph as grads
    agrad, ngrad = grads(L, inputs, 1e-3)
    assert np.allclose(ngrad, agrad, atol=1.05e-2)
Example #38
File: vat.py Project: zwsong/nnabla
def mlp_net(x, n_h, n_y, test=False):
    """
    Function for building multi-layer-perceptron with batch_normalization

    Args:
        x(`~nnabla.Variable`): N-D array 
        n_h(int): number of units in an intermediate layer
        n_y(int): number of classes
        test: operation type train=True, test=False

    Returns:
        ~nnabla.Variable: log(p(y|x))
    """

    h = x
    with nn.parameter_scope("fc1"):
        h = F.relu(PF.batch_normalization(
            PF.affine(h, n_h), batch_stat=not test), inplace=True)
    with nn.parameter_scope("fc2"):
        h = F.relu(PF.batch_normalization(
            PF.affine(h, n_h), batch_stat=not test), inplace=True)
    with nn.parameter_scope("fc3"):
        h = PF.affine(h, n_y)
    return h
Example #39
def test_graph_clear_buffer(seed):
    np.random.seed(313)
    rng = np.random.RandomState(seed)
    x = nn.Variable([2, 3, 4, 4])
    t = nn.Variable([2, 1])
    x.d = rng.randn(*x.shape)
    t.d = rng.randint(0, 5, size=t.shape)

    # Network definition
    nn.set_default_context(nn.Context())
    nn.clear_parameters()
    x1 = x + 1
    x2 = x1 - 1
    with nn.parameter_scope('conv1'):
        z = PF.convolution(x2, 3, (2, 2))
        z2 = F.relu(z, inplace=True)
    with nn.parameter_scope('fc2'):
        z3 = PF.affine(z2, 5)
    l = F.softmax_cross_entropy(z3, t, 1)
    L = F.mean(l)

    # Forwardprop
    import tempfile
    import os
    tmpd = tempfile.mkdtemp()
    nn.save_parameters(os.path.join(tmpd, 'parameter.h5'))
    first = False
    for cnng in [False, True]:
        for cb in [False, True]:
            _ = nn.load_parameters(os.path.join(tmpd, 'parameter.h5'))
            for v in nn.get_parameters().values():
                v.grad.zero()
            L.forward(clear_no_need_grad=cnng)
            L.backward(clear_buffer=cb)
            if not first:
                first = True
                g = list(nn.get_parameters().values())[0].g.copy()
            else:
                g2 = list(nn.get_parameters().values())[0].g.copy()
                assert np.all(g == g2)
Example #40
def mnist_resnet_prediction(image, test=False):
    """
    Construct ResNet for MNIST.
    """
    image /= 255.0

    def bn(x):
        return PF.batch_normalization(x, batch_stat=not test)

    def res_unit(x, scope):
        C = x.shape[1]
        with nn.parameter_scope(scope):
            with nn.parameter_scope('conv1'):
                h = F.elu(bn(PF.convolution(x, C // 2, (1, 1), with_bias=False)))
            with nn.parameter_scope('conv2'):
                h = F.elu(
                    bn(PF.convolution(h, C // 2, (3, 3), pad=(1, 1), with_bias=False)))
            with nn.parameter_scope('conv3'):
                h = bn(PF.convolution(h, C, (1, 1), with_bias=False))
        return F.elu(F.add2(h, x, inplace=True))
    # Conv1 --> 64 x 32 x 32
    with nn.parameter_scope("conv1"):
        c1 = F.elu(
            bn(PF.convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False)))
    # Conv2 --> 64 x 16 x 16
    c2 = F.max_pooling(res_unit(c1, "conv2"), (2, 2))
    # Conv3 --> 64 x 8 x 8
    c3 = F.max_pooling(res_unit(c2, "conv3"), (2, 2))
    # Conv4 --> 64 x 8 x 8
    c4 = res_unit(c3, "conv4")
    # Conv5 --> 64 x 4 x 4
    c5 = F.max_pooling(res_unit(c4, "conv5"), (2, 2))
    # Conv5 --> 64 x 4 x 4
    c6 = res_unit(c5, "conv6")
    pl = F.average_pooling(c6, (4, 4))
    with nn.parameter_scope("classifier"):
        y = PF.affine(pl, 10)
    return y
Example #41
def cifar100_resnet23_prediction(image, ctx, test=False):
    """
    Construct ResNet 23
    """
    # Residual Unit
    def res_unit(x, scope_name, rng, dn=False, test=False):
        C = x.shape[1]
        with nn.parameter_scope(scope_name):

            # Conv -> BN -> Relu
            with nn.parameter_scope("conv1"):
                w_init = UniformInitializer(
                    calc_uniform_lim_glorot(C, C // 2, kernel=(1, 1)),
                    rng=rng)
                h = PF.convolution(x, C // 2, kernel=(1, 1), pad=(0, 0),
                                   w_init=w_init, with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN -> Relu
            with nn.parameter_scope("conv2"):
                w_init = UniformInitializer(
                    calc_uniform_lim_glorot(C // 2, C // 2, kernel=(3, 3)),
                    rng=rng)
                h = PF.convolution(h, C // 2, kernel=(3, 3), pad=(1, 1),
                                   w_init=w_init, with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
                h = F.relu(h)
            # Conv -> BN
            with nn.parameter_scope("conv3"):
                w_init = UniformInitializer(
                    calc_uniform_lim_glorot(C // 2, C, kernel=(1, 1)),
                    rng=rng)
                h = PF.convolution(h, C, kernel=(1, 1), pad=(0, 0),
                                   w_init=w_init, with_bias=False)
                h = PF.batch_normalization(h, batch_stat=not test)
            # Residual -> Relu
            h = F.relu(h + x)

            # Maxpooling
            if dn:
                h = F.max_pooling(h, kernel=(2, 2), stride=(2, 2))

            return h

    # Random generator for using the same init parameters in all devices
    rng = np.random.RandomState(0)
    nmaps = 384
    ncls = 100

    # Conv -> BN -> Relu
    with nn.context_scope(ctx):
        with nn.parameter_scope("conv1"):
            # Preprocess
            if not test:
                image = F.image_augmentation(image, contrast=1.0,
                                             angle=0.25,
                                             flip_lr=True)
                image.need_grad = False

            w_init = UniformInitializer(
                calc_uniform_lim_glorot(3, nmaps, kernel=(3, 3)),
                rng=rng)
            h = PF.convolution(image, nmaps, kernel=(3, 3), pad=(1, 1),
                               w_init=w_init, with_bias=False)
            h = PF.batch_normalization(h, batch_stat=not test)
            h = F.relu(h)

        h = res_unit(h, "conv2", rng, False)    # -> 32x32
        h = res_unit(h, "conv3", rng, True)     # -> 16x16
        h = res_unit(h, "conv4", rng, False)    # -> 16x16
        h = res_unit(h, "conv5", rng, True)     # -> 8x8
        h = res_unit(h, "conv6", rng, False)    # -> 8x8
        h = res_unit(h, "conv7", rng, True)     # -> 4x4
        h = res_unit(h, "conv8", rng, False)    # -> 4x4
        h = F.average_pooling(h, kernel=(4, 4))  # -> 1x1

        w_init = UniformInitializer(
            calc_uniform_lim_glorot(int(np.prod(h.shape[1:])), ncls, kernel=(1, 1)), rng=rng)
        pred = PF.affine(h, ncls, w_init=w_init)

    return pred
Example #42
def test_data_parallel_communicator():
    try:
        import nnabla_ext
        import nnabla_ext.cuda
        from nnabla.contrib.context import extension_context

    except:
        pytest.skip("DataParallelCommunicator are only supported in CUDA now.")

    n_devices = nnabla_ext.cuda.init.get_device_count()
    if n_devices < 2:
        pytest.skip("Number of cuda devices is less than 2.")

    # Contexts and Computation Graph
    extension_module = "cuda"
    ctxs = []
    for d in range(n_devices):
        ctx = extension_context(extension_module,
                                device_id="{}".format(d))
        ctxs.append(ctx)
        with nn.context_scope(ctx):
            x_data = np.random.rand(4, 5)
            x = nn.Variable(x_data.shape)
            with nn.parameter_scope("gpu{}".format(d)):
                with nn.parameter_scope("affine1"):
                    z = PF.affine(x, 6)
                with nn.parameter_scope("affine2"):
                    y = PF.affine(z, 5)

    # Init w.g
    grads = []
    for d in range(n_devices):
        with nn.parameter_scope("gpu{}".format(d)):
            params = nn.get_parameters()
            grad = []
            for i, elm in enumerate(params.items()):
                k, v = elm
                grad_ = np.random.randn(*v.shape)
                v.g = grad_
                v.grad.cast(np.float32, ctxs[d])
                grad.append(grad_)
            grads.append(grad)

    # Reference: average the gradients over all devices
    ref_grads = []
    with nn.parameter_scope("gpu{}".format(d)):  # any device's scope works; parameter shapes match
        params = nn.get_parameters()
        for i in range(len(params)):
            ave_grad = 0
            for d in range(n_devices):
                ave_grad += grads[d][i]
            ave_grad /= n_devices
            ref_grads.append(ave_grad)

    # Communicator
    try:
        comm = C.DataParalellCommunicator(ctxs[0])
    except:
        pytest.skip(
            "DataParalellCommunicator is not supported in cpu or not linux platform.")

    for d in range(n_devices):
        with nn.parameter_scope("gpu{}".format(d)):
            comm.add_context_and_parameters(
                (ctxs[d], nn.get_parameters()))
    comm.init()
    comm.allreduce(division=True)

    # Check
    atol = 1e-6
    for d in range(n_devices):
        with nn.parameter_scope("gpu{}".format(d)):
            params = nn.get_parameters()
            for i, elm in enumerate(params.items()):
                k, v = elm
                assert np.allclose(ref_grads[i], v.g, atol=atol)