Example #1
def test_nnabla_models_fixed_size(model_class, up_to_list, batch_size,
                                  training, seed):
    model_module = importlib.import_module("nnabla.models.imagenet")
    nn.clear_parameters()
    rng = np.random.RandomState(seed)
    model = getattr(model_module, model_class)()

    # Determine input shape and create input variable
    input_shape = list(model.input_shape)
    input_shape = tuple(input_shape)
    x = nn.Variable.from_numpy_array(
        rng.randint(0, 256, size=(batch_size, ) + input_shape))

    # Test cases for all intermediate outputs
    for use_up_to in up_to_list:
        returns_net = False

        def _execute():
            y = model(x, training=training, use_up_to=use_up_to)
            y.forward()

        _execute()
        net = model(x,
                    training=training,
                    use_up_to=use_up_to,
                    returns_net=True)
        assert isinstance(net, NnpNetwork)
        assert len(net.inputs.values()) == 1
        assert len(net.outputs.values()) == 1
        y = list(net.outputs.values())[0]
        if training:
            assert _check_trainable_parameters(y)
        else:
            assert not _check_trainable_parameters(y)
Example #2
    def __init__(self, args):
        class Config:
            pass
        self._config = Config()
        self._config.context = 'cpu'
        self._config.device_id = 0
        self._config.columns_size = 2
        self._config.x_length = args.x_length
        self._config.mlp_model_params_path = args.mlp_model_params_path
        self._config.labels_path = args.labels_path

        if not os.path.isfile(self._config.mlp_model_params_path):
            logger.error("Model params path {} is not found.".format(self._config.mlp_model_params_path))
            sys.exit(-1)
        else:
            logger.info("Path of the model parameters file is {}.".format(self._config.mlp_model_params_path))
        if not os.path.isfile(self._config.labels_path):
            logger.error("Labels path {} is not found.".format(self._config.labels_path))
            sys.exit(-1)
        else:
            logger.info("Path of the labels file is {}.".format(self._config.labels_path))

        seed(0)
        logger.info("Running in %s" % self._config.context)
        self._ctx = get_extension_context(self._config.context, device_id=self._config.device_id)
        nn.set_default_context(self._ctx)
        nn.clear_parameters()
        self._mlp = MLP(self._config)
        self._labels = None
        with open(self._config.labels_path) as f:
            self._labels = f.readlines()
        self._points_buf = pointsbuffer.PointsBuffer()
Example #3
def compute(grid_size, D, L, initial_sphere_radius):
    nn.clear_parameters()

    # 2D case
    x = np.linspace(-1, 1, grid_size)
    y = np.linspace(-1, 1, grid_size)
    xx, yy = np.meshgrid(x, y)
    xy = np.asarray([xx.flatten(), yy.flatten()]).T

    x = nn.Variable.from_numpy_array(xy)
    y = implicit_network(x,
                         D,
                         feature_size=256,
                         L=L,
                         initial_sphere_radius=initial_sphere_radius)
    y = y[:, 0]
    y.forward()

    # Plot
    plt.contourf(xx, yy, y.d.reshape(grid_size, grid_size))
    plt.colorbar()
    plt.contourf(xx,
                 yy,
                 y.d.reshape(grid_size, grid_size),
                 colors=('r', ),
                 levels=[-0.01, 0, 0.01])
    plt.title(f"Contour of SDF(x, y) = 0 (D={D:03d} and L={L:03d})")
    plt.savefig(f"sdf_contour_D{D:03d}_L{L:03d}.png")
    plt.clf()

    # Mean absolute error against the analytic SDF of the unit circle
    gt = np.sqrt(np.sum(xy**2, axis=1)) - 1
    mae = np.sum(np.abs(gt - y.d.flatten())) / len(gt)

    return mae
Example #4
def test_istft(window_size, stride, fft_size, window_type, center):
    # clear all previous STFT conv/deconv kernels
    nn.clear_parameters()

    # Make sure that iSTFT(STFT(x)) = x
    x = np.random.randn(1, window_size * 10)

    nx = nn.Variable.from_numpy_array(x)
    nyr, nyi = F.stft(nx,
                      window_size=window_size,
                      stride=stride,
                      fft_size=fft_size,
                      window_type=window_type,
                      center=center)
    nz = F.istft(nyr,
                 nyi,
                 window_size=window_size,
                 stride=stride,
                 fft_size=fft_size,
                 window_type=window_type,
                 center=center)
    nz.forward()

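    # iSTFT cannot perfectly reconstruct the first and last (window_size -
    # stride) samples, so only the interior region is compared.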
    invalid = window_size - stride
    assert (np.allclose(nx.d[:, invalid:-invalid],
                        nz.d[:, invalid:-invalid],
                        atol=1e-5,
                        rtol=1e-5))
Example #5
def test_stft(window_size, stride, fft_size, window_type):
    # clear all previous STFT conv/deconv kernels
    nn.clear_parameters()

    # Compare to `scipy.signal.stft` - only done if SciPy available
    x = np.random.randn(1, window_size * 10)

    nx = nn.Variable.from_numpy_array(x)
    nyr, nyi = F.stft(nx,
                      window_size=window_size,
                      stride=stride,
                      fft_size=fft_size,
                      window_type=window_type,
                      center=False)
    nn.forward_all([nyr, nyi])

    stft_nnabla = nyr.d + 1j * nyi.d
    _f, _t, stft_scipy = sig.stft(x,
                                  window=window_type,
                                  nperseg=window_size,
                                  noverlap=window_size - stride,
                                  nfft=fft_size,
                                  boundary=None,
                                  padded=False)

    # scipy does a different scaling - take care here
    stft_nnabla /= fft_size // 2

    assert (np.allclose(stft_nnabla, stft_scipy, atol=1e-5, rtol=1e-5))
Example #6
    def __init__(self,
                 func,
                 inspecs,
                 func_args,
                 func_kwargs,
                 ext,
                 ext_kwargs,
                 min_run=1,
                 min_time=1.0):
        nn.clear_parameters()
        self.inputs = None
        self.outputs = None
        self.func_ins = None
        self.setup_stat = None
        self.forward_stat = None
        self.backward_stat = None
        self.func = func
        self.module = func.__module__
        self.inspecs = inspecs
        self.inputs_f = create_inputs(inspecs)
        self.func_args = func_args
        self.func_kwargs = func_kwargs
        self.ext = ext
        self.ext_kwargs = ext_kwargs
        self.mod_ext = importlib.import_module('.' + ext, 'nnabla_ext')
        self.ctx = self.mod_ext.context(**ext_kwargs)
        self.min_run = min_run
        self.min_time = min_time
Example #7
def main():
    batch_size, m, h, w = 4, 3, 32, 32
    extension_module = "cpu"
    device_id = 0
    ctx = extension_context(extension_module, device_id=device_id)

    x_l_data = np.random.randn(batch_size, m, h, w)
    y_l_data = (np.random.rand(batch_size, 1) * 10).astype(np.int32)
    x_l = nn.Variable(x_l_data.shape)
    y_l = nn.Variable(y_l_data.shape)
    x_l.d = x_l_data
    y_l.d = y_l_data

    # CNN
    print("# CNN")
    pred = cnn_model_003(ctx, x_l)
    s = 0
    for n, v in nn.get_parameters().items():
        n_params = np.prod(v.shape)
        print(n, n_params)
        s += n_params
    print("n_params={}".format(s))
    nn.clear_parameters()

    # Resnet
    print("# Resnet")
    inmaps = 256
    pred = resnet_model(ctx, x_l, inmaps=inmaps)
    s = 0
    for n, v in nn.get_parameters().items():
        n_params = np.prod(v.shape)
        print(n, n_params)
        s += n_params
    print("n_params={}".format(s))
    nn.clear_parameters()
Example #9
def test_nnabla_models_resnet(num_layers, image_size, batch_size, training, seed):
    from nnabla.models.imagenet import ResNet
    nn.clear_parameters()
    rng = np.random.RandomState(seed)
    model = ResNet(num_layers)
    x = nn.Variable.from_numpy_array(rng.randint(
        0, 256, size=(batch_size, 3, image_size, image_size)))
    for use_up_to in ('classifier', 'pool', 'lastconv', 'lastconv+relu'):
        check_global_pooling = True
        force_global_pooling = False
        returns_net = False

        def _execute():
            y = model(x, training=training, use_up_to=use_up_to, force_global_pooling=force_global_pooling,
                      check_global_pooling=check_global_pooling)
            y.forward()

        if image_size == 448 and use_up_to in ('classifier', 'pool'):
            with pytest.raises(ValueError):
                _execute()
            if use_up_to == 'pool':
                check_global_pooling = False
                _execute()
            force_global_pooling = True
        _execute()
        net = model(x, training=training, use_up_to=use_up_to, force_global_pooling=force_global_pooling,
                    check_global_pooling=check_global_pooling, returns_net=True)
        assert isinstance(net, NnpNetwork)
        assert len(net.inputs.values()) == 1
        assert len(net.outputs.values()) == 1
        y = list(net.outputs.values())[0]
        if training:
            assert _check_trainable_parameters(y)
        else:
            assert not _check_trainable_parameters(y)
Example #10
def test_time_profiler(batch_size, n_class, ext_name, tmpdir):
    nn.clear_parameters()

    ctx = get_extension_context(ext_name)
    nn.set_default_context(ctx)

    x = nn.Variable.from_numpy_array(
        np.random.normal(size=(batch_size, 3, 16, 16)))
    t = nn.Variable.from_numpy_array(
        np.random.randint(low=0, high=n_class, size=(batch_size, 1)))

    y = simple_cnn(x, t, n_class)

    tp = TimeProfiler(ext_name, device_id=ctx.device_id)
    for i in range(5):
        with tp.scope("forward"):
            y.forward(clear_no_need_grad=True,
                      function_pre_hook=tp.pre_hook,
                      function_post_hook=tp.post_hook)

        with tp.scope("backward"):
            y.backward(clear_buffer=True,
                       function_pre_hook=tp.pre_hook,
                       function_post_hook=tp.post_hook)

        tp.calc_elapsed_time(["forward", "backward", "summary"])

    tp()
    tp.to_csv(out_dir=str(tmpdir))
Example #11
def test_grad_grad_resnet(seed, ctx, auto_forward, inplace, shared):
    nn.clear_parameters()

    # Settings
    nn.set_default_context(ctx)
    nn.set_auto_forward(auto_forward)
    b, c, h, w = 4, 3, 32, 32
    n_cls = 10
    rng = np.random.RandomState(seed)

    # Network
    x = nn.Variable.from_numpy_array(rng.randn(b, c, h,
                                               w)).apply(need_grad=True)
    y = SmallResNet(x, inplace=inplace, shared=shared)

    # Grad of grad
    dx = nn.grad([y], [x])
    ddx = nn.grad([dx[0]], [x])
    if not auto_forward:
        ddx[0].forward()
    # Backward of grad
    x.grad.zero()
    if not auto_forward:
        dx[0].forward()
    dx[0].backward()

    # Check between results of var.backward and nn.grad
    backend = ctx.backend[0].split(":")[0]
    if backend == 'cuda':
        pytest.skip(
            'CUDA Convolution N-D is only supported in CUDNN extension')
    assert_allclose(x.g, ddx[0].d, atol=1e-6)
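For reference, a stripped-down sketch of the double-backward pattern used above, on a toy scalar function rather than the original SmallResNet (F.sin and the input value are illustrative, not taken from the test):

import numpy as np
import nnabla as nn
import nnabla.functions as F

x = nn.Variable.from_numpy_array(np.array([2.0])).apply(need_grad=True)
y = F.sin(x)
dx = nn.grad([y], [x])        # builds a graph computing dy/dx = cos(x)
ddx = nn.grad([dx[0]], [x])   # differentiates again: d2y/dx2 = -sin(x)
ddx[0].forward()
assert np.allclose(ddx[0].d, -np.sin(2.0))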
Example #12
def test_generate_tmp_nnp():
    nn.clear_parameters()
    batch_size = 16
    x0 = nn.Variable([batch_size, 100])
    x1 = nn.Variable([batch_size, 100])
    h1_0 = PF.affine(x0, 100, name='affine1_0')
    h1_1 = PF.affine(x1, 100, name='affine1_0')
    h1 = F.tanh(h1_0 + h1_1)
    h2 = F.tanh(PF.affine(h1, 50, name='affine2'))
    y0 = PF.affine(h2, 10, name='affiney_0')
    y1 = PF.affine(h2, 10, name='affiney_1')

    contents = {
        'networks': [{
            'name': 'net1',
            'batch_size': batch_size,
            'outputs': {
                'y0': y0,
                'y1': y1
            },
            'names': {
                'x0': x0,
                'x1': x1
            }
        }],
        'executors': [{
            'name': 'runtime',
            'network': 'net1',
            'data': ['x0', 'x1'],
            'output': ['y0', 'y1']
        }]
    }
    nnabla.utils.save.save('tmp.nnp', contents)
Example #13
File: test_stft.py Project: donproc/nnabla
def test_istft(ctx, window_size, stride, fft_size, window_type, center):
    backend = ctx.backend[0].split(":")[0]
    if backend == 'cuda':
        pytest.skip(
            'CUDA Convolution N-D is only supported in CUDNN extension')

    # clear all previous STFT conv/deconv kernels
    nn.clear_parameters()

    # Make sure that iSTFT(STFT(x)) = x
    x = np.random.randn(1, window_size * 10)

    nx = nn.Variable.from_numpy_array(x)
    with nn.context_scope(ctx):
        nyr, nyi = F._stft_v1(nx,
                              window_size=window_size,
                              stride=stride,
                              fft_size=fft_size,
                              window_type=window_type,
                              center=center)
        nz = F._istft_v1(nyr,
                         nyi,
                         window_size=window_size,
                         stride=stride,
                         fft_size=fft_size,
                         window_type=window_type,
                         center=center)
    nz.forward()

    invalid = window_size - stride
    assert (np.allclose(nx.d[:, invalid:-invalid],
                        nz.d[:, invalid:-invalid],
                        atol=1e-5,
                        rtol=1e-5))
Example #14
def test_get_parameter_with_initializer():
    """Testing with initializer
    """
    import nnabla as nn
    from nnabla.parameter import get_parameter_or_create
    nn.clear_parameters()
    rng = np.random.RandomState(seed=313)
    shape = (8, 8, 3, 3)

    # Instance inherited from BaseInitializer
    initializer = UniformInitializer(lim=(-1, 1), rng=rng)
    param1 = get_parameter_or_create('param1',
                                     shape,
                                     initializer=initializer,
                                     need_grad=True)
    assert np.all(param1.d > -1) and np.all(param1.d < 1)

    # Numpy array
    initializer = rng.randn(*shape)
    param2 = get_parameter_or_create('param2',
                                     initializer=initializer,
                                     need_grad=True)
    assert np.allclose(initializer, param2.d)

    # Random
    param3 = get_parameter_or_create('param3', shape, need_grad=True)

    nn.clear_parameters()
Example #15
def test_parametric_function_api():
    """
    Testing :function:`nnabla.parametric_functions.parametric_function_api`.
    """
    import nnabla as nn
    import inspect
    nn.clear_parameters()
    shape = (2, 3, 4)

    # Signature check
    spec = inspect.getargspec(dummy_parametric_function)
    assert spec.args == ['shape', 'f', 'i', 's', 'name']
    assert spec.defaults == (10, 1, 'dummy', None)
    assert dummy_parametric_function.__doc__.splitlines()[0] == 'Doc'

    # Verify that two different ways do the same thing.
    # Using name argument
    v = dummy_parametric_function(shape, name='group1')
    # Using parameter_scope
    with nn.parameter_scope('group1'):
        v = dummy_parametric_function(shape)

    params = nn.get_parameters()
    assert len(params) == 2
    assert list(iterkeys(params)) == ['group1/dummy/p1', 'group1/dummy/p2']

    # No scope
    v = dummy_parametric_function(shape)

    params = nn.get_parameters()
    assert len(params) == 4
    assert list(iterkeys(params)) == ['group1/dummy/p1', 'group1/dummy/p2',
                                      'dummy/p1', 'dummy/p2']
    nn.clear_parameters()
Example #16
def test_get_parameter_or_create_need_grad():
    """Testing if need_grad flag works not not.
    """
    import nnabla as nn
    from nnabla.parameter import get_parameter_or_create
    nn.clear_parameters()
    param1 = get_parameter_or_create('p/param1', (2, 3, 4, 5), need_grad=True)
    p1d = np.random.randn(*param1.shape).astype(np.float32)
    p1g = np.random.randn(*param1.shape).astype(np.float32)
    param1.d = p1d
    param1.g = p1g
    param1_f = get_parameter_or_create('p/param1',
                                       param1.shape,
                                       need_grad=False)
    assert not param1_f.need_grad
    assert not param1.need_grad
    assert np.all(param1.d == p1d)
    assert np.all(param1.d == param1_f.d)
    param1.d = 1
    assert np.all(param1_f.d == 1)
    param1_f2 = get_parameter_or_create('p/param1',
                                        param1.shape,
                                        need_grad=True,
                                        as_need_grad=False)
    assert param1.need_grad
    assert param1_f.need_grad
    assert not param1_f2.need_grad
    nn.clear_parameters()
Example #18
def test_graph_more_than_2_outputs(seed, clear_buffer):
    count = 0

    def func_hook(f):
        nonlocal count
        if f.name == 'Split':
            count += 1

    nn.clear_parameters()

    a = nn.Variable.from_numpy_array(np.ones((10, )))
    b = nn.Variable.from_numpy_array(np.ones((10, )))
    c = F.add2(a, b, inplace=True, outputs=[a.data])
    y = F.split(c, axis=0)
    nn.forward_all(y, function_pre_hook=func_hook)

    assert count == 1

    res = [x.d for x in y]
    assert_allclose(res, [2.0] * 10)

    a = nn.Variable.from_numpy_array(np.ones((10, )))
    b = nn.Variable.from_numpy_array(np.ones((10, )))
    c = F.add2(a, b, inplace=True, outputs=[a.data])
    y = F.split(c, axis=0)
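    # Each per-output forward() below re-executes the in-place add2, adding b
    # into a's buffer once per call: 1 + 10 forwards -> 11.0 in every element.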
    for yy in y:
        yy.forward()
    res = [x.d for x in y]
    assert_allclose(res, [11.0] * 10)
Example #19
def test_simple_loop():
    nn.clear_parameters()

    x = nn.Variable.from_numpy_array(np.random.randn(10, 3, 128, 128))
    t = nn.Variable.from_numpy_array(np.random.randint(0, 100, (10, )))

    unet = UNet(num_classes=1,
                model_channels=128,
                output_channels=3,
                num_res_blocks=2,
                attention_resolutions=(16, 8),
                attention_num_heads=4,
                channel_mult=(1, 1, 2, 2, 4, 4))
    y = unet(x, t)

    loss = F.mean(F.squared_error(y, x))

    import nnabla.solvers as S
    solver = S.Sgd()
    solver.set_parameters(nn.get_parameters())

    from tqdm import trange
    tr = trange(100)
    for i in tr:
        loss.forward(clear_no_need_grad=True)
        solver.zero_grad()
        loss.backward(clear_buffer=True)
        solver.update()

        tr.set_description(f"diff: {loss.d.copy():.5f}")
Example #20
def test_nnabla_models_vgg(num_layers, batch_size, training, seed):
    from nnabla.models.imagenet import VGG
    nn.clear_parameters()
    rng = np.random.RandomState(seed)
    model = VGG(num_layers)

    # Determine input shape and create input variable
    input_shape = list(model.input_shape)
    input_shape = tuple(input_shape)
    x = nn.Variable.from_numpy_array(
        rng.randint(0, 256, size=(batch_size, ) + input_shape))

    # Test cases for all intermediate outputs
    for use_up_to in ('classifier', 'pool', 'lastconv', 'lastconv+relu'):
        returns_net = False

        def _execute():
            y = model(x, training=training, use_up_to=use_up_to)
            y.forward()

        _execute()
        net = model(x,
                    training=training,
                    use_up_to=use_up_to,
                    returns_net=True)
        assert isinstance(net, NnpNetwork)
        assert len(net.inputs.values()) == 1
        assert len(net.outputs.values()) == 1
        y = list(net.outputs.values())[0]
        if training:
            assert _check_trainable_parameters(y)
        else:
            assert not _check_trainable_parameters(y)
Example #21
def test_no_grad(seed, graph):
    from .graph_converter_test_utils import structure_tester, value_tester

    nn.clear_parameters()

    # Random number
    np.random.seed(seed)
    rng = np.random.RandomState(seed)

    # Graph
    x_data = rng.randn(4, 3, 32, 32)
    x0 = nn.Variable.from_numpy_array(x_data)\
        .apply(need_grad=False) \
        .apply(persistent=True)
    y0 = graph(x0)
    y1 = y0.no_grad()

    # Test
    def assert_need_grad_false(f):
        for inp in f.inputs:
            assert inp.need_grad == False, "need_grad must be false"
        for out in f.outputs:
            assert out.need_grad == False, "need_grad must be false"

    y1.visit(assert_need_grad_false)
    structure_tester(y0, y1)
    value_tester(y0, y1, clear_no_need_grad=True)
Example #22
def test_batch_normalization_linear(seed, test, graph_ref, graph_act):
    from graph_converter_test_utils import structure_tester, value_tester, print_params
    # because linearized parameters (c0, c1) are to be shared among models
    nn.clear_parameters()

    # Random number
    np.random.seed(seed)
    rng = np.random.RandomState(seed)

    # Graph
    x_data = rng.randn(4, 3, 32, 32)
    x = nn.Variable.from_numpy_array(x_data)
    y_tgt = graph_act(x, test=test)

    # Convert
    name = "bn-linear-graph"
    converter = GC.BatchNormalizationLinearConverter(name=name)
    y_act = converter.convert(y_tgt, [x])

    # Ref Graph
    name = "bn-linear-graph-ref"
    y_ref = graph_ref(x, name=name)

    # Test
    structure_tester(y_ref, y_act)
    value_tester(y_tgt, y_act)
Example #23
def test_nnabla_models_imagenet_etc(model_class, up_to_list, image_size_factor,
                                    batch_size, training, seed):
    nn.clear_parameters()
    rng = np.random.RandomState(seed)

    # Load model
    from nnabla.models import imagenet
    Model = getattr(imagenet, model_class)
    model = Model()

    # Determine input shape and create input variable
    input_shape = list(model.input_shape)
    input_shape[1] *= image_size_factor
    input_shape[2] *= image_size_factor
    input_shape = tuple(input_shape)
    x = nn.Variable.from_numpy_array(
        rng.randint(0, 256, size=(batch_size, ) + input_shape))

    # Test cases for all intermediate outputs
    for use_up_to in up_to_list:
        check_global_pooling = True
        force_global_pooling = False
        returns_net = False

        def _execute():
            y = model(x,
                      training=training,
                      use_up_to=use_up_to,
                      force_global_pooling=force_global_pooling,
                      check_global_pooling=check_global_pooling)
            y.forward()

        # Need special care for SENet because it contains global average
        # pooling in various points in a network.
        if image_size_factor != 1 and (model_class == 'SENet'
                                       or use_up_to in ('classifier', 'pool')):
            with pytest.raises(ValueError):
                _execute()
            if use_up_to == 'pool' and model_class != 'SENet':
                check_global_pooling = False
                _execute()
            force_global_pooling = True
        _execute()
        net = model(x,
                    training=training,
                    use_up_to=use_up_to,
                    force_global_pooling=force_global_pooling,
                    check_global_pooling=check_global_pooling,
                    returns_net=True)
        assert isinstance(net, NnpNetwork)
        assert len(net.inputs.values()) == 1
        assert len(net.outputs.values()) == 1
        y = list(net.outputs.values())[0]
        if training:
            assert _check_trainable_parameters(y)
        else:
            assert not _check_trainable_parameters(y)
Example #24
File: test_graph.py Project: zwsong/nnabla
def test_graph_model(model, seed):
    np.random.seed(313)
    rng = np.random.RandomState(seed)
    x = nn.Variable([2, 3, 4, 4], need_grad=True)
    t = nn.Variable([2, 1])
    x.d = rng.randn(*x.shape)
    t.d = rng.randint(0, 5, size=t.shape)

    nn.set_default_context(nn.Context())

    # Forwardprop by definition
    nn.clear_parameters()
    if model == "mlp":
        with nn.parameter_scope('fc1'):
            z = PF.affine(x, 3)
        z2 = F.relu(z, inplace=True)
        with nn.parameter_scope('fc2'):
            z3 = PF.affine(z2, 5)
    elif model == "recurrent":
        with nn.parameter_scope('fc1'):
            z = PF.affine(x, 3)
            z2 = F.relu(z, inplace=True)
        h = z2
        for _ in range(2):
            with nn.parameter_scope('fc2'):
                h = PF.affine(h, 3)
                h = F.relu(h, inplace=True)
        with nn.parameter_scope('fc3'):
            z3 = PF.affine(h, 5)
    elif model == "convolution":
        with nn.parameter_scope('conv1'):
            z = PF.convolution(x, 3, (2, 2))
            z2 = F.relu(z, inplace=True)
        with nn.parameter_scope('fc2'):
            z3 = PF.affine(z2, 5)
    else:
        raise ValueError()
    l = F.softmax_cross_entropy(z3, t, 1)
    L = F.mean(l)

    # Forwardprop
    L.forward(clear_no_need_grad=True)

    # Backprop
    # Diff should be initialized since they are always accumulated
    x.grad.zero()
    L.backward(clear_buffer=True)
    x.g = rng.randn(*x.shape)
    parameters = nn.get_parameters()
    for param in parameters.values():
        param.grad.zero()
    inputs = [x] + list(parameters.values())

    from nbla_test_utils import \
        compute_analytical_and_numerical_grad_graph as grads
    agrad, ngrad = grads(L, inputs, 1e-3)
    assert np.allclose(ngrad, agrad, atol=1.05e-2)
Example #25
def test_graph_model(model, seed):
    np.random.seed(313)
    rng = np.random.RandomState(seed)
    x = nn.Variable([2, 3, 4, 4], need_grad=True)
    t = nn.Variable([2, 1])
    x.d = rng.randn(*x.shape)
    t.d = rng.randint(0, 5, size=t.shape)

    nn.set_default_context(nn.Context())

    # Forwardprop by definition
    nn.clear_parameters()
    if model == "mlp":
        with nn.parameter_scope('fc1'):
            z = PF.affine(x, 3)
        z2 = F.relu(z, inplace=True)
        with nn.parameter_scope('fc2'):
            z3 = PF.affine(z2, 5)
    elif model == "recurrent":
        with nn.parameter_scope('fc1'):
            z = PF.affine(x, 8)
            z2 = F.relu(z, inplace=True)
        h = z2
        for _ in range(2):
            with nn.parameter_scope('fc2'):
                h = PF.affine(h, 8)
                h = F.relu(h, inplace=True)
        with nn.parameter_scope('fc3'):
            z3 = PF.affine(h, 5)
    elif model == "convolution":
        with nn.parameter_scope('conv1'):
            z = PF.convolution(x, 3, (2, 2))
            z2 = F.relu(z, inplace=True)
        with nn.parameter_scope('fc2'):
            z3 = PF.affine(z2, 5)
    else:
        raise ValueError()
    l = F.softmax_cross_entropy(z3, t, 1)
    L = F.mean(l)

    # Forwardprop
    L.forward(clear_no_need_grad=True)

    # Backprop
    # Diff should be initialized since they are always accumulated
    x.grad.zero()
    L.backward(clear_buffer=True)
    x.g = rng.randn(*x.shape)
    parameters = nn.get_parameters()
    for param in parameters.values():
        param.grad.zero()
    inputs = [x] + list(parameters.values())

    from nbla_test_utils import \
        compute_analytical_and_numerical_grad_graph as grads
    agrad, ngrad = grads(L, inputs, 1e-3)
    assert_allclose(ngrad, agrad, atol=1.05e-2)
Example #26
File: helper.py Project: sony/nnabla
def get_saved_test_model(module):
    module_func, module_input_shapes = module
    with create_temp_with_dir(NNP_FILE) as nnp_file:
        with nn.graph_def.graph() as g:
            variables = [
                nn.ProtoVariable(shape) for _, shape in module_input_shapes
            ]
            outputs = module_func(*variables)
        g.save(nnp_file)
        nn.clear_parameters()
        yield nnp_file
Example #27
def test_parametric_function_1d(inshape, kernel, multiplier, outshape):
    base_axis = len(inshape) - 2
    sample_channels = inshape[base_axis]
    outmap_channels = sample_channels * multiplier
    x = nn.Variable(inshape)
    y = PF.depthwise_convolution(x, kernel, multiplier=multiplier)
    p = nn.get_parameters()
    assert y.shape == outshape
    assert p['depthwise_conv/W'].shape == (outmap_channels,) + kernel
    assert p['depthwise_conv/b'].shape == (outmap_channels,)
    nn.clear_parameters()
Example #28
def test_parametric_function_2d(inshape, kernel, divisor, outshape):
    base_axis = len(inshape) - 3
    sample_channels = inshape[base_axis]
    outmap_channels = sample_channels // divisor
    x = nn.Variable(inshape)
    y = PF.depthwise_deconvolution(x, kernel, divisor=divisor)
    p = nn.get_parameters()
    assert y.shape == outshape
    assert p['depthwise_deconv/W'].shape == (sample_channels, ) + kernel
    assert p['depthwise_deconv/b'].shape == (outmap_channels, )
    nn.clear_parameters()
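As a quick illustration of the shape rules exercised by the two tests above (a minimal sketch with made-up shapes, not taken from the original suite): depthwise_convolution with multiplier m maps C input channels to C * m output maps.

import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((1, 4, 8, 8))  # (N, C, H, W) with C=4
y = PF.depthwise_convolution(x, (3, 3), multiplier=2)
W = nn.get_parameters()['depthwise_conv/W']
assert W.shape == (8, 3, 3)     # (C * multiplier,) + kernel
assert y.shape == (1, 8, 6, 6)  # 8x8 input, 3x3 kernel, no padding -> 6x6
nn.clear_parameters()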
Example #29
def test(args):
    """
    Testing
    """

    ##  ~~~~~~~~~~~~~~~~~~~
    ##   Initial settings
    ##  ~~~~~~~~~~~~~~~~~~~

    #   Input Variable
    nn.clear_parameters()  # Clear
    Input = nn.Variable([1, 3, 64, 64])  # Input
    Trues = nn.Variable([1, 1])  # True Value

    #   Network Definition
    Name = "CNN"  # Name of scope which includes network models (arbitrary)
    Output_test = network(Input, scope=Name, test=True)  # Network & Output
    Loss_test = F.mean(F.absolute_error(
        Output_test, Trues))  # Loss Function (Absolute Error)

    #   Load data
    with nn.parameter_scope(Name):
        nn.load_parameters(
            os.path.join(args.model_save_path,
                         "network_param_{:04}.h5".format(args.epoch)))

    # Training Data Setting
    image_data, mos_data = dt.data_loader(test=True)
    batches = dt.create_batch(image_data, mos_data, 1)
    del image_data, mos_data

    truth = []
    result = []
    for j in range(batches.iter_n):
        Input.d, trues = next(batches)
        Loss_test.forward(clear_no_need_grad=True)
        result.append(Loss_test.d)
        truth.append(trues)

    result = np.array(result)
    truth = np.squeeze(np.array(truth))

    # Evaluation of performance
    mae = np.average(np.abs(result - truth))
    SRCC, p1 = stats.spearmanr(truth,
                               result)  # Spearman's Correlation Coefficient
    PLCC, p2 = stats.pearsonr(truth, result)

    #   Display
    print("\n Model Parameter [epoch={0}]".format(args.epoch))
    print(" Mean Absolute Error with Truth: {0:.4f}".format(mae))
    print(" Speerman's Correlation Coefficient: {0:.3f}".format(SRCC))
    print(" Pearson's Linear Correlation Coefficient: {0:.3f}".format(PLCC))
Example #30
    def __init__(self):
        # Initialize parameters
        nn.clear_parameters()

        # Prepare the input variable
        self.x = nn.Variable((1, 3, 256, 256))  # (batch, channels, height, width)

        # Load parameters
        nn.load_parameters(os.path.join(current_dir, "parameters.h5"))

        # Build the inference network
        self.y = self.network(self.x, test=True)
Example #31
def test_graph_rewire(seed, clear_buffer):
    nn.clear_parameters()

    # Utility that defines a small two-layer MLP graph
    def mlp2(x, scope):
        with nn.parameter_scope(scope):
            h = F.tanh(PF.affine(x, 10, name='a1'))
            h = F.tanh(PF.affine(h, 10, name='a1'))
            return h

    # A. Create a graph A.
    xa = nn.Variable((2, 10), need_grad=True)
    ya = mlp2(xa, 'a')

    # B. Create a graph B.
    xb = nn.Variable((2, 10), need_grad=True)
    yb = mlp2(xb, 'b')

    # C. Create directly connected graph.
    xc = nn.Variable((2, 10))
    yc = mlp2(mlp2(xc, 'a'), 'b')

    # D. Rewire the graphs A and B.
    xb.rewire_on(ya)

    # E. Check whether the results are the same.
    rng = np.random.RandomState(seed)
    data = rng.randn(*xa.shape)
    xa.d = data
    xc.d = data
    params = nn.get_parameters()

    def zero_grad():
        for p in params.values():
            p.grad.zero()

    def backup_params():
        return [p.g.copy() for p in params.values()]

    # Checking forward
    yb.forward(clear_no_need_grad=clear_buffer)
    yc.forward(clear_no_need_grad=clear_buffer)
    assert_allclose(yb.d, yc.d)
    # Checking backward
    zero_grad()
    yb.backward(clear_buffer=clear_buffer)
    gb = backup_params()
    zero_grad()
    yc.backward(clear_buffer=clear_buffer)
    gc = backup_params()
    assert_allclose(xa.d, xc.d)
    for b, c in zip(gb, gc):
        assert_allclose(b, c)
Example #32
def test_all_reduce_callback(seed, pack_size, division, comm_nccl_opts):
    if comm_nccl_opts is None:
        pytest.skip(
            "Communicator test is disabled. You can turn it on by an option `--test-communicator`."
        )
    if len(comm_nccl_opts.devices) < 2:
        pytest.skip("Communicator test is disabled. Use more than 1 gpus.")

    comm = comm_nccl_opts.comm
    device_id = int(comm_nccl_opts.device_id)
    nn.set_default_context(comm_nccl_opts.ctx)

    nn.clear_parameters()
    x_data_list = []
    num_layers = 20
    rng = np.random.RandomState(seed)
    for l in range(num_layers):
        x_data = rng.rand(3, 4)
        x_data_list.append(x_data)

    # all_reduce_callback
    x_list1 = []
    n1 = nn.Variable([3, 4])
    n1.d = 0
    for l in range(num_layers):
        x = nn.Variable([3, 4], need_grad=True)
        n1 = F.add2(n1, x)
        x.d = x_data_list[l] * (device_id + 1)
        x.g = 0
        x_list1.append(x)
    n1.backward(clear_buffer=True,
                communicator_callbacks=comm.all_reduce_callback(
                    [v.grad for v in x_list1], pack_size, division))

    # Ref AllReduce
    x_list2 = []
    n2 = nn.Variable([3, 4])
    n2.d = 0
    for l in range(num_layers):
        x = nn.Variable([3, 4], need_grad=True)
        n2 = F.add2(n2, x)
        x.d = x_data_list[l] * (device_id + 1)
        x.g = 0
        x_list2.append(x)
    n2.backward(clear_buffer=True)
    comm.all_reduce([v.grad for v in x_list2],
                    inplace=False,
                    division=division)

    # Check
    for x, ref in zip(x_list1, x_list2):
        assert np.allclose(x.g, ref.g)
Example #33
def test_get_set_pop_parameter():
    import nnabla as nn
    from nnabla.parameter import set_parameter, pop_parameter, get_parameter
    nn.clear_parameters()
    x = nn.Variable((2, 3, 4, 5))
    key = 'a/b/c'
    set_parameter(key, x)
    x2 = get_parameter(key)
    assert x is x2
    x3 = pop_parameter(key)
    assert x is x3
    x4 = get_parameter(key)
    assert x4 is None
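Nearly every example on this page starts with nn.clear_parameters(), which empties the process-wide parameter registry so that parameters left over from an earlier test cannot leak into the current one. A minimal sketch of that behavior (the scope name 'fc' is illustrative):

import nnabla as nn
import nnabla.parametric_functions as PF

x = nn.Variable((4, 16))
h = PF.affine(x, 8, name='fc')  # registers 'fc/affine/W' and 'fc/affine/b'
assert len(nn.get_parameters()) == 2

nn.clear_parameters()           # the registry is now empty
assert len(nn.get_parameters()) == 0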
Example #34
def test_parameter_scope_slash():
    """Testing if parameter_scope('aaa/bbb') works.
    """
    import nnabla as nn
    from nnabla.parameter import get_parameter_or_create
    nn.clear_parameters()
    with nn.parameter_scope('aaa/bbb'):
        param = get_parameter_or_create('ccc', (2, 3, 4, 5))
    ref = np.random.randn(*param.shape).astype(np.float32)
    param.d = ref

    with nn.parameter_scope('aaa'):
        with nn.parameter_scope('bbb'):
            param = get_parameter_or_create('ccc', (2, 3, 4, 5))
    assert np.all(param.d == ref)
    nn.clear_parameters()
Example #35
def test_get_parameter_or_create_need_grad():
    """Testing if need_grad flag works not not.
    """
    import nnabla as nn
    from nnabla.parameter import get_parameter_or_create
    nn.clear_parameters()
    param1 = get_parameter_or_create('param1', (2, 3, 4, 5), need_grad=True)
    p1d = np.random.randn(*param1.shape).astype(np.float32)
    p1g = np.random.randn(*param1.shape).astype(np.float32)
    param1.d = p1d
    param1.g = p1g
    param1_f = get_parameter_or_create('param1', param1.shape, need_grad=False)
    assert not param1_f.need_grad
    assert param1.need_grad
    assert np.all(param1.d == p1d)
    assert np.all(param1.d == param1_f.d)
    param1.d = 1
    assert np.all(param1_f.d == 1)
    nn.clear_parameters()
Example #36
File: test_graph.py Project: zwsong/nnabla
def test_graph_clear_buffer(seed):
    np.random.seed(313)
    rng = np.random.RandomState(seed)
    x = nn.Variable([2, 3, 4, 4])
    t = nn.Variable([2, 1])
    x.d = rng.randn(*x.shape)
    t.d = rng.randint(0, 5, size=t.shape)

    # Network definition
    nn.set_default_context(nn.Context())
    nn.clear_parameters()
    x1 = x + 1
    x2 = x1 - 1
    with nn.parameter_scope('conv1'):
        z = PF.convolution(x2, 3, (2, 2))
        z2 = F.relu(z, inplace=True)
    with nn.parameter_scope('fc2'):
        z3 = PF.affine(z2, 5)
    l = F.softmax_cross_entropy(z3, t, 1)
    L = F.mean(l)

    # Forwardprop
    import tempfile
    import os
    tmpd = tempfile.mkdtemp()
    nn.save_parameters(os.path.join(tmpd, 'parameter.h5'))
    first = False
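    # The gradient must be identical no matter how clear_no_need_grad and
    # clear_buffer are combined; compare every run against the first one.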
    for cnng in [False, True]:
        for cb in [False, True]:
            _ = nn.load_parameters(os.path.join(tmpd, 'parameter.h5'))
            for v in nn.get_parameters().values():
                v.grad.zero()
            L.forward(clear_no_need_grad=cnng)
            L.backward(clear_buffer=cb)
            if not first:
                first = True
                g = list(nn.get_parameters().values())[0].g.copy()
            else:
                g2 = list(nn.get_parameters().values())[0].g.copy()
                assert np.all(g == g2)
Example #37
    def __init__(self, func, inspecs, func_args, func_kwargs,
                 ext, ext_kwargs, min_run=1, min_time=1.0):
        nn.clear_parameters()
        self.inputs = None
        self.outputs = None
        self.func_ins = None
        self.setup_stat = None
        self.forward_stat = None
        self.backward_stat = None
        self.func = func
        self.module = func.__module__
        self.inspecs = inspecs
        self.inputs_f = create_inputs(inspecs)
        self.func_args = func_args
        self.func_kwargs = func_kwargs
        self.ext = ext
        self.ext_kwargs = ext_kwargs
        self.mod_ext = importlib.import_module(
            '.' + ext, 'nnabla.extensions')
        self.ctx = self.mod_ext.context(**ext_kwargs)
        self.min_run = min_run
        self.min_time = min_time