Example #1
def test_get_layouts(config):
    test_transformer = ngt.make_transformer_factory('gpu')()

    t = config
    with ng.metadata(parallel=t['parallel_axis']):
        test_ops = [
            GPUCudaScatterSendOp(
                TensorValueOp(ng.placeholder(t['axes']), metadata=dict(device='gpu',
                              device_id='0', parallel=t['parallel_axis'],
                              transformer='gpu0', host_transformer=None)),
                ng.Op(metadata=dict(device='gpu', device_id=('0', '1'),
                      parallel=t['parallel_axis'], transformer=['gpu0', 'gpu1'],
                      host_transformer=None))
            ),
            GPUCudaScatterRecvOp(
                ng.Op(metadata=dict(device='gpu', device_id=('0', '1'),
                      parallel=t['parallel_axis'], transformer=['gpu0', 'gpu1'],
                      host_transformer=None)),
                GPUCudaScatterSendOp(
                    TensorValueOp(ng.placeholder(t['axes']), metadata=dict(device='gpu',
                                  device_id='0', parallel=t['parallel_axis'],
                                  transformer='gpu0', host_transformer=None)),
                    ng.Op(metadata=dict(device='gpu', device_id=('0', '1'),
                          parallel=t['parallel_axis'], transformer=['gpu0', 'gpu1'],
                          host_transformer=None))
                )
            ),
            GPUCudaGatherRecvOp(
                ng.Op(metadata=dict(device='gpu', device_id=('0', '1'),
                                    parallel=t['parallel_axis'], transformer=['gpu0', 'gpu1'],
                                    host_transformer=None)),
                ng.Op(metadata=dict(device='gpu', device_id='0', parallel=t['parallel_axis'],
                      transformer='gpu0', host_transformer=None)),
                GPUCudaScatterSendOp(
                    TensorValueOp(ng.placeholder(t['axes']), metadata=dict(device='gpu',
                                  device_id='0', parallel=t['parallel_axis'],
                                  transformer='gpu0', host_transformer=None)),
                    ng.Op(metadata=dict(device='gpu', device_id=('0', '1'),
                          parallel=t['parallel_axis'], transformer=['gpu0', 'gpu1'],
                          host_transformer=None))
                )
            ),
            GPUCudaGatherSendOp(
                TensorValueOp(ng.placeholder(t['axes']), metadata=dict(device='gpu',
                              device_id='0', transformer='gpu0',
                              host_transformer=None, parallel=t['parallel_axis']))
            ),
            GPUCudaAllReduceOp(
                input_node=TensorValueOp(ng.placeholder(t['axes']), metadata=dict(device='gpu',
                                         device_id='0', transformer='gpu0', host_transformer=None,
                                         parallel=t['parallel_axis'])),
                func='sum'
            )
        ]
    test_layouts = []
    for op in test_ops:
        test_layouts.append(test_transformer.get_layouts(op)[0].axes)
    np.testing.assert_array_equal(test_layouts, t['expected_layouts'])
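
The `config` fixture that drives `test_get_layouts` is not part of this excerpt. The sketch below is a hypothetical reconstruction of the shape that fixture would need, inferred only from the keys the test reads (`'axes'`, `'parallel_axis'`, `'expected_layouts'`); the axis names, lengths, and the empty expected-layout list are assumptions, not values from the original test suite.

# Hypothetical sketch of the missing `config` fixture (not from the original
# test file); it only illustrates the keys that test_get_layouts consumes.
import pytest
import ngraph as ng

ax_A = ng.make_axis(length=8, name='A')    # assumed axis
ax_B = ng.make_axis(length=4, name='B')    # assumed parallel axis


@pytest.fixture(params=[{
    'axes': ng.make_axes([ax_A, ax_B]),    # axes for the ng.placeholder inputs
    'parallel_axis': ax_B,                 # axis the scatter/gather/allreduce ops split over
    'expected_layouts': [],                # per-op expected layout axes (values omitted here)
}])
def config(request):
    return request.param
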
Example #2
def test_metadata():
    n = ng.Op(metadata=dict(something=3))
    m = ng.Op()
    assert len(m.metadata) == 0
    assert n.metadata['something'] == 3
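
Example #2 attaches metadata through the `metadata=` keyword, while Example #1 also relies on the `ng.metadata()` context manager. The snippet below is a minimal sketch, not taken from the tests, assuming (as Example #1's usage suggests) that the context manager tags every op created inside the `with` block.

# Minimal sketch contrasting the two ways metadata is attached in these
# examples (assumes ng.metadata() tags ops built inside the with-block).
import ngraph as ng

direct = ng.Op(metadata=dict(device='cpu', device_id='0'))

with ng.metadata(device='cpu', device_id='0'):
    scoped = ng.Op()   # picks up the metadata from the enclosing context

assert direct.metadata['device_id'] == '0'
assert scoped.metadata['device_id'] == '0'
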
Example #3
def test_calculate_new_axes_null_axes():
    with pytest.raises(TypeError):
        set_parallel_axes(axes=None, parallel_axis=ax_B)


def test_calculate_new_axes_null_parallel_axis():
    new_axes = set_parallel_axes(axes=axes, parallel_axis=None)
    # Null parallel axis: the calculated axes should keep the same full lengths as the original axes.
    assert new_axes.full_lengths == axes.full_lengths


@pytest.mark.parametrize("from_node, to_node, expected_type", [
    (None, None, None),
    (
        ng.Op(metadata=dict(device='cpu', device_id='0', transformer='cpu0')),
        ng.Op(metadata=dict(device='cpu', device_id='0', transformer='cpu0')),
        None
    ),
    (
        ng.Op(metadata=dict(device='cpu', device_id='0', transformer='cpu0')),
        ng.Op(metadata=dict(device='cpu', device_id='1', transformer='cpu1')),
        'direct'
    ),
    (
        ng.Op(metadata=dict(device='cpu', device_id='0', transformer='cpu0')),
        ng.Op(metadata=dict(device='gpu', device_id='0', transformer='gpu0')),
        'direct'
    ),
    (
        TensorValueOp(ng.constant(1),
Example #4
def test_calculate_new_axes_null_axes():
    with pytest.raises(TypeError):
        calculate_scatter_axes(axes=None, scatter_axis=ax_B, num_devices=2)


def test_calculate_new_axes_null_parallel_axis():
    new_axes = calculate_scatter_axes(axes=axes,
                                      scatter_axis=None,
                                      num_devices=1)
    # Null parallel axis: the calculated axes should keep the same full lengths as the original axes.
    assert new_axes.full_lengths == axes.full_lengths


@pytest.mark.parametrize("from_node, to_node, expected_type", [
    (None, None, None),
    (ng.Op(metadata=dict(device='cpu', device_id='0', transformer='cpu0')),
     ng.Op(metadata=dict(device='cpu', device_id='0', transformer='cpu0')),
     None),
    (ng.Op(metadata=dict(device='cpu', device_id='0', transformer='cpu0')),
     ng.Op(metadata=dict(device='cpu', device_id='1', transformer='cpu1')),
     'direct'),
    (ng.Op(metadata=dict(device='cpu', device_id='0', transformer='cpu0')),
     ng.Op(metadata=dict(device='gpu', device_id='0', transformer='gpu0')),
     'direct'),
    (TensorValueOp(ng.constant(1),
                   metadata=dict(
                       device='cpu', device_id='0', transformer='cpu0')),
     ng.Op(metadata=dict(device='cpu',
                         device_id=('1', '2'),
                         parallel=ax_B,
                         transformer=['cpu1', 'cpu2'])), None),
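
Neither `set_parallel_axes` (Example #3) nor `calculate_scatter_axes` (Example #4) is shown in this listing. The sketch below is a hypothetical stand-in that only mirrors the two behaviours the tests assert: `axes=None` raises `TypeError`, and a `None` scatter axis leaves `full_lengths` unchanged. The real helper splits the scatter axis across devices and may differ in detail.

# Hypothetical stand-in for calculate_scatter_axes -- not the real ngraph
# helper, only an illustration of the behaviour asserted above.
import ngraph as ng


def calculate_scatter_axes_sketch(axes, scatter_axis, num_devices):
    if axes is None:
        raise TypeError("axes must be an Axes object, not None")
    if scatter_axis is None:
        # Null scatter axis: nothing is split, so full_lengths are unchanged.
        return axes
    # Split the scatter axis evenly across devices; all other axes are kept.
    return ng.make_axes([
        ng.make_axis(length=a.length // num_devices, name=a.name)
        if a == scatter_axis else a
        for a in axes
    ])
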