Example No. 1
def test_spatial_convolution_transpose_with_output(input_size, conv_size, result, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    # fill the input operand with the sequence 0, 1, 2, ... up to the total
    # size, then reshape it to input_size
    total_size = np.prod(input_size)
    x = np.arange(total_size, dtype=dt)
    input_operand = x.reshape(input_size)

    a = C.input_variable(shape=input_operand.shape[1:],
                         dtype=sanitize_dtype_cntk(precision),
                         needs_gradient=False,
                         name='a')

    # do the same for convolution kernel
    total_size = np.prod(conv_size)
    y = np.arange(total_size, dtype=dt)
    conv_map = constant(value=y.reshape(conv_size), device=dev)

    from cntk import convolution_transpose
    input_op = convolution_transpose(conv_map, a, auto_padding=[True], strides=2, output_shape=(1,5,6), reduction_rank=0)

    forward_input = {a: input_operand}
    expected_forward = AA(result)

    unittest_helper(input_op, forward_input, expected_forward,
                    None, device_id=device_id, precision=precision)
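
Side note on the sizes in this test: a transposed convolution's spatial extent follows the usual relation out = (in - 1) * stride + kernel - total_padding, and with strides greater than 1 several output sizes can map back to the same input size, which is why output_shape is passed explicitly above. The helper below is a hypothetical sketch of that relation, not part of the test suite or the CNTK API.

# Hypothetical helper (not CNTK API) for the usual transposed-convolution
# output-size relation.
def deconv_output_dim(in_dim, kernel_dim, stride=1, pad_total=0):
    return (in_dim - 1) * stride + kernel_dim - pad_total

# e.g. with stride 2 and a 2-wide kernel, a 3-wide input can expand to
# either 6 (no padding) or 5 (one padded column) -- exactly the kind of
# ambiguity that an explicit output_shape resolves.
print(deconv_output_dim(3, 2, stride=2))               # 6
print(deconv_output_dim(3, 2, stride=2, pad_total=1))  # 5
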
Example No. 2
def test_convolution_transpose(input_size, conv_size, result, device_id,
                               precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    # fill the input operand with the sequence 0, 1, 2, ... up to the total
    # size, then reshape it to input_size
    total_size = np.prod(input_size)
    x = np.arange(total_size, dtype=dt)
    input_operand = x.reshape(input_size)

    a = C.input_variable(shape=input_operand.shape[1:],
                         dtype=sanitize_dtype_cntk(precision),
                         needs_gradient=False,
                         name='a')

    # do the same for convolution kernel
    total_size = np.prod(conv_size)
    y = np.arange(total_size, dtype=dt)
    conv_map = constant(value=y.reshape(conv_size), device=dev)

    from cntk import convolution_transpose
    input_op = convolution_transpose(conv_map, a, auto_padding=[False])

    forward_input = {a: input_operand}
    expected_forward = AA(result)

    unittest_helper(input_op,
                    forward_input,
                    expected_forward,
                    None,
                    device_id=device_id,
                    precision=precision)
Example No. 3
def test_convolution_transpose(tmpdir, dtype, device_id):
    pytest.skip('Needs to be fixed after removal of batch axis change.')
    if device_id == -1 and dtype == np.float16:
        pytest.skip('Test only runs on GPU')
    device = cntk_device(device_id)
    with C.default_options(dtype=dtype):
        img_shape = (1, 3, 3)
        img = np.asarray(np.random.uniform(-1, 1, img_shape), dtype=dtype)

        x = C.input_variable(img.shape)
        filter = np.reshape(np.array([2, -1, -1, 2], dtype=dtype), (1, 2, 2))
        kernel = C.constant(value=filter)
        root_node = C.convolution_transpose(kernel,
                                            x,
                                            auto_padding=[False],
                                            output_shape=(1, 4, 4))

        filename = os.path.join(str(tmpdir), R'conv_transpose.onnx')
        root_node.save(filename, format=C.ModelFormat.ONNX)

        loaded_node = C.Function.load(filename, format=C.ModelFormat.ONNX)
        assert root_node.shape == loaded_node.shape

        x_ = loaded_node.arguments[0]
        assert np.allclose(loaded_node.eval({x_: [img]}, device=device),
                           root_node.eval({x: [img]}, device=device))
Example No. 4
def test_ConvTranspose(tmpdir, dtype, device_id):
    if device_id == -1 and dtype == np.float16:
        pytest.skip('Test is skipped on CPU with float16 data')
    device = cntk_device(device_id)
    with C.default_options(dtype=dtype):
        # Keep the shapes below as they are, because this tests an earlier bug.
        input_shape = (48, 16, 16)
        img = np.reshape(np.arange(np.prod(input_shape), dtype=dtype),
                         input_shape)

        x = C.input_variable(input_shape)

        kernel_shape = (
            48, 32, 3, 3
        )  # For convolution_transpose the shape is (I x O x W x H)
        kernel = C.constant(value=np.ones(shape=(kernel_shape), dtype=dtype))

        conv_trans_model = C.convolution_transpose(
            kernel,
            x,
            strides=(2, 2),
            output_shape=(32, 32, 32),
            auto_padding=[False, True, True])

        verify_one_input(conv_trans_model, img, tmpdir, 'ConvTranspose_0',
                         device)
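
Reading the shapes in this test: the kernel (48, 32, 3, 3) maps 48 input channels to 32 output channels (the I x O x W x H convention noted in the comment), and with stride 2 plus auto-padding on the spatial axes each 16-pixel dimension is upsampled to twice its size. The lines below are a rough sanity check under the assumption that spatial auto-padding behaves like "same" padding here; they are not part of the test.

# Rough sanity check (assumption: spatial auto_padding acts like "same"
# padding, so each padded dimension scales by the stride).
in_channels, out_channels = 48, 32   # from the (I x O x W x H) kernel shape
in_spatial, stride = 16, 2
print((out_channels, in_spatial * stride, in_spatial * stride))  # (32, 32, 32)
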
Example No. 5
def test_ConvTranspose(tmpdir):
    # Keep the shapes below as they are, because this tests an earlier bug.
    input_shape = (48, 16, 16) 
    img = np.reshape(np.arange(np.prod(input_shape), dtype = np.float32), input_shape) 

    x = C.input_variable(input_shape)

    kernel_shape = (48, 32, 3, 3) # For convolution_transpose the shape is (I x O x W x H)
    kernel = C.constant(value = np.ones(shape=(kernel_shape), dtype = np.float32))

    conv_trans_model = C.convolution_transpose(kernel, x, strides=(2, 2), output_shape=(32, 32, 32), auto_padding = [False, True, True])

    verify_one_input(conv_trans_model, img, tmpdir, 'ConvTranspose_0')
Example No. 6
def test_convolution_transpose_attributes():
    x = C.input_variable( (1, 5, 5) )
    filter = np.reshape(np.array([2, -1, -1, 2], dtype = np.float32), (1, 2, 2))
    kernel = C.constant(value = filter)
    f = C.convolution_transpose(kernel , x, auto_padding = [False])
    d = f.root_function.attributes
    expected = {'autoPadding': [False, False, False], 
        'sharing': [True, True, True], 
        'strides': (1, 1, 1), 
        'maxTempMemSizeInSamples': 0, 
        'upperPad': (0, 0, 0), 
        'lowerPad': (0, 0, 0),
        'transpose': True,
        'outputShape': (0,)
        }
    _check(expected, d)
Example No. 7
def test_convolution_transpose_attributes():
    x = C.input( (1, 5, 5) )
    filter = np.reshape(np.array([2, -1, -1, 2], dtype = np.float32), (1, 2, 2))
    kernel = C.constant(value = filter)
    f = C.convolution_transpose(kernel , x, auto_padding = [False])
    d = f.root_function.attributes
    expected = {'autoPadding': [False, False, False], 
        'sharing': [True, True, True], 
        'strides': (1, 1, 1), 
        'maxTempMemSizeInSamples': 0, 
        'upperPad': (0, 0, 0), 
        'lowerPad': (0, 0, 0),
        'transpose': True,
        'outputShape': (0,)
        }
    _check(expected, d)
Example No. 8
def test_ConvTranspose(tmpdir, dtype, device_id):
    if device_id == -1 and dtype == np.float16:
        pytest.skip('Test is skipped on CPU with float16 data')
    device = cntk_device(device_id)
    with C.default_options(dtype=dtype):
        # Keep the shapes below as they are, because this tests an earlier bug.
        input_shape = (48, 16, 16) 
        img = np.reshape(np.arange(np.prod(input_shape), dtype = dtype), input_shape) 

        x = C.input_variable(input_shape)

        kernel_shape = (48, 32, 3, 3) # For convolution_transpose the shape is (I x O x W x H)
        kernel = C.constant(value = np.ones(shape=(kernel_shape), dtype = dtype))

        conv_trans_model = C.convolution_transpose(kernel, x, strides=(2, 2), output_shape=(32, 32, 32), auto_padding = [False, True, True])

        verify_one_input(conv_trans_model, img, tmpdir, 'ConvTranspose_0', device)
Example No. 9
def test_convolution_transpose(tmpdir):
    img_shape = (1, 3, 3)
    img = np.asarray(np.random.uniform(-1, 1, img_shape), dtype=np.float32)

    x = C.input_variable(img.shape)
    filter = np.reshape(np.array([2, -1, -1, 2], dtype = np.float32), (1, 2, 2))
    kernel = C.constant(value = filter)
    root_node = C.convolution_transpose(kernel, x, auto_padding=[False], output_shape=(1, 4, 4))
    
    filename = os.path.join(str(tmpdir), R'conv_transpose.onnx')
    root_node.save(filename, format=C.ModelFormat.ONNX)

    loaded_node = C.Function.load(filename, format=C.ModelFormat.ONNX)
    assert root_node.shape == loaded_node.shape

    x_ = loaded_node.arguments[0]
    assert np.allclose(loaded_node.eval({x_:[img]}), root_node.eval({x:[img]}))
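
For reference, the same node can also be evaluated directly, without the ONNX round trip. Below is a minimal sketch reusing the kernel and shapes above, assuming the default CNTK device; the printed shape includes the batch axis for the single sample.

import numpy as np
import cntk as C

x = C.input_variable((1, 3, 3))
kernel = C.constant(value=np.reshape(np.array([2, -1, -1, 2], dtype=np.float32), (1, 2, 2)))
op = C.convolution_transpose(kernel, x, auto_padding=[False], output_shape=(1, 4, 4))

img = np.random.uniform(-1, 1, (1, 3, 3)).astype(np.float32)
out = op.eval({x: [img]})
print(out.shape)  # one 1 x 4 x 4 sample, with a leading batch axis of size 1
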
Example No. 11
def test_convolution_transpose(tmpdir, dtype, device_id):
    if device_id == -1 and dtype == np.float16:
        pytest.skip('Test only runs on GPU')
    device = cntk_device(device_id)
    with C.default_options(dtype=dtype):
        img_shape = (1, 3, 3)
        img = np.asarray(np.random.uniform(-1, 1, img_shape), dtype=dtype)

        x = C.input_variable(img.shape)
        filter = np.reshape(np.array([2, -1, -1, 2], dtype=dtype), (1, 2, 2))
        kernel = C.constant(value = filter)
        root_node = C.convolution_transpose(kernel, x, auto_padding=[False], output_shape=(1, 4, 4))

        filename = os.path.join(str(tmpdir), R'conv_transpose.onnx')
        root_node.save(filename, format=C.ModelFormat.ONNX)

        loaded_node = C.Function.load(filename, format=C.ModelFormat.ONNX)
        assert root_node.shape == loaded_node.shape

        x_ = loaded_node.arguments[0]
        assert np.allclose(loaded_node.eval({x_:[img]}, device=device), root_node.eval({x:[img]}, device=device))