Example #1
def test_conv_with_freedim_model(tmpdir):    
    img_shape = (3, 32, 32)
    img = np.asarray(np.random.uniform(-1, 1, img_shape), dtype=np.float32)

    x = C.input_variable((3, C.FreeDimension, C.FreeDimension))

    conv_size1 = (32, 3, 5, 5)
    conv_map1 = C.constant(value=np.arange(np.prod(conv_size1), dtype=np.float32).reshape(conv_size1))
    conv_op1 = C.convolution(conv_map1, x, auto_padding=(False, True, True))
    relu_op1 = C.relu(conv_op1)
    maxpool_op1 = C.pooling(relu_op1, C.MAX_POOLING, (2, 2), (2, 2))

    conv_size2 = (64, 32, 3, 3)
    conv_map2 = C.constant(value=np.arange(np.prod(conv_size2), dtype=np.float32).reshape(conv_size2))
    conv_op2 = C.convolution(conv_map2, maxpool_op1, auto_padding=(False, True, True))
    relu_op2 = C.relu(conv_op2)
    root_node = C.pooling(relu_op2, C.MAX_POOLING, (2, 2), (2, 2))

    filename = os.path.join(str(tmpdir), R'conv_with_freedim.onnx')
    root_node.save(filename, format=C.ModelFormat.ONNX)

    loaded_node = C.Function.load(filename, format=C.ModelFormat.ONNX)
    assert root_node.shape == loaded_node.shape

    x_ = loaded_node.arguments[0]
    assert np.allclose(loaded_node.eval({x_:img}), root_node.eval({x:img}))

    # Additional test to ensure that loaded_node can be saved as both ONNX and CNTKv2 again.
    filename2 = os.path.join(str(tmpdir), R'conv_with_freedim2.onnx')
    loaded_node.save(filename2, format=C.ModelFormat.ONNX)

    filename3 = os.path.join(str(tmpdir), R'conv_with_freedim2.cntkmodel')
    loaded_node.save(filename3, format=C.ModelFormat.CNTKv2)
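A free-dimension input lets the same graph be evaluated on inputs whose spatial extent is only known at evaluation time. A minimal sketch (separate from the test above, using the same C/np aliases) evaluating one pooling node on two different image sizes:

import numpy as np
import cntk as C

x = C.input_variable((1, C.FreeDimension, C.FreeDimension))
pool = C.pooling(x, C.MAX_POOLING, (2, 2), (2, 2))

small = np.arange(16, dtype=np.float32).reshape(1, 4, 4)
large = np.arange(64, dtype=np.float32).reshape(1, 8, 8)

# The spatial output dimensions are inferred separately for each call.
assert pool.eval({x: small}).shape[-2:] == (2, 2)
assert pool.eval({x: large}).shape[-2:] == (4, 4)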
Example #2
def test_max_unpooling_free_static_axes(warmup_input_size, second_input_size,
                                        pooling_window, strides, device_id,
                                        precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    # Exercise operation twice - once with warmup input, second time to get the test output.
    x = C.input_variable(
        (warmup_input_size[0:2] + tuple([C.FreeDimension] *
                                        (len(warmup_input_size) - 2))))
    y = C.pooling(x, C.MAX_POOLING, pooling_window, strides)
    z = C.unpooling(y, x, C.MAX_UNPOOLING, pooling_window, strides)

    x_data_warmup = np.arange(np.prod(warmup_input_size), dtype=dt)
    x_data_warmup = x_data_warmup.reshape(warmup_input_size)
    output_warmup = z.eval({x: x_data_warmup}, device=dev)

    x_data_test = np.arange(np.prod(second_input_size), dtype=dt)
    x_data_test = x_data_test.reshape(second_input_size)
    output_test = z.eval({x: x_data_test}, device=dev)

    # Generate reference output using fixed axes.
    x_ref = C.input_variable(second_input_size)
    y_ref = C.pooling(x_ref, C.MAX_POOLING, pooling_window, strides)
    z_ref = C.unpooling(y_ref, x_ref, C.MAX_UNPOOLING, pooling_window, strides)
    output_ref = z_ref.eval({x_ref: x_data_test}, device=dev)

    assert np.allclose(output_test, output_ref, atol=1e-4)
Example #3
def test_op_average_pooling_include_pad(input_size, pooling_window, strides,
                                        result, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]

    total_size = np.prod(input_size)
    x = np.arange(1, total_size + 1, 1, dtype=dt)
    input_operand = x.reshape(input_size)

    a = C.input_variable(shape=input_operand.shape[1:],
                         dtype=sanitize_dtype_cntk(precision),
                         needs_gradient=True,
                         name='a')

    backward = (1 / np.prod(pooling_window)) * np.ones_like(input_operand)

    from cntk import pooling
    input_op = pooling(a,
                       AVG_POOLING,
                       pooling_window,
                       strides,
                       auto_padding=[True],
                       include_pad=True)

    forward_input = {a: input_operand}

    expected_forward = AA(result)
    expected_backward = {a: backward}

    unittest_helper(input_op,
                    forward_input,
                    expected_forward,
                    expected_backward,
                    device_id=device_id,
                    precision=precision)
Example #4
def test_op_avg_pooling(input_size, pooling_window, strides, result, device_id,
                        precision):
    dt = PRECISION_TO_TYPE[precision]

    # fill input operand with a sequence 1,2,3,... til total size and then
    # resize to input_size
    total_size = np.prod(input_size)
    x = np.arange(1, total_size + 1, 1, dtype=dt)
    input_operand = x.reshape(input_size)

    a = I(shape=input_operand.shape[2:],
          dtype=sanitize_dtype_cntk(precision),
          needs_gradient=True,
          name='a')

    backward = (1 / np.prod(pooling_window)) * np.ones_like(input_operand)

    from cntk import pooling
    input_op = pooling(a,
                       AVG_POOLING,
                       pooling_window,
                       strides,
                       auto_padding=[True])

    forward_input = {a: input_operand}

    expected_forward = AA([result])
    expected_backward = {a: backward}

    unittest_helper(input_op,
                    forward_input,
                    expected_forward,
                    expected_backward,
                    device_id=device_id,
                    precision=precision)
Example #5
def test_op_pooling_geometry(input_size, pooling_window, strides, padding,
                             result, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]

    # fill input operand with a sequence 1,2,3,... til total size and then
    # resize to input_size
    total_size = np.prod(input_size)
    x = np.arange(total_size, dtype=dt)
    input_operand = x.reshape(input_size)

    a = I(shape=input_operand.shape[2:],
          dtype=sanitize_dtype_cntk(precision),
          needs_gradient=False,
          name='a')

    from cntk import pooling
    input_op = pooling(a,
                       MAX_POOLING,
                       pooling_window,
                       strides,
                       auto_padding=padding)

    forward_input = {a: input_operand}
    expected_forward = AA([result])

    unittest_helper(input_op,
                    forward_input,
                    expected_forward,
                    None,
                    device_id=device_id,
                    precision=precision)
Example #6
def test_op_max_pooling(input_size, pooling_window, strides, result, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]

    # fill input operand with a sequence 1,2,3,... til total size and then
    # resize to input_size
    total_size = np.prod(input_size)
    x = np.arange(1, total_size + 1, 1, dtype=dt)
    input_operand = x.reshape(input_size)

    a = I(shape=input_operand.shape[2:],
        dtype=sanitize_dtype_cntk(precision),
        needs_gradient=True,
        name='a')

    result_array = np.asarray(result, dtype=dt)
    max_elements = result_array.reshape(result_array.size).tolist()

    # place 1.0s where maximum elements are
    backward = np.zeros_like(input_operand)
    for element in max_elements:
        backward += np.asarray(input_operand == element)

    from cntk import pooling
    input_op = pooling(a, MAX_POOLING, pooling_window, strides)

    forward_input = {a: input_operand}

    expected_forward = AA([result])
    expected_backward = {a: backward}

    unittest_helper(input_op,
                forward_input, expected_forward, expected_backward,
                device_id=device_id, precision=precision)
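The backward mask built above is just an indicator of where the pooled maxima sit in the input. A tiny worked sketch of the same construction for a hypothetical 1x1x2x2 input with a single 2x2 max-pooling window:

import numpy as np

input_operand = np.array([[[[1., 2.], [3., 4.]]]], dtype=np.float32)
max_elements = [4.0]                      # pooled output of the single 2x2 window
backward = np.zeros_like(input_operand)
for element in max_elements:
    backward += np.asarray(input_operand == element)
# backward is 1.0 only at the position holding 4.0; every other entry stays 0.0.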
Example #7
def test_op_avg_pooling(input_size, pooling_window, strides, result, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]

    # fill input operand with a sequence 1,2,3,... til total size and then
    # resize to input_size
    total_size = np.prod(input_size)
    x = np.arange(1, total_size + 1, 1, dtype=dt)
    input_operand = x.reshape(input_size)

    a = C.sequence.input_variable(shape=input_operand.shape[2:],
                         dtype=sanitize_dtype_cntk(precision),
                         needs_gradient=True,
                         name='a')

    backward = (1 / np.prod(pooling_window)) * np.ones_like(input_operand)

    from cntk import pooling
    input_op = pooling(a, AVG_POOLING, pooling_window, strides, auto_padding=[True])

    forward_input = {a: input_operand}

    expected_forward = AA([result])
    expected_backward = {a: backward}

    unittest_helper(input_op, forward_input, expected_forward,
                expected_backward, device_id=device_id, precision=precision)
Example #8
def test_op_pooling_geometry(input_size, pooling_window, strides, padding, result, use_input_shape_with_inferred_dimension, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]

    # fill input operand with a sequence 1,2,3,... til total size and then
    # resize to input_size
    total_size = np.prod(input_size)
    x = np.arange(total_size, dtype=dt)
    input_operand = x.reshape(input_size)

    pool_input_shape = input_operand.shape[1:]
    if use_input_shape_with_inferred_dimension:
        pool_input_shape = tuple(-1 for x in pool_input_shape)

    a = C.input_variable(shape=pool_input_shape,
                dtype=sanitize_dtype_cntk(precision),
                needs_gradient=False,
                name='a')

    from cntk import pooling
    input_op = pooling(a, MAX_POOLING, pooling_window, strides, auto_padding=padding)

    forward_input = {a: input_operand}
    expected_forward = AA(result)

    unittest_helper(input_op, forward_input, expected_forward,
                    None, device_id=device_id, precision=precision)
Example #9
def test_op_pooling_ceil(input_size, pooling_window, strides, result, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]

    # fill input operand with a sequence 1,2,3,... til total size and then
    # resize to input_size
    total_size = np.prod(input_size)
    x = np.arange(1, total_size + 1, 1, dtype=dt)
    input_operand = x.reshape(input_size)

    a = C.input_variable(shape=input_operand.shape[1:], dtype=sanitize_dtype_cntk(precision), needs_gradient=True, name='a')

    result_array = np.asarray(result, dtype=dt)
    max_elements = result_array.reshape(result_array.size).tolist()

    # place 1.0s where maximum elements are
    backward = np.zeros_like(input_operand)
    for element in max_elements:
        backward += np.asarray(input_operand == element)

    from cntk import pooling
    input_op = pooling(a, MAX_POOLING, pooling_window, strides, ceil_out_dim=True)

    forward_input = {a: input_operand}

    expected_forward = AA(result)
    expected_backward = {a: backward}

    unittest_helper(input_op, forward_input, expected_forward, expected_backward, device_id=device_id,
                    precision=precision)
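The ceil_out_dim flag above only changes how the output extent is rounded when the pooling window does not tile the input evenly. A minimal sketch, assuming a hypothetical 1x3x3 input, a 2x2 window and stride 2: floor rounding yields a 1x1 output, ceil rounding a 2x2 output.

import cntk as C

x = C.input_variable((1, 3, 3))
floor_pool = C.pooling(x, C.MAX_POOLING, (2, 2), (2, 2))
ceil_pool = C.pooling(x, C.MAX_POOLING, (2, 2), (2, 2), ceil_out_dim=True)
assert floor_pool.shape == (1, 1, 1)
assert ceil_pool.shape == (1, 2, 2)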
Example #10
def test_conv_with_freedim_model(tmpdir, dtype, device_id):
    pytest.skip('Needs to be fixed after removal of batch axis change.')
    if device_id == -1 and dtype == np.float16:
        pytest.skip('Test only runs on GPU')
    device = cntk_device(device_id)
    with C.default_options(dtype=dtype):
        img_shape = (3, 32, 32)
        img = np.asarray(np.random.uniform(-1, 1, img_shape), dtype=dtype)

        x = C.input_variable((3, C.FreeDimension, C.FreeDimension))

        conv_size1 = (32, 3, 5, 5)
        conv_map1 = C.constant(value=np.arange(
            np.prod(conv_size1), dtype=dtype).reshape(conv_size1))
        conv_op1 = C.convolution(conv_map1,
                                 x,
                                 auto_padding=(False, True, True))
        relu_op1 = C.relu(conv_op1)
        maxpool_op1 = C.pooling(relu_op1, C.MAX_POOLING, (2, 2), (2, 2))

        conv_size2 = (64, 32, 3, 3)
        conv_map2 = C.constant(value=np.arange(
            np.prod(conv_size2), dtype=dtype).reshape(conv_size2))
        conv_op2 = C.convolution(conv_map2,
                                 maxpool_op1,
                                 auto_padding=(False, True, True))
        relu_op2 = C.relu(conv_op2)
        root_node = C.pooling(relu_op2, C.MAX_POOLING, (2, 2), (2, 2))

        filename = os.path.join(str(tmpdir), R'conv_with_freedim.onnx')
        root_node.save(filename, format=C.ModelFormat.ONNX)

        loaded_node = C.Function.load(filename, format=C.ModelFormat.ONNX)
        assert root_node.shape == loaded_node.shape

        x_ = loaded_node.arguments[0]
        assert np.allclose(loaded_node.eval({x_: img}, device=device),
                           root_node.eval({x: img}, device=device))

        # Additional test to ensure that loaded_node can be saved as both ONNX and CNTKv2 again.
        filename2 = os.path.join(str(tmpdir), R'conv_with_freedim2.onnx')
        loaded_node.save(filename2, format=C.ModelFormat.ONNX)

        filename3 = os.path.join(str(tmpdir), R'conv_with_freedim2.cntkmodel')
        loaded_node.save(filename3, format=C.ModelFormat.CNTKv2)
Example #11
def test_MaxPool(tmpdir, dtype, device_id):
    if device_id == -1 and dtype == np.float16:
        pytest.skip('Test is skipped on CPU with float16 data')
    device = cntk_device(device_id)
    with C.default_options(dtype=dtype):
        img = np.reshape(np.arange(16, dtype=dtype), [1, 4, 4])
        x = C.input_variable(img.shape)
        model = C.pooling(x, C.MAX_POOLING, (2, 2), (3, 3))
        verify_one_input(model, img, tmpdir, 'MaxPool_1', device)
Example #12
def test_MaxPool(tmpdir, dtype, device_id):
    if device_id == -1 and dtype == np.float16:
        pytest.skip('Test is skipped on CPU with float16 data')
    device = cntk_device(device_id)
    with C.default_options(dtype=dtype):
        img = np.reshape(np.arange(16, dtype = dtype), [1, 4, 4])
        x = C.input_variable(img.shape)
        model = C.pooling(x, C.MAX_POOLING, (2,2), (3,3))
        verify_one_input(model, img, tmpdir, 'MaxPool_1', device)
Example #13
def test_free_static_pooling(input_shape, pooling_type, window_shape, strides,
                             expected):
    img = np.reshape(np.arange(16, dtype=np.float32), [1, 4, 4])
    x = C.input_variable(input_shape)
    avg_pooling = C.pooling(x, pooling_type, window_shape, strides)
    assert avg_pooling.shape == (C.FreeDimension, C.FreeDimension,
                                 C.FreeDimension)
    assert np.allclose(avg_pooling.eval({x: [img]}),
                       np.asarray(expected, dtype=np.float32))
Example #14
def test_op_max_unpooling(input_size, pooling_window, strides, autopad, result,
                          device_id, precision):
    dt = PRECISION_TO_TYPE[precision]

    # fill input operand with a sequence 1,2,3,... til total size and then
    # resize to input_size
    total_size = np.prod(input_size)
    x = np.arange(1, total_size + 1, 1, dtype=dt)
    input_operand = x.reshape(input_size)

    a = C.sequence.input_variable(shape=input_operand.shape[2:],
                                  dtype=sanitize_dtype_cntk(precision),
                                  needs_gradient=True,
                                  name='a')

    pooling_result = np.asarray(result, dtype=dt)
    max_elements = pooling_result.reshape(pooling_result.size).tolist()

    # place 1.0s where maximum elements are
    backward = np.zeros_like(input_operand)
    for element in max_elements:
        backward += np.asarray(input_operand == element)

    from cntk import pooling, unpooling
    p = pooling(a, MAX_POOLING, pooling_window, strides, autopad)
    u = unpooling(p, a, MAX_UNPOOLING, pooling_window, strides, autopad)
    q = pooling(u, MAX_POOLING, pooling_window, strides, autopad)

    forward_input = {a: input_operand}

    expected_forward = backward * input_operand
    expected_backward = {a: backward}

    unittest_helper(u,
                    forward_input,
                    expected_forward,
                    expected_backward,
                    device_id=device_id,
                    precision=precision)
    assert np.allclose(p.eval(forward_input), q.eval(forward_input))
Example #15
def test_avg_pooling_free_static_axes(warmup_input_size, second_input_size, pooling_window, strides, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    # Exercise operation twice - once with warmup input, second time to get the test output.
    x = C.input_variable((warmup_input_size[0:2]+ tuple([C.FreeDimension]*(len(warmup_input_size)-2))))
    y = C.pooling(x, C.AVG_POOLING, pooling_window, strides)
    
    x_data_warmup = np.arange(np.prod(warmup_input_size), dtype=dt)
    x_data_warmup = x_data_warmup.reshape(warmup_input_size)
    output_warmup = y.eval({x:x_data_warmup}, device=dev)
    
    x_data_test = np.arange(np.prod(second_input_size), dtype=dt)
    x_data_test = x_data_test.reshape(second_input_size)
    output_test = y.eval({x:x_data_test}, device=dev)
    
    # Generate reference output using fixed axes.
    x_ref = C.input_variable(second_input_size)
    y_ref = C.pooling(x_ref, C.AVG_POOLING, pooling_window, strides)
    output_ref = y_ref.eval({x_ref:x_data_test}, device=dev)

    assert np.allclose(output_test, output_ref, atol=1e-4)
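The free-axes input shape used in these tests keeps the leading dimensions fixed and frees only the trailing spatial ones. A minimal sketch of that expression, assuming a hypothetical warmup_input_size of (1, 2, 4, 4):

import cntk as C

warmup_input_size = (1, 2, 4, 4)
free_shape = warmup_input_size[0:2] + tuple([C.FreeDimension] * (len(warmup_input_size) - 2))
# free_shape == (1, 2, C.FreeDimension, C.FreeDimension): the first two dimensions stay
# fixed while the two trailing spatial dimensions are inferred from the data at eval time.
assert free_shape == (1, 2, C.FreeDimension, C.FreeDimension)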
Example #16
def test_conv_with_freedim_model(tmpdir, dtype, device_id):
    if device_id == -1 and dtype == np.float16:
        pytest.skip('Test only runs on GPU')
    device = cntk_device(device_id)
    with C.default_options(dtype=dtype):
        img_shape = (3, 32, 32)
        img = np.asarray(np.random.uniform(-1, 1, img_shape), dtype=dtype)

        x = C.input_variable((3, C.FreeDimension, C.FreeDimension))

        conv_size1 = (32, 3, 5, 5)
        conv_map1 = C.constant(value=np.arange(np.prod(conv_size1), dtype=dtype).reshape(conv_size1))
        conv_op1 = C.convolution(conv_map1, x, auto_padding=(False, True, True))
        relu_op1 = C.relu(conv_op1)
        maxpool_op1 = C.pooling(relu_op1, C.MAX_POOLING, (2, 2), (2, 2))

        conv_size2 = (64, 32, 3, 3)
        conv_map2 = C.constant(value=np.arange(np.prod(conv_size2), dtype=dtype).reshape(conv_size2))
        conv_op2 = C.convolution(conv_map2, maxpool_op1, auto_padding=(False, True, True))
        relu_op2 = C.relu(conv_op2)
        root_node = C.pooling(relu_op2, C.MAX_POOLING, (2, 2), (2, 2))

        filename = os.path.join(str(tmpdir), R'conv_with_freedim.onnx')
        root_node.save(filename, format=C.ModelFormat.ONNX)

        loaded_node = C.Function.load(filename, format=C.ModelFormat.ONNX)
        assert root_node.shape == loaded_node.shape

        x_ = loaded_node.arguments[0]
        assert np.allclose(loaded_node.eval({x_:img}, device=device), root_node.eval({x:img}, device=device))

        # Additional test to ensure that loaded_node can be saved as both ONNX and CNTKv2 again.
        filename2 = os.path.join(str(tmpdir), R'conv_with_freedim2.onnx')
        loaded_node.save(filename2, format=C.ModelFormat.ONNX)

        filename3 = os.path.join(str(tmpdir), R'conv_with_freedim2.cntkmodel')
        loaded_node.save(filename3, format=C.ModelFormat.CNTKv2)
Example #17
def test_op_max_unpooling(input_size, pooling_window, strides, autopad, result, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]


    # fill input operand with a sequence 1,2,3,... til total size and then
    # resize to input_size
    total_size = np.prod(input_size)
    x = np.arange(1, total_size + 1, 1, dtype=dt)
    input_operand = x.reshape(input_size)

    a = C.sequence.input_variable(shape=input_operand.shape[2:],
                         dtype=sanitize_dtype_cntk(precision),
                         needs_gradient=True,
                         name='a')

    pooling_result = np.asarray(result, dtype=dt)
    max_elements = pooling_result.reshape(pooling_result.size).tolist()

    # place 1.0s where maximum elements are
    backward = np.zeros_like(input_operand)
    for element in max_elements:
        backward += np.asarray(input_operand == element)

    from cntk import pooling, unpooling
    p = pooling(a, MAX_POOLING, pooling_window, strides, autopad)
    u = unpooling(p, a, MAX_UNPOOLING, pooling_window, strides, autopad)
    q = pooling(u, MAX_POOLING, pooling_window, strides, autopad)

    forward_input = {a: input_operand}

    expected_forward = backward * input_operand
    expected_backward = {a: backward}

    unittest_helper(u,
                forward_input, expected_forward, expected_backward,
                device_id=device_id, precision=precision)
    assert np.allclose(p.eval(forward_input), q.eval(forward_input))
Example #18
def test_pooling(tmpdir, auto_padding, pooling_type):
    img_shape = (1, 5, 5)
    img = np.asarray(np.random.uniform(-1, 1, img_shape), dtype=np.float32)

    x = C.input_variable(img.shape)    
    pool_type = C.MAX_POOLING if pooling_type else C.AVG_POOLING
    root_node = C.pooling(x, pool_type, (2, 2), auto_padding=auto_padding)

    filename = os.path.join(str(tmpdir), R'conv.onnx')
    root_node.save(filename, format=C.ModelFormat.ONNX)

    loaded_node = C.Function.load(filename, format=C.ModelFormat.ONNX)
    assert root_node.shape == loaded_node.shape

    x_ = loaded_node.arguments[0]
    assert np.allclose(loaded_node.eval({x_:[img]}), root_node.eval({x:[img]}))
Example #20
def test_pooling(tmpdir, auto_padding, pooling_type, dtype, device_id):
    if device_id == -1 and dtype == np.float16:
        pytest.skip('Test only runs on GPU')
    device = cntk_device(device_id)
    with C.default_options(dtype=dtype):
        img_shape = (1, 5, 5)
        img = np.asarray(np.random.uniform(-1, 1, img_shape), dtype=dtype)

        x = C.input_variable(img.shape)    
        pool_type = C.MAX_POOLING if pooling_type else C.AVG_POOLING
        root_node = C.pooling(x, pool_type, (2, 2), auto_padding=auto_padding)

        filename = os.path.join(str(tmpdir), R'conv.onnx')
        root_node.save(filename, format=C.ModelFormat.ONNX)

        loaded_node = C.Function.load(filename, format=C.ModelFormat.ONNX)
        assert root_node.shape == loaded_node.shape

        x_ = loaded_node.arguments[0]
        assert np.allclose(loaded_node.eval({x_:[img]}, device=device), root_node.eval({x:[img]}, device=device))
Example #21
def test_pooling(tmpdir, auto_padding, pooling_type, dtype, device_id):
    if device_id == -1 and dtype == np.float16:
        pytest.skip('Test only runs on GPU')
    device = cntk_device(device_id)
    with C.default_options(dtype=dtype):
        img_shape = (1, 5, 5)
        img = np.asarray(np.random.uniform(-1, 1, img_shape), dtype=dtype)

        x = C.input_variable(img.shape)
        pool_type = C.MAX_POOLING if pooling_type else C.AVG_POOLING
        root_node = C.pooling(x, pool_type, (2, 2), auto_padding=auto_padding)

        filename = os.path.join(str(tmpdir), R'conv.onnx')
        root_node.save(filename, format=C.ModelFormat.ONNX)

        loaded_node = C.Function.load(filename, format=C.ModelFormat.ONNX)
        assert root_node.shape == loaded_node.shape

        x_ = loaded_node.arguments[0]
        assert np.allclose(loaded_node.eval({x_: [img]}, device=device),
                           root_node.eval({x: [img]}, device=device))
Example #22
def test_op_average_pooling_include_pad(input_size, pooling_window, strides, result, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]

    total_size = np.prod(input_size)
    x = np.arange(1, total_size + 1, 1, dtype=dt)
    input_operand = x.reshape(input_size)

    a = C.input_variable(shape=input_operand.shape[1:], dtype=sanitize_dtype_cntk(precision), needs_gradient=True, name='a')

    backward = (1 / np.prod(pooling_window)) * np.ones_like(input_operand)

    from cntk import pooling
    input_op = pooling(a, AVG_POOLING, pooling_window, strides, auto_padding=[True], include_pad=True)

    forward_input = {a: input_operand}

    expected_forward = AA(result)
    expected_backward = {a: backward}

    unittest_helper(input_op, forward_input, expected_forward, expected_backward,
                    device_id=device_id, precision=precision)
Example #23
def pooling(operand, pooling_type, pooling_window_shape, strides=(1,), auto_padding=[False], 
            lower_pad=(0,), upper_pad=(0,), name=''):
    '''
    Pooling operation over the operand using the specified pooling window.
    Args:
        operand: the tensor to pool over
        pooling_type: max or average pooling
        pooling_window_shape: dimensions of the pooling window
        strides: strides of the pooling window
        auto_padding: per-axis flags enabling automatic padding
        lower_pad: explicit padding at the start of each axis
        upper_pad: explicit padding at the end of each axis
        name (str): the name of the node in the network
    Returns:
        :class:`cntk.Function`
    '''
    from cntk import pooling
    operand = sanitize_input(operand)
    pooling_window_shape = sanitize_shape(pooling_window_shape)
    strides = sanitize_shape(strides)
    lower_pad = sanitize_shape(lower_pad)
    upper_pad = sanitize_shape(upper_pad)
    return pooling(operand, pooling_type, pooling_window_shape, strides, auto_padding,
                   lower_pad, upper_pad, name).output()
Example #25
def test_free_static_pooling(input_shape, pooling_type, window_shape, strides, expected):
    img = np.reshape(np.arange(16, dtype=np.float32), [1, 4, 4])
    x = C.input_variable(input_shape)
    avg_pooling = C.pooling(x, pooling_type, window_shape, strides)
    assert avg_pooling.shape == (C.FreeDimension, C.FreeDimension, C.FreeDimension)
    assert np.allclose(avg_pooling.eval({x:[img]}), np.asarray(expected, dtype=np.float32))
Example #26
def test_MaxPool(tmpdir):
    pytest.skip('Need to support new ONNX spec.')
    img = np.reshape(np.arange(16, dtype = np.float32), [1, 4, 4])
    x = C.input_variable(img.shape)
    model = C.pooling(x, C.MAX_POOLING, (2,2), (3,3))
    verify_one_input(model, img, tmpdir, 'MaxPool_1')
Example #27
def test_AveragePool(tmpdir):
    img = np.reshape(np.arange(16, dtype=np.float32), [1, 4, 4])
    x = C.input_variable(img.shape)
    model = C.pooling(x, C.AVG_POOLING, (2, 2), (2, 2))

    verify_one_input(model, img, tmpdir, 'AveragePool')
Example #28
def test_MaxPool(tmpdir):
    img = np.reshape(np.arange(16, dtype=np.float32), [1, 4, 4])
    x = C.input_variable(img.shape)
    model = C.pooling(x, C.MAX_POOLING, (2, 2), (3, 3))
    verify_one_input(model, img, tmpdir, 'MaxPool_1')
Example #29
def pooling_unpooling(x):
    y = pooling(x, C.AVG_POOLING, (2,2), (2,2), auto_padding=[True])
    return unpooling(y, x, C.MAX_UNPOOLING, (2,2), (2,2), auto_padding=[True])
Example #30
def test_MaxPool(tmpdir):
    img = np.reshape(np.arange(16, dtype = np.float32), [1, 4, 4])
    x = C.input_variable(img.shape)
    model = C.pooling(x, C.MAX_POOLING, (2,2), (3,3))
    verify_one_input(model, img, tmpdir, 'MaxPool_1')
Example #31
def vggpool(x):
    return C.pooling(x, C.AVG_POOLING, (2, 2), (2, 2))
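A short usage sketch for vggpool (hypothetical 64x8x8 input): the (2, 2) window with stride (2, 2) halves each spatial dimension and leaves the channel count unchanged.

x = C.input_variable((64, 8, 8))
assert vggpool(x).shape == (64, 4, 4)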
Example #32
def test_AveragePool(tmpdir):
    img = np.reshape(np.arange(16, dtype = np.float32), [1, 4, 4])
    x = C.input_variable(img.shape)
    model = C.pooling(x, C.AVG_POOLING, (2,2), (2,2))

    verify_one_input(model, img, tmpdir, 'AveragePool')