Example #1
def test_convolution_attributes():
    x = C.input((1, 5, 5))
    filter = np.reshape(np.array([2, -1, -1, 2], dtype=np.float32), (1, 2, 2))
    kernel = C.constant(value=filter)
    f = C.convolution(kernel, x, auto_padding=[False])
    d = f.root_function.attributes
    expected = {
        'autoPadding': [False, False, False],
        'sharing': [True, True, True],
        'strides': (1, 1, 1),
        'maxTempMemSizeInSamples': 0,
        'upperPad': (0, 0, 0),
        'lowerPad': (0, 0, 0),
        'transpose': False,
        'outputShape': (0, )
    }
    _check(expected, d)

    f = C.convolution(kernel, x, auto_padding=[False, True])
    d = f.root_function.attributes
    expected = {
        'autoPadding': [False, False, True],
        'sharing': [True, True, True],
        'strides': (1, 1, 1),
        'maxTempMemSizeInSamples': 0,
        'upperPad': (0, 0, 0),
        'lowerPad': (0, 0, 0),
        'transpose': False,
        'outputShape': (0, )
    }
    _check(expected, d)
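
Note: _check is a helper from the surrounding test module and is not shown above. A minimal sketch consistent with how it is used here (hypothetical, not the original implementation):

def _check(expected, d):
    # hypothetical helper: every expected attribute must be present in the
    # attribute dictionary and carry the expected value
    for key, value in expected.items():
        assert key in d, 'missing attribute %s' % key
        assert d[key] == value, 'attribute %s: expected %s, got %s' % (key, value, d[key])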
Example #2
def test_conv_free_static_axes(warmup_input_size, free_dimension_increment, filter_size, num_output_channels, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    conv_size = tuple([num_output_channels, warmup_input_size[0]]+filter_size)
    total_size = np.prod(conv_size)
    y = np.arange(total_size, dtype=dt)
    conv_map = constant(value=y.reshape(conv_size), device=dev)

    reference_input_size = tuple(warmup_input_size[:-len(free_dimension_increment)] +
                           [x+y for x,y in zip(warmup_input_size[-len(free_dimension_increment):], free_dimension_increment)])

    a_ref = C.input_variable(shape=reference_input_size,
                dtype=dt,
                needs_gradient=False,
                name='a_ref')
    a_test = C.input_variable(shape=tuple(warmup_input_size[:-len(free_dimension_increment)] + [C.FreeDimension]*len(free_dimension_increment)),
                dtype=dt,
                needs_gradient=False,
                name='a_test')

    from cntk import convolution
    conv_op_without_free_dim = convolution(conv_map, a_ref, auto_padding=[False] + [True]*len(filter_size))
    conv_op_with_free_dim = convolution(conv_map, a_test, auto_padding=[False] + [True]*len(filter_size))

    input_img_ref = np.ones(reference_input_size, dtype=dt)
    output_ref = conv_op_without_free_dim.eval({a_ref: input_img_ref}, device=dev)

    input_img_warmup = np.ones(warmup_input_size, dtype=dt)
    _ = conv_op_with_free_dim.eval({a_test: input_img_warmup}, device=dev)
        
    output_test = conv_op_with_free_dim.eval({a_test: input_img_ref}, device=dev)

    assert np.allclose(output_test, output_ref, atol=1e-4)
Example #3
def test_convolution_attributes():
    x = C.input_variable((1, 5, 5))
    filter = np.reshape(np.array([2, -1, -1, 2], dtype=np.float32), (1, 2, 2))
    kernel = C.constant(value=filter)
    f = C.convolution(kernel, x, auto_padding=[False])
    d = f.root_function.attributes
    expected = {'autoPadding': [False, False, False], 
        'sharing': [True, True, True], 
        'strides': (1, 1, 1), 
        'maxTempMemSizeInSamples': 0, 
        'upperPad': (0, 0, 0), 
        'lowerPad': (0, 0, 0),
        'transpose': False,
        'outputShape': (0,)
        }
    _check(expected, d)

    f = C.convolution(kernel, x, auto_padding=[False, True])
    d = f.root_function.attributes
    expected = {'autoPadding': [False, False, True], 
        'sharing': [True, True, True], 
        'strides': (1, 1, 1), 
        'maxTempMemSizeInSamples': 0, 
        'upperPad': (0, 0, 0), 
        'lowerPad': (0, 0, 0),
        'transpose': False,
        'outputShape': (0,)
        }
    _check(expected, d)
Example #4
def test_conv_free_static_with_sequence_unpack(num_features, sequence_len,
                                               filter_size,
                                               num_output_channels, batch_size,
                                               device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    x_ref = C.input_variable((1, sequence_len, num_features), dtype=dt)
    conv_map_ref = C.constant(np.random.randn(num_output_channels, 1,
                                              filter_size[0],
                                              filter_size[1]).astype(dt),
                              device=dev)
    w2_ref = C.convolution(conv_map_ref, x_ref, auto_padding=[False])
    x0_ref = np.arange(batch_size * 1 * sequence_len *
                       num_features).astype(dt).reshape(
                           batch_size, 1, sequence_len, num_features)
    output_ref = w2_ref.eval({x_ref: x0_ref}, device=dev)

    x_test = C.sequence.input_variable(num_features, dtype=dt)
    y_test, mask_test = C.sequence.unpack(x_test, 0).outputs
    z_test = C.reshape(y_test, (1, ), 0, 0)
    w2_test = C.convolution(conv_map_ref, z_test, auto_padding=[False])
    output_test = w2_test.eval({x_test: np.squeeze(x0_ref)}, device=dev)

    assert np.allclose(output_test, output_ref, atol=1e-4)
Example #5
def total_variation_loss(x):
    xx = C.reshape(x, (1,)+x.shape)
    delta = np.array([-1, 1], dtype=np.float32)
    kh = C.constant(value=delta.reshape(1, 1, 1, 1, 2))
    kv = C.constant(value=delta.reshape(1, 1, 1, 2, 1))
    dh = C.convolution(kh, xx, auto_padding=[False])
    dv = C.convolution(kv, xx, auto_padding=[False])
    avg = 0.5 * (C.reduce_mean(C.square(dv)) + C.reduce_mean(C.square(dh)))
    return avg
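
Usage sketch (the input shape below is an assumption for illustration, not from the original): the loss evaluates to a scalar total-variation penalty for an image tensor.

import numpy as np
import cntk as C

img = C.input_variable((3, 64, 64))  # assumed CHW image shape
loss = total_variation_loss(img)
sample = np.random.rand(3, 64, 64).astype(np.float32)
print(loss.eval({img: sample}))      # scalar penalty for one image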
Example #6
def test_conv_free_static_axes(warmup_input_size, free_dimension_increment,
                               filter_size, num_output_channels, device_id,
                               precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    np.random.seed(0)

    conv_size = tuple([num_output_channels, warmup_input_size[0]] +
                      filter_size)
    total_size = np.prod(conv_size)
    y = np.arange(total_size, dtype=dt)
    conv_map = constant(value=y.reshape(conv_size), device=dev)

    reference_input_size = tuple(
        warmup_input_size[:-len(free_dimension_increment)] + [
            x + y
            for x, y in zip(warmup_input_size[-len(free_dimension_increment):],
                            free_dimension_increment)
        ])

    a_ref = C.input_variable(shape=reference_input_size,
                             dtype=dt,
                             needs_gradient=False,
                             name='a_ref')
    a_test = C.input_variable(
        shape=tuple(warmup_input_size[:-len(free_dimension_increment)] +
                    [C.FreeDimension] * len(free_dimension_increment)),
        dtype=dt,
        needs_gradient=False,
        name='a_test')

    from cntk import convolution
    conv_op_without_free_dim = convolution(conv_map,
                                           a_ref,
                                           auto_padding=[False] +
                                           [True] * len(filter_size))
    conv_op_with_free_dim = convolution(conv_map,
                                        a_test,
                                        auto_padding=[False] +
                                        [True] * len(filter_size))

    input_img_ref = np.ones(reference_input_size, dtype=dt)
    output_ref = conv_op_without_free_dim.eval({a_ref: input_img_ref},
                                               device=dev)

    input_img_warmup = np.ones(warmup_input_size, dtype=dt)
    _ = conv_op_with_free_dim.eval({a_test: input_img_warmup}, device=dev)

    output_test = conv_op_with_free_dim.eval({a_test: input_img_ref},
                                             device=dev)

    assert np.allclose(output_test, output_ref, atol=1e-4)
Example #7
def test_conv_pooling_free_static_and_dynamic_axes(warmup_input_size, free_dimension_increment, filter_size, num_output_channels, batch_size_range, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    np.random.seed(0)

    conv_size = tuple([num_output_channels, warmup_input_size[0]]+filter_size)
    total_size = np.prod(conv_size)
    y = np.arange(total_size, dtype=dt)
    conv_map = constant(value=y.reshape(conv_size), device=dev)

    warmup_batchsize = np.random.randint(batch_size_range[0],batch_size_range[1])
    ref_batchsize = np.random.randint(batch_size_range[0],batch_size_range[1])

    reference_input_size = tuple(warmup_input_size[:-len(free_dimension_increment)] +
                           [x+y for x,y in zip(warmup_input_size[-len(free_dimension_increment):], free_dimension_increment)])

    a_ref = C.sequence.input_variable(shape=reference_input_size,
                dtype=dt,
                needs_gradient=False,
                sequence_axis=C.Axis.new_unique_dynamic_axis('c'))
    a_test = C.sequence.input_variable(shape=tuple(warmup_input_size[:-len(free_dimension_increment)] + [C.FreeDimension]*len(free_dimension_increment)),
                dtype=dt,
                needs_gradient=False,
                sequence_axis=C.Axis.new_unique_dynamic_axis('c'))

    from cntk import convolution, pooling, unpooling

    def pooling_unpooling(x):
        y = pooling(x, C.AVG_POOLING, (2,2), (2,2), auto_padding=[True])
        return unpooling(y, x, C.MAX_UNPOOLING, (2,2), (2,2), auto_padding=[True])

    conv_ops = [ [convolution(conv_map, a_ref, auto_padding=[False] + [True]*len(filter_size)),
                  convolution(conv_map, a_test, auto_padding=[False] + [True]*len(filter_size))],
                 [pooling_unpooling(a_ref),
                  pooling_unpooling(a_test)] ]

    for op_pair in conv_ops:
        conv_op_without_free_dim, conv_op_with_free_dim = op_pair
        input_img_ref = np.random.random((ref_batchsize,) + reference_input_size).astype(dt)
        output_ref = conv_op_without_free_dim.eval({a_ref: input_img_ref}, device=dev)

        input_img_warmup = np.random.random((warmup_batchsize,) + tuple(warmup_input_size)).astype(dt)
        _ = conv_op_with_free_dim.eval({a_test: input_img_warmup}, device=dev)

        output_test = conv_op_with_free_dim.eval({a_test: input_img_ref}, device=dev)

        assert np.allclose(output_test, output_ref, atol=1e-4)
Example #8
def test_asym_convolution(input_size, conv_size, result, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    # fill input operand with a sequence 1,2,3,... til total size and then
    # resize to input_size
    total_size = np.prod(input_size)
    x = np.arange(total_size, dtype=dt)
    input_operand = x.reshape(input_size)

    a = C.input_variable(shape=input_operand.shape[1:],
                dtype=sanitize_dtype_cntk(precision),
                needs_gradient=False,
                name='a')

    # do the same for convolution kernel
    total_size = np.prod(conv_size)
    y = np.arange(total_size, dtype=dt)
    conv_map = constant(value=y.reshape(conv_size), device=dev)

    from cntk import convolution
    input_op = convolution(conv_map, a, auto_padding=[True])

    forward_input = {a: input_operand}
    expected_forward = AA(result)

    unittest_helper(input_op, forward_input, expected_forward,
                    None, device_id=device_id, precision=precision)
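
Here AA is np.asarray and unittest_helper comes from CNTK's test utilities; roughly, it evaluates the op forward (and backward, when expected gradients are given) and compares against the expectations. A much-simplified hypothetical sketch of the forward check:

import numpy as np

AA = np.asarray  # test-utility alias used throughout these snippets

def unittest_helper(op, forward_input, expected_forward,
                    expected_backward, device_id=-1, precision='float'):
    # hypothetical simplification: evaluate and compare with the expectation;
    # the real helper also selects the device and verifies expected_backward
    actual = op.eval(forward_input)
    assert np.allclose(actual, expected_forward, atol=1e-4)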
Example #9
def test_asym_convolution(input_size, conv_size, result, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    # fill input operand with a sequence 1,2,3,... til total size and then
    # resize to input_size
    total_size = np.prod(input_size)
    x = np.arange(total_size, dtype=dt)
    input_operand = x.reshape(input_size)

    a = C.input_variable(shape=input_operand.shape[1:],
                         dtype=sanitize_dtype_cntk(precision),
                         needs_gradient=False,
                         name='a')

    # do the same for convolution kernel
    total_size = np.prod(conv_size)
    y = np.arange(total_size, dtype=dt)
    conv_map = constant(value=y.reshape(conv_size), device=dev)

    from cntk import convolution
    input_op = convolution(conv_map, a, auto_padding=[True])

    forward_input = {a: input_operand}
    expected_forward = AA(result)

    unittest_helper(input_op,
                    forward_input,
                    expected_forward,
                    None,
                    device_id=device_id,
                    precision=precision)
Example #10
def BinaryConvolution(operand,
                      filter_shape,
                      num_filters=1,
                      channels = 1,
                      init=C.glorot_uniform(),
                      pad=False,
                      strides=1,
                      bias=True,
                      init_bias=0,
                      op_name='BinaryConvolution', name=''):
    """ arguments:
            operand: tensor to convolve
            filter_shape: tuple indicating filter size
            num_filters: number of filters to use 
            channels: number of incoming channels
            init: type of initialization to use for weights
    """
    kernel_shape = (num_filters, channels) + filter_shape
    W = C.parameter(shape=kernel_shape, init=init, name="filter")

    binary_convolve_operand_p = C.placeholder(operand.shape, operand.dynamic_axes, name="operand")
    binary_convolve = C.convolution(CustomMultibit(W, 1), CustomMultibit(binary_convolve_operand_p, 1), auto_padding=[False, pad, pad], strides=[strides])
    r = C.as_block(binary_convolve, [(binary_convolve_operand_p, operand)], 'binary_convolve')

    bias_shape = (num_filters, 1, 1)
    b = C.parameter(shape=bias_shape, init=init_bias, name="bias")
    r = r + b

    # apply learnable param relu
    P = C.parameter(shape=r.shape, init=init, name="prelu")
    r = C.param_relu(P, r)
    return r
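
Usage sketch, assuming CustomMultibit is importable from the binary-convolution example code it belongs to (the shapes are illustrative, not from the original):

import cntk as C

x = C.input_variable((3, 32, 32))
y = BinaryConvolution(x, filter_shape=(3, 3), num_filters=32,
                      channels=3, pad=True, strides=1)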
Example #11
def test_op_convolution_without_padding(convolution_map, convolution_input, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]

    conv_map = AA(convolution_map, dtype=dt)
    conv_input = AA(convolution_input, dtype=dt)

    conv_input.shape = (1,1) + conv_input.shape # adding batch and channel axis
    conv_map.shape = (1,1) + conv_map.shape

    flipped_conv_map = conv_map[...,::-1,::-1]

    from scipy import signal
    expected_forward = AA([[signal.convolve(flipped_conv_map, conv_input, mode='valid')]])

    backward = AA([[conv_map]])

    a = I(shape=conv_input.shape,
          dtype=sanitize_dtype_cntk(precision),
          needs_gradient=True,
          name='a')

    constant_map = constant(value=conv_map)

    from cntk import convolution
    input_op = convolution(constant_map, a, auto_padding=[False])

    forward_input = {a: conv_input}
    expected_backward = {a: backward}

    unittest_helper(input_op,
                    forward_input, expected_forward, expected_backward,
                    device_id=device_id, precision=precision)
Example #12
def test_op_convolution_with_padding(convolution_map, convolution_input,
                                     forward, backward, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]

    conv_map = AA(convolution_map, dtype=dt)
    conv_input = AA(convolution_input, dtype=dt)

    # adding batch and channel axis
    conv_input.shape = (1, 1) + conv_input.shape
    conv_map.shape = (1, 1) + conv_map.shape

    expected_forward = AA([[forward]], dtype=dt)

    a = I(shape=conv_input.shape,
          dtype=sanitize_dtype_cntk(precision),
          needs_gradient=True,
          name='a')

    constant_map = constant(value=conv_map)

    from cntk import convolution
    input_op = convolution(constant_map, a, auto_padding=[True])

    forward_input = {a: conv_input}
    expected_backward = {a: AA([[backward]], dtype=dt)}

    unittest_helper(input_op,
                    forward_input,
                    expected_forward,
                    expected_backward,
                    device_id=device_id,
                    precision=precision)
Example #13
def BinaryConvolution(operand,
                      filter_shape,
                      num_filters=1,
                      channels = 1,
                      init=C.glorot_uniform(),
                      pad=False,
                      strides=1,
                      bias=True,
                      init_bias=0,
                      op_name='BinaryConvolution', name=''):
    """ arguments:
            operand: tensor to convolve
            filter_shape: tuple indicating filter size
            num_filters: number of filters to use 
            channels: number of incoming channels
            init: type of initialization to use for weights
    """
    kernel_shape = (num_filters, channels) + filter_shape
    W = C.parameter(shape=kernel_shape, init=init, name="filter")

    binary_convolve_operand_p = C.placeholder(operand.shape, operand.dynamic_axes, name="operand")
    binary_convolve = C.convolution(CustomMultibit(W, 1), CustomMultibit(binary_convolve_operand_p, 1), auto_padding=[False, pad, pad], strides=[strides])
    r = C.as_block(binary_convolve, [(binary_convolve_operand_p, operand)], 'binary_convolve')

    bias_shape = (num_filters, 1, 1)
    b = C.parameter(shape=bias_shape, init=init_bias, name="bias")
    r = r + b

    # apply learnable param relu
    P = C.parameter(shape=r.shape, init=init, name="prelu")
    r = C.param_relu(P, r)
    return r
Example #14
def test_op_convolution_without_padding(convolution_map, convolution_input, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]

    conv_map = AA(convolution_map, dtype=dt)
    conv_input = AA(convolution_input, dtype=dt)

    flipped_conv_map = conv_map[..., ::-1, ::-1]

    from scipy import signal
    expected_forward = AA(
        [[signal.convolve(flipped_conv_map, conv_input, mode='valid')]])

    backward = AA(conv_map)

    a = I(shape=conv_input.shape,
          dtype=sanitize_dtype_cntk(precision),
          needs_gradient=True,
          name='a')

    conv_input.shape = (1,1) + conv_input.shape # adding batch and channel axis
    conv_map.shape = (1,1) + conv_map.shape

    constant_map = constant(value=conv_map)

    from cntk import convolution
    input_op = convolution(constant_map, a, auto_padding=[False])

    forward_input = {a: conv_input}
    expected_backward = {a: backward}

    unittest_helper(input_op, forward_input, expected_forward,
                    expected_backward, device_id=device_id, precision=precision)
Example #15
def vggblock(x, arrays, layer_map, name):
    f = arrays[0]
    b = arrays[1]
    k = C.constant(value=f)
    t = C.constant(value=np.reshape(b, (-1, 1, 1)))
    y = C.relu(C.convolution(k, x, auto_padding=[False, True, True]) + t)
    layer_map[name] = y
    return y
Example #16
def test_conv_with_freedim_model(tmpdir, dtype, device_id):
    pytest.skip('Needs to be fixed after removal of batch axis change.')
    if device_id == -1 and dtype == np.float16:
        pytest.skip('Test only runs on GPU')
    device = cntk_device(device_id)
    with C.default_options(dtype=dtype):
        img_shape = (3, 32, 32)
        img = np.asarray(np.random.uniform(-1, 1, img_shape), dtype=dtype)

        x = C.input_variable((3, C.FreeDimension, C.FreeDimension))

        conv_size1 = (32, 3, 5, 5)
        conv_map1 = C.constant(value=np.arange(
            np.prod(conv_size1), dtype=dtype).reshape(conv_size1))
        conv_op1 = C.convolution(conv_map1,
                                 x,
                                 auto_padding=(False, True, True))
        relu_op1 = C.relu(conv_op1)
        maxpool_op1 = C.pooling(relu_op1, C.MAX_POOLING, (2, 2), (2, 2))

        conv_size2 = (64, 32, 3, 3)
        conv_map2 = C.constant(value=np.arange(
            np.prod(conv_size2), dtype=dtype).reshape(conv_size2))
        conv_op2 = C.convolution(conv_map2,
                                 maxpool_op1,
                                 auto_padding=(False, True, True))
        relu_op2 = C.relu(conv_op2)
        root_node = C.pooling(relu_op2, C.MAX_POOLING, (2, 2), (2, 2))

        filename = os.path.join(str(tmpdir), R'conv_with_freedim.onnx')
        root_node.save(filename, format=C.ModelFormat.ONNX)

        loaded_node = C.Function.load(filename, format=C.ModelFormat.ONNX)
        assert root_node.shape == loaded_node.shape

        x_ = loaded_node.arguments[0]
        assert np.allclose(loaded_node.eval({x_: img}, device=device),
                           root_node.eval({x: img}, device=device))

        # Additional test to ensure that loaded_node can be saved as both ONNX and CNTKv2 again.
        filename2 = os.path.join(str(tmpdir), R'conv_with_freedim2.onnx')
        loaded_node.save(filename2, format=C.ModelFormat.ONNX)

        filename3 = os.path.join(str(tmpdir), R'conv_with_freedim2.cntkmodel')
        loaded_node.save(filename3, format=C.ModelFormat.CNTKv2)
Example #17
def test_sequence_unpack_with_convolution(device_id, precision):
    x = C.sequence.input((20, 20))
    y = C.sequence.unpack(x, 0, no_mask_output=True)
    z = C.reshape(y, (3, 20, 20))
    kernel = C.constant(1.0, (4, 3, 3, 3))
    t = C.convolution(kernel, z, auto_padding=[False, True, True])
    val = np.random.random((2, 3, 20, 20)).astype(np.float32)
    result = t.eval({x: val})
    assert np.array_equal(result.shape, (2, 4, 20, 20))
Example #18
def test_sequence_unpack_with_convolution(device_id, precision): 
    x = C.sequence.input((20, 20))
    y = C.sequence.unpack(x, 0, no_mask_output=True)
    z = C.reshape(y, (3, 20, 20))
    kernel = C.constant(1.0, (4, 3, 3, 3))
    t = C.convolution(kernel, z, auto_padding=[False, True, True])
    val = np.random.random((2, 3, 20, 20)).astype(np.float32)
    result = t.eval({x: val})
    assert np.array_equal(result.shape, (2, 4, 20, 20))
Example #19
def test_group_conv(groups, num_output_channels, num_input_channels,
                    input_tensor_size, filter_size, batch_size, device_id,
                    precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    # Generate result from CNTK API
    conv_size = tuple([num_output_channels, num_input_channels] + filter_size)
    total_size = np.prod(conv_size)
    y = np.arange(total_size, dtype=dt).reshape(conv_size)
    conv_map = C.constant(value=y, device=dev)

    input_size = (num_input_channels, ) + tuple(input_tensor_size)
    x_test = C.input_variable(input_size, dtype=dt)
    data = np.random.random((batch_size, ) + input_size).astype(dt)

    conv_op = C.convolution(conv_map,
                            x_test,
                            auto_padding=[False] + [True] * len(filter_size),
                            groups=groups)

    output_test = conv_op.eval({x_test: data}, device=dev)

    # Generate the reference result. The code below re-implements group
    # convolution in Python as multiple standard convolutions (i.e. groups = 1),
    # producing a reference output to test the CNTK implementation against.
    num_out_channels_per_group = int(num_output_channels / groups)
    num_in_channels_per_group = int(num_input_channels / groups)
    sub_kernels_init = [
        y[i * num_out_channels_per_group:(i + 1) * num_out_channels_per_group,
          i * num_in_channels_per_group:(i + 1) * num_in_channels_per_group,
          ...] for i in range(0, groups)
    ]
    sub_kernels = [
        C.ops.constant(value=np.ascontiguousarray(sub_kernels_init[i]),
                       device=dev) for i in range(0, groups)
    ]

    x_ref = C.input_variable(input_size, dtype=dt)
    sub_data = [
        C.ops.slice(x_ref,
                    axis=0,
                    begin_index=i * num_in_channels_per_group,
                    end_index=(i + 1) * num_in_channels_per_group)
        for i in range(0, groups)
    ]
    conv_ops_per_group = [
        C.ops.convolution(group_kernel,
                          data_for_groups,
                          auto_padding=[False] + [True] * len(filter_size))
        for group_kernel, data_for_groups in zip(sub_kernels, sub_data)
    ]
    group_conv = C.ops.splice(*conv_ops_per_group, axis=0)

    output_ref = group_conv.eval({x_ref: data}, device=dev)

    assert np.allclose(output_test, output_ref, atol=1e-4)
Example #20
def conv_from_weights(x, weights, bias=None, padding=True, name=""):
    """ weights and bias are numpy arrays """
    k = C.parameter(shape=weights.shape, init=weights)
    y = C.convolution(k, x, auto_padding=[False, padding, padding])
    if bias is not None:
        b = C.parameter(shape=bias.shape, init=bias)
        y = y + b
    y = C.alias(y, name=name)
    return y
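
Usage sketch with assumed shapes: wrap pretrained numpy weights as a fixed-initialization CNTK convolution layer.

import numpy as np
import cntk as C

x = C.input_variable((3, 32, 32))
w = np.random.randn(16, 3, 3, 3).astype(np.float32)  # (out_ch, in_ch, kh, kw)
b = np.zeros((16, 1, 1), dtype=np.float32)
y = conv_from_weights(x, w, bias=b, name='conv1')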
Example #21
def test_conv_free_static_with_sequence_unpack(num_features, sequence_len, filter_size, num_output_channels, batch_size, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    x_ref = C.input_variable((1, sequence_len, num_features), dtype=dt)
    conv_map_ref = C.constant(np.random.randn(num_output_channels, 1, filter_size[0], filter_size[1]).astype(dt), device=dev)
    w2_ref = C.convolution(conv_map_ref, x_ref, auto_padding=[False])
    x0_ref = np.arange(batch_size*1*sequence_len*num_features).astype(dt).reshape(batch_size, 1, sequence_len, num_features)
    output_ref = w2_ref.eval({x_ref: x0_ref}, device=dev)


    x_test = C.sequence.input_variable(num_features, dtype=dt)
    y_test, mask_test = C.sequence.unpack(x_test, 0).outputs
    z_test = C.reshape(y_test, (1, ), 0, 0)
    w2_test = C.convolution(conv_map_ref, z_test, auto_padding=[False])
    output_test = w2_test.eval({x_test: np.squeeze(x0_ref)}, device=dev)
    
    assert np.allclose(output_test, output_ref, atol=1e-4)
Example #22
def __local_response_normalization(self, k, n, alpha, beta, name=''):
    x = cntk.placeholder(name='lrn_arg')
    x2 = cntk.square(x)
    x2s = cntk.reshape(x2, (1, cntk.InferredDimension), 0, 1)
    W = cntk.constant(alpha / (2 * n + 1), (1, 2 * n + 1, 1, 1), name='W')
    y = cntk.convolution(W, x2s)
    b = cntk.reshape(y, cntk.InferredDimension, 0, 2)
    den = cntk.exp(beta * cntk.log(k + b))
    apply_x = cntk.element_divide(x, den)
    return apply_x
Example #23
def LocalResponseNormalization(k, n, alpha, beta, name=''):
    x = C.placeholder(name='lrn_arg')
    x2 = C.square(x)
    x2s = C.reshape(x2, (1, C.InferredDimension), 0, 1)
    W = C.constant(alpha / (2 * n + 1), (1, 2 * n + 1, 1, 1), name='W')
    y = C.convolution(W, x2s)
    b = C.reshape(y, C.InferredDimension, 0, 2)
    den = C.exp(beta * C.log(k + b))
    apply_x = C.element_divide(x, den)

    return apply_x
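
Because the body is built over a placeholder, the returned function can be applied to a concrete input like any CNTK function. A sketch with an assumed input shape:

import cntk as C

x = C.input_variable((64, 14, 14))  # assumed (channels, height, width)
lrn = LocalResponseNormalization(k=1.0, n=2, alpha=1e-4, beta=0.75)
y = lrn(x)  # binds the 'lrn_arg' placeholder to x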
Example #24
def lrn(x, depth_radius, bias, alpha, beta, name=''):
    x2 = C.square(x)
    # reshape to insert a fake singleton reduction dimension after the 3rd axis (channel axis). Note Python and BrainScript axis orders are reversed.
    x2s = C.reshape(x2, (1, C.InferredDimension), 0, 1)
    W = C.constant(alpha/(2*depth_radius+1), shape=(1,2*depth_radius+1,1,1), dtype=dtype, name='W')
    # 3D convolution with a filter that has a non-1 size only in the 3rd axis, and does not reduce since the reduction dimension is fake and of size 1
    y = C.convolution(W, x2s)
    # reshape back to remove the fake singleton reduction dimension
    b = C.reshape(y, C.InferredDimension, 0, 2)
    den = C.exp(beta * C.log(bias + b))
    return C.element_divide(x, den)
Example #25
def test_native_binary_function():
    # user functions need to be registered before being callable by python
    if not nopt.native_convolve_function_registered:
        pytest.skip("Could not find {0} library. "
                    "Please check if HALIDE_PATH is configured properly "
                    "and try building {1} again".format(
                        'Cntk.BinaryConvolution-' + C.__version__.rstrip('+'),
                        'Extnsibiliy\\BinaryConvolution'))

    # be sure to only run on CPU, binary convolution does not have GPU support for now
    dev = C.cpu()
    # create an arbitrary input mimicking a realistic cifar input
    x = input((64, 28, 28))
    # random filter weights for testing
    w = parameter((64, 64, 3, 3),
                  init=np.reshape(2 * (np.random.rand(64 * 64 * 3 * 3) - .5),
                                  (64, 64, 3, 3)),
                  dtype=np.float32,
                  device=dev)

    # set the convolution parameters by passing in an attribute dictionary
    #attributes = {'stride' : 1, 'padding' : False, 'size' : 3}

    attributes = {
        'stride': 1,
        'padding': False,
        'size': 3,
        'h': 28,
        'w': 28,
        'channels': 64,
        'filters': 64
    }

    # define the binary convolution op
    op = ops.native_user_function('NativeBinaryConvolveFunction', [w, x],
                                  attributes, 'native_binary_convolve')

    # also define an op using python custom functions that should have the same output
    op2 = C.convolution(CustomMultibitKernel(w, 1),
                        CustomSign(x),
                        auto_padding=[False])
    # create random input data
    x_data = NDArrayView.from_dense(np.asarray(np.reshape(
        2 * (np.random.rand(64 * 28 * 28) - .5), (64, 28, 28)),
                                               dtype=np.float32),
                                    device=dev)
    # evaluate the CPP binary convolve
    result = op.eval({x: x_data}, device=dev)

    # evaluate the python emulator
    result2 = op2.eval({x: x_data}, device=dev)
    native_times_primitive = op.find_by_name('native_binary_convolve')
    # assert that both have the same result
    assert np.allclose(result, result2, atol=0.001)
Example #26
def lrn(x, depth_radius, bias, alpha, beta, name=''):
    x2 = C.square(x)
    # reshape to insert a fake singleton reduction dimension after the 3rd axis (channel axis). Note Python and BrainScript axis orders are reversed.
    x2s = C.reshape(x2, (1, C.InferredDimension), 0, 1)
    W = C.constant(alpha/(2*depth_radius+1), shape=(1,2*depth_radius+1,1,1), dtype=dtype, name='W')
    # 3D convolution with a filter that has a non-1 size only in the 3rd axis, and does not reduce since the reduction dimension is fake and of size 1
    y = C.convolution(W, x2s)
    # reshape back to remove the fake singleton reduction dimension
    b = C.reshape(y, C.InferredDimension, 0, 2)
    den = C.exp(beta * C.log(bias + b))
    return C.element_divide(x, den)
Example #27
def test_sequence_unpack_with_convolution(device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    x = C.sequence.input((20, 20), dtype=dt)
    y = C.sequence.unpack(x, 0, no_mask_output=True)
    z = C.reshape(y, (3, 20, 20))
    kernel = C.constant(1.0, (4, 3, 3, 3), device=dev)
    t = C.convolution(kernel, z, auto_padding=[False, True, True])
    val = np.random.random((2, 3, 20, 20)).astype(dt)
    result = t.eval({x: val}, device=dev)
    assert np.array_equal(result.shape, (2, 4, 20, 20))
Example #28
    def convolution(operand):

        bcv_operand_p = C.placeholder(
            operand.shape, operand.dynamic_axes, name="operand")

        bcv = C.convolution(
            CustomMultibit(W, 1),
            CustomMultibit(bcv_operand_p, 1),
            auto_padding=[False, pad, pad],
            strides=[strides])

        return C.as_block(bcv, [(bcv_operand_p, operand)], name)
Example #29
    def convolution(operand):

        bcv_operand_p = C.placeholder(operand.shape,
                                      operand.dynamic_axes,
                                      name="operand")

        bcv = C.convolution(CustomMultibit(W, 1),
                            CustomMultibit(bcv_operand_p, 1),
                            auto_padding=[False, pad, pad],
                            strides=[strides])

        return C.as_block(bcv, [(bcv_operand_p, operand)], name)
Example #30
def test_sequence_unpack_with_convolution(device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    x = C.sequence.input((20, 20), dtype=dt)
    y = C.sequence.unpack(x, 0, no_mask_output=True)
    z = C.reshape(y, (3, 20, 20))
    kernel = C.constant(1.0, (4, 3, 3, 3), device=dev)
    t = C.convolution(kernel, z, auto_padding=[False, True, True])
    val = np.random.random((2, 3, 20, 20)).astype(dt)
    result = t.eval({x: val}, device=dev)
    assert np.array_equal(result.shape, (2, 4, 20, 20))
Example #31
def LocalResponseNormalization(k, n, alpha, beta, name=''):
    x = C.placeholder(name='lrn_arg')
    x2 = C.square(x)
    # reshape to insert a fake singleton reduction dimension after the 3rd axis (channel axis). Note Python and BrainScript axis orders are reversed.
    x2s = C.reshape(x2, (1, C.InferredDimension), 0, 1)
    W = C.constant(alpha/(2*n+1), (1,2*n+1,1,1), name='W')
    # 3D convolution with a filter that has a non-1 size only in the 3rd axis, and does not reduce since the reduction dimension is fake and of size 1
    y = C.convolution(W, x2s)
    # reshape back to remove the fake singleton reduction dimension
    b = C.reshape(y, C.InferredDimension, 0, 2)
    den = C.exp(beta * C.log(k + b))
    apply_x = C.element_divide(x, den)
    return apply_x
Example #32
def LocalResponseNormalization(k, n, alpha, beta, name=''):
    x = C.placeholder(name='lrn_arg')
    x2 = C.square(x)
    # reshape to insert a fake singleton reduction dimension after the 3rd axis (channel axis). Note Python and BrainScript axis orders are reversed.
    x2s = C.reshape(x2, (1, C.InferredDimension), 0, 1)
    W = C.constant(alpha / (2 * n + 1), (1, 2 * n + 1, 1, 1), name='W')
    # 3D convolution with a filter that has a non-1 size only in the 3rd axis, and does not reduce since the reduction dimension is fake and of size 1
    y = C.convolution(W, x2s)
    # reshape back to remove the fake singleton reduction dimension
    b = C.reshape(y, C.InferredDimension, 0, 2)
    den = C.exp(beta * C.log(k + b))
    apply_x = C.element_divide(x, den)
    return apply_x
Example #33
    def convolve(x):
        r = C.convolution(W,
                          x,
                          auto_padding=[False, pad, pad],
                          strides=[strides])
        r.name = name

        if bias:
            r = r + b
        if activation:
            # apply learnable param relu
            P = C.parameter(shape=r.shape, init=init_activation, name="prelu")
            r = C.param_relu(P, r)
        return r
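
This closure refers to W, b, pad, strides, bias, activation and init_activation from an enclosing layer factory that the snippet does not show. A hypothetical skeleton of such a factory, just to make the free variables concrete:

import cntk as C

def Convolution2D(filter_shape, num_filters, channels,
                  init=C.glorot_uniform(), init_bias=0,
                  init_activation=C.glorot_uniform(),
                  pad=False, strides=1, bias=True, activation=True, name=''):
    # hypothetical enclosing factory; defines only the names the closure needs
    W = C.parameter(shape=(num_filters, channels) + filter_shape, init=init, name='filter')
    b = C.parameter(shape=(num_filters, 1, 1), init=init_bias, name='bias')
    def convolve(x):
        ...  # the closure shown above
    return convolve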
Example #34
def test_group_conv(groups, num_output_channels, num_input_channels, input_tensor_size, filter_size, kernel_channels, batch_size, device_id, precision):
    if device_id == -1 and len(input_tensor_size) > 2:
        pytest.skip('3D or higher dimensions not supported for group convolution on CPU.')
    if device_id == 0 and should_force_deterministic_algorithms():
        pytest.skip('Deterministic algorithms not supported on GPU for group convolution.')

    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    # Generate result from CNTK API
    conv_size = tuple([num_output_channels, kernel_channels]+filter_size)
    total_size = np.prod(conv_size)
    y = np.arange(total_size, dtype=dt).reshape(conv_size)
    conv_map = C.parameter(init=y, device=dev)

    input_size = (num_input_channels, ) + tuple(input_tensor_size)
    x_test = C.input_variable(input_size, needs_gradient=True, dtype=dt)
    data = np.random.random((batch_size,) + input_size).astype(dt)

    conv_op = C.convolution(conv_map, x_test, auto_padding=[False] + [True]*len(filter_size), groups=groups)

    df_test, fv_test = conv_op.forward({x_test:data}, [conv_op.output], set([conv_op.output]), device=dev)
    output_test = list(fv_test.values())[0]
    grad_data = np.random.random(size=output_test.shape)
    grad_test = conv_op.backward(df_test, {conv_op.output: grad_data}, set([x_test]))
    output_grad_test = list(grad_test.values())[0]

    # Generate the reference result. The code below re-implements group
    # convolution in Python as multiple standard convolutions (i.e. groups = 1),
    # producing a reference output to test the CNTK implementation against.
    num_out_channels_per_group = int(num_output_channels / groups)
    num_in_channels_per_group = int(num_input_channels / groups)
    sub_kernels_init = [y[i * num_out_channels_per_group:(i+1) * num_out_channels_per_group, ...] for i in range(0, groups)]
    sub_kernels = [C.ops.parameter(init=np.ascontiguousarray(sub_kernels_init[i]), device=dev)
                          for i in range(0, groups)]                          

    x_ref = C.input_variable(input_size, needs_gradient=True, dtype=dt)                                             
    sub_data = [C.ops.slice(x_ref, axis=0, begin_index=i * num_in_channels_per_group,
                             end_index=(i + 1) * num_in_channels_per_group) for i in range(0, groups)]
    conv_ops_per_group = [C.ops.convolution(group_kernel, data_for_groups, auto_padding=[False] + [True]*len(filter_size)) 
                 for group_kernel, data_for_groups in zip(sub_kernels, sub_data)]
    group_conv = C.ops.splice(*conv_ops_per_group, axis=0)

    df_ref, fv_ref = group_conv.forward({x_ref:data}, [group_conv.output], set([group_conv.output]), device=dev)
    output_ref = list(fv_ref.values())[0]
    grad_ref = group_conv.backward(df_ref, {group_conv.output: grad_data}, set([x_ref]))
    output_grad_ref = list(grad_ref.values())[0]

    assert np.allclose(output_test, output_ref, atol=1e-4)
    assert np.allclose(output_grad_test, output_grad_ref, atol=1e-4)
Example #35
def test_conv_with_freedim_model(tmpdir, dtype, device_id):
    if device_id == -1 and dtype == np.float16:
        pytest.skip('Test only runs on GPU')
    device = cntk_device(device_id)
    with C.default_options(dtype=dtype):
        img_shape = (3, 32, 32)
        img = np.asarray(np.random.uniform(-1, 1, img_shape), dtype=dtype)

        x = C.input_variable((3, C.FreeDimension, C.FreeDimension))

        conv_size1 = (32, 3, 5, 5)
        conv_map1 = C.constant(value=np.arange(np.prod(conv_size1), dtype=dtype).reshape(conv_size1))
        conv_op1 = C.convolution(conv_map1, x, auto_padding=(False, True, True))
        relu_op1 = C.relu(conv_op1)
        maxpool_op1 = C.pooling(relu_op1, C.MAX_POOLING, (2, 2), (2, 2))

        conv_size2 = (64, 32, 3, 3)
        conv_map2 = C.constant(value=np.arange(np.prod(conv_size2), dtype=dtype).reshape(conv_size2))
        conv_op2 = C.convolution(conv_map2, maxpool_op1, auto_padding=(False, True, True))
        relu_op2 = C.relu(conv_op2)
        root_node = C.pooling(relu_op2, C.MAX_POOLING, (2, 2), (2, 2))

        filename = os.path.join(str(tmpdir), R'conv_with_freedim.onnx')
        root_node.save(filename, format=C.ModelFormat.ONNX)

        loaded_node = C.Function.load(filename, format=C.ModelFormat.ONNX)
        assert root_node.shape == loaded_node.shape

        x_ = loaded_node.arguments[0]
        assert np.allclose(loaded_node.eval({x_:img}, device=device), root_node.eval({x:img}, device=device))

        # Additional test to ensure that loaded_node can be saved as both ONNX and CNTKv2 again.
        filename2 = os.path.join(str(tmpdir), R'conv_with_freedim2.onnx')
        loaded_node.save(filename2, format=C.ModelFormat.ONNX)

        filename3 = os.path.join(str(tmpdir), R'conv_with_freedim2.cntkmodel')
        loaded_node.save(filename3, format=C.ModelFormat.CNTKv2)
Example #36
def test_conv_with_freedim_model(tmpdir):
    img_shape = (3, 32, 32)
    img = np.asarray(np.random.uniform(-1, 1, img_shape), dtype=np.float32)

    x = C.input_variable((3, C.FreeDimension, C.FreeDimension))

    conv_size1 = (32, 3, 5, 5)
    conv_map1 = C.constant(value=np.arange(
        np.prod(conv_size1), dtype=np.float32).reshape(conv_size1))
    conv_op1 = C.convolution(conv_map1, x, auto_padding=(False, True, True))
    relu_op1 = C.relu(conv_op1)
    maxpool_op1 = C.pooling(relu_op1, C.MAX_POOLING, (2, 2), (2, 2))

    conv_size2 = (64, 32, 3, 3)
    conv_map2 = C.constant(value=np.arange(
        np.prod(conv_size2), dtype=np.float32).reshape(conv_size2))
    conv_op2 = C.convolution(conv_map2,
                             maxpool_op1,
                             auto_padding=(False, True, True))
    relu_op2 = C.relu(conv_op2)
    root_node = C.pooling(relu_op2, C.MAX_POOLING, (2, 2), (2, 2))

    filename = os.path.join(str(tmpdir), R'conv_with_freedim.onnx')
    root_node.save(filename, format=C.ModelFormat.ONNX)

    loaded_node = C.Function.load(filename, format=C.ModelFormat.ONNX)
    assert root_node.shape == loaded_node.shape

    x_ = loaded_node.arguments[0]
    assert np.allclose(loaded_node.eval({x_: img}), root_node.eval({x: img}))

    # Additional test to ensure that loaded_node can be saved as both ONNX and CNTKv2 again.
    filename2 = os.path.join(str(tmpdir), R'conv_with_freedim2.onnx')
    loaded_node.save(filename2, format=C.ModelFormat.ONNX)

    filename3 = os.path.join(str(tmpdir), R'conv_with_freedim2.cntkmodel')
    loaded_node.save(filename3, format=C.ModelFormat.CNTKv2)
Example #37
def test_clone_with_slice():
    i1 = C.input_variable((2, 2), name='i1')
    i2 = C.input_variable((2, 2), name='i2')
    x = C.splice(i1, i2, axis=0)
    W = C.constant(1, (4, 1), name='W')
    y = C.convolution(W, x)
    assert (y.shape == (4, 2))

    from ..functions import CloneMethod
    x1 = C.input_variable((2, 1), name='x1')
    x2 = C.input_variable((2, 1), name='x2')
    p1 = C.placeholder()
    p2 = C.placeholder()
    y_cloned = y.clone('clone', {i1: p1, i2: p2})
    y2 = y_cloned(x1, x2)
    assert (y2.shape == (4, 1))
Example #38
def test_clone_with_slice():
    i1 = C.input_variable((2,2), name='i1')
    i2 = C.input_variable((2,2), name='i2')
    x = C.splice(i1, i2, axis=0)
    W = C.constant(1, (4,1), name='W')
    y = C.convolution(W, x)
    assert(y.shape == (4,2))

    from ..functions import CloneMethod
    x1 = C.input_variable((2,1), name='x1')
    x2 = C.input_variable((2,1), name='x2')
    p1 = C.placeholder()
    p2 = C.placeholder()
    y_cloned = y.clone('clone', {i1:p1, i2:p2})
    y2 = y_cloned(x1, x2)
    assert(y2.shape == (4,1))
Example #39
def test_convolution(tmpdir, auto_padding):
    img_shape = (1, 5, 5)
    img = np.asarray(np.random.uniform(-1, 1, img_shape), dtype=np.float32)

    x = C.input_variable(img.shape)
    filter = np.reshape(np.array([2, -1, -1, 2], dtype=np.float32), (1, 1, 2, 2))
    kernel = C.constant(value=filter)
    root_node = C.convolution(kernel, x, auto_padding=auto_padding)

    filename = os.path.join(str(tmpdir), R'conv.onnx')
    root_node.save(filename, format=C.ModelFormat.ONNX)

    loaded_node = C.Function.load(filename, format=C.ModelFormat.ONNX)
    assert root_node.shape == loaded_node.shape

    x_ = loaded_node.arguments[0]
    assert np.allclose(loaded_node.eval({x_:[img]}), root_node.eval({x:[img]}))
Example #40
def test_convolution(tmpdir, auto_padding):
    img_shape = (1, 5, 5)
    img = np.asarray(np.random.uniform(-1, 1, img_shape), dtype=np.float32)

    x = C.input_variable(img.shape)
    filter = np.reshape(np.array([2, -1, -1, 2], dtype=np.float32), (1, 1, 2, 2))
    kernel = C.constant(value=filter)
    root_node = C.convolution(kernel, x, auto_padding=auto_padding)

    filename = os.path.join(str(tmpdir), R'conv.onnx')
    root_node.save(filename, format=C.ModelFormat.ONNX)

    loaded_node = C.Function.load(filename, format=C.ModelFormat.ONNX)
    assert root_node.shape == loaded_node.shape

    x_ = loaded_node.arguments[0]
    assert np.allclose(loaded_node.eval({x_:[img]}), root_node.eval({x:[img]}))
Example #41
def test_native_binary_function():
    # user functions need to be registered before being callable by python
    if not nopt.native_convolve_function_registered:
      pytest.skip("Could not find {0} library. "
        "Please check if HALIDE_PATH is configured properly "
        "and try building {1} again"
        .format('Cntk.BinaryConvolution-' + C.__version__.rstrip('+'),
        'Extnsibiliy\\BinaryConvolution'))

    # be sure to only run on CPU, binary convolution does not have GPU support for now
    dev = C.cpu()
    # create an arbitrary input mimicking a realistic cifar input
    x = input((64, 28, 28))
    # random filter weights for testing
    w = parameter((64, 64, 3, 3), init=np.reshape(2*(np.random.rand(64*64*3*3)-.5), (64, 64, 3, 3)), dtype=np.float32, device=dev)

    # set the convolution parameters by passing in an attribute dictionary
    #attributes = {'stride' : 1, 'padding' : False, 'size' : 3}

    attributes = {'stride' : 1,
                  'padding' : False,
                  'size' : 3,                       
                  'h' : 28,
                  'w' : 28,
                  'channels' : 64,
                  'filters' : 64 }

    # define the binary convolution op
    op = ops.native_user_function('NativeBinaryConvolveFunction', [w, x], attributes, 'native_binary_convolve')
    
    # also define an op using python custom functions that should have the same output
    op2 = C.convolution(CustomMultibitKernel(w, 1), CustomSign(x), auto_padding = [False])
    # create random input data
    x_data = NDArrayView.from_dense(np.asarray(np.reshape(2*(np.random.rand(64*28*28)-.5), (64, 28, 28)),dtype=np.float32), device=dev)
    # evaluate the CPP binary convolve
    result = op.eval({x : x_data}, device=dev)

    # evaluate the python emulator
    result2 = op2.eval({x : x_data}, device=dev)
    native_times_primitive = op.find_by_name('native_binary_convolve')
    # assert that both have the same result
    assert np.allclose(result, result2, atol=0.001)
Example #42
def test_group_conv(groups, num_output_channels, num_input_channels, input_tensor_size, filter_size, batch_size, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    # Generate result from CNTK API
    conv_size = tuple([num_output_channels, num_input_channels]+filter_size)
    total_size = np.prod(conv_size)
    y = np.arange(total_size, dtype=dt).reshape(conv_size)
    conv_map = C.constant(value=y, device=dev)

    input_size = (num_input_channels, ) + tuple(input_tensor_size)
    x_test = C.input_variable(input_size, dtype=dt)
    data = np.random.random((batch_size,) + input_size).astype(dt)

    conv_op = C.convolution(conv_map, x_test, auto_padding=[False] + [True]*len(filter_size), groups=groups)

    output_test = conv_op.eval({x_test:data}, device=dev)

    # Generate the reference result. The code below re-implements group
    # convolution in Python as multiple standard convolutions (i.e. groups = 1),
    # producing a reference output to test the CNTK implementation against.
    num_out_channels_per_group = int(num_output_channels / groups)
    num_in_channels_per_group = int(num_input_channels / groups)
    sub_kernels_init = [y[i * num_out_channels_per_group:(i+1) * num_out_channels_per_group, 
                                 i * num_in_channels_per_group:(i+1) * num_in_channels_per_group, ...] for i in range(0, groups)]
    sub_kernels = [C.ops.constant(value=np.ascontiguousarray(sub_kernels_init[i]), device=dev)
                          for i in range(0, groups)]

    x_ref = C.input_variable(input_size, dtype=dt)                                             
    sub_data = [C.ops.slice(x_ref, axis=0, begin_index=i * num_in_channels_per_group,
                             end_index=(i + 1) * num_in_channels_per_group) for i in range(0, groups)]
    conv_ops_per_group = [C.ops.convolution(group_kernel, data_for_groups, auto_padding=[False] + [True]*len(filter_size)) 
                 for group_kernel, data_for_groups in zip(sub_kernels, sub_data)]
    group_conv = C.ops.splice(*conv_ops_per_group, axis=0)

    output_ref = group_conv.eval({x_ref: data}, device=dev)

    assert np.allclose(output_test, output_ref, atol=1e-4)
Example #43
def test_native_binary_function():
    # user functions need to be registered before being callable by python
    ops.register_native_user_function(
        'NativeBinaryConvolveFunction',
        'Cntk.BinaryConvolutionExample-' + C.__version__.rstrip('+'),
        'CreateBinaryConvolveFunction')
    # be sure to only run on CPU, binary convolution does not have GPU support for now
    dev = cpu()
    # create an arbitrary input mimicking a realistic cifar input
    x = input((64, 30, 30))
    # random filter weights for testing
    w = parameter((64, 64, 3, 3),
                  init=np.reshape(2 * (np.random.rand(64 * 64 * 3 * 3) - .5),
                                  (64, 64, 3, 3)),
                  dtype=np.float32,
                  device=dev)
    # set the convolution parameters by passing in an attribute dictionary
    attributes = {'stride': 1, 'padding': False, 'size': 3}
    # define the binary convolution op
    op = ops.native_user_function('NativeBinaryConvolveFunction', [w, x],
                                  attributes,
                                  'native_binary_convolve_function')
    # also define an op using python custom functions that should have the same output
    op2 = C.convolution(CustomMultibitKernel(w, 1),
                        CustomSign(x),
                        auto_padding=[False])
    # create random input data
    x_data = NDArrayView.from_dense(np.asarray(np.reshape(
        2 * (np.random.rand(64 * 30 * 30) - .5), (64, 30, 30)),
                                               dtype=np.float32),
                                    device=dev)
    # evaluate the CPP binary convolve
    result = op.eval({x: x_data}, device=dev)
    # evaluate the python emulator
    result2 = op2.eval({x: x_data}, device=dev)
    native_times_primitive = op.find_by_name('native_binary_convolve_function')
    # assert that both have the same result
    assert np.allclose(result, result2, atol=0.001)
Example #44
def convolution(convolution_map, operand, strides=(1,), sharing=[True], 
                auto_padding=[True], lower_pad=(0,), upper_pad=(0,), transpose=False, 
                max_temp_mem_size_in_samples=0, name=''):
    '''
    Computes the convolution of ``convolution_map`` (a filter tensor) with ``operand`` (an image or tensor).

    Args:
        convolution_map: convolution filter weights, a tensor of shape [outputChannels, inputChannels, kernel dims...]
        operand: tensor to convolve
        strides (tuple, optional): stride along each spatial axis
        sharing (list of bool, optional): whether the kernel is shared across positions along each axis
        auto_padding (list of bool, optional): whether to pad automatically along each axis
        lower_pad (tuple, optional): explicit lower padding along each axis
        upper_pad (tuple, optional): explicit upper padding along each axis
        transpose (bool): if True, compute the transposed convolution (deconvolution)
        max_temp_mem_size_in_samples (int): maximum temporary memory, in samples, the operation may use
        name (str): the name of the node in the network
    Returns:
        :class:`cntk.Function`
    '''
    from cntk import convolution
    operand = sanitize_input(operand)    
    return convolution(convolution_map, operand, tuple(reversed(strides)), sharing, auto_padding, 
                        tuple(reversed(lower_pad)), tuple(reversed(upper_pad)), transpose, max_temp_mem_size_in_samples,
                        name).output()
Example #45
def convolution(convolution_map, operand, strides=(1,), sharing=[True], 
                auto_padding=[True], lower_pad=(0,), upper_pad=(0,), transpose=False, 
                max_temp_mem_size_in_samples=0, name=''):
    '''
    Computes the convolution of ``convolution_map`` (a filter tensor) with ``operand`` (an image or tensor).

    Args:
        convolution_map: convolution filter weights, a tensor of shape [outputChannels, inputChannels, kernel dims...]
        operand: tensor to convolve
        strides (tuple, optional): stride along each spatial axis
        sharing (list of bool, optional): whether the kernel is shared across positions along each axis
        auto_padding (list of bool, optional): whether to pad automatically along each axis
        lower_pad (tuple, optional): explicit lower padding along each axis
        upper_pad (tuple, optional): explicit upper padding along each axis
        transpose (bool): if True, compute the transposed convolution (deconvolution)
        max_temp_mem_size_in_samples (int): maximum temporary memory, in samples, the operation may use
        name (str): the name of the node in the network
    Returns:
        :class:`cntk.Function`
    '''
    from cntk import convolution
    operand = sanitize_input(operand)    
    return convolution(convolution_map, operand, tuple(reversed(strides)), sharing, auto_padding, 
                        tuple(reversed(lower_pad)), tuple(reversed(upper_pad)), transpose, max_temp_mem_size_in_samples,
                        name).output()
Example #46
def test_op_convolution_without_padding(convolution_map, convolution_input, use_input_shape_with_inferred_dimension, device_id, precision):
    dt = PRECISION_TO_TYPE[precision]
    dev = cntk_device(device_id)

    conv_map = AA(convolution_map, dtype=dt)
    conv_input = AA(convolution_input, dtype=dt)

    flipped_conv_map = conv_map[..., ::-1, ::-1]

    from scipy import signal
    expected_forward = AA([signal.convolve(flipped_conv_map, conv_input, mode='valid')])

    backward = AA(conv_map)

    conv_input_shape = conv_input.shape
    if use_input_shape_with_inferred_dimension:
        conv_input_shape = tuple(-1 for x in conv_input_shape)

    a = C.input_variable(shape=conv_input_shape,
                dtype=sanitize_dtype_cntk(precision),
                needs_gradient=True,
                name='a')

    conv_input.shape = (1,) + conv_input.shape # adding batch axis
    conv_map.shape = (1,) + conv_map.shape     # adding output-channel axis

    constant_map = constant(value=conv_map, device=dev)

    from cntk import convolution
    input_op = convolution(constant_map, a, auto_padding=[False])

    forward_input = {a: conv_input}
    expected_backward = {a: backward}

    unittest_helper(input_op, forward_input, expected_forward,
                    expected_backward, device_id=device_id, precision=precision)
Example #47
def test_native_binary_function():
    # user functions need to be registered before being callable by python
    ops.register_native_user_function('NativeBinaryConvolveFunction', 'Cntk.BinaryConvolutionExample-' + C.__version__.rstrip('+'), 'CreateBinaryConvolveFunction')
    # be sure to only run on CPU, binary convolution does not have GPU support for now
    dev = cpu()
    # create an arbitrary input mimicking a realistic cifar input
    x = input((64, 30, 30))
    # random filter weights for testing
    w = parameter((64, 64, 3, 3), init=np.reshape(2*(np.random.rand(64*64*3*3)-.5), (64, 64, 3, 3)), dtype=np.float32, device=dev)
    # set the convolution parameters by passing in an attribute dictionary
    attributes = {'stride' : 1, 'padding' : False, 'size' : 3}
    # define the binary convolution op
    op = ops.native_user_function('NativeBinaryConvolveFunction', [w, x], attributes, 'native_binary_convolve_function')
    # also define an op using python custom functions that should have the same output
    op2 = C.convolution(CustomMultibitKernel(w, 1), CustomSign(x), auto_padding = [False])
    # create random input data
    x_data = NDArrayView.from_dense(np.asarray(np.reshape(2*(np.random.rand(64*30*30)-.5), (64, 30, 30)),dtype=np.float32), device=dev)
    # evaluate the CPP binary convolve
    result = op.eval({x : x_data}, device=dev)
    # evaluate the python emulator
    result2 = op2.eval({x : x_data}, device=dev)
    native_times_primitive = op.find_by_name('native_binary_convolve_function')
    # assert that both have the same result
    assert np.allclose(result, result2, atol=0.001)