示例#1
0
def create_proposal_layer(rpn_cls_prob_reshape, rpn_bbox_pred, im_info, cfg, use_native_proposal_layer=False):
    """Build the RPN proposal layer (native C++ op or python user function)
    and return its output aliased as 'rpn_rois'."""
    train, test = cfg["TRAIN"], cfg["TEST"]
    layer_config = {
        "feat_stride": cfg["MODEL"].FEATURE_STRIDE,
        "scales": cfg["DATA"].PROPOSAL_LAYER_SCALES,
        "train_pre_nms_topN": train.RPN_PRE_NMS_TOP_N,
        "train_post_nms_topN": train.RPN_POST_NMS_TOP_N,
        "train_nms_thresh": float(train.RPN_NMS_THRESH),
        "train_min_size": float(train.RPN_MIN_SIZE),
        "test_pre_nms_topN": test.RPN_PRE_NMS_TOP_N,
        "test_post_nms_topN": test.RPN_POST_NMS_TOP_N,
        "test_nms_thresh": float(test.RPN_NMS_THRESH),
        "test_min_size": float(test.RPN_MIN_SIZE),
    }

    if use_native_proposal_layer:
        # the native op lives in a versioned library; strip any '+' build suffix
        lib_name = 'Cntk.ProposalLayerLib-' + cntk.__version__.rstrip('+')
        cntk.ops.register_native_user_function('ProposalLayerOp', lib_name,
                                               'CreateProposalLayer')
        rpn_rois_raw = ops.native_user_function(
            'ProposalLayerOp', [rpn_cls_prob_reshape, rpn_bbox_pred, im_info],
            layer_config, 'native_proposal_layer')
    else:
        # pure-python implementation of the same layer
        rpn_rois_raw = user_function(
            ProposalLayer(rpn_cls_prob_reshape, rpn_bbox_pred, im_info, layer_config))

    return alias(rpn_rois_raw, name='rpn_rois')
示例#2
0
def create_proposal_layer(rpn_cls_prob_reshape,
                          rpn_bbox_pred,
                          im_info,
                          cfg,
                          use_native_proposal_layer=False):
    """Create the RPN proposal layer and alias its output as 'rpn_rois'.

    Chooses between the registered native C++ op and the python
    ProposalLayer user function depending on `use_native_proposal_layer`.
    """
    layer_config = {
        "feat_stride": cfg["MODEL"].FEATURE_STRIDE,
        "scales": cfg["DATA"].PROPOSAL_LAYER_SCALES,
    }
    # the layer carries separate NMS settings for the train and test phases
    for phase, section in (("train", cfg["TRAIN"]), ("test", cfg["TEST"])):
        layer_config[phase + "_pre_nms_topN"] = section.RPN_PRE_NMS_TOP_N
        layer_config[phase + "_post_nms_topN"] = section.RPN_POST_NMS_TOP_N
        layer_config[phase + "_nms_thresh"] = float(section.RPN_NMS_THRESH)
        layer_config[phase + "_min_size"] = float(section.RPN_MIN_SIZE)

    if use_native_proposal_layer:
        # native library name is versioned; drop any '+' suffix from the version
        cntk.ops.register_native_user_function(
            'ProposalLayerOp',
            'Cntk.ProposalLayerLib-' + cntk.__version__.rstrip('+'),
            'CreateProposalLayer')
        rpn_rois_raw = ops.native_user_function(
            'ProposalLayerOp',
            [rpn_cls_prob_reshape, rpn_bbox_pred, im_info],
            layer_config,
            'native_proposal_layer')
    else:
        # fall back to the python user-function implementation
        rpn_rois_raw = user_function(
            ProposalLayer(rpn_cls_prob_reshape, rpn_bbox_pred, im_info,
                          layer_config))

    return alias(rpn_rois_raw, name='rpn_rois')
示例#3
0
def test_native_binary_function():
    """Run the native binary convolution user function and a python
    emulation (CustomMultibitKernel + CustomSign) on the same random input.

    NOTE(review): the trailing ''' at the end of this block opens an
    unterminated string in this view; it appears to disable the final
    comparison of `result` and `result2` — confirm against the full file.
    """
    # user functions need to be registered before being callable by python
    # NOTE(review): '\B' in the library name below is an invalid escape
    # sequence (DeprecationWarning); a sibling copy of this test spells it
    # '\\BinaryConvolution' — confirm which is intended.
    if not nopt.native_convolve_function_registered:
        pytest.skip("Could not find {0} library. "
                    "Please check if HALIDE_PATH is configured properly "
                    "and try building {1} again".format(
                        'Cntk.BinaryConvolution-' + C.__version__.rstrip('+'),
                        'Extnsibiliy\BinaryConvolution'))

    # be sure to only run on CPU, binary convolution does not have GPU support for now
    dev = C.cpu()
    # create an arbitrary input mimicking a realistic cifar input
    x = input((64, 28, 28))
    # random filter weights for testing, uniform in [-1, 1)
    w = parameter((64, 64, 3, 3),
                  init=np.reshape(2 * (np.random.rand(64 * 64 * 3 * 3) - .5),
                                  (64, 64, 3, 3)),
                  dtype=np.float32,
                  device=dev)

    # set the convolution parameters by passing in an attribute dictionary
    #attributes = {'stride' : 1, 'padding' : False, 'size' : 3}

    # convolution hyper-parameters plus input geometry for the native op
    attributes = {
        'stride': 1,
        'padding': False,
        'size': 3,
        'h': 28,
        'w': 28,
        'channels': 64,
        'filters': 64
    }

    # define the binary convolution op
    op = ops.native_user_function('NativeBinaryConvolveFunction', [w, x],
                                  attributes, 'native_binary_convolve')

    # also define an op using python custom functions that should have the same output
    op2 = C.convolution(CustomMultibitKernel(w, 1),
                        CustomSign(x),
                        auto_padding=[False])
    # create random input data, uniform in [-1, 1)
    x_data = NDArrayView.from_dense(np.asarray(np.reshape(
        2 * (np.random.rand(64 * 28 * 28) - .5), (64, 28, 28)),
                                               dtype=np.float32),
                                    device=dev)
    # evaluate the CPP binary convolve
    result = op.eval({x: x_data}, device=dev)

    # evaluate the python emulator
    result2 = op2.eval({x: x_data}, device=dev)
    native_times_primitive = op.find_by_name('native_binary_convolve')
    # assert that both have the same result
    '''
示例#4
0
    def converter(x):
        """Rebuild node `x` as the native ProposalLayerOp, overlaying the
        current TEST/TRAIN NMS settings on top of its existing attributes."""
        layer_config = copy.deepcopy(x.attributes)
        # test keys first, then train keys, matching the original ordering
        for phase in ("test", "train"):
            section = cfg[phase.upper()]
            layer_config[phase + "_pre_nms_topN"] = section.RPN_PRE_NMS_TOP_N
            layer_config[phase + "_post_nms_topN"] = section.RPN_POST_NMS_TOP_N
            layer_config[phase + "_nms_thresh"] = float(section.RPN_NMS_THRESH)
            layer_config[phase + "_min_size"] = float(section.RPN_MIN_SIZE)

        return ops.native_user_function('ProposalLayerOp', list(x.inputs),
                                        layer_config, 'native_proposal_layer')
def test_native_binary_function():
    """Run the native binary convolution user function and a python
    emulation (CustomMultibitKernel + CustomSign) on the same random input.

    NOTE(review): the trailing ''' at the end of this block opens an
    unterminated string in this view; it appears to disable the final
    comparison of `result` and `result2` — confirm against the full file.
    """
    # user functions need to be registered before being callable by python
    if not nopt.native_convolve_function_registered:
      pytest.skip("Could not find {0} library. "
        "Please check if HALIDE_PATH is configured properly "
        "and try building {1} again"
        .format('Cntk.BinaryConvolution-' + C.__version__.rstrip('+'),
        'Extnsibiliy\\BinaryConvolution'))

    # be sure to only run on CPU, binary convolution does not have GPU support for now
    dev = C.cpu()
    # create an arbitrary input mimicking a realistic cifar input
    x = input((64, 28, 28))
    # random filter weights for testing, uniform in [-1, 1)
    w = parameter((64, 64, 3, 3), init=np.reshape(2*(np.random.rand(64*64*3*3)-.5), (64, 64, 3, 3)), dtype=np.float32, device=dev)

    # set the convolution parameters by passing in an attribute dictionary
    #attributes = {'stride' : 1, 'padding' : False, 'size' : 3}

    # convolution hyper-parameters plus input geometry for the native op
    attributes = {'stride' : 1,
                  'padding' : False,
                  'size' : 3,
                  'h' : 28,
                  'w' : 28,
                  'channels' : 64,
                  'filters' : 64 }

    # define the binary convolution op
    op = ops.native_user_function('NativeBinaryConvolveFunction', [w, x], attributes, 'native_binary_convolve')

    # also define an op using python custom functions that should have the same output
    op2 = C.convolution(CustomMultibitKernel(w, 1), CustomSign(x), auto_padding = [False])
    # create random input data, uniform in [-1, 1)
    x_data = NDArrayView.from_dense(np.asarray(np.reshape(2*(np.random.rand(64*28*28)-.5), (64, 28, 28)),dtype=np.float32), device=dev)
    # evaluate the CPP binary convolve
    result = op.eval({x : x_data}, device=dev)

    # evaluate the python emulator
    result2 = op2.eval({x : x_data}, device=dev)
    native_times_primitive = op.find_by_name('native_binary_convolve')
    # assert that both have the same result
    '''
示例#6
0
def test_native_binary_function():
    """Compare the native binary convolution user function against the
    python emulation (CustomMultibitKernel + CustomSign) on random data."""
    # native user functions must be registered before python can call them
    ops.register_native_user_function(
        'NativeBinaryConvolveFunction',
        'Cntk.BinaryConvolutionExample-' + C.__version__.rstrip('+'),
        'CreateBinaryConvolveFunction')
    # binary convolution only has CPU support for now
    dev = cpu()
    # arbitrary input shaped like a realistic cifar input
    x = input((64, 30, 30))
    # random filter weights in [-1, 1) for a 64->64, 3x3 convolution
    weight_init = np.reshape(2 * (np.random.rand(64 * 64 * 3 * 3) - .5),
                             (64, 64, 3, 3))
    w = parameter((64, 64, 3, 3), init=weight_init, dtype=np.float32,
                  device=dev)
    # convolution hyper-parameters travel as an attribute dictionary
    attributes = {'stride': 1, 'padding': False, 'size': 3}
    # the native op under test
    op = ops.native_user_function(
        'NativeBinaryConvolveFunction', [w, x], attributes,
        'native_binary_convolve_function')
    # reference op built from python custom functions; should match the native op
    op2 = C.convolution(CustomMultibitKernel(w, 1), CustomSign(x),
                        auto_padding=[False])
    # random input data in [-1, 1)
    raw_input = np.reshape(2 * (np.random.rand(64 * 30 * 30) - .5),
                           (64, 30, 30))
    x_data = NDArrayView.from_dense(np.asarray(raw_input, dtype=np.float32),
                                    device=dev)
    # evaluate the CPP binary convolve, then the python emulator
    result = op.eval({x: x_data}, device=dev)
    result2 = op2.eval({x: x_data}, device=dev)
    native_times_primitive = op.find_by_name('native_binary_convolve_function')
    # both implementations must agree within a small tolerance
    assert np.allclose(result, result2, atol=0.001)
def test_native_binary_function():
    """Check the native binary convolution op against its python emulation."""
    # native user functions must be registered before python can call them
    lib = 'Cntk.BinaryConvolutionExample-' + C.__version__.rstrip('+')
    ops.register_native_user_function('NativeBinaryConvolveFunction', lib,
                                      'CreateBinaryConvolveFunction')
    # binary convolution only has CPU support for now
    dev = cpu()
    # arbitrary input shaped like a realistic cifar input
    x = input((64, 30, 30))
    # random filter weights in [-1, 1)
    init_vals = 2 * (np.random.rand(64 * 64 * 3 * 3) - .5)
    w = parameter((64, 64, 3, 3), init=np.reshape(init_vals, (64, 64, 3, 3)),
                  dtype=np.float32, device=dev)
    # convolution hyper-parameters as an attribute dictionary
    attributes = {'stride': 1, 'padding': False, 'size': 3}
    # native op under test
    op = ops.native_user_function('NativeBinaryConvolveFunction', [w, x],
                                  attributes, 'native_binary_convolve_function')
    # reference implementation built from python custom functions
    op2 = C.convolution(CustomMultibitKernel(w, 1), CustomSign(x),
                        auto_padding=[False])
    # random input data in [-1, 1)
    data_vals = 2 * (np.random.rand(64 * 30 * 30) - .5)
    x_data = NDArrayView.from_dense(
        np.asarray(np.reshape(data_vals, (64, 30, 30)), dtype=np.float32),
        device=dev)
    # evaluate the CPP binary convolve, then the python emulator
    result = op.eval({x: x_data}, device=dev)
    result2 = op2.eval({x: x_data}, device=dev)
    native_times_primitive = op.find_by_name('native_binary_convolve_function')
    # both implementations must agree within a small tolerance
    assert np.allclose(result, result2, atol=0.001)
 def converter(x):
     """Convert node `x` into the native ProposalLayerOp, carrying over
     its attributes and inputs unchanged."""
     cloned_attrs = copy.deepcopy(x.attributes)
     return ops.native_user_function(
         'ProposalLayerOp', list(x.inputs), cloned_attrs,
         'native_proposal_layer')
示例#9
0
 def converter(x):
     """Swap node `x` for the native ProposalLayerOp with identical
     attributes and inputs."""
     attrs = copy.deepcopy(x.attributes)
     inputs = list(x.inputs)
     return ops.native_user_function('ProposalLayerOp', inputs, attrs,
                                     'native_proposal_layer')