Example #1
def c2_native_run_net(init_net, predict_net, inputs):
    ws = Workspace()
    if init_net:
        ws.RunNetOnce(init_net)

    if isinstance(inputs, dict):
        for key, value in inputs.items():
            ws.FeedBlob(key, value, predict_net.device_option)
    else:
        uninitialized = [
            input_name for input_name in predict_net.external_input
            if not ws.HasBlob(input_name)
        ]
        if len(uninitialized) == len(inputs):
            for key, value in zip(uninitialized, inputs):
                ws.FeedBlob(key, value, predict_net.device_option)
        else:
            # If the counts don't match (e.g. everything was already
            # initialized by init_net), just feed the first len(inputs)
            # external_input blobs in order.
            assert (len(inputs) <= len(predict_net.external_input))
            for i in range(len(inputs)):
                ws.FeedBlob(predict_net.external_input[i], inputs[i],
                            predict_net.device_option)

    ws.RunNetOnce(predict_net)

    output_names = predict_net.external_output
    output_values = [ws.FetchBlob(name) for name in output_names]
    return ws, namedtupledict('Outputs', output_names)(*output_values)
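
A minimal usage sketch for c2_native_run_net above. It assumes init_net and predict_net are caffe2_pb2.NetDef protos obtained elsewhere (e.g. from an ONNX conversion); the blob name "data" and the input shape are purely illustrative.

import numpy as np

# Hypothetical input tensor; the name "data" must match an external_input of predict_net.
x = np.random.randn(1, 3, 224, 224).astype(np.float32)

# inputs may be a dict keyed by blob name ...
_, outputs = c2_native_run_net(init_net, predict_net, {"data": x})

# ... or a list, matched in order against the uninitialized external inputs.
_, outputs = c2_native_run_net(init_net, predict_net, [x])

# outputs is a namedtuple whose fields follow predict_net.external_output.
print(outputs[0].shape)
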
Example #2
    def run_node(cls,
                 node,
                 inputs,
                 device='CPU',
                 opset_version=_known_opset_version):
        super(Caffe2Backend, cls).run_node(node, inputs, device)

        device_option = get_device_option(Device(device))
        with Workspace(), core.DeviceScope(device_option):  # temporary!
            if isinstance(inputs, dict):
                for key, value in inputs.items():
                    workspace.FeedBlob(key, value)
            else:
                assert len(node.input) == len(
                    inputs), "{}: expected {} but got {}".format(
                        node.op_type, len(node.input), len(inputs))
                for key, value in zip(node.input, inputs):
                    workspace.FeedBlob(key, value)

            cls._inplace_rewrite([node])
            init_ops, ops, _ = cls._onnx_node_to_caffe2_op(
                None, None, node, opset_version or cls._known_opset_version)
            ops = init_ops + ops
            for op in ops:
                op.device_option.CopyFrom(device_option)
            workspace.RunOperatorsOnce(ops)
            output_values = [workspace.FetchBlob(name) for name in node.output]
            return namedtupledict('Outputs', node.output)(*output_values)
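
A rough sketch of how run_node above might be exercised, assuming it is the run_node classmethod of Caffe2Backend and that the onnx package is available; the Relu node and the input array are made up for illustration.

import numpy as np
from onnx import helper

# Build a single ONNX node and push it through the backend.
node = helper.make_node("Relu", inputs=["x"], outputs=["y"])
x = np.random.randn(2, 3).astype(np.float32)

# inputs may be a list (matched to node.input) or a dict keyed by blob name.
outputs = Caffe2Backend.run_node(node, [x])
print(outputs.y)  # one namedtuple field per entry in node.output
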
Example #3
    def prepare(cls, predict_model, device='CPU',
                init_model=None, **kwargs):
        '''
        For the ONNX Caffe2Backend, we require that init_graph does not initialize
        the actual input of the predict_graph.

        For example, if "img" is the input blob for the predict_net, we require that
        "img" is not initialized in init_graph or in the initializer of the
        predict_graph. We do not check for this, since there is no way to know which
        blob is the input of the predict_graph.
        '''
        super(Caffe2Backend, cls).prepare(predict_model, device, **kwargs)

        if init_model:
            checker.check_model(init_model)

        init_net, predict_net = cls.onnx_graph_to_caffe2_net(predict_model.graph)
        predict_net.device_option.CopyFrom(get_device_option(Device(device)))

        ws = Workspace()
        with ws, core.DeviceScope(predict_net.device_option):
            if init_model:
                _, init_net_from_model = cls.onnx_graph_to_caffe2_net(init_model.graph)
                init_net.op.extend(init_net_from_model.op)
            workspace.RunNetOnce(init_net)
            uninitialized = [x
                             for x in predict_net.external_input
                             if not workspace.HasBlob(x)]

        return Caffe2Rep(predict_net, ws, uninitialized)
Example #4
def benchmark_caffe2_model(init_net,
                           predict_net,
                           warmup_iters=3,
                           main_iters=10,
                           layer_details=True):
    '''
        Run the benchmark net on the target model.
        Return the average execution time per iteration, in milliseconds.
    '''
    ws = Workspace()
    if init_net:
        ws.RunNetOnce(init_net)
    ws.CreateNet(predict_net)
    results = ws.BenchmarkNet(predict_net.name, warmup_iters, main_iters,
                              layer_details)
    del ws
    return results[0]
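
A hypothetical call to benchmark_caffe2_model above. It assumes init_net and predict_net are caffe2_pb2.NetDef protos and that init_net produces every external input of predict_net (otherwise CreateNet would fail); the iteration counts are arbitrary.

# Average milliseconds per iteration over 50 timed runs after 5 warmup runs.
ms_per_iter = benchmark_caffe2_model(init_net, predict_net,
                                     warmup_iters=5,
                                     main_iters=50,
                                     layer_details=False)
print("average iteration time: {:.3f} ms".format(ms_per_iter))
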
Example #5
    def prepare(cls, model, device='CPU', **kwargs):
        '''
        For the ONNX Caffe2Backend, we require that init_graph does not initialize
        the actual input of the predict_graph.

        For example, if "img" is the input blob for the predict_net, we require that
        "img" is not initialized in init_graph or in the initializer of the
        predict_graph. We do not check for this, since there is no way to know which
        blob is the input of the predict_graph.
        '''
        super(Caffe2Backend, cls).prepare(model, device, **kwargs)

        opset_version = None
        for imp in model.opset_import:
            if not imp.HasField("domain") or imp.domain == "":
                opset_version = imp.version
                if imp.version > cls._known_opset_version:
                    warnings.warn(
                        "This version of onnx-caffe2 targets ONNX operator set version {}, but the model we are trying to import uses version {}.  We will try to import it anyway, but if the model uses operators which had BC-breaking changes in the intervening versions, import will fail."
                        .format(cls._known_opset_version, imp.version))
            else:
                warnings.warn("Unrecognized operator set {}".format(
                    imp.domain))
        if opset_version is None:
            if model.ir_version >= 0x00000003:
                raise RuntimeError(
                    "Model with IR version >= 3 did not specify ONNX operator set version (onnx-caffe2 requires it)"
                )
            else:
                opset_version = 1

        ws = Workspace()
        device_option = get_device_option(Device(device))

        # Directly load initializer data into blobs in workspace
        cls._direct_initialize_parameters(
            model.graph.initializer,
            ws,
            device_option,
        )

        initialized = {init.name for init in model.graph.initializer}

        cls._direct_initialize_inputs(
            model.graph.input,
            initialized,
            ws,
            device_option,
        )

        uninitialized = [
            value_info.name for value_info in model.graph.input
            if value_info.name not in initialized
        ]

        init_net, predict_net = cls._onnx_model_to_caffe2_net(
            model, device, opset_version, False)

        retval = Caffe2Rep(init_net, predict_net, ws, uninitialized)
        return retval
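
For context, a prepare() like the one above is normally reached through the ONNX backend API, and the returned Caffe2Rep is run via the standard BackendRep.run interface, which feeds the uninitialized graph inputs. A rough sketch, assuming Caffe2Backend is the class defining this method; the file name, input shape, and single-input assumption are illustrative.

import numpy as np
import onnx

model = onnx.load("model.onnx")           # hypothetical model file
rep = Caffe2Backend.prepare(model, device="CPU")

# Feed the uninitialized graph inputs (here assumed to be a single tensor).
x = np.random.randn(1, 3, 224, 224).astype(np.float32)
outputs = rep.run([x])
print(outputs[0])
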
Example #6
    def prepare(cls, model, device='CPU', **kwargs):
        '''
        For the ONNX Caffe2Backend, we require that init_graph does not initialize
        the actual input of the predict_graph.

        For example, if "img" is the input blob for the predict_net, we require that
        "img" is not initialized in init_graph or in the initializer of the
        predict_graph. We do not check for this, since there is no way to know which
        blob is the input of the predict_graph.
        '''
        super(Caffe2Backend, cls).prepare(model, device, **kwargs)

        ws = Workspace()
        device_option = get_device_option(Device(device))

        # Directly load initializer data into blobs in workspace
        cls._direct_initialize_parameters(
            model.graph.initializer,
            ws,
            device_option,
        )
        # Need to pull this out before we delete model.graph.initializer
        initialized = {init.name for init in model.graph.initializer}
        initializer = model.graph.initializer[:]
        # Delete the initializers so they aren't serialized
        del model.graph.initializer[:]

        cls._direct_initialize_inputs(
            model.graph.input,
            initialized,
            ws,
            device_option,
        )
        # Pull this out to manually add external inputs
        external_inputs = model.graph.input[:]
        del model.graph.input[:]

        _, predict_net = cls.onnx_graph_to_caffe2_net(model.graph)
        predict_net.device_option.CopyFrom(device_option)
        predict_net.external_input.extend(value_info.name
                                          for value_info in external_inputs)

        # Restore these so as not to mutate input
        model.graph.initializer.extend(initializer)
        model.graph.input.extend(external_inputs)

        uninitialized = [
            x for x in predict_net.external_input if x not in initialized
        ]

        retval = Caffe2Rep(predict_net, ws, uninitialized)
        return retval
Example #7
def c2_native_run_op(op_def, inputs):
    with Workspace():
        if isinstance(inputs, dict):
            for key, value in inputs.items():
                workspace.FeedBlob(key, value)
        else:
            assert (len(op_def.input) == len(inputs))
            for key, value in zip(op_def.input, inputs):
                workspace.FeedBlob(key, value)

        workspace.RunOperatorOnce(op_def)

        output_names = op_def.output
        output_values = [workspace.FetchBlob(name) for name in output_names]
        return namedtupledict('Outputs', output_names)(*output_values)
Example #8
    def run_node(cls, node, inputs, device='CPU'):
        super(Caffe2Backend, cls).run_node(node, inputs, device)

        device_option = get_device_option(Device(device))
        with Workspace(), core.DeviceScope(device_option):  # temporary!
            if isinstance(inputs, dict):
                for key, value in inputs.items():
                    workspace.FeedBlob(key, value)
            else:
                assert(len(node.input) == len(inputs))
                for key, value in zip(node.input, inputs):
                    workspace.FeedBlob(key, value)

            cls._inplace_rewrite([node])
            ops = cls._onnx_node_to_caffe2_op(node)
            for op in ops:
                workspace.RunOperatorOnce(op)
            output_values = [workspace.FetchBlob(name) for name in node.output]
            return namedtupledict('Outputs', node.output)(*output_values)
Example #9
def c2_native_run_net(init_net, predict_net, inputs):
    ws = Workspace()
    if init_net:
        ws.RunNetOnce(init_net)

    if isinstance(inputs, dict):
        for key, value in inputs.items():
            ws.FeedBlob(key, value)
    else:
        uninitialized = [
            input_name for input_name in predict_net.external_input
            if not ws.HasBlob(input_name)
        ]
        assert len(uninitialized) == len(inputs)
        for key, value in zip(uninitialized, inputs):
            ws.FeedBlob(key, value)

    ws.RunNetOnce(predict_net)

    output_names = predict_net.external_output
    output_values = [ws.FetchBlob(name) for name in output_names]
    return ws, namedtupledict('Outputs', output_names)(*output_values)
Example #10
def c2_native_run_op(op_def, inputs):
    ws = Workspace()
    if isinstance(inputs, dict):
        for key, value in inputs.items():
            ws.FeedBlob(key, value, op_def.device_option)
    else:
        assert (len(op_def.input) == len(inputs))
        for key, value in zip(op_def.input, inputs):
            ws.FeedBlob(key, value, op_def.device_option)

    ws.RunOperatorOnce(op_def)

    output_names = op_def.output
    output_values = [ws.FetchBlob(name) for name in output_names]
    return ws, namedtupledict('Outputs', output_names)(*output_values)
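
A small sketch of driving c2_native_run_op above with an operator built via caffe2.python.core.CreateOperator; the Relu op and its input are illustrative.

import numpy as np
from caffe2.python import core

# Build a single Caffe2 operator and run it in a fresh workspace.
op_def = core.CreateOperator("Relu", ["X"], ["Y"])
x = np.random.randn(4, 5).astype(np.float32)

# inputs may be a dict keyed by blob name or a list matched to op_def.input.
_, outputs = c2_native_run_op(op_def, {"X": x})
print(outputs.Y.shape)
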
Example #11
    def prepare(cls, model, device='CPU', **kwargs):
        '''
        For the ONNX Caffe2Backend, we require that init_graph does not initialize
        the actual input of the predict_graph.

        For example, if "img" is the input blob for the predict_net, we require that
        "img" is not initialized in init_graph or in the initializer of the
        predict_graph. We do not check for this, since there is no way to know which
        blob is the input of the predict_graph.
        '''
        super(Caffe2Backend, cls).prepare(model, device, **kwargs)

        init_net, predict_net = cls.onnx_graph_to_caffe2_net(model.graph)
        predict_net.device_option.CopyFrom(get_device_option(Device(device)))

        initialized = {init.name for init in model.graph.initializer}
        uninitialized = [x for x in predict_net.external_input
                         if x not in initialized]

        ws = Workspace()
        with ws, core.DeviceScope(predict_net.device_option):
            workspace.RunNetOnce(init_net)

        return Caffe2Rep(predict_net, ws, uninitialized)