Example #1
def main():
    log.basicConfig(format='[ %(levelname)s ] %(message)s',
                    level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    try:
        model_wrapper = tensorflow_io_model_wrapper(args)
        data_transformer = tensorflow_transformer(create_dict_for_transformer(args))
        io = io_adapter.get_io_adapter(args, model_wrapper, data_transformer)
        log.info('Loading network files:\n\t {0}'.format(args.model_path))
        graph = load_network(tf, args.model_path, args.output_names)
        input_shapes = get_input_shape(model_wrapper, graph)
        for layer in input_shapes:
            log.info('Shape for input layer {0}: {1}'.format(layer, input_shapes[layer]))
        log.info('Prepare input data')
        io.prepare_input(graph, args.input)
        log.info('Starting inference ({} iterations)'.format(args.number_iter))

        inputs_names = model_wrapper.get_input_layer_names(graph)
        outputs_names = model_wrapper.get_outputs_layer_names(graph, args.output_names)
        result, inference_time = inference_tensorflow(graph, inputs_names, outputs_names,
                                                      args.number_iter, io.get_slice_input)

        time, latency, fps = process_result(args.batch_size, inference_time)
        if not args.raw_output:
            if args.number_iter == 1:
                result = prepare_output(result, outputs_names, args.task)
            io.process_output(result, log)
            result_output(time, fps, latency, log)
        else:
            raw_result_output(time, fps, latency)
    except Exception as ex:
        print('ERROR! : {0}'.format(str(ex)))
        sys.exit(1)
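`inference_tensorflow` is defined elsewhere in the project, so it is not shown above. A minimal sketch of what such a synchronous timing loop could look like, assuming a TF1-style frozen graph and assuming `get_slice_input` returns a ready-to-use feed dict (the signature and tensor-name handling below are assumptions, not the project's actual code):

import time

import tensorflow as tf


def inference_tensorflow(graph, inputs_names, outputs_names, number_iter, get_slice_input):
    # Sketch only: fetch the output tensors by name (TF tensor names usually
    # carry an output-index suffix such as ':0') and time each session run.
    # inputs_names is unused here on the assumption that get_slice_input
    # already keys the feed dict by input tensor name.
    inference_times = []
    result = None
    with tf.compat.v1.Session(graph=graph) as sess:
        outputs = [graph.get_tensor_by_name(name) for name in outputs_names]
        for _ in range(number_iter):
            feed_dict = get_slice_input()  # assumed: maps input names to batches
            start = time.perf_counter()
            result = sess.run(outputs, feed_dict=feed_dict)
            inference_times.append(time.perf_counter() - start)
    return result, inference_times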
Example #2
def main():
    log.basicConfig(format='[ %(levelname)s ] %(message)s',
                    level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    try:
        model_wrapper = intelcaffe_io_model_wrapper()
        data_transformer = intelcaffe_transformer(create_dict_for_transformer(args))
        io = io_adapter.get_io_adapter(args, model_wrapper, data_transformer)
        log.info('Assigning the device for inference')
        set_device_to_infer(args.device)
        log.info('The device has been assigned: {0}'.format(args.device))
        log.info('Loading network files:\n\t {0}\n\t {1}'.format(
            args.model_prototxt, args.model_caffemodel))
        net = load_network(args.model_prototxt, args.model_caffemodel)
        net = network_input_reshape(net, args.batch_size)
        input_shapes = utils.get_input_shape(model_wrapper, net)
        for layer in input_shapes:
            log.info('Shape for input layer {0}: {1}'.format(layer, input_shapes[layer]))
        log.info('Prepare input data')
        io.prepare_input(net, args.input)
        log.info('Starting inference ({} iterations)'.format(args.number_iter))
        result, inference_time = inference_caffe(net, args.number_iter, io.get_slice_input)
        time, latency, fps = process_result(args.batch_size, inference_time)
        if not args.raw_output:
            io.process_output(result, log)
            result_output(time, fps, latency, log)
        else:
            raw_result_output(time, fps, latency)
    except Exception as ex:
        print('ERROR! : {0}'.format(str(ex)))
        sys.exit(1)
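`inference_caffe` is likewise project code that is not shown. A rough sketch under the standard pycaffe API, assuming `get_slice_input` yields a dict of blob name to NumPy array (names and signatures assumed):

import time


def inference_caffe(net, number_iter, get_slice_input):
    # Sketch only: copy each input slice into the network's input blobs,
    # then time one forward pass per iteration.
    inference_times = []
    result = None
    for _ in range(number_iter):
        for blob_name, data in get_slice_input().items():  # assumed dict interface
            net.blobs[blob_name].data[...] = data
        start = time.perf_counter()
        result = net.forward()
        inference_times.append(time.perf_counter() - start)
    return result, inference_times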
Example #3
def main():
    log.basicConfig(format='[ %(levelname)s ] %(message)s',
                    level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    try:
        model_wrapper = openvino_io_model_wrapper()
        data_transformer = transformer()
        io = io_adapter.get_io_adapter(args, model_wrapper, data_transformer)
        iecore = utils.create_ie_core(args.extension, args.cldnn_config, args.device,
            args.nthreads, None, args.dump, 'sync', log)
        net = utils.create_network(iecore, args.model_xml, args.model_bin, log)
        input_shapes = utils.get_input_shape(model_wrapper, net)
        for layer in input_shapes:
            log.info('Shape for input layer {0}: {1}'.format(layer, input_shapes[layer]))
        utils.reshape_input(net, args.batch_size)
        log.info('Prepare input data')
        io.prepare_input(net, args.input)
        log.info('Create executable network')
        exec_net = utils.load_network(iecore, net, args.device, args.priority, 1)
        log.info('Starting inference ({} iterations) on {}'.format(args.number_iter, args.device))
        result, time = infer_sync(exec_net, args.number_iter, io.get_slice_input)
        average_time, latency, fps = process_result(time, args.batch_size, args.mininfer)
        if not args.raw_output:
            io.process_output(result, log)
            result_output(average_time, fps, latency, log)
        else:
            raw_result_output(average_time, fps, latency)
        del net
        del exec_net
        del iecore
    except Exception as ex:
        print('ERROR! : {0}'.format(str(ex)))
        sys.exit(1)
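`infer_sync` is not shown either; with the legacy Inference Engine Python API (`openvino.inference_engine`), a synchronous timing loop might look roughly like this (signatures assumed):

import time


def infer_sync(exec_net, number_iter, get_slice_input):
    # Sketch only: ExecutableNetwork.infer() runs one blocking request;
    # record the wall-clock time of each call.
    times = []
    result = None
    for _ in range(number_iter):
        input_data = get_slice_input()  # assumed: dict of input layer name -> batch
        start = time.perf_counter()
        result = exec_net.infer(inputs=input_data)
        times.append(time.perf_counter() - start)
    return result, times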
Example #4
def main():
    log.basicConfig(
        format='[ %(levelname)s ] %(message)s',
        level=log.INFO,
        stream=sys.stdout
    )
    args = build_parser().parse_args()
    try:
        model_wrapper = openvino_io_model_wrapper()
        data_transformer = openvino_transformer()
        io = io_adapter.get_io_adapter(args, model_wrapper, data_transformer)
        core = utils.create_core(
            args.extension,
            args.intel_gpu_config,
            args.device,
            args.nthreads,
            args.nstreams,
            args.dump,
            'async',
            log
        )
        model = utils.create_model(core, args.model_xml, args.model_bin, log)
        utils.configure_model(core, model, args.device, args.default_device, args.affinity)
        input_shapes = utils.get_input_shape(model_wrapper, model)
        for layer in input_shapes:
            log.info('Shape for input layer {0}: {1}'.format(layer, input_shapes[layer]))
        utils.reshape_input(model, args.batch_size)
        log.info('Prepare input data')
        io.prepare_input(model, args.input)
        log.info('Create executable network')
        compiled_model = utils.compile_model(core, model, args.device, args.priority)
        log.info('Starting inference ({} iterations) with {} requests on {}'.format(args.number_iter,
                                                                                    args.requests,
                                                                                    args.device))
        result, time = infer_async(compiled_model, args.number_iter, args.requests, io.get_slice_input)
        average_time, fps = process_result(time, args.batch_size, args.number_iter)
        if not args.raw_output:
            io.process_output(result, log)
            result_output(average_time, fps, log)
        else:
            raw_result_output(average_time, fps)
        del model
        del compiled_model
        del core
    except Exception as ex:
        print('ERROR! : {0}'.format(str(ex)))
        sys.exit(1)
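`infer_async` is project code too; under the OpenVINO API 2.0 (`openvino.runtime`), an asynchronous loop is typically built on `AsyncInferQueue`. A hypothetical sketch (the callback and the returned shapes are assumptions, not the project's actual code):

import time

from openvino.runtime import AsyncInferQueue


def infer_async(compiled_model, number_iter, num_requests, get_slice_input):
    # Sketch only: keep `num_requests` requests in flight and measure the
    # total wall-clock time across all iterations.
    results = []

    def on_done(request, userdata):
        # Completion order is not guaranteed to match submission order.
        results.append(request.results)

    infer_queue = AsyncInferQueue(compiled_model, num_requests)
    infer_queue.set_callback(on_done)
    start = time.perf_counter()
    for _ in range(number_iter):
        infer_queue.start_async(get_slice_input())  # blocks while all requests are busy
    infer_queue.wait_all()
    total_time = time.perf_counter() - start
    return results, total_time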