Example #1
    def __init__(self):
        """ Initializes the class. """

        self.Helpers = Helpers("OpenVINO", False)

        os.environ["KMP_BLOCKTIME"] = "1"
        os.environ["KMP_SETTINGS"] = "1"
        os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"
        os.environ["OMP_NUM_THREADS"] = str(
            self.Helpers.confs["cnn"]["system"]["cores"])

        self.testing_dir = self.Helpers.confs["cnn"]["data"]["test"]
        self.valid = self.Helpers.confs["cnn"]["data"]["valid_types"]

        mxml = self.Helpers.confs["cnn"]["model"]["ir"]
        mbin = os.path.splitext(mxml)[0] + ".bin"

        ie = IECore()
        self.net = ie.read_network(model=mxml, weights=mbin)
        self.input_blob = next(iter(self.net.inputs))
        self.net = ie.load_network(
            network=self.net,
            device_name=self.Helpers.confs["cnn"]["model"]["device"])

        self.Helpers.logger.info("Class initialization complete.")

    def do_model(self):
        """ Loads the model """

        mxml = self.Helpers.confs["cnn"]["model"]["ir"]
        mbin = os.path.splitext(mxml)[0] + ".bin"

        ie = IECore()
        self.net = ie.read_network(model=mxml, weights=mbin)
        self.input_blob = next(iter(self.net.inputs))
        self.net = ie.load_network(
            network=self.net,
            device_name=self.Helpers.confs["cnn"]["model"]["device"])
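Example #1 reads an IR pair (.xml and .bin), picks the first input blob, and loads the network onto the device named in the configuration. The stand-alone sketch below distills the same Inference Engine pattern; the model path, device name, and dummy input are placeholder assumptions, not values from the snippet.

import numpy as np
from openvino.inference_engine import IECore

mxml = "model/frozen.xml"  # hypothetical IR path
mbin = "model/frozen.bin"  # hypothetical weights path

ie = IECore()
net = ie.read_network(model=mxml, weights=mbin)
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
exec_net = ie.load_network(network=net, device_name="CPU")  # device is an assumption

# Dummy input matching the network's reported input shape
data = np.zeros(net.inputs[input_blob].shape, dtype=np.float32)
res = exec_net.infer(inputs={input_blob: data})
print(res[out_blob].shape)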
def apply_offline_transformations(input_model: str, framework: str,
                                  transforms: list):
    # This variable is only needed by the GenerateMappingFile transformation
    # to produce a correct mapping
    extract_names = framework in ['tf', 'mxnet', 'kaldi']

    from openvino.inference_engine import read_network  # pylint: disable=import-error,no-name-in-module
    from openvino.offline_transformations import GenerateMappingFile  # pylint: disable=import-error,no-name-in-module

    net = read_network(input_model + "_tmp.xml", input_model + "_tmp.bin")
    apply_moc_transformations(net, transforms)
    net.serialize(input_model + ".xml", input_model + ".bin")
    path_to_mapping = input_model + ".mapping"
    GenerateMappingFile(net, path_to_mapping.encode('utf-8'), extract_names)
Example #4
def apply_offline_transformations(input_model: str, framework: str, transforms: list):
    # This variable is only needed by the GenerateMappingFile transformation
    # to produce a correct mapping
    extract_names = framework in ['tf', 'mxnet', 'kaldi']

    from openvino.inference_engine import read_network  # pylint: disable=import-error
    from openvino.offline_transformations import ApplyMOCTransformations, GenerateMappingFile  # pylint: disable=import-error

    net = read_network(input_model + "_tmp.xml", input_model + "_tmp.bin")

    available_transformations = get_available_transformations()

    for name, args in transforms:
        if name not in available_transformations.keys():
            raise Error("Transformation {} is not available.".format(name))

        available_transformations[name](net, **args)

    net.serialize(input_model + ".xml", input_model + ".bin")
    path_to_mapping = input_model + ".mapping"
    GenerateMappingFile(net, path_to_mapping.encode('utf-8'), extract_names)
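The function above expects <input_model>_tmp.xml and <input_model>_tmp.bin to exist, applies each requested transformation by name, and writes <input_model>.xml, .bin, and .mapping. A hedged usage sketch follows; the base path and the transformation name are placeholders and must correspond to keys returned by get_available_transformations().

# Hypothetical call; "SomeTransformation" stands in for a real transformation name
apply_offline_transformations(
    input_model="/tmp/my_model",             # expects /tmp/my_model_tmp.xml/.bin to exist
    framework="tf",                          # enables name extraction for the mapping file
    transforms=[("SomeTransformation", {})]  # list of (name, kwargs) pairs
)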
Example #5
def main():
    # Get the job id from the scheduler environment, if running under PBS
    job_id = os.getenv("PBS_JOBID")

    # Parse command line arguments and derive the IR weights path
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Set up logging to a file
    logpath = os.path.join(os.path.dirname(__file__), ".log")
    log.basicConfig(
        level=log.INFO,
        format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s",
        filename=logpath,
        filemode="w")
    try:
        job_id = os.environ['PBS_JOBID']
        infer_file = os.path.join(args.out_dir,
                                  'i_progress_' + str(job_id) + '.txt')
    except Exception as e:
        log.warning(e)
        log.warning("job_id: {}".format(job_id))

    # Set up additional logging to the console
    console = log.StreamHandler()
    console.setLevel(log.INFO)
    formatter = log.Formatter("[ %(levelname)s ] %(message)s")
    console.setFormatter(formatter)
    log.getLogger("").addHandler(console)

    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    ie = IECore()

    if args.cpu_extension and 'CPU' in args.device:
        log.info("Loading plugins for {} device...".format(args.device))
        ie.add_extension(args.cpu_extension, "CPU")

    # Load network from IR files
    log.info("Reading IR...")
    net = ie.read_network(model=model_xml, weights=model_bin)

    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.warning(
                "The following layers are not supported by the plugin for the specified device {}:\n {}"
                .format(args.device, ', '.join(not_supported_layers)))
            log.warning(
                "Please try to specify the CPU extensions library path using the -l "
                "or --cpu_extension command line argument")
            sys.exit(1)

    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    # Load network to the plugin
    log.info("Loading model to the plugin")
    exec_net = ie.load_network(network=net, device_name=args.device)

    print("Preparing input blobs")

    # We define the batch size as x for easier use throughout
    x = args.batch_size
    net.batch_size = x
    print("Batch size is {}".format(x))

    correct = 0
    wrong = 0
    total_inference = 0
    top3 = 0
    j = 0

    def run_it(start):
        # Set up an array to hold one batch of input images
        pics = np.ndarray(shape=(x, 1, 28, 28))

        # Fill the input array for this batch
        stop = start + x  # exclusive
        i = 0

        for item in test_images[start:stop]:
            pics[i] = item.transpose(2, 0, 1)
            i += 1

        # Start inference and time it
        infer_time = []

        t0 = time()
        res = exec_net.infer(inputs={input_blob: pics})
        infer_time.append((time() - t0) * 1000)

        # Processing output blob
        res = res[out_blob]

        nonlocal correct
        nonlocal wrong
        nonlocal j

        for i, probs in enumerate(res):
            probs = np.squeeze(probs)
            top_ind = np.argsort(probs)[-args.number_top:][::-1]

            det_label = top_ind[0]

            if det_label == test_labels[j]:
                correct = correct + 1
            else:
                wrong = wrong + 1

            j = j + 1

        nonlocal total_inference
        total_inference += np.sum(np.asarray(infer_time))

    # Iterate through the whole dataset
    num_batches = test_images.shape[0] // x

    stopper = 0
    print("Running inference: Batch 1")

    # Run inference on all the batches
    for i in range(num_batches):
        if (i + 1) % 100 == 0:
            print("Running inference: Batch " + str(i + 1))
        run_it(stopper)
        stopper += x

    # Print results
    print("Correct " + str(correct))
    print("Wrong " + str(wrong))
    print("Accuracy: " + str(correct / (correct + wrong)))

    print("Average running time of one batch: {} ms".format(total_inference /
                                                            num_batches))
    print("Total running time of inference: {} ms".format(total_inference))
    print("Throughput: {} FPS".format(
        (1000 * args.number_iter * x * num_batches) / total_inference))

    import platform
    print("Processor: " + platform.processor())
    print("\n")

    del net

    del exec_net
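main() relies on a build_argparser() helper and on test_images/test_labels being prepared elsewhere in the sample. Below is a sketch of an argument parser consistent with the flags main() actually reads; the flag spellings and defaults are assumptions, not the original sample's definitions.

from argparse import ArgumentParser

def build_argparser():
    parser = ArgumentParser()
    parser.add_argument("-m", "--model", required=True,
                        help="Path to the .xml file of the IR model")
    parser.add_argument("-d", "--device", default="CPU",
                        help="Target device, e.g. CPU, GPU or MYRIAD")
    parser.add_argument("-l", "--cpu_extension", default=None,
                        help="Path to a CPU extensions library, if required")
    parser.add_argument("-o", "--out_dir", default=".",
                        help="Directory for progress output files")
    parser.add_argument("-b", "--batch_size", type=int, default=1)
    parser.add_argument("-nt", "--number_top", type=int, default=3)
    parser.add_argument("-ni", "--number_iter", type=int, default=1)
    return parser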
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_model")
    parser.add_argument("--framework")
    args = parser.parse_args()
    path_to_model = args.input_model

    # This variable is only needed by the GenerateMappingFile transformation
    # to produce a correct mapping
    extract_names = args.framework in ['tf', 'mxnet', 'kaldi']

    try:
        from openvino.inference_engine import IECore, read_network  # pylint: disable=import-error
        from openvino.offline_transformations import ApplyMOCTransformations, GenerateMappingFile, CheckAPI  # pylint: disable=import-error
    except Exception as e:
        print("[ WARNING ] {}".format(e))
        exit(1)

    CheckAPI()

    net = read_network(path_to_model + "_tmp.xml", path_to_model + "_tmp.bin")
    net.serialize(path_to_model + ".xml", path_to_model + ".bin")
    path_to_mapping = path_to_model + ".mapping"
    GenerateMappingFile(net, path_to_mapping.encode('utf-8'), extract_names)
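This script reads <input_model>_tmp.xml/_tmp.bin, re-serializes them as <input_model>.xml/.bin, and emits <input_model>.mapping. Assuming it were saved as serialize_ir.py (a hypothetical name), it would be run as: python serialize_ir.py --input_model /path/to/model --framework tf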
Example #7
def main():
    # Get the job id from the scheduler environment, if running under PBS
    job_id = os.getenv("PBS_JOBID")

    # Parse command line arguments and derive the IR weights path
    args = build_argparser().parse_args()
    model_xml = args.model
    model_bin = os.path.splitext(model_xml)[0] + ".bin"

    # Set up logging to a file
    logpath = os.path.join(os.path.dirname(__file__), ".log")
    log.basicConfig(
        level=log.INFO,
        format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s",
        filename=logpath,
        filemode="w")
    try:
        job_id = os.environ['PBS_JOBID']
        infer_file = os.path.join(args.out_dir,
                                  'i_progress_' + str(job_id) + '.txt')
    except Exception as e:
        log.warning(e)
        log.warning("job_id: {}".format(job_id))

    # Set up additional logging to the console
    console = log.StreamHandler()
    console.setLevel(log.INFO)
    formatter = log.Formatter("[ %(levelname)s ] %(message)s")
    console.setFormatter(formatter)
    log.getLogger("").addHandler(console)

    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    ie = IECore()

    if args.cpu_extension and 'CPU' in args.device:
        log.info("Loading plugins for {} device...".format(args.device))
        ie.add_extension(args.cpu_extension, "CPU")

    # Read IR
    log.info("Reading IR...")
    net = ie.read_network(model=model_xml, weights=model_bin)

    if "CPU" in args.device:
        supported_layers = ie.query_network(net, "CPU")
        not_supported_layers = [
            l for l in net.layers.keys() if l not in supported_layers
        ]
        if len(not_supported_layers) != 0:
            log.warning(
                "The following layers are not supported by the plugin for the specified device {}:\n {}"
                .format(args.device, ', '.join(not_supported_layers)))
            log.warning(
                "Please try to specify the CPU extensions library path using the -l "
                "or --cpu_extension command line argument")
            sys.exit(1)

    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))

    # Load network to the plugin
    log.info("Loading model to the plugin")
    exec_net = ie.load_network(network=net, device_name=args.device)

    #CLASSES = 11
    class_names = [
        "8PSK", "AM-DSB", "AM-SSB", "BPSK", "CPFSK", "GFSK", "PAM4", "QAM16",
        "QAM64", "QPSK", "WBFM"
    ]

    print("Preparing input blobs")

    # We define the batch size as x for easier use throughout
    # Note that using a different batch size requires creating a different IR
    # (see the Jupyter Notebook for more information on this)
    x = 110
    print("Batch size is {}".format(x))

    correct = 0
    wrong = 0
    total_inference = 0
    top_n = 0
    j = 0

    def run_it(start, n):
        # Set up an array to hold one batch of input samples
        modulations = np.ndarray(shape=(x, 128, 1, 2))

        # Fill the input array for this batch
        stop = start + x
        i = 0

        for item in X[start:stop]:
            modulations[i] = item.reshape([1, 2, 128]).transpose(2, 0, 1)
            i += 1

        # Start inference and time it
        infer_time = []

        t0 = time()
        res = exec_net.infer(inputs={input_blob: modulations})
        infer_time.append((time() - t0) * 1000)

        # Processing output blob
        res = res[out_blob]

        # Check results for accuracy
        nonlocal correct
        nonlocal wrong
        nonlocal j

        # Count a prediction as a top-n hit if the true label appears among the
        # top `num` predicted classes; e.g. for num = 3, top_n accumulates the
        # number of times the correct label was in the top 3 probabilities
        def top_n_accuracy(num):
            nonlocal top_n

            for i in range(num):
                det_label = class_names[top_ind[i]]

                if det_label == labels[j][0]:
                    top_n += 1
                    return

        # Calculate the accuracy for top-1 predictions
        for i, probs in enumerate(res):
            probs = np.squeeze(probs)
            top_ind = np.argsort(probs)[-args.number_top:][::-1]

            det_label = class_names[top_ind[0]]

            if det_label == labels[j][0]:
                correct = correct + 1
            else:
                wrong = wrong + 1

            # Default to calculating top-3 accuracy
            top_n_accuracy(3)

            j = j + 1

        nonlocal total_inference
        total_inference += np.sum(np.asarray(infer_time))

    # Iterate through the whole dataset
    num_batches = X.shape[0] // x

    k = 0
    n = 3
    print("Running inference: Batch 1")

    # Run inference on the whole dataset
    for i in range(num_batches):
        if (i + 1) % 100 == 0:
            print("Running inference: Batch " + str(i + 1))
        run_it(k, n)
        k += x

    # Print results
    print("Correct " + str(correct))
    print("Wrong " + str(wrong))
    print("Accuracy: " + str(correct / (correct + wrong)))
    print("Top " + str(n) + " Correct: " + str(top_n))
    print("Top " + str(n) + " Accuracy: " + str(top_n / (correct + wrong)))

    print("Average running time of one batch: {} ms".format(total_inference /
                                                            num_batches))
    print("Total running time of inference: {} ms".format(total_inference))
    print("Throughput: {} FPS".format(
        (1000 * args.number_iter * x * num_batches) / total_inference))

    import platform
    print("Processor: " + platform.processor())
    print("\n")

    # Cleanup
    del net
    del exec_net
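Example #7 tallies top-1 and top-3 hits inside run_it with nonlocal counters. The same check can be written as a small self-contained helper; this is a sketch for clarity and is not part of the original sample.

import numpy as np

def top_n_hit(probs, true_label, class_names, n=3):
    # True if the correct label is among the n most probable classes
    top_ind = np.argsort(np.squeeze(probs))[-n:][::-1]
    return any(class_names[idx] == true_label for idx in top_ind)

# Hypothetical usage: count top-3 hits over a batch of results
# top3 = sum(top_n_hit(p, t, class_names) for p, t in zip(res, true_labels))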