Example #1
0
def do_inference(onnx_model):
    """Run ONNX Runtime inference over batches of test images, then report
    classification accuracy and average per-image inference latency.

    Args:
        onnx_model: path to (or serialized bytes of) the ONNX model to load
            into an onnxruntime InferenceSession.
    """
    import time
    num_tests = 100    # number of batches to run
    num_images = 100   # images per batch
    total_tests = 0    # total images inferred (denominator for avg latency)
    total_elapsed = 0
    sess = rt.InferenceSession(onnx_model)
    input_name = sess.get_inputs()[0].name
    pass_cnt = 0
    utility.print_banner("Starting Inference Engine")
    for test_id in range(0, num_tests):
        images, labels = load_images(num_images)
        inf_start = time.time()
        pred_onx = sess.run(None, {input_name: images.astype(np.float32)})[0]
        inf_end = time.time()
        elapsed_time = inf_end - inf_start
        total_elapsed += elapsed_time
        utility.print_log("batch {} took {:2.4f}ms to complete".format(
            test_id, elapsed_time * 1000))
        for i in range(0, num_images):
            pred = pred_onx[i].argmax()
            res = "FAIL"
            # BUG FIX: count every image, not only the passing ones.
            # Previously this increment lived inside the PASS branch, so
            # avg_inf divided by the pass count (and raised
            # ZeroDivisionError when nothing passed).
            total_tests += 1
            if labels[i] == pred:
                res = "PASS"
                pass_cnt += 1
            utility.print_log("actual={}   pred={}   res={}".format(
                labels[i], pred, res),
                              verbosity="VERB_HIGH")
    # Average inference time per image, in milliseconds.
    avg_inf = 1000.0 * float(total_elapsed) / total_tests
    utility.print_banner(
        "Accuracy = {}% out of {} tests, avg inference={:2.4}ms per image".
        format(100 * pass_cnt / (float(num_images) * num_tests),
               float(num_images) * num_tests, avg_inf))
def run_inference(exec_net, input_blob, output_blob, images, labels):
    """Run OpenVINO inference one image at a time and return the accuracy.

    Args:
        exec_net: OpenVINO executable network used for inference.
        input_blob / output_blob: names of the network's input/output blobs.
        images: array whose first axis enumerates the images to classify.
        labels: expected class index for each image.

    Returns:
        Fraction of images whose argmax prediction matched its label.
    """
    util.print_banner("Running Openvino Inference on {0} images".format(
        images.shape[0]),
                      color='green',
                      verbosity="VERB_LOW")
    total = images.shape[0]
    hits = 0
    for img, label in zip(images, labels):
        outputs = exec_net.infer(inputs={input_blob: img})
        if outputs[output_blob].argmax() == label:
            hits += 1
    # Dump per-layer performance counters before returning.
    print_perf_counts(exec_net)
    return float(hits) / float(total)
def export_to_onnx(config):
    """Export the trained LeNet model referenced by *config* to an ONNX file.

    Args:
        config: dict with at least 'trained_model' (path to saved weights)
            and 'experiment_name' (basename for the output file).

    Returns:
        The generated ONNX filename (experiment_name + ".onnx").
    """
    util.print_banner("exporting {0} to ONNX".format(config['trained_model']),
                      color='green',
                      verbosity="VERB_LOW")
    # Build the model skeleton, then restore the trained weights into it.
    model = models.LENET(config)
    model.load_state_dict(torch.load(config['trained_model']))
    # A single dummy batch fixes the input shape baked into the ONNX graph:
    # batch=1, 1 channel, 28x28 (presumably MNIST-sized input -- confirm).
    dummy_input = torch.randn(1, 1, 28, 28)
    onnx_model_fname = config['experiment_name'] + ".onnx"
    torch.onnx.export(model, dummy_input, onnx_model_fname)
    print('{} exported!'.format(onnx_model_fname))
    return onnx_model_fname
def optimize_model(expr_name, onnx_file, model_xml, weight_bin):
    """Ensure OpenVINO IR files exist for *onnx_file*, invoking the model
    optimizer when either the xml model or the binary weights is missing.

    Args:
        expr_name: experiment name; used as the output IR basename.
        onnx_file: ONNX model to feed to the optimizer.
        model_xml / weight_bin: existing IR paths, or None to (re)generate.

    Returns:
        Tuple (model_xml, weight_bin) of IR description/weight file paths.
    """
    run_opt = False
    # Idiom fix: compare against None with `is`, not `==`.
    if model_xml is None:
        util.print_log("Could not find xml model", id_str="warning")
        run_opt = True
    if weight_bin is None:
        util.print_log("Could not find binary weights", id_str="warning")
        run_opt = True
    if run_opt:
        util.print_banner(
            "Running OpenVino optimizer on {0}".format(onnx_file),
            color='green',
            verbosity="VERB_LOW")
        # openvino_inst_path is a module-level setting pointing at the
        # OpenVINO install root.
        cmd = "python {0}/deployment_tools/model_optimizer/mo.py --input_model={1} --model_name {2}".format(
            openvino_inst_path, onnx_file, expr_name)
        util.run_command(cmd, verbosity="VERB_LOW")
        # mo.py writes <expr_name>.xml / <expr_name>.bin in the cwd.
        model_xml, weight_bin = expr_name + ".xml", expr_name + ".bin"
    return model_xml, weight_bin
def export_to_onnx(args):
    """Export the checkpointed model named by args['arch'] to an ONNX file.

    Args:
        args: dict with 'arch' (model name in the models registry) and
            'chkpoint' (path to a torch checkpoint with a 'state_dict' key).

    Returns:
        The generated ONNX filename (arch + ".onnx").
    """
    util.print_banner("exporting {0} to ONNX".format(args['arch']), color='green', verbosity="VERB_LOW")
    # Instantiate the architecture by name from the models registry.
    model = models.__dict__[args['arch']](num_classes=10)
    # Restore trained weights. NOTE(review): this calls a module-level
    # load_state_dict helper (not model.load_state_dict) that apparently
    # returns the populated model -- confirm against its definition.
    checkpoint = torch.load(args['chkpoint'])
    model = load_state_dict(model, checkpoint['state_dict'])
    # A dummy batch fixes the exported graph's input shape:
    # batch=1, 3 channels, 32x32 (presumably CIFAR-sized -- confirm).
    input_placeholder = torch.randn(1, 3, 32, 32)
    onnx_model_fname = args['arch'] + ".onnx"
    torch.onnx.export(model, input_placeholder, onnx_model_fname)
    print('{} exported!'.format(onnx_model_fname))
    return onnx_model_fname
def optimize_model(expr_name, onnx_file, model_xml, weight_bin, opt_ops=""):
    """Ensure OpenVINO IR files exist, running mo_onnx.py when either the
    xml model or the binary weights is missing.

    Args:
        expr_name: experiment name; used as the output IR basename.
        onnx_file: ONNX model to feed to the optimizer.
        model_xml / weight_bin: existing IR paths, or None to (re)generate.
        opt_ops: iterable of extra optimizer switch names; each element e
            is appended to the command line as " --e". Default "" adds none.

    Returns:
        Tuple (model_xml, weight_bin) of IR description/weight file paths.
    """
    run_opt = False
    options = ""
    # Idiom fix: compare against None with `is`, not `==`.
    if model_xml is None:
        util.print_log("No xml model was provided", id_str="warning")
        run_opt = True
    if weight_bin is None:
        util.print_log("No binary weight file was provided", id_str="warning")
        run_opt = True
    if run_opt:
        util.print_banner("Running OpenVino optimizer on {0}".format(onnx_file), color='green', verbosity="VERB_LOW")
        options += "--input_model={0} ".format(onnx_file)
        options += "--model_name {0} ".format(expr_name)
        # Simplified: the original special-cased len(opt_ops) == 1, but
        # join() already produces the identical string for any length,
        # including the empty default.
        options += "".join(" --" + e for e in opt_ops)
        cmd = "python {0}/deployment_tools/model_optimizer/mo_onnx.py {1}".format(openvino_inst_path, options)
        util.run_command(cmd, verbosity="VERB_LOW")
        model_xml, weight_bin = expr_name + ".xml", expr_name + ".bin"
    return model_xml, weight_bin
Example #7
0
 coverage = args['coverage']
 debug = args['debug']
 waveform = args['waveform']
 clean = args['clean']
 # import ipdb as pdb; pdb.set_trace()
 silence = args['silence']
 verbosity = args['verbosity']
 if verbosity is None:
     verbosity = 'VERB_LOW'
 if util.get_platform(verbosity=verbosity) != "linux":
     util.print_log("This script works only on a Linux platform",
                    "ERROR",
                    verbosity="VERB_LOW")
     sys.exit()
 if clean:
     util.print_banner("Cleaning project", verbosity=verbosity)
     util.clean_proj(files_to_clean)
 if not os.path.exists(result_dir):
     util.print_log("Creating a result directory in {0}".format(result_dir),
                    "INFO",
                    verbosity="VERB_LOW")
     os.makedirs(result_dir)
 if simulator == None:
     util.print_log("You need to provide Simulator name",
                    "ERROR",
                    verbosity="VERB_LOW")
     sys.exit()
 if simulator.lower() == "xilinx":
     # For Xilinx tools we need to specify top level for creating snapshots which is needed
     # by simulator and synthesis tools
     if top_level == None:
Example #8
0
    waveform = args['waveform']
    clean = args['clean']
    silence = args['silence']
    verbosity = args['verbosity']
    compileonly = args['compileonly']
    elabonly = args['elabonly']

    if verbosity is None:
        verbosity = 'VERB_LOW'
    if util.get_platform(verbosity=verbosity) != "linux":
        util.print_log("This script works only on a Linux platform",
                       "ERROR",
                       verbosity="VERB_LOW")
        sys.exit()
    if clean:
        util.print_banner("Cleaning project", verbosity=verbosity)
        util.clean_proj(files_to_clean)
    if not os.path.exists(result_dir):
        util.print_log("Creating a result directory in {0}".format(result_dir),
                       "INFO",
                       verbosity="VERB_LOW")
        os.makedirs(result_dir)
    if simulator == None:
        util.print_log("You need to provide Simulator name",
                       "ERROR",
                       verbosity="VERB_LOW")
        sys.exit()

    # Load Verilog macros file, if specified
    vlogmacros = ""
    if vlogmacros_file is not None:
            window[w_cnt, :, :] = image[x:x + w_width, y:y + w_height]
            w_cnt += 1
    return window


if __name__ == '__main__':
    args = parse_args()
    verbosity = args['verbosity']
    verbosity = verbosity if verbosity != None else "VERB_HIGH"
    input_img_width = int(args['input_img_width'])
    input_img_height = int(args['input_img_height'])
    kernel_size = int(args['kernel_size'])
    input_img = open(args['input_img'], "r").read()
    output_window = open(args['output_window'], "r").read()

    util.print_banner("Testing Window Slider Output ", verbosity='VERB_LOW')
    util.print_log("input image: {0} ".format(args['input_img']))
    util.print_log("simulation output: {0} ".format(args['output_window']))
    input_img = input_img.replace("\n", ",").split(",")
    input_img = input_img[:-1]
    input_img = [int(x) for x in input_img]
    input_img = np.asarray(input_img)
    input_img = input_img.reshape((input_img_height, input_img_width))
    sw_output = gen_window(image=input_img,
                           w_height=kernel_size,
                           w_width=kernel_size,
                           stride=1)
    output_window = output_window.replace("1'b", "").replace("'{", "").replace(
        "}", "").split("\n")

    total_num_windows = ((input_img_height - kernel_size) + 1) * (
Example #10
0
            ip = dep.split("/")[-1].split("\n")[0]
            f.write("../rtl/" + ip + "\n")
        f.close()


#=======================================================================
# Main
#=======================================================================
if __name__ == '__main__':
    # Script entry point: set up paths for creating a new FPGA project.
    cmd_to_run = ""
    __args__ = parse_args()
    project_name = __args__['project_name']
    path_for_proj = __args__['path_proj']
    path_fpga = __args__['path_fpga']
    # NOTE(review): key is spelled 'vebosity' -- presumably a typo shared
    # with parse_args(); confirm against the parser before correcting.
    verbosity = __args__['vebosity']
    util.print_banner("Creating {0} Project".format(project_name),
                      verbosity="VERB_LOW")
    # import ipdb as pdb; pdb.set_trace()
    # Default the project path to the current working directory.
    if path_for_proj == None:
        default_path_for_proj = os.getcwd() + "/"
        util.print_log("Using current path for creating project: {0}".format(
            default_path_for_proj))
        path_for_proj = default_path_for_proj
    # Default the fpga directory to the module-level default path.
    if path_fpga == None:
        util.print_log("Using default path to fpga directory {0}".format(
            default_path_to_fpga_dir))
        path_fpga = default_path_to_fpga_dir
# Check if project has already been created
    if os.path.isdir("{0}{1}".format(path_for_proj, project_name)):
        util.print_log("Project path {0}{1} already exist!".format(
            path_for_proj, project_name),
                       id_str="ERROR")
Example #11
0
    util.print_log("saving outputs to {0}".format(output_filename))


if __name__ == '__main__':
    # Script entry point: drive the HW simulator model, either re-running
    # from saved .npy parameters or generating fresh random parameters.
    args = parse_args()
    output_filename = args['output_filename']
    operation_type = args['operation_type']
    verbosity = args['verbosity']
    # Fill in defaults for any option the user omitted.
    # Idiom fix: compare against None with `is`, not `==`.
    if output_filename is None:
        output_filename = "output.txt"
    if operation_type is None:
        operation_type = "re_run"
    if verbosity is None:
        verbosity = "VERB_HIGH"

    util.print_banner("Running HW simulator", verbosity="VERB_LOW")
    if operation_type.lower() == "re_run":
        # Replay: load previously-saved image windows and layer weights.
        util.print_log("re-running the model...", verbosity="VERB_LOW")
        windows, W1, W2, W3, b1, b2, b3 = hw.load_params(
            "image.npy", "w1.npy", "w2.npy", "w3.npy", "b1.npy", "b2.npy",
            "b3.npy", w_width, w_height, stride)
    elif operation_type.lower() == "normal":
        # Fresh run: generate random parameters sized by module globals.
        util.print_log("running in normal model...", verbosity="VERB_LOW")
        windows, W1, W2, W3, b1, b2, b3 = hw.gen_random_params(
            input_file_name, image_h, image_w, w_width, w_height, stride,
            l1_input, l2_input, l3_input, output_size)
    # NOTE(review): any other operation_type leaves windows/W*/b* unbound
    # and the calls below raise NameError -- consider an explicit error.
    hw.save_model(W1, W2, W3)
    hw.gen_sv_weight_file("weight.svh", W1, W2, W3, b1, b2, b3)
    out = run_model(windows, W1, W2, W3, b1, b2, b3, verbosity)
    save_output(out, output_filename)