Example #1
def run(args):
    # Export if we know how, otherwise return
    if args.file:
        args.copy = False
        args.build = False
        export.run(args)
    else:
        if not os.path.exists(os.path.join(args.directory, args.project)):
            logging.debug("The project: %s does not exist." %
                          os.path.join(args.directory, args.project))
            return

    if args.file:
        # known project from records
        workspace = Workspace(args.file, os.getcwd())
        if args.project:
            workspace.build_project(args.project, args.tool)
        else:
            workspace.build_projects(args.tool)
    else:
        # project not known by pgen
        project_settings = ProjectSettings()
        project_files = [os.path.join(args.directory, args.project)]
        builder = ToolsSupported().get_value(args.tool, 'builder')
        build(builder, args.project, project_files, args.tool,
              project_settings)
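
A minimal sketch of how run(args) above might be driven from an argparse front end; the option names below simply mirror the attributes the function reads (file, project, directory, tool, copy, build) and are hypothetical, not the actual pgen CLI.

# Hypothetical CLI wiring for run(args) above; flag names are assumptions
# based on the attributes the function touches, not the real pgen parser.
import argparse

parser = argparse.ArgumentParser(description='Build a generated project')
parser.add_argument('-f', '--file', help='workspace/records file, if the project is known')
parser.add_argument('-p', '--project', help='project name')
parser.add_argument('-d', '--directory', default='.', help='directory containing the project')
parser.add_argument('-t', '--tool', help='build tool backend')
parser.add_argument('--copy', action='store_true')
parser.add_argument('--build', action='store_true')

args = parser.parse_args()
run(args)  # dispatches to Workspace.build_project(s) or a direct tool build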
Example #2
def test(
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=640,  # inference size (pixels)
        batch_size=1,  # batch size
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        half=False,  # use FP16 half-precision inference
        test=False,  # test exports only
        pt_only=False,  # test PyTorch only
):
    y, t = [], time.time()
    device = select_device(device)
    for i, (name, f, suffix, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, gpu-capable)
        try:
            w = weights if f == '-' else export.run(
                weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # weights
            assert suffix in str(w), 'export failed'
            y.append([name, True])
        except Exception:
            y.append([name, False])  # export failed

    # Print results
    LOGGER.info('\n')
    parse_opt()
    notebook_init()  # print system info
    py = pd.DataFrame(y, columns=['Format', 'Export'])
    LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)')
    LOGGER.info(str(py))
    return py
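
A short usage sketch for the export self-test above (assuming the same module context, i.e. ROOT, export, select_device and LOGGER are importable): the returned DataFrame can be queried for formats whose export failed.

# Sketch: run the export check on CPU and list the formats that failed.
# Column names ('Format', 'Export') match the DataFrame built in test() above.
results = test(weights=ROOT / 'yolov5s.pt', imgsz=640, device='cpu')
failed = results.loc[~results['Export'], 'Format'].tolist()
print('Formats that failed to export:', failed)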
Example #3
def run(
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=640,  # inference size (pixels)
        batch_size=1,  # batch size
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        half=False,  # use FP16 half-precision inference
):
    y, t = [], time.time()
    formats = export.export_formats()
    device = select_device(device)
    for i, (name, f, suffix, gpu) in formats.iterrows():  # index, (name, file, suffix, gpu-capable)
        try:
            if device.type != 'cpu':
                assert gpu, f'{name} inference not supported on GPU'
            if f == '-':
                w = weights  # PyTorch format
            else:
                w = export.run(weights=weights,
                               imgsz=[imgsz],
                               include=[f],
                               device=device,
                               half=half)[-1]  # all others
            assert suffix in str(w), 'export failed'
            result = val.run(data,
                             w,
                             batch_size,
                             imgsz,
                             plots=False,
                             device=device,
                             task='benchmark',
                             half=half)
            metrics = result[0]  # metrics (mp, mr, map50, map, *losses(box, obj, cls))
            speeds = result[2]  # times (preprocess, inference, postprocess)
            y.append([name, metrics[3], speeds[1]])  # mAP, t_inference
        except Exception as e:
            LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}')
            y.append([name, None, None])  # mAP, t_inference

    # Print results
    LOGGER.info('\n')
    parse_opt()
    notebook_init()  # print system info
    py = pd.DataFrame(y, columns=['Format', 'mAP@0.5:0.95', 'Inference time (ms)'])
    LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)')
    LOGGER.info(str(py))
    return py
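
Since this variant returns the benchmark table as a pandas DataFrame, a follow-on sketch (mine, not part of the source) can rank the surviving formats by accuracy; the column names match those built above.

# Sketch: rank benchmarked formats by mAP, dropping rows that errored out (None values).
py = run(weights=ROOT / 'yolov5s.pt', imgsz=640, batch_size=1, device='0', half=True)
ranked = py.dropna().sort_values('mAP@0.5:0.95', ascending=False)
print(ranked.to_string(index=False))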
Example #4
def run(args):
    # Export if we know how, otherwise return
    if args.file:
        args.copy = False
        args.build = False
        export.run(args)
    else:
        if not os.path.exists(os.path.join(args.directory, args.project)):
            logging.debug("The project: %s does not exist." % os.path.join(args.directory, args.project))
            return

    if args.file:
        # known project from records
        workspace = Workspace(args.file, os.getcwd())
        if args.project:
            workspace.build_project(args.project, args.tool)
        else:
            workspace.build_projects(args.tool)
    else:
        # project not known by pgen
        project_settings = ProjectSettings()
        project_files = [os.path.join(args.directory, args.project)]
        builder = ToolsSupported().get_value(args.tool, 'builder')
        build(builder, args.project, project_files, args.tool, project_settings)
Example #5
def run(
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=640,  # inference size (pixels)
        batch_size=1,  # batch size
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
):
    y, t = [], time.time()
    formats = export.export_formats()
    for i, (name, f, suffix) in formats.iterrows():  # index, (name, file, suffix)
        try:
            w = weights if f == '-' else export.run(
                weights=weights, imgsz=[imgsz], include=[f], device='cpu')[-1]
            assert suffix in str(w), 'export failed'
            result = val.run(data,
                             w,
                             batch_size,
                             imgsz=imgsz,
                             plots=False,
                             device='cpu',
                             task='benchmark')
            metrics = result[0]  # metrics (mp, mr, map50, map, *losses(box, obj, cls))
            speeds = result[2]  # times (preprocess, inference, postprocess)
            y.append([name, metrics[3], speeds[1]])  # mAP, t_inference
        except Exception as e:
            LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}')
            y.append([name, None, None])  # mAP, t_inference

    # Print results
    LOGGER.info('\n')
    parse_opt()
    notebook_init()  # print system info
    py = pd.DataFrame(y, columns=['Format', 'mAP@0.5:0.95', 'Inference time (ms)'])
    LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)')
    LOGGER.info(str(py))
    return py
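
For a CPU-only run like this one, a small follow-up sketch (assuming row 0 is the PyTorch '-' format, which export.export_formats() lists first) expresses each format's inference time as a speedup over the PyTorch baseline.

# Sketch: compute per-format speedup relative to the PyTorch baseline (row 0).
py = run(weights=ROOT / 'yolov5s.pt', imgsz=640, batch_size=1)
ok = py.dropna().reset_index(drop=True)       # keep only formats that benchmarked
times = ok['Inference time (ms)'].astype(float)
ok['Speedup vs PyTorch'] = float(times[0]) / times
print(ok.to_string(index=False))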
Example #6
def run(
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=640,  # inference size (pixels)
        batch_size=1,  # batch size
        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        half=False,  # use FP16 half-precision inference
        test=False,  # test exports only
        pt_only=False,  # test PyTorch only
):
    y, t = [], time.time()
    device = select_device(device)
    for i, (name, f, suffix, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, gpu-capable)
        try:
            assert i != 9, 'Edge TPU not supported'
            assert i != 10, 'TF.js not supported'
            if device.type != 'cpu':
                assert gpu, f'{name} inference not supported on GPU'

            # Export
            if f == '-':
                w = weights  # PyTorch format
            else:
                w = export.run(weights=weights,
                               imgsz=[imgsz],
                               include=[f],
                               device=device,
                               half=half)[-1]  # all others
            assert suffix in str(w), 'export failed'

            # Validate
            result = val.run(data,
                             w,
                             batch_size,
                             imgsz,
                             plots=False,
                             device=device,
                             task='benchmark',
                             half=half)
            metrics = result[0]  # metrics (mp, mr, map50, map, *losses(box, obj, cls))
            speeds = result[2]  # times (preprocess, inference, postprocess)
            y.append([
                name,
                round(file_size(w), 1),
                round(metrics[3], 4),
                round(speeds[1], 2)
            ])  # MB, mAP, t_inference
        except Exception as e:
            LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}')
            y.append([name, None, None, None])  # MB, mAP, t_inference
        if pt_only and i == 0:
            break  # break after PyTorch

    # Print results
    LOGGER.info('\n')
    parse_opt()
    notebook_init()  # print system info
    c = ['Format', 'Size (MB)', 'mAP@0.5:0.95', 'Inference time (ms)'] if map else ['Format', 'Export', '', '']
    py = pd.DataFrame(y, columns=c)
    LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)')
    LOGGER.info(str(py if map else py.iloc[:, :2]))
    return py
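
Finally, a hedged usage sketch for this fuller variant: a quick PyTorch-only sanity pass via pt_only, followed by a full FP16 GPU sweep. Both calls assume the same module context (ROOT, export, val, select_device) as the example itself.

# Sketch: PyTorch-only sanity check, then a full GPU benchmark in half precision.
quick = run(weights=ROOT / 'yolov5s.pt', imgsz=640, pt_only=True)  # stops after the first row
full = run(weights=ROOT / 'yolov5s.pt', imgsz=640, device='0', half=True)
print(full.to_string(index=False))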