Code example #1
import os

import torch
from loguru import logger
from torch import nn

from yolox.exp import get_exp
from yolox.models.network_blocks import SiLU
from yolox.utils import replace_module

# make_parser() is defined earlier in the original script.


def main():
    args = make_parser().parse_args()
    logger.info("args value: {}".format(args))
    exp = get_exp(args.exp_file, args.name)
    exp.merge(args.opts)

    if not args.experiment_name:
        args.experiment_name = exp.exp_name

    model = exp.get_model()
    if args.ckpt is None:
        file_name = os.path.join(exp.output_dir, args.experiment_name)
        ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar")
    else:
        ckpt_file = args.ckpt

    # load the model state dict
    ckpt = torch.load(ckpt_file, map_location="cpu")

    model.eval()
    if "model" in ckpt:
        ckpt = ckpt["model"]
    model.load_state_dict(ckpt)
    model = replace_module(model, nn.SiLU, SiLU)
    model.head.decode_in_inference = False

    logger.info("loading checkpoint done.")
    dummy_input = torch.randn(1, 3, exp.test_size[0], exp.test_size[1])
    torch.onnx._export(
        model,
        dummy_input,
        args.output_name,
        input_names=[args.input],
        output_names=[args.output],
        opset_version=args.opset,
    )
    logger.info("generated onnx model named {}".format(args.output_name))

    if not args.no_onnxsim:
        import onnx

        from onnxsim import simplify

        # use onnx-simplifier to remove redundant ops from the model
        onnx_model = onnx.load(args.output_name)
        model_simp, check = simplify(onnx_model)
        assert check, "Simplified ONNX model could not be validated"
        onnx.save(model_simp, args.output_name)
        logger.info("generated simplified onnx model named {}".format(
            args.output_name))
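To sanity-check the exported file, it can be run once with onnxruntime on a random input. This is a minimal sketch, not part of the script above; it assumes the default YOLOX tensor name "images", an output file named "yolox.onnx", and a 640x640 test size, which should be replaced by the actual args.input, args.output_name, and exp.test_size values.

import numpy as np
import onnxruntime as ort

# assumptions: output file "yolox.onnx", input name "images", test size 640x640
session = ort.InferenceSession("yolox.onnx", providers=["CPUExecutionProvider"])
dummy = np.random.randn(1, 3, 640, 640).astype(np.float32)
outputs = session.run(None, {"images": dummy})
print([o.shape for o in outputs])  # raw head output, since decode_in_inference is False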
Code example #2
import os
import shutil

import tensorrt as trt
import torch
from loguru import logger
from torch2trt import torch2trt

from yolox.exp import get_exp

# make_parser() is defined earlier in the original script.


def main():
    args = make_parser().parse_args()
    exp = get_exp(args.exp_file, args.name)
    if not args.experiment_name:
        args.experiment_name = exp.exp_name

    model = exp.get_model()
    file_name = os.path.join(exp.output_dir, args.experiment_name)
    os.makedirs(file_name, exist_ok=True)
    if args.ckpt is None:
        ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar")
    else:
        ckpt_file = args.ckpt

    ckpt = torch.load(ckpt_file, map_location="cpu")
    # load the model state dict

    model.load_state_dict(ckpt["model"])
    logger.info("loaded checkpoint done.")
    model.eval()
    model.cuda()
    model.head.decode_in_inference = False
    x = torch.ones(1, 3, exp.test_size[0], exp.test_size[1]).cuda()
    model_trt = torch2trt(
        model,
        [x],
        fp16_mode=True,
        log_level=trt.Logger.INFO,
        max_workspace_size=(1 << 32),
    )
    torch.save(model_trt.state_dict(), os.path.join(file_name,
                                                    'model_trt.pth'))
    logger.info("Converted TensorRT model done.")
    engine_file = os.path.join(file_name, 'model_trt.engine')
    engine_file_demo = os.path.join('demo', 'TensorRT', 'cpp',
                                    'model_trt.engine')
    with open(engine_file, 'wb') as f:
        f.write(model_trt.engine.serialize())

    shutil.copyfile(engine_file, engine_file_demo)

    logger.info(
        "Converted TensorRT model engine file is saved for C++ inference.")
Code example #3
import os

import torch
from loguru import logger
from torch import nn

from yolox.exp import get_exp
from yolox.models.network_blocks import SiLU
from yolox.utils import replace_module

# make_parser() is defined earlier in the original script.


def main():
    args = make_parser().parse_args()
    logger.info("args value: {}".format(args))
    exp = get_exp(args.exp_file, args.name)
    exp.merge(args.opts)

    if not args.experiment_name:
        args.experiment_name = exp.exp_name

    model = exp.get_model()
    if args.ckpt is None:
        file_name = os.path.join(exp.output_dir, args.experiment_name)
        ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar")
    else:
        ckpt_file = args.ckpt

    ckpt = torch.load(ckpt_file, map_location="cpu")
    # load the model state dict

    model.eval()
    if "model" in ckpt:
        ckpt = ckpt["model"]
    model.load_state_dict(ckpt)
    model = replace_module(model, nn.SiLU, SiLU)
    model.head.decode_in_inference = False

    logger.info("loaded checkpoint done.")
    dummy_input = torch.randn(1, 3, exp.test_size[0], exp.test_size[1])
    torch.onnx._export(
        model,
        dummy_input,
        args.output_name,
        input_names=[args.input],
        output_names=[args.output],
        opset_version=args.opset,
    )
    logger.info("generate onnx named {}".format(args.output_name))

    if not args.no_onnxsim:
        # use onnx-simplifier to remove redundant ops from the model
        os.system("python3 -m onnxsim {} {}".format(args.output_name,
                                                    args.output_name))
        logger.info("generated simplified onnx model named {}".format(args.output_name))
Code example #4
        logger.info("\tFusing model...")
        model = fuse_model(model)

    if args.trt:
        assert (not args.fuse),\
            "TensorRT model is not support model fusing!"
        trt_file = os.path.join(file_name, "model_trt.pth")
        assert os.path.exists(trt_file), (
            "TensorRT model is not found!\n Run python3 tools/trt.py first!"
        )
        model.head.decode_in_inference = False
        decoder = model.head.decode_outputs
        logger.info("Using TensorRT to inference")
    else:
        trt_file = None
        decoder = None

    predictor = Predictor(model, exp, COCO_CLASSES, trt_file, decoder)
    current_time = time.localtime()
    if args.demo == 'image':
        image_demo(predictor, vis_folder, args.path, current_time, args.save_result)
    elif args.demo == 'video' or args.demo == 'webcam':
        imageflow_demo(predictor, vis_folder, current_time, args)


if __name__ == "__main__":
    args = make_parser().parse_args()
    exp = get_exp(args.exp_file, args.name)

    main(exp, args)
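Inside main, once the Predictor has been built, it can also be driven directly on a single image instead of going through image_demo / imageflow_demo. A rough sketch, assuming Predictor exposes inference() and visual() as in the upstream YOLOX tools/demo.py; the image path is only an example:

import cv2

img = cv2.imread("assets/dog.jpg")                    # example image path
outputs, img_info = predictor.inference(img)          # assumed return signature
vis = predictor.visual(outputs[0], img_info, predictor.confthre)
cv2.imwrite("prediction.jpg", vis)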
Code example #5
File: yolox_cam.py  Project: smyrk2031/hello_world
import torch

from yolox.exp import get_exp

# MODEL_FILE is defined earlier in the original script (path to a .pth checkpoint).

print(MODEL_FILE)
print(MODEL_FILE.split("/")[-1].split(".pth")[0])


# Inference parameters
test_size = (224, 224)  # (640, 640)    # (960, 960)
num_classes = 80
confthre = 0.25  # 0.1
nmsthre = 0.45


print(" YOLOモデルの取得")
#model = exp.get_model()
#model = exp_.yolox_base.Exp.get_model()
#exp_my = get_exp(exp_file=None, exp_name=MODEL_FILE.split(".")[0])  #"yolox_x")    # ここ苦戦した
exp_my = get_exp(exp_file=None, exp_name=MODEL_FILE.split("/")[-1].split(".pth")[0])  #"yolox_x")    # ここ苦戦した
model = exp_my.get_model()
# model.cuda()
model.eval()


# get custom trained checkpoint
#ckpt_file = "./YOLOX_outputs/cots_config/best_ckpt.pth"
#ckpt = torch.load(ckpt_file, map_location="cpu")
#model.load_state_dict(ckpt["model"])

print("torch_load実行")
ckpt = torch.load(MODEL_FILE, map_location="cpu")
model.load_state_dict(ckpt["model"], strict=False)

Code example #6
import argparse
import logging
import os
import sys

import torch
from torch import nn

# YoloXWrapper, YoloXWrapper_TRT and get_image_input are defined earlier in the
# original export script.


def main():
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("model", type=str, help="Model to export")
    parser.add_argument("-o",
                        "--output_dir",
                        type=str,
                        default="",
                        help="Output directory")
    parser.add_argument('-v',
                        "--verbose",
                        action='store_true',
                        help="Set logging level to INFO")
    parser.add_argument('--weights',
                        type=str,
                        help="yolo-x weights file (.pth or .pt)")
    parser.add_argument(
        '--backbone_weights',
        type=str,
        help="yolo-x weights file, but will be applied only to backbone")
    parser.add_argument('--yolox_path',
                        type=str,
                        help="Path of yolo-x repository")
    parser.add_argument('--num_classes',
                        type=int,
                        default=80,
                        help="Number of classes of the model")
    parser.add_argument('--gpu', type=int, help="GPU id to run on GPU")
    parser.add_argument('--to_onnx',
                        action="store_true",
                        help="Export model to onnx")
    parser.add_argument(
        '--use_wrapper',
        action="store_true",
        help=
        "In case of onnx export, if this option is present, the model will be wrapped so that its output matches dede expectations"
    )
    parser.add_argument(
        '--top_k',
        type=int,
        default=200,
        help="When exporting to onnx, specify maximum returned prediction count"
    )
    parser.add_argument('--batch_size',
                        type=int,
                        default=1,
                        help="When exporting to onnx, batch size of model")
    parser.add_argument(
        '--img_width',
        type=int,
        default=640,
        help="Width of the image when exporting with fixed image size")
    parser.add_argument(
        '--img_height',
        type=int,
        default=640,
        help="Height of the image when exporting with fixed image size")

    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    device = "cpu"
    if args.gpu is not None:  # allow GPU id 0
        device = "cuda:%d" % args.gpu

    # get yolox model
    sys.path.insert(0, args.yolox_path)
    import yolox
    from yolox.exp import get_exp
    from yolox.utils import get_model_info, postprocess, replace_module

    from yolox.models.network_blocks import SiLU

    exp = get_exp(None, args.model)
    exp.num_classes = args.num_classes
    logging.info("num_classes == %d" % args.num_classes)

    model = exp.get_model()
    model.eval()
    model.head.decode_in_inference = True

    if args.weights:
        logging.info("Load weights from %s" % args.weights)
        try:
            # state_dict
            weights = torch.load(args.weights)["model"]
        except Exception:
            # torchscript
            logging.info("Detected torchscript weights")
            weights = torch.jit.load(args.weights).state_dict()
            weights = {k[6:]: w
                       for k, w in weights.items()}  # skip "model." prefix

        model.load_state_dict(weights, strict=True)

    elif args.backbone_weights:
        logging.info("Load weights from %s" % args.backbone_weights)

        weights = torch.load(args.backbone_weights)["model"]
        weights = {k: w for k, w in weights.items() if "backbone" in k}

        model.load_state_dict(weights, strict=False)

    logging.info("Model Summary: {}".format(
        get_model_info(model, exp.test_size)))

    filename = os.path.join(args.output_dir, args.model)

    if args.to_onnx:
        model = replace_module(model, nn.SiLU, SiLU)

        model = YoloXWrapper_TRT(model,
                                 topk=args.top_k,
                                 raw_output=not args.use_wrapper)
        model.to(device)
        model.eval()

        filename += ".onnx"
        example = get_image_input(args.batch_size, args.img_width,
                                  args.img_height)
        # XXX: dynamic batch size not supported with wrapper
        # XXX: dynamic batch size not yet supported in dede as well
        dynamic_axes = None  # {"input": {0: "batch"}} if not args.use_wrapper else None
        torch.onnx.export(model,
                          example,
                          filename,
                          export_params=True,
                          verbose=args.verbose,
                          opset_version=11,
                          do_constant_folding=True,
                          input_names=["input"],
                          output_names=["detection_out", "keep_count"],
                          dynamic_axes=dynamic_axes)
    else:
        # wrap model
        model = YoloXWrapper(model, args.num_classes, postprocess)
        model.to(device)
        model.eval()

        filename += "_cls" + str(args.num_classes) + ".pt"
        script_module = torch.jit.script(model)
        logging.info("Save jit model at %s" % filename)
        script_module.save(filename)
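The TorchScript file written in the else-branch can later be reloaded without the YOLOX source tree, which is the point of scripting the wrapped model. A minimal sketch; "yolox-s_cls80.pt" is just the name this script would produce for model "yolox-s" with the default --num_classes, and the output format is whatever YoloXWrapper's forward returns:

import torch

model = torch.jit.load("yolox-s_cls80.pt", map_location="cpu")
model.eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 640, 640))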