Example #1
    def before_train(self):
        logger.info("args: {}".format(self.args))
        logger.info("exp value:\n{}".format(self.exp))

        # model related init
        torch.cuda.set_device(self.local_rank)
        model = self.exp.get_model()
        logger.info("Model Summary: {}".format(
            get_model_info(model, self.exp.test_size)))
        model.to(self.device)

        # solver related init
        self.optimizer = self.exp.get_optimizer(self.args.batch_size)

        if self.amp_training:
            # keep the amp-patched optimizer; assigning it to a throwaway
            # local variable would leave self.optimizer unpatched
            model, self.optimizer = amp.initialize(model,
                                                   self.optimizer,
                                                   opt_level="O1")

        # `self.start_epoch` is set inside `resume_train`
        model = self.resume_train(model)

        # data related init
        self.no_aug = self.start_epoch >= self.max_epoch - self.exp.no_aug_epochs
        self.train_loader = self.exp.get_data_loader(
            batch_size=self.args.batch_size,
            is_distributed=self.is_distributed,
            no_aug=self.no_aug,
        )
        logger.info("init prefetcher, this might take one minute or less...")
        self.prefetcher = DataPrefetcher(self.train_loader)
        # max_iter is the number of iterations per epoch
        self.max_iter = len(self.train_loader)

        # linear scaling rule: the scheduler is built from the effective
        # learning rate basic_lr_per_img * batch_size
        self.lr_scheduler = self.exp.get_lr_scheduler(
            self.exp.basic_lr_per_img * self.args.batch_size, self.max_iter)
        if self.args.occupy:
            occupy_mem(self.local_rank)

        if self.is_distributed:
            model = apex.parallel.DistributedDataParallel(model)
            # from torch.nn.parallel import DistributedDataParallel as DDP
            # model = DDP(model, device_ids=[self.local_rank], broadcast_buffers=False)

        if self.use_model_ema:
            # EMA keeps a shadow copy of the weights; rewind `updates` so the
            # decay ramp stays consistent when resuming mid-training
            self.ema_model = ModelEMA(model, 0.9998)
            self.ema_model.updates = self.max_iter * self.start_epoch

        self.model = model
        self.model.train()

        self.evaluator = self.exp.get_evaluator(
            batch_size=self.args.batch_size,
            is_distributed=self.is_distributed)
        # Tensorboard logger
        if self.rank == 0:
            self.tblogger = SummaryWriter(self.file_name)

        logger.info("Training start...")
        logger.info("\n{}".format(model))
Example #2
def main(exp, args):
    if not args.experiment_name:
        args.experiment_name = exp.exp_name

    # enable the cudnn autotuner for fixed-size inputs; this demo runs in a
    # single process, so the rank is fixed to 0
    cudnn.benchmark = True
    rank = 0

    file_name = os.path.join(exp.output_dir, args.experiment_name)
    os.makedirs(file_name, exist_ok=True)

    # define vis_folder unconditionally: it is passed to the demo calls below
    # even when results are not saved
    vis_folder = None
    if args.save_result:
        vis_folder = os.path.join(file_name, 'vis_res')
        os.makedirs(vis_folder, exist_ok=True)

    setup_logger(
        file_name, distributed_rank=rank, filename="demo_log.txt", mode="a"
    )
    logger.info("Args: {}".format(args))

    if args.conf is not None:
        exp.test_conf = args.conf
    if args.nms is not None:
        exp.nmsthre = args.nms
    if args.tsize is not None:
        exp.test_size = (args.tsize, args.tsize)

    model = exp.get_model()
    logger.info("Model Summary: {}".format(get_model_info(model, exp.test_size)))

    torch.cuda.set_device(rank)
    model.cuda(rank)
    model.eval()

    if not args.trt:
        if args.ckpt is None:
            ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar")
        else:
            ckpt_file = args.ckpt
        logger.info("loading checkpoint")
        loc = "cuda:{}".format(rank)
        ckpt = torch.load(ckpt_file, map_location=loc)
        # load the model state dict
        model.load_state_dict(ckpt["model"])
        logger.info("loaded checkpoint done.")

    if args.fuse:
        logger.info("\tFusing model...")
        model = fuse_model(model)

    if args.trt:
        assert (not args.fuse), \
            "TensorRT models do not support model fusing!"
        trt_file = os.path.join(file_name, "model_trt.pth")
        assert os.path.exists(trt_file), (
            "TensorRT model not found!\n Run python3 tools/trt.py first!"
        )
        model.head.decode_in_inference = False
        decoder = model.head.decode_outputs
        logger.info("Using TensorRT to inference")
    else:
        trt_file = None
        decoder = None

    predictor = Predictor(model, exp, COCO_CLASSES, trt_file, decoder)
    current_time = time.localtime()
    if args.demo == 'image':
        image_demo(predictor, vis_folder, args.path, current_time, args.save_result)
    elif args.demo == 'video' or args.demo == 'webcam':
        imageflow_demo(predictor, vis_folder, current_time, args)
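For reference, here is a hedged sketch of the argument parser implied by the args fields this main() reads; the destinations below all appear in the code above, but the flag spellings, defaults, and help strings are assumptions.

import argparse

def make_parser():
    parser = argparse.ArgumentParser("YOLOX demo")
    parser.add_argument("demo", type=str, help="'image', 'video' or 'webcam'")
    parser.add_argument("--experiment_name", type=str, default=None)
    parser.add_argument("--path", type=str, help="input image or video path")
    parser.add_argument("--ckpt", type=str, default=None, help="checkpoint file")
    parser.add_argument("--conf", type=float, default=None, help="confidence threshold")
    parser.add_argument("--nms", type=float, default=None, help="NMS threshold")
    parser.add_argument("--tsize", type=int, default=None, help="test image size")
    parser.add_argument("--save_result", action="store_true")
    parser.add_argument("--fuse", action="store_true", help="fuse conv and bn layers")
    parser.add_argument("--trt", action="store_true", help="use a prebuilt TensorRT engine")
    return parser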
Example #3
def main(exp, args):
    if not args.experiment_name:
        args.experiment_name = exp.exp_name

    output_dir = osp.join(exp.output_dir, args.experiment_name)
    os.makedirs(output_dir, exist_ok=True)

    if args.save_result:
        vis_folder = osp.join(output_dir, "track_vis")
        os.makedirs(vis_folder, exist_ok=True)

    if args.trt:
        args.device = "gpu"  # TensorRT engines only run on the GPU
    args.device = torch.device("cuda" if args.device == "gpu" else "cpu")

    logger.info("Args: {}".format(args))

    if args.conf is not None:
        exp.test_conf = args.conf
    if args.nms is not None:
        exp.nmsthre = args.nms
    if args.tsize is not None:
        exp.test_size = (args.tsize, args.tsize)

    model = exp.get_model().to(args.device)
    logger.info("Model Summary: {}".format(get_model_info(
        model, exp.test_size)))
    model.eval()

    if not args.trt:
        if args.ckpt is None:
            ckpt_file = osp.join(output_dir, "best_ckpt.pth.tar")
        else:
            ckpt_file = args.ckpt
        logger.info("loading checkpoint")
        ckpt = torch.load(ckpt_file, map_location="cpu")
        # load the model state dict
        model.load_state_dict(ckpt["model"])
        logger.info("loaded checkpoint done.")

    if args.fuse:
        logger.info("\tFusing model...")
        model = fuse_model(model)

    if args.fp16:
        model = model.half()  # to FP16

    if args.trt:
        assert not args.fuse, "TensorRT models do not support model fusing!"
        trt_file = osp.join(output_dir, "model_trt.pth")
        assert osp.exists(
            trt_file
        ), "TensorRT model not found!\n Run python3 tools/trt.py first!"
        model.head.decode_in_inference = False
        decoder = model.head.decode_outputs
        logger.info("Using TensorRT to inference")
    else:
        trt_file = None
        decoder = None

    predictor = Predictor(model, exp, trt_file, decoder, args.device,
                          args.fp16)
    current_time = time.localtime()
    if args.demo == "image":
        image_demo(predictor, vis_folder, current_time, args)
    elif args.demo == "video" or args.demo == "webcam":
        imageflow_demo(predictor, vis_folder, current_time, args)
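Note that model.half() above converts only the weights; the Predictor must cast inputs to the same dtype before the forward pass. A minimal sketch of that step inside a hypothetical, reduced Predictor; the real class also handles preprocessing, the TensorRT engine, and the decoder, which are omitted here.

import torch

class Predictor:
    def __init__(self, model, exp, trt_file, decoder, device, fp16):
        # trt_file/decoder handling omitted in this sketch
        self.model, self.device, self.fp16 = model, device, fp16

    def inference(self, img_tensor):
        img_tensor = img_tensor.to(self.device)
        if self.fp16:
            img_tensor = img_tensor.half()  # inputs must match FP16 weights
        with torch.no_grad():
            return self.model(img_tensor)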
Example #4
def main(exp, args, num_gpu):
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn(
            "You have chosen to seed testing. This will turn on the CUDNN "
            "deterministic setting, which can slow down testing considerably!"
        )

    is_distributed = num_gpu > 1

    # enable the cudnn autotuner for fixed-size inputs; note that this
    # weakens the determinism requested by the seeding above
    cudnn.benchmark = True

    rank = args.local_rank
    # rank = get_local_rank()

    file_name = os.path.join(exp.output_dir, args.experiment_name)

    if rank == 0:
        os.makedirs(file_name, exist_ok=True)

    setup_logger(file_name,
                 distributed_rank=rank,
                 filename="val_log.txt",
                 mode="a")
    logger.info("Args: {}".format(args))

    if args.conf is not None:
        exp.test_conf = args.conf
    if args.nms is not None:
        exp.nmsthre = args.nms
    if args.tsize is not None:
        exp.test_size = (args.tsize, args.tsize)

    model = exp.get_model()
    logger.info("Model Summary: {}".format(get_model_info(
        model, exp.test_size)))
    logger.info("Model Structure:\n{}".format(str(model)))

    evaluator = exp.get_evaluator(args.batch_size, is_distributed, args.test)

    torch.cuda.set_device(rank)
    model.cuda(rank)
    model.eval()

    if not args.speed and not args.trt:
        if args.ckpt is None:
            ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar")
        else:
            ckpt_file = args.ckpt
        logger.info("loading checkpoint")
        loc = "cuda:{}".format(rank)
        ckpt = torch.load(ckpt_file, map_location=loc)
        # load the model state dict
        model.load_state_dict(ckpt["model"])
        logger.info("loaded checkpoint done.")

    if is_distributed:
        model = DDP(model, device_ids=[rank])

    if args.fuse:
        logger.info("\tFusing model...")
        model = fuse_model(model)

    if args.trt:
        assert (
            not args.fuse and not is_distributed and args.batch_size == 1
        ), "TensorRT models do not support model fusing or distributed inference!"
        trt_file = os.path.join(file_name, "model_trt.pth")
        assert os.path.exists(
            trt_file), "TensorRT model not found!\n Run tools/trt.py first!"
        model.head.decode_in_inference = False
        decoder = model.head.decode_outputs
    else:
        trt_file = None
        decoder = None

    # start evaluate
    *_, summary = evaluator.evaluate(model, is_distributed, args.fp16,
                                     trt_file, decoder, exp.test_size)
    logger.info("\n" + summary)
Example #5
def main():
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("model", type=str, help="Model to export")
    parser.add_argument("-o",
                        "--output_dir",
                        type=str,
                        default="",
                        help="Output directory")
    parser.add_argument('-v',
                        "--verbose",
                        action='store_true',
                        help="Set logging level to INFO")
    parser.add_argument('--weights',
                        type=str,
                        help="yolo-x weights file (.pth or .pt)")
    parser.add_argument(
        '--backbone_weights',
        type=str,
        help="yolo-x weights file, but will be applied only to backbone")
    parser.add_argument('--yolox_path',
                        type=str,
                        help="Path of yolo-x repository")
    parser.add_argument('--num_classes',
                        type=int,
                        default=80,
                        help="Number of classes of the model")
    parser.add_argument('--gpu', type=int, help="GPU id to run on")
    parser.add_argument('--to_onnx',
                        action="store_true",
                        help="Export model to onnx")
    parser.add_argument(
        '--use_wrapper',
        action="store_true",
        help="In case of onnx export, wrap the model so that its output "
        "matches dede expectations")
    parser.add_argument(
        '--top_k',
        type=int,
        default=200,
        help="When exporting to onnx, maximum number of returned predictions")
    parser.add_argument('--batch_size',
                        type=int,
                        default=1,
                        help="When exporting to onnx, batch size of model")
    parser.add_argument(
        '--img_width',
        type=int,
        default=640,
        help="Width of the image when exporting with fixed image size")
    parser.add_argument(
        '--img_height',
        type=int,
        default=640,
        help="Height of the image when exporting with fixed image size")

    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.INFO)

    device = "cpu"
    if args.gpu:
        device = "cuda:%d" % args.gpu

    # get yolox model
    sys.path.insert(0, args.yolox_path)
    import yolox
    from yolox.exp import get_exp
    from yolox.utils import get_model_info, postprocess, replace_module

    from yolox.models.network_blocks import SiLU

    exp = get_exp(None, args.model)
    exp.num_classes = args.num_classes
    logging.info("num_classes == %d" % args.num_classes)

    model = exp.get_model()
    model.eval()
    model.head.decode_in_inference = True

    if args.weights:
        logging.info("Load weights from %s" % args.weights)
        try:
            # plain checkpoint: a dict with the state_dict under "model"
            weights = torch.load(args.weights)["model"]
        except Exception:
            # torchscript archive: load it and strip the "model." prefix
            logging.info("Detected torchscript weights")
            weights = torch.jit.load(args.weights).state_dict()
            weights = {k[6:]: w for k, w in weights.items()}

        model.load_state_dict(weights, strict=True)

    elif args.backbone_weights:
        logging.info("Load weights from %s" % args.backbone_weights)

        weights = torch.load(args.backbone_weights)["model"]
        weights = {k: w for k, w in weights.items() if "backbone" in k}

        model.load_state_dict(weights, strict=False)

    logging.info("Model Summary: {}".format(
        get_model_info(model, exp.test_size)))

    filename = os.path.join(args.output_dir, args.model)

    if args.to_onnx:
        model = replace_module(model, nn.SiLU, SiLU)

        model = YoloXWrapper_TRT(model,
                                 topk=args.top_k,
                                 raw_output=not args.use_wrapper)
        model.to(device)
        model.eval()

        filename += ".onnx"
        example = get_image_input(args.batch_size, args.img_width,
                                  args.img_height)
        # XXX: dynamic batch size not supported with wrapper
        # XXX: dynamic batch size not yet supported in dede as well
        dynamic_axes = None  # {"input": {0: "batch"}} if not args.use_wrapper else None
        torch.onnx.export(model,
                          example,
                          filename,
                          export_params=True,
                          verbose=args.verbose,
                          opset_version=11,
                          do_constant_folding=True,
                          input_names=["input"],
                          output_names=["detection_out", "keep_count"],
                          dynamic_axes=dynamic_axes)
    else:
        # wrap model
        model = YoloXWrapper(model, args.num_classes, postprocess)
        model.to(device)
        model.eval()

        filename += "_cls" + str(args.num_classes) + ".pt"
        script_module = torch.jit.script(model)
        logging.info("Save jit model at %s" % filename)
        script_module.save(filename)
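Hedged usage examples for the script above; the script file name, weight file names, and repository path are illustrative assumptions.

# TorchScript export (default branch):
#   python3 export.py yolox-s --weights yolox_s.pth --yolox_path /path/to/YOLOX
#
# ONNX export with the dede-compatible wrapper and a fixed 640x640 input:
#   python3 export.py yolox-s --weights yolox_s.pth --yolox_path /path/to/YOLOX \
#       --to_onnx --use_wrapper --top_k 200 --img_width 640 --img_height 640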