Example #1
def detect_specific_frame():
    args = parse_config()

    output_dir = os.path.join(cfg.CODE_DIR, "output", cfg.TAG, args.extra_tag)
    os.makedirs(output_dir, exist_ok=True)
    eval_output_dir = os.path.join(output_dir, "eval")
    os.makedirs(eval_output_dir, exist_ok=True)
    log_file = os.path.join(
        eval_output_dir,
        "log_eval_%s.txt" % datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    logger = common_utils.create_logger(log_file)

    test_set, test_dataloader, sampler = build_data_loader(
        dist=False,
        data_path=cfg.DATA_DIR,
        batch_size=args.batch_size,
        num_workers=cfg.DATA_CONFIG.NUM_WORKERS,
        logger=logger,
        split=args.split,
        training=False)
    model = build_netword(num_class=len(cfg.CLASS_NAMES),
                          dataset=test_set,
                          logger=logger)
    model.load_params_from_file(args.ckpt, logger=logger)
    # for fix,id in enumerate(test_set.sample_idx_list):
    #     if id == "000137":
    #         break
    # step the dataloader to the batch of interest (index 58 here); the loop
    # variable `data` still holds that batch after the break
    for i, data in enumerate(test_dataloader):
        if i == 58:
            break
    input = example_convert_to_torch(data)
    model.cuda()
    model.eval()
    output, _ = model(input)
    print(output)
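The helper example_convert_to_torch is not shown on this page. A minimal sketch of such a helper, assuming it only has to move the numpy arrays of a batch dict onto the GPU as torch tensors; the key handling and dtypes in the original code base may differ:

import numpy as np
import torch

def example_convert_to_torch_sketch(example, dtype=torch.float32, device="cuda"):
    # Convert every numpy array in a batch dict into a torch tensor on `device`.
    # Floating-point arrays are cast to `dtype`; integer arrays (e.g. voxel
    # coordinates) keep their integer dtype; non-array metadata is left as-is.
    batch = {}
    for key, value in example.items():
        if isinstance(value, np.ndarray):
            tensor = torch.from_numpy(value)
            if tensor.is_floating_point():
                tensor = tensor.to(dtype)
            batch[key] = tensor.to(device)
        else:
            batch[key] = value
    return batch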
Example #2
def origin_model():
    args = parse_config()
    ckpt_dir = args.ckpt
    log_file = os.path.join(
        args.log_path,
        "log_eval_%s.txt" % datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    logger = common_utils.create_logger(log_file)
    prepocess_model = PrepocessData()
    test_set, test_dataloader, sampler = build_data_loader(
        dist=False,
        data_path=cfg.DATA_DIR,
        batch_size=args.batch_size,
        num_workers=cfg.DATA_CONFIG.NUM_WORKERS,
        logger=None,
        split=args.split,
        training=False)
    model = build_netword(num_class=len(cfg.CLASS_NAMES),
                          dataset=test_set,
                          logger=logger)
    model.load_params_from_file(filename=ckpt_dir, logger=logger)
    with torch.no_grad():
        test_dataloader_iter = iter(test_dataloader)
        model.cuda()
        model.eval()
        data = next(test_dataloader_iter)
        input_dict = example_convert_to_torch(data)
        points = parpare_point_cloud()  # load the raw point cloud
        fov_flag = get_fov_flag(points, prepocess_model.image_shape,
                                prepocess_model.get_calib())  # keep only points inside the camera FOV
        points = points[fov_flag]
        inputs = prepocess_model.points2voxel(points)

        pred_dict, ret_dict = model(input_dict)
        # script_model = torch.jit.trace(model, input_dict)
    print("done")
Example #3
def save_model():
    args = parse_config()
    ckpt_dir = args.ckpt
    log_file = os.path.join(
        args.log_path,
        "log_eval_%s.txt" % datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    logger = common_utils.create_logger(log_file)
    prepocess_model = PrepocessData()
    model = DetNet(4)
    model.load_params_from_file(filename=ckpt_dir, logger=logger)
    points = parpare_point_cloud()
    fov_flag = get_fov_flag(points, prepocess_model.image_shape,
                            prepocess_model.get_calib())
    points = points[fov_flag]
    with torch.set_grad_enabled(False):
        inputs = prepocess_model.points2voxel(points)
        model.cuda()
        model.eval()
        output = model(inputs)
    # torch.save(model,"DetNet.pkl")
    print("done")
Example #4
def main():
    args = parse_config()
    output_dir = os.path.join(cfg.CODE_DIR, "output", cfg.TAG, args.extra_tag)
    os.makedirs(output_dir, exist_ok=True)
    eval_output_dir = os.path.join(output_dir, "eval")

    if not args.eval_all:
        # num_list  = re.findall(r"\d+",args.ckpt_id) if args.ckpt_id is not None else []
        # epoch_id = num_list[-1] if num_list.__len__()>0 else "no number"
        epoch_id = args.ckpt_id
        eval_output_dir = os.path.join(eval_output_dir, "epoch_%s" % epoch_id)
    else:
        eval_output_dir = os.path.join(eval_output_dir, "eval_all_default")

    os.makedirs(eval_output_dir, exist_ok=True)

    log_file = os.path.join(
        eval_output_dir,
        "log_eval_%s.txt" % datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    logger = common_utils.create_logger(log_file)

    # log to file
    logger.info("*****************start logging****************")
    gpu_list = os.environ.get('CUDA_VISIBLE_DEVICES', 'ALL')
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)

    # non-distributed testing
    dist_test = False
    if dist_test:
        total_gpus = dist.get_world_size()
        logger.info("total_batch_size:%d" % (total_gpus * args.batch_size))

    for key, val in vars(args).items():
        logger.info("{:16} {}".format(key, val))

    log_cfg_to_file(cfg, logger=logger)

    ckpt_dir = args.ckpt

    test_set, test_dataloader, sampler = build_data_loader(
        dist=dist_test,
        data_path=cfg.DATA_DIR,
        batch_size=args.batch_size,
        num_workers=cfg.DATA_CONFIG.NUM_WORKERS,
        logger=logger,
        split=args.split,
        training=False,
        args=args)
    model = build_netword(num_class=len(cfg.CLASS_NAMES),
                          dataset=test_set,
                          logger=logger)

    with torch.no_grad():
        if args.eval_all:
            repeat_eval_ckpt(model, test_dataloader, cfg, eval_output_dir,
                             logger, ckpt_dir)
        else:
            eval_single_ckpt(model, test_dataloader, eval_output_dir, logger,
                             ckpt_dir, epoch_id, args)
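parse_config is used by every snippet on this page but not shown. A minimal argparse-based sketch covering the options referenced above; the flag names and defaults are assumptions, not the original definition:

import argparse

def parse_config_sketch():
    parser = argparse.ArgumentParser(description="evaluation settings")
    parser.add_argument("--batch_size", type=int, default=2)
    parser.add_argument("--split", type=str, default="val")
    parser.add_argument("--extra_tag", type=str, default="default")
    parser.add_argument("--ckpt", type=str, default=None, help="checkpoint file or directory")
    parser.add_argument("--ckpt_id", type=str, default=None)
    parser.add_argument("--eval_all", action="store_true")
    parser.add_argument("--log_path", type=str, default="./logs")
    return parser.parse_args()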
Example #5
def main():
    args = parse_config()
    dist_train = False  # non-distributed run by default
    if args.launcher == "pytorch":
        args.batch_size, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.batch_size, args.tcp_port, args.local_rank, backend='nccl')
        dist_train = True

    output_dir = os.path.join(cfg.CODE_DIR, "output", args.TAG, args.split, args.extra_tag)
    os.makedirs(output_dir, exist_ok=True)
    ckpt_dir = args.ckpt
    os.makedirs(ckpt_dir, exist_ok=True)

    log_file = os.path.join(
        output_dir,
        "log_train_%s.txt" % datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    logger = common_utils.create_logger(log_file)

    logger.info("*********************start logging********************************")
    gpu_list = os.environ["CUDA_VISIBLE_DEVICES"] if "CUDA_VISIBLE_DEVICES" in os.environ.keys() else "ALL"
    log_cfg_to_file(cfg,logger=logger)

    tb_log_path = os.path.join(output_dir,"tensorboard")
    os.makedirs(tb_log_path,exist_ok=True)
    tb_log = SummaryWriter(log_dir=tb_log_path if args.local_rank ==0 else None)


    dataset, data_loader, sampler = build_data_loader(dist=dist_train,
                                                      batch_size=args.batch_size,
                                                      num_workers=args.workers,
                                                      training=True,
                                                      logger=logger,
                                                      split=args.split,
                                                      args=args,
                                                      train_all=args.train_all)
    if args.detector_name == "LZnet":
        model = FPVdet(dataset=dataset, logger=logger)
    elif args.detector_name == "pvrcnn":
        model = Part2net(num_class=len(cfg.CLASS_NAMES), dataset=dataset)
    else:
        raise NotImplementedError
    model.cuda()

    optimizer = build_optimizer(model, cfg.MODEL.OPTIMIZATION)

    start_epoch = it = 0
    last_epoch = -1
    if args.pretrained_model is not None:
        model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train)

    ckpt_list = glob.glob(os.path.join(ckpt_dir, "*checkpoint_epoch_*.pth"))
    if len(ckpt_list) > 0:
        ckpt_list.sort(key=os.path.getatime)
        it, start_epoch = model.load_params_with_optimizer(ckpt_list[-1], to_cpu=dist_train,
                                                           optimizer=optimizer, logger=logger)
        last_epoch = start_epoch + 1

    model.train()
    #logger.info(model)
    if dist_train:
        model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank])

    lr_scheduler,lr_warmup_scheduler = build_scheduler(optimizer,
                                                       total_iters_each_epoch=len(data_loader),
                                                       total_epochs=args.epochs,
                                                       last_epoch=last_epoch,
                                                       optim_cfg=cfg.MODEL.OPTIMIZATION,
                                                       )
    logger.info("************start training*************")
    train_model(
        model,
        optimizer,
        data_loader,
        model_func=model_fn_decorator(),
        lr_scheduler=lr_scheduler,
        optim_cfg=cfg.MODEL.OPTIMIZATION,
        start_epoch=start_epoch,
        total_epochs=args.epochs,
        start_iter=it,
        rank=args.local_rank,
        tb_log=tb_log,
        ckpt_save_dir=ckpt_dir,
        train_sampler=sampler,
        lr_warmup_scheduler=lr_warmup_scheduler,
        ckpt_save_interval=args.ckpt_save_interval,
        max_ckpt_save_num=args.max_ckpt_save_num
    )
    logger.info("**************End training********************")