def train(cfg, args):
    """Build, optionally DDP-wrap, restore, and train an SSD detection model.

    Returns the model produced by ``do_train``.
    """
    logger = logging.getLogger('SSD.trainer')

    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)

    # Linear LR scaling: each optimizer step consumes num_gpus batches,
    # so milestones shrink by the same factor to cover the same samples.
    scaled_lr = cfg.SOLVER.LR * args.num_gpus  # scale by num gpus
    optimizer = make_optimizer(cfg, model, scaled_lr)
    lr_milestones = [step // args.num_gpus for step in cfg.SOLVER.LR_STEPS]
    scheduler = make_lr_scheduler(cfg, optimizer, lr_milestones)

    arguments = {"iteration": 0}
    # Only rank 0 writes checkpoints in a distributed run.
    save_to_disk = dist_util.get_rank() == 0
    checkpointer = CheckPointer(model, optimizer, scheduler, cfg.OUTPUT_DIR,
                                save_to_disk, logger)
    arguments.update(checkpointer.load())

    max_iter = cfg.SOLVER.MAX_ITER // args.num_gpus
    train_loader = make_data_loader(cfg, is_train=True,
                                    distributed=args.distributed,
                                    max_iter=max_iter,
                                    start_iter=arguments['iteration'])

    model = do_train(cfg, model, train_loader, optimizer, scheduler,
                     checkpointer, device, arguments, args)
    return model
def evaluation(cfg, ckpt, distributed):
    """Benchmark forward-pass speed of an SSD model on each test dataset.

    Loads ``ckpt`` (or the latest checkpoint when ``ckpt`` is None), then
    times the forward pass per batch and prints the resulting FPS.

    Fixes vs. original: removed a duplicated ``torch.device`` assignment,
    replaced the manual accumulation loop with ``sum``, and guarded against
    division by zero when a data loader yields no batches.
    """
    logger = logging.getLogger("SSD.inference")
    model = build_detection_model(cfg)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR, logger=logger)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    model.eval()

    data_loaders_val = make_data_loader(cfg, is_train=False,
                                        distributed=distributed)
    for dataset_name, data_loader in zip(cfg.DATASETS.TEST, data_loaders_val):
        batch_times = []
        for images, targets, image_ids in data_loader:
            with torch.no_grad():
                # Synchronize around the forward pass so wall-clock time
                # measures completed GPU work, not just kernel launches.
                torch.cuda.synchronize(device)
                start = time.time()
                model(images.to(device))
                torch.cuda.synchronize(device)
                batch_times.append(time.time() - start)
        time_sum = sum(batch_times)
        if time_sum > 0:
            print("FPS: %f" % (float(len(batch_times) * cfg.TEST.BATCH_SIZE) / time_sum))
def start_train(cfg):
    """Train an SSD detector with Nesterov SGD and cosine LR annealing."""
    logger = logging.getLogger('SSD.trainer')
    model = SSDDetector(cfg)
    model = torch_utils.to_cuda(model)

    # Only optimize parameters that require gradients (frozen layers skipped).
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = torch.optim.SGD(
        trainable_params,
        lr=cfg.SOLVER.LR,
        momentum=cfg.SOLVER.MOMENTUM,
        weight_decay=cfg.SOLVER.WEIGHT_DECAY,
        nesterov=True,
    )
    # Cosine annealing over MAX_ITER/1000 scheduler steps, decaying to 0.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=int(cfg.SOLVER.MAX_ITER / 1000), eta_min=0)

    arguments = {"iteration": 0}
    checkpointer = CheckPointer(model, optimizer, cfg.OUTPUT_DIR, True, logger)
    arguments.update(checkpointer.load())

    train_loader = make_data_loader(cfg, is_train=True,
                                    max_iter=cfg.SOLVER.MAX_ITER,
                                    start_iter=arguments['iteration'])
    return do_train(cfg, model, train_loader, optimizer, checkpointer,
                    arguments, scheduler)
def start_train(cfg):
    """Train an SSD detector using the project optimizer/scheduler factories."""
    logger = logging.getLogger('SSD.trainer')
    model = torch_utils.to_cuda(SSDDetector(cfg))

    optimizer = make_optimizer(cfg, model, cfg.SOLVER.LR)
    scheduler = make_lr_scheduler(cfg, optimizer, list(cfg.SOLVER.LR_STEPS))

    arguments = {"iteration": 0}
    checkpointer = CheckPointer(cfg, model, optimizer, scheduler,
                                cfg.OUTPUT_DIR, True, logger)
    arguments.update(checkpointer.load())

    train_loader = make_data_loader(cfg, is_train=True,
                                    max_iter=cfg.SOLVER.MAX_ITER,
                                    start_iter=arguments['iteration'])
    return do_train(cfg, model, train_loader, optimizer, scheduler,
                    checkpointer, arguments)
def train(cfg, args):
    """Build the SSD model and its data pipeline, but skip the training step.

    The ``do_train`` call is intentionally commented out so that only the
    model construction and data loading paths are exercised; the function
    returns the (untrained) model.
    """
    logger = logging.getLogger('SSD.trainer')

    # Build the detection model and place it on the configured device.
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)

    # LR scales with GPU count; milestones shrink by the same factor so the
    # schedule covers the same number of training samples.
    lr = cfg.SOLVER.LR * args.num_gpus  # scale by num gpus
    optimizer = make_optimizer(cfg, model, lr)
    milestones = [step // args.num_gpus for step in cfg.SOLVER.LR_STEPS]
    scheduler = make_lr_scheduler(cfg, optimizer, milestones)

    arguments = {"iteration": 0}
    save_to_disk = dist_util.get_rank() == 0
    # Resume from an existing checkpoint when one is available.
    checkpointer = CheckPointer(model, optimizer, scheduler, cfg.OUTPUT_DIR,
                                save_to_disk, logger)
    arguments.update(checkpointer.load())

    max_iter = cfg.SOLVER.MAX_ITER // args.num_gpus
    train_loader = make_data_loader(cfg, is_train=True,
                                    distributed=args.distributed,
                                    max_iter=max_iter,
                                    start_iter=arguments['iteration'])

    # NOTE: training is deliberately disabled here (data-loading debug only).
    # model = do_train(cfg, model, train_loader, optimizer, scheduler,
    #                  checkpointer, device, arguments, args)
    return model
def start_train(cfg):
    """Train an SSD detector with plain SGD (no LR scheduler)."""
    logger = logging.getLogger('SSD.trainer')
    model = torch_utils.to_cuda(SSDDetector(cfg))

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=cfg.SOLVER.LR,
                                momentum=cfg.SOLVER.MOMENTUM,
                                weight_decay=cfg.SOLVER.WEIGHT_DECAY)

    arguments = {"iteration": 0}
    checkpointer = CheckPointer(model, optimizer, cfg.OUTPUT_DIR, True, logger)
    arguments.update(checkpointer.load())

    train_loader = make_data_loader(cfg, is_train=True,
                                    max_iter=cfg.SOLVER.MAX_ITER,
                                    start_iter=arguments['iteration'])
    return do_train(cfg, model, train_loader, optimizer, checkpointer,
                    arguments)
def visualize_validation_set(cfg, amount=200):
    """Save up to ``amount`` randomly chosen validation images to disk.

    Images land in ``visualizations/validation_set/visualization<idx>.png``.

    Fix vs. original: the save path is now derived from ``output_dir``
    (which was created but then ignored in favor of a hand-built string),
    and the manual counter/break loop is replaced by a slice.
    """
    output_dir = pathlib.Path('visualizations/validation_set')
    output_dir.mkdir(exist_ok=True, parents=True)
    data_loader = make_data_loader(cfg, is_train=False)
    if isinstance(data_loader, list):
        data_loader = data_loader[0]
    dataset = data_loader.dataset

    indices = list(range(len(dataset)))
    np.random.shuffle(indices)
    print("Generating images ..")
    for idx in indices[:amount]:
        image_id = dataset.image_ids[idx]
        image = dataset._read_image(image_id)
        # Annotations are still fetched for the (disabled) box overlay below.
        boxes, labels = dataset._get_annotation(image_id)
        # NOTE(review): overlay was disabled in the original; re-enable with:
        # image = draw_boxes(image, boxes, labels,
        #                    class_name_map=dataset.class_names)
        plt.imsave(str(output_dir / ("visualization" + str(idx) + ".png")),
                   image)
    print("Saved the images to visualizations/")
def train_data():
    """Smoke-test the training loader: print the first batch, then exit."""
    max_iter = 12000
    loader = make_data_loader(cfg, is_train=True, distributed=False,
                              max_iter=max_iter, start_iter=0)
    for iteration, (images, targets, _) in enumerate(loader, 0):
        print(iteration)
        print(images.shape)
        print(targets.shape)
        # Stop the whole process after inspecting the first batch.
        exit(0)
def train(cfg, args):
    """Build and train an SSD model, printing its trainable-parameter count."""
    logger = logging.getLogger('SSD.trainer')
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)

    # LR scales with GPU count; milestones shrink accordingly.
    lr = cfg.SOLVER.LR * args.num_gpus  # scale by num gpus
    optimizer = make_optimizer(cfg, model, lr)
    milestones = [step // args.num_gpus for step in cfg.SOLVER.LR_STEPS]
    scheduler = make_lr_scheduler(cfg, optimizer, milestones)

    arguments = {"iteration": 0}
    save_to_disk = dist_util.get_rank() == 0
    checkpointer = CheckPointer(model, optimizer, scheduler, cfg.OUTPUT_DIR,
                                save_to_disk, logger)
    arguments.update(checkpointer.load())

    max_iter = cfg.SOLVER.MAX_ITER // args.num_gpus
    train_loader = make_data_loader(cfg, is_train=True,
                                    distributed=args.distributed,
                                    max_iter=max_iter,
                                    start_iter=arguments['iteration'])

    # Report the number of trainable parameters.
    n_params = sum(p.numel() for _, p in model.named_parameters()
                   if p.requires_grad)
    print(n_params)

    model = do_train(cfg, model, train_loader, optimizer, scheduler,
                     checkpointer, device, arguments, args)
    return model
def start_train(cfg, visualize_example=False):
    """Train an SSD detector with SGD; optionally visualize training examples.

    No LR scheduler is used (``lr_scheduler=None`` is passed to ``do_train``).
    """
    logger = logging.getLogger('SSD.trainer')
    model = SSDDetector(cfg)
    print(model)
    model = torch_utils.to_cuda(model)

    # SGD with momentum; Adam and CyclicLR variants were tried and dropped.
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=cfg.SOLVER.LR,
                                momentum=cfg.SOLVER.MOMENTUM,
                                weight_decay=cfg.SOLVER.WEIGHT_DECAY)

    arguments = {"iteration": 0}
    checkpointer = CheckPointer(model, optimizer, cfg.OUTPUT_DIR, True, logger)
    arguments.update(checkpointer.load())

    train_loader = make_data_loader(cfg, is_train=True,
                                    max_iter=cfg.SOLVER.MAX_ITER,
                                    start_iter=arguments['iteration'])
    return do_train(cfg, model, train_loader, optimizer, checkpointer,
                    arguments, visualize_example, lr_scheduler=None)
def do_evaluation(cfg, model, **kwargs):
    """Run inference on every configured test dataset; return per-set results."""
    model.eval()
    loaders = make_data_loader(cfg, is_train=False)
    results = []
    for dataset_name, loader in zip(cfg.DATASETS.TEST, loaders):
        # Each dataset writes into its own inference subfolder.
        out_dir = pathlib.Path(cfg.OUTPUT_DIR, "inference", dataset_name)
        out_dir.mkdir(exist_ok=True, parents=True)
        results.append(inference(model, loader, dataset_name, out_dir,
                                 **kwargs))
    return results
def train():
    """Debug helper: iterate the training loader and dump raw image tensors."""
    # Single-process run, so no GPU-count scaling is applied (divide by 1).
    max_iter = cfg.SOLVER.MAX_ITER // 1
    loader = make_data_loader(cfg, is_train=True, distributed=False,
                              max_iter=max_iter, start_iter=1)
    for iteration, (images, targets, _) in enumerate(loader, 1):
        print(images)
def do_run(cfg, model, distributed, **kwargs):
    """Invoke ``run`` on every configured test dataset."""
    # Unwrap DDP so the underlying module handles inference directly.
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model = model.module
    model.eval()
    device = torch.device(cfg.MODEL.DEVICE)
    loaders = make_data_loader(cfg, is_train=False, distributed=distributed)
    for dataset_name, loader in zip(cfg.DATASETS.TEST, loaders):
        out_dir = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
        if not os.path.exists(out_dir):
            mkdir(out_dir)
        run(model, loader, dataset_name, device, out_dir, **kwargs)
def visualize_training_set(cfg, image_id="Czech_000006"):
    """Display one training image with its ground-truth boxes drawn on it."""
    loader = make_data_loader(cfg, is_train=True)
    if isinstance(loader, list):
        loader = loader[0]
    dataset = loader.dataset
    image = dataset._read_image(image_id)
    boxes, labels = dataset._get_annotation(image_id)
    annotated = draw_boxes(image, boxes, labels,
                           class_name_map=dataset.class_names)
    plt.imshow(annotated)
    plt.show()
def start_train(cfg):
    """Train an SSD detector; optimizer is picked by cfg.SOLVER.TYPE.

    Supported types are "adam" and "sgd"; anything else warns and falls
    back to Adam.
    """
    logger = logging.getLogger('SSD.trainer')
    model = torch_utils.to_cuda(SSDDetector(cfg))

    solver_type = cfg.SOLVER.TYPE
    if solver_type == "sgd":
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=cfg.SOLVER.LR,
                                    weight_decay=cfg.SOLVER.WEIGHT_DECAY,
                                    momentum=cfg.SOLVER.MOMENTUM)
    else:
        if solver_type != "adam":
            # Unknown solver type: warn and fall back to Adam.
            print("WARNING: Incorrect solver type, defaulting to Adam")
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=cfg.SOLVER.LR,
                                     weight_decay=cfg.SOLVER.WEIGHT_DECAY)

    scheduler = LinearMultiStepWarmUp(cfg, optimizer)

    arguments = {"iteration": 0}
    checkpointer = CheckPointer(model, optimizer, cfg.OUTPUT_DIR, True, logger)
    arguments.update(checkpointer.load())

    train_loader = make_data_loader(cfg, is_train=True,
                                    max_iter=cfg.SOLVER.MAX_ITER,
                                    start_iter=arguments['iteration'])
    return do_train(cfg, model, train_loader, optimizer, checkpointer,
                    arguments, scheduler)
def train(cfg, args):
    """Train an SSD model after a statistics pass that calibrates quantized
    ReLU clipping values.

    ``do_run`` populates ``running_mean``/``running_std`` on the custom ReLU
    modules; clipping is then fixed at mean + 3*std and quantization enabled
    before training starts.
    """
    logger = logging.getLogger('SSD.trainer')
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    if args.distributed:
        # Sync BN so normalization statistics agree across processes.
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)

    lr = cfg.SOLVER.LR * args.num_gpus  # scale by num gpus
    optimizer = make_optimizer(cfg, model, lr)
    milestones = [step // args.num_gpus for step in cfg.SOLVER.LR_STEPS]
    scheduler = make_lr_scheduler(cfg, optimizer, milestones)

    arguments = {"iteration": 0}
    save_to_disk = dist_util.get_rank() == 0
    checkpointer = CheckPointer(model, optimizer, scheduler, cfg.OUTPUT_DIR,
                                save_to_disk, logger)
    arguments.update(checkpointer.load(args.ckpt))

    max_iter = cfg.SOLVER.MAX_ITER // args.num_gpus
    train_loader = make_data_loader(cfg, is_train=True,
                                    distributed=args.distributed,
                                    max_iter=max_iter,
                                    start_iter=arguments['iteration'])

    # Statistics pass over the evaluation data.
    logging.info('==>Start statistic')
    do_run(cfg, model, distributed=args.distributed)
    logging.info('==>End statistic')

    for module in model.modules():
        if isinstance(module, torch.nn.ReLU):
            module.collectStats = False
            # Clip at mean + 3*std, then switch the module into quantized mode.
            module.c.data = module.running_mean + (3 * module.running_std)
            module.quant = True
    torch.cuda.empty_cache()

    model = do_train(cfg, model, train_loader, optimizer, scheduler,
                     checkpointer, device, arguments, args)
    return model
def train(cfg: CfgNode, args: Namespace, output_dir: Path,
          model_manager: Dict[str, Any], freeze_non_sigma: bool = False):
    """Resume from the experiment's best checkpoint and continue training."""
    logger = logging.getLogger('SSD.trainer')
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)

    lr = cfg.SOLVER.LR * args.num_gpus  # scale by num gpus
    optimizer = make_optimizer(cfg, model, lr)
    milestones = [step // args.num_gpus for step in cfg.SOLVER.LR_STEPS]
    scheduler = make_lr_scheduler(cfg, optimizer, milestones)

    arguments = {"iteration": 0}
    save_to_disk = dist_util.get_rank() == 0
    checkpointer = CheckPointer(model, optimizer, scheduler, cfg.OUTPUT_DIR,
                                save_to_disk, logger)
    # Resume explicitly from this experiment's best checkpoint.
    resume_from = checkpointer.get_best_from_experiment_dir(cfg)
    arguments.update(checkpointer.load(f=resume_from))

    max_iter = cfg.SOLVER.MAX_ITER // args.num_gpus
    train_loader = make_data_loader(cfg, is_train=True,
                                    distributed=args.distributed,
                                    max_iter=max_iter,
                                    start_iter=arguments['iteration'])

    # Dump the model layout before training starts (weight-freezing aid).
    print_model(model)
    model = do_train(cfg, model, train_loader, optimizer, scheduler,
                     checkpointer, device, arguments, args, output_dir,
                     model_manager)
    return model
def start_train(cfg):
    """Train an SSD detector with Adam and a two-milestone MultiStepLR decay."""
    logger = logging.getLogger('SSD.trainer')
    model = torch_utils.to_cuda(SSDDetector(cfg))

    # Adam optimizer (an SGD-with-momentum variant was tried and dropped).
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=cfg.SOLVER.LR,
                                 weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    # LR is multiplied by GAMMA at iterations 6000 and 10000.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizer,
                                                     milestones=[6000, 10000],
                                                     gamma=cfg.SOLVER.GAMMA)

    arguments = {"iteration": 0}
    checkpointer = CheckPointer(model, optimizer, cfg.OUTPUT_DIR, True, logger)
    arguments.update(checkpointer.load())

    train_loader = make_data_loader(cfg, is_train=True,
                                    max_iter=cfg.SOLVER.MAX_ITER,
                                    start_iter=arguments['iteration'])
    return do_train(cfg, model, train_loader, optimizer, checkpointer,
                    arguments, scheduler)
def do_evaluation(cfg, model, distributed, **kwargs):
    """Evaluate on all test datasets and print the overall detection speed."""
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model = model.module
    model.eval()
    device = torch.device(cfg.MODEL.DEVICE)
    loaders = make_data_loader(cfg, is_train=False, distributed=distributed)

    eval_results = []
    timer = Timer()
    timer.tic()
    for dataset_name, loader in zip(cfg.DATASETS.TEST, loaders):
        out_dir = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
        if not os.path.exists(out_dir):
            mkdir(out_dir)
        eval_results.append(inference(model, loader, dataset_name, device,
                                      out_dir, **kwargs))
    # NOTE(review): 4952 looks like the VOC2007 test-set size — confirm it
    # matches the dataset actually being evaluated.
    print("\nTotal detection speed1: %.1f FPS" % (4952 / timer.toc()))
    return eval_results
def train(cfg, args):
    """Build the detector, restore weights via CheckPointer, and train it."""
    logger = logging.getLogger('SSD.trainer')
    # Build the model and move it to the configured device.
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)

    # LR scales with GPU count; milestones shrink by the same factor.
    lr = cfg.SOLVER.LR * args.num_gpus  # scale by num gpus
    optimizer = make_optimizer(cfg, model, lr)
    milestones = [step // args.num_gpus for step in cfg.SOLVER.LR_STEPS]
    scheduler = make_lr_scheduler(cfg, optimizer, milestones)

    arguments = {"iteration": 0}
    save_to_disk = dist_util.get_rank() == 0
    checkpointer = CheckPointer(model, optimizer, scheduler,
                                save_dir=cfg.OUTPUT_DIR,
                                save_to_disk=save_to_disk, logger=logger)
    # Loads with an empty path and use_latest=False; the effective behavior
    # depends on CheckPointer's handling of f='' — TODO confirm.
    arguments.update(checkpointer.load(f='', use_latest=False))

    max_iter = cfg.SOLVER.MAX_ITER // args.num_gpus
    train_loader = make_data_loader(cfg, is_train=True,
                                    distributed=args.distributed,
                                    max_iter=max_iter,
                                    start_iter=arguments['iteration'])
    print("dataloader: ", train_loader.batch_size)

    model = do_train(cfg, model, train_loader, optimizer, scheduler,
                     checkpointer, device, arguments, args)
    return model
def evaluation(cfg, ckpt, N_images: int):
    """Measure average inference runtime/FPS on one repeated batch per loader.

    Loads ``ckpt`` (or the latest checkpoint when ``ckpt`` is None), warms up
    for 10 forward passes, then times ``N_images`` passes on the same batch.

    Fix vs. original: the warmup and timed passes now run under
    ``torch.no_grad()`` so the benchmark does not build autograd graphs
    (lower memory use, timing closer to deployment inference).
    """
    model = SSDDetector(cfg)
    logger = logging.getLogger("SSD.inference")
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR, logger=logger)
    model = torch_utils.to_cuda(model)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    model.eval()

    data_loaders_val = make_data_loader(cfg, is_train=False)
    for data_loader in data_loaders_val:
        images, targets, image_ids = next(iter(data_loader))
        images = torch_utils.to_cuda(images)
        imshape = list(images.shape[2:])
        print("Checking runtime for image shape:", imshape)
        with torch.no_grad():
            # Warmup so lazy initialization does not pollute the timing.
            for _ in range(10):
                model(images)
            start_time = time.time()
            for _ in range(N_images):
                model(images)
            total_time = time.time() - start_time
        print("Runtime for image shape:", imshape)
        print("Total runtime:", total_time)
        print("FPS:", N_images / total_time)
def test_data():
    """Smoke-test the validation loaders: print the first pair, then exit."""
    loaders = make_data_loader(cfg, is_train=False, distributed=False)
    for dataset_name, loader in zip(cfg.DATASETS.TEST, loaders):
        print(dataset_name)
        print(loader)
        # Stop the whole process after inspecting the first dataset.
        exit(0)
from ssd.config.defaults import cfg
from ssd.data.build import make_data_loader
from ssd.modeling.detector import SSDDetector

# --- config overrides for a resnet34-backbone probe run ---
cfg.MODEL.BACKBONE.NAME = 'resnet34'
cfg.INPUT.IMAGE_SIZE = 300
# cfg.MODEL.BACKBONE.OUT_CHANNELS = (256,512,256,256,128,64) # wip34
cfg.MODEL.BACKBONE.OUT_CHANNELS = (128, 256, 512, 256, 256, 128)  # resnet34
cfg.MODEL.PRIORS.FEATURE_MAPS = [38, 19, 10, 5, 3, 1]
cfg.SOLVER.BATCH_SIZE = 2
cfg.DATASET_DIR = "datasets"
cfg.DATASETS.TRAIN = ("waymo_train",)
# cfg.DATASETS.TEST = ("waymo_val",)

model = SSDDetector(cfg)
# Print each feature-extractor bank so the backbone layout can be inspected.
for level, bank in enumerate(model.backbone.feature_extractor):
    print("Bank %d:" % (level + 1), bank)

data_loader = make_data_loader(cfg, is_train=True,
                               max_iter=cfg.SOLVER.MAX_ITER)
images, targets, _ = next(iter(data_loader))  # 1 batch
outputs = model(images, targets=targets)
import numpy as np
import matplotlib.pyplot as plt
from train import get_parser
from ssd.config.defaults import cfg
from ssd.data.build import make_data_loader
from vizer.draw import draw_boxes
from ssd.modeling.box_head.prior_box import PriorBox
from ssd.utils import box_utils

args = get_parser().parse_args()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()

data_loader = make_data_loader(cfg, is_train=True)
# Normalization constants for undoing the input transform.
mean = np.array([cfg.INPUT.PIXEL_MEAN]).reshape(1, 1, -1)
std = np.array([cfg.INPUT.PIXEL_STD])
priors = PriorBox(cfg)()
if isinstance(data_loader, list):
    data_loader = data_loader[0]

for img, batch, *_ in data_loader:
    boxes = batch["boxes"]
    # The SSD target transform encodes boxes as offsets relative to the
    # priors; decode them back to corner-form coordinates.
    boxes = box_utils.convert_locations_to_boxes(
        boxes, priors, cfg.MODEL.CENTER_VARIANCE, cfg.MODEL.SIZE_VARIANCE)
    boxes = box_utils.center_form_to_corner_form(boxes)
import numpy as np
import matplotlib.pyplot as plt
from train import get_parser
from ssd.config.defaults import cfg
from ssd.data.build import make_data_loader
from vizer.draw import draw_boxes

args = get_parser().parse_args()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()

data_loader = make_data_loader(cfg, is_train=False)
if isinstance(data_loader, list):
    data_loader = data_loader[0]
dataset = data_loader.dataset

# Visit the validation images in a random order, showing each with its
# ground-truth boxes overlaid.
order = list(range(len(dataset)))
np.random.shuffle(order)
for idx in order:
    image = dataset._read_image(idx)
    boxes, labels = dataset.get_annotation(idx)
    image = draw_boxes(image, boxes, labels,
                       class_name_map=dataset.class_names)
    plt.imshow(image)
    plt.show()