def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
    """Train a detector according to ``cfg``.

    Args:
        model: the detector to train (moved onto GPU(s) here).
        dataset: a dataset or a list/tuple of datasets, one loader is
            built per entry.
        cfg: mmcv-style config holding data/optimizer/hook settings.
        distributed: wrap the model in ``MMDistributedDataParallel``
            when True, else ``MMDataParallel``.
        validate: register an evaluation hook on ``cfg.data.val``.
        timestamp: forwarded to the runner so log filenames line up.
        meta: extra metadata passed through to the runner.
    """
    logger = get_root_logger(cfg.log_level)

    # Normalise to a sequence of datasets and build one loader per dataset.
    datasets = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = []
    for ds in datasets:
        data_loaders.append(
            build_dataloader(
                ds,
                cfg.data.samples_per_gpu,
                cfg.data.workers_per_gpu,
                # cfg.gpus will be ignored if distributed
                len(cfg.gpu_ids),
                dist=distributed,
                seed=cfg.seed))

    # Put the model on GPU(s): DDP for distributed runs, DP otherwise.
    if distributed:
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=cfg.get('find_unused_parameters', False))
    else:
        model = MMDataParallel(model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)

    # Build the training runner around the wrapped model.
    runner = Runner(
        model,
        batch_processor,
        build_optimizer(model, cfg.optimizer),
        cfg.work_dir,
        logger=logger,
        meta=meta)
    # an ugly walkaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp

    # Choose the optimizer hook: fp16 wins, then plain distributed,
    # otherwise whatever the config specifies verbatim.
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(
            **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
    elif distributed and 'type' not in cfg.optimizer_config:
        optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config

    # Standard training hooks (lr schedule, optimizer, checkpoint, logging).
    runner.register_training_hooks(
        cfg.lr_config,
        optimizer_config,
        cfg.checkpoint_config,
        cfg.log_config,
        cfg.get('momentum_config', None))
    if distributed:
        runner.register_hook(DistSamplerSeedHook())

    # Optional evaluation hook on the validation split.
    if validate:
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(
            val_dataset,
            samples_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False)
        hook_cls = DistEvalHook if distributed else EvalHook
        runner.register_hook(hook_cls(val_dataloader, **cfg.get('evaluation', {})))

    # Resume takes precedence over a plain checkpoint load.
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)

    # Optionally remap the classifier head to a new class ordering.
    if cfg.get('classes_rearrange', False):
        runner.model = rearrange_classes(runner.model, cfg.classes, cfg.dataset_type)

    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
def after_train_epoch(self, runner: Runner):
    """Render side-by-side GT / prediction example images after each epoch.

    Runs inference on the samples listed in ``self.indices``, draws
    ground-truth and predicted boxes (and segmentation masks when the
    model produces them), stacks everything into one composite image and
    writes it to ``self.dstFolder/<fold>/<stage>/<epoch>.jpg``.

    Args:
        runner: the active training runner; provides the model, rank,
            epoch counter and ``executionConfig`` (fold/stage).
    """
    ec = runner.executionConfig
    runner.model.eval()
    results = [None for _ in range(len(self.indices))]
    if runner.rank == 0:
        prog_bar = mmcv.ProgressBar(len(self.indices))
    # FIX: index `results` by enumeration position, not by the dataset
    # index itself — the list is only len(self.indices) long, so using
    # the raw index crashed whenever self.indices was not range(N).
    for pos, idx in enumerate(self.indices):
        pi = self.dataset.ds[idx]
        data = self.dataset[idx]
        data_gpu = scatter(
            collate([data], samples_per_gpu=1),
            [torch.cuda.current_device()])[0]
        # compute output
        with torch.no_grad():
            pred = runner.model(return_loss=False, rescale=True, **data_gpu)
        withMasks = isinstance(pred, tuple)
        result = convertMMDETModelOutput(pred, withMasks, 0.1)
        result = applyTresholdToPrediction(result, withMasks, self.threshold)
        results[pos] = (result, pi)
        if runner.rank == 0:
            prog_bar.update()

    gtImages = []
    predImages = []
    gtMaskedImages = []
    predMaskedImages = []
    classNames = self.dataset.CLASSES
    for r in results:
        imgOrig = r[1].x
        # Scale every sample to a common display width.
        scale = self.exampleWidth / imgOrig.shape[1]
        newY = self.exampleWidth
        newX = int(imgOrig.shape[0] * scale)
        img = imgaug.imresize_single_image(imgOrig, (newX, newY), 'cubic')
        gtLabels = r[1].y[0] - 1  # dataset labels are 1-based; drawer wants 0-based
        gtBboxes = r[1].y[1] * scale
        result = r[0]
        labels = result[0]
        bboxes = result[1]
        # FIX: initialise segm_result every iteration — previously it was
        # undefined for the first box-only result and leaked stale masks
        # from the previous sample otherwise.
        segm_result = None
        if len(result) == 3:
            segm_result = result[2]
            # NOTE(review): predicted bboxes are rescaled only in the
            # mask branch; the 2-tuple path draws them unscaled — confirm
            # that is intended.
            bboxes *= scale
        numColors = len(imgaug.SegmentationMapsOnImage.DEFAULT_SEGMENT_COLORS)
        if segm_result is not None:
            masksShape = list(imgOrig.shape[:2]) + [1]
            gtMasks = r[1].y[2]
            objColor = 1
            # FIX: np.int was removed from NumPy (1.24+); builtin int is
            # the exact equivalent. Allocate the GT canvas unconditionally
            # so drawing below cannot hit an undefined name when the
            # sample has no GT masks.
            gtMasksArr = np.zeros(masksShape, dtype=int)
            if gtMasks is not None:
                for gtm in gtMasks[:len(gtLabels)]:
                    gtMasksArr[gtm > 0] = objColor
                    # cycle through the palette, skipping color 0 (background)
                    objColor = 1 + (objColor + 1) % (numColors - 1)
            predMasksArr = np.zeros(masksShape, dtype=int)
            objColor = 1
            for m in segm_result:
                predMasksArr[m > 0] = objColor
                objColor = 1 + (objColor + 1) % (numColors - 1)
            gtMaskImg = imgaug.SegmentationMapOnImage(
                gtMasksArr, imgOrig.shape).draw_on_image(imgOrig)[0]
            predMaskImg = imgaug.SegmentationMapOnImage(
                predMasksArr, imgOrig.shape).draw_on_image(imgOrig)[0]
            gtMaskedImages.append(
                imgaug.imresize_single_image(gtMaskImg, (newX, newY), 'cubic'))
            predMaskedImages.append(
                imgaug.imresize_single_image(predMaskImg, (newX, newY), 'cubic'))
        predImg = imdraw_det_bboxes(
            img.copy(), bboxes, labels, class_names=classNames)
        gtImg = imdraw_det_bboxes(
            img.copy(), gtBboxes, gtLabels, class_names=classNames)
        gtImages.append(gtImg)
        predImages.append(predImg)

    # Compose: GT column | prediction column | optional mask columns.
    gtImg = np.concatenate(gtImages, axis=0)
    predImg = np.concatenate(predImages, axis=0)
    exampleImg = np.concatenate([gtImg, predImg], axis=1)
    if len(gtMaskedImages) > 0:
        gtMaskImg = np.concatenate(gtMaskedImages, axis=0)
        exampleImg = np.concatenate([exampleImg, gtMaskImg], axis=1)
    if len(predMaskedImages) > 0:
        predMaskImg = np.concatenate(predMaskedImages, axis=0)
        exampleImg = np.concatenate([exampleImg, predMaskImg], axis=1)

    imFolder = os.path.join(self.dstFolder, f"{ec.fold}/{ec.stage}")
    os.makedirs(imFolder, exist_ok=True)
    out_file = os.path.join(imFolder, f"{runner.epoch}.jpg")
    imageio.imwrite(out_file, exampleImg)