def test(epochs_tested):
    """Run COCO bbox evaluation for one or more saved FCOS checkpoints.

    For every epoch number in ``epochs_tested`` the matching checkpoint is
    loaded, predictions are collected over the whole test dataloader, written
    to a per-epoch JSON file under ``cfg.output_path`` and scored with the
    COCO "bbox" metric.

    Args:
        epochs_tested: list or set of epoch numbers whose checkpoints exist.

    Raises:
        TypeError: if ``epochs_tested`` is not a list or set.
    """
    is_train = False
    transforms = transform.build_transforms(is_train=is_train)
    coco_dataset = dataset.COCODataset(is_train=is_train, transforms=transforms)
    dataloader = build_dataloader(coco_dataset, sampler=None, is_train=is_train)

    # `assert` is stripped under `python -O`, so validate explicitly.
    # (The original assert message also named the wrong variable,
    # "archive_name"; it checks `epochs_tested`.)
    if not isinstance(epochs_tested, (list, set)):
        raise TypeError("during test, epochs_tested must be a list or set!")

    model = FCOS(is_train=is_train)

    for epoch in epochs_tested:
        utils.load_model(model, epoch)
        model.cuda()
        model.eval()

        # Fresh result list per checkpoint so epochs are scored independently.
        final_results = []

        with torch.no_grad():
            for data in tqdm(dataloader):
                img = data["images"].cuda()
                ori_img_shape = data["ori_img_shape"].cuda()
                fin_img_shape = data["fin_img_shape"].cuda()
                index = data["indexs"]

                cls_pred, reg_pred, label_pred = model(
                    [img, ori_img_shape, fin_img_shape])

                # Test-time batch size is 1: take element 0 everywhere.
                cls_pred = cls_pred[0].cpu()
                reg_pred = reg_pred[0].cpu()
                label_pred = label_pred[0].cpu()
                index = index[0]

                img_info = dataloader.dataset.img_infos[index]
                imgid = img_info["id"]

                # COCO result format expects [x, y, w, h] boxes.
                reg_pred = utils.xyxy2xywh(reg_pred)

                label_pred = label_pred.tolist()
                cls_pred = cls_pred.tolist()

                final_results.extend(
                    [
                        {
                            "image_id": imgid,
                            "category_id": dataloader.dataset.label2catid[label_pred[k]],
                            "bbox": reg_pred[k].tolist(),
                            "score": cls_pred[k],
                        }
                        for k in range(len(reg_pred))
                    ]
                )

        output_path = os.path.join(cfg.output_path, f"fcos_{epoch}.json")
        utils.evaluate_coco(dataloader.dataset.coco, final_results,
                            output_path, "bbox")
def get_prediction(image, info, display=True):
    '''
    Run the detector on a single image and return the top predictions
    (bounding boxes, masks, labels and scores), optionally displaying them.
    '''
    visualizer = Visualizer()

    # Rebuild the transforms per image so the resize limits follow the
    # input's spatial dimensions (shape is H x W x C; drop the channel dim).
    spatial_dims = image.shape[:-1]
    info["transforms"] = build_transforms(info["cfg"],
                                          min_image_size=min(spatial_dims),
                                          max_image_size=max(spatial_dims))

    pred = visualizer.compute_prediction(info["cfg"], image,
                                         info["transforms"], info["device"],
                                         info["model"], info["cpu_device"],
                                         info["masker"])
    pred = visualizer.select_top_predictions(pred,
                                             info["confidence_threshold"])
    print(pred)

    if display:
        # Channel order is flipped (BGR <-> RGB) before drawing.
        visualizer.display_instances(image[:, :, ::-1],
                                     pred.bbox,
                                     pred.get_field("mask"),
                                     pred.get_field("labels"),
                                     info["CATEGORIES"],
                                     pred.get_field("scores"),
                                     show_mask=False)
    return pred
def train(is_dist, start_epoch, local_rank):
    """Train the RetinaNet detector, optionally with DistributedDataParallel.

    Args:
        is_dist: whether single-node distributed (DDP) training is enabled.
        start_epoch: 1-based epoch to begin at; 1 means "initialise the
            backbone from pretrained weights", anything else resumes from the
            checkpoint saved at ``start_epoch - 1``.
        local_rank: this process's rank on the node; only rank 0 prints,
            saves checkpoints and writes the log file.
    """
    transforms = transform.build_transforms()
    coco_dataset = dataset.COCODataset(is_train=True, transforms=transforms)

    # Group samplers batch images with similar aspect ratios together.
    if (is_dist):
        sampler = distributedGroupSampler(coco_dataset)
    else:
        sampler = groupSampler(coco_dataset)
    dataloader = build_dataloader(coco_dataset, sampler)

    # Running averages used purely for console logging.
    batch_time_meter = utils.AverageMeter()
    cls_loss_meter = utils.AverageMeter()
    reg_loss_meter = utils.AverageMeter()
    losses_meter = utils.AverageMeter()

    model = retinanet(is_train=True)
    if (start_epoch == 1):
        # Fresh run: load pretrained ResNet backbone weights.
        model.resnet.load_pretrained(pretrained_path[cfg.resnet_depth])
    else:
        # Resume: restore the checkpoint of the previous epoch.
        utils.load_model(model, start_epoch - 1)
    model = model.cuda()

    if is_dist:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[
                local_rank,
            ],
            output_device=local_rank,
            broadcast_buffers=False)

    optimizer = solver.build_optimizer(model)
    scheduler = solver.scheduler(optimizer)

    model.train()
    logs = []

    for epoch in range(start_epoch, cfg.max_epochs + 1):
        if is_dist:
            # Re-seed the sampler's shuffle so shards differ per epoch.
            dataloader.sampler.set_epoch(epoch - 1)
        # Step-decay the learning rate at epoch boundaries.
        scheduler.lr_decay(epoch)

        end_time = time.time()
        for iteration, datas in enumerate(dataloader, 1):
            # Linear LR warmup over the first iterations.
            scheduler.linear_warmup(epoch, iteration - 1)

            images = datas["images"]
            bboxes = datas["bboxes"]
            labels = datas["labels"]
            res_img_shape = datas["res_img_shape"]
            pad_img_shape = datas["pad_img_shape"]

            # Ground truth comes as per-image lists of tensors.
            images = images.cuda()
            bboxes = [bbox.cuda() for bbox in bboxes]
            labels = [label.cuda() for label in labels]

            loss_dict = model(images,
                              gt_bboxes=bboxes,
                              gt_labels=labels,
                              res_img_shape=res_img_shape,
                              pad_img_shape=pad_img_shape)
            cls_loss = loss_dict["cls_loss"]
            reg_loss = loss_dict["reg_loss"]
            losses = cls_loss + reg_loss

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()

            # Track wall-clock time per iteration (includes data loading).
            batch_time_meter.update(time.time() - end_time)
            end_time = time.time()
            cls_loss_meter.update(cls_loss.item())
            reg_loss_meter.update(reg_loss.item())
            losses_meter.update(losses.item())

            # Log every 50 iterations; only rank 0 prints/records.
            if (iteration % 50 == 0):
                if (local_rank == 0):
                    res = "\t".join([
                        "Epoch: [%d/%d]" % (epoch, cfg.max_epochs),
                        "Iter: [%d/%d]" % (iteration, len(dataloader)),
                        "Time: %.3f (%.3f)" % (batch_time_meter.val,
                                               batch_time_meter.avg),
                        "cls_loss: %.4f (%.4f)" % (cls_loss_meter.val,
                                                   cls_loss_meter.avg),
                        "reg_loss: %.4f (%.4f)" % (reg_loss_meter.val,
                                                   reg_loss_meter.avg),
                        "Loss: %.4f (%.4f)" % (losses_meter.val,
                                               losses_meter.avg),
                        "lr: %.6f" % (optimizer.param_groups[0]["lr"]),
                    ])
                    print(res)
                    logs.append(res)
                # NOTE(review): the source's indentation was lost; the meter
                # resets are placed under the %50 check on every rank here —
                # confirm against the original layout.
                batch_time_meter.reset()
                cls_loss_meter.reset()
                reg_loss_meter.reset()
                losses_meter.reset()

        # Checkpoint once per epoch from rank 0, then barrier so other
        # ranks don't race ahead while the file is being written.
        if (local_rank == 0):
            utils.save_model(model, epoch)
        if (is_dist):
            utils.synchronize()

    # Persist the accumulated per-iteration log lines after training.
    if (local_rank == 0):
        with open("logs.txt", "w") as f:
            for i in logs:
                f.write(i + "\n")