Example #1
import json
import os

import cv2 as cv
import numpy as np
import torch
from tqdm import tqdm

# RetinaNet, ScalePadding, non_max_suppression, rgb_mean, rgb_std and coco_ids
# are project-local: the model, a letterbox transform, NMS, the normalization
# statistics, and the contiguous-index -> COCO category-id mapping.

@torch.no_grad()  # inference only: no gradients needed
def write_coco_json():
    from pycocotools.coco import COCO
    img_root = "/home/wangchao/public_dataset/coco/images/val2017"
    model = RetinaNet()
    # load the EMA weights saved by the trainer's checkpoint
    weights = torch.load("weights/0_retinanet_free_anchor_best_map.pth")['ema']
    model.load_state_dict(weights)
    model.cuda().eval()

    # letterbox transform: keep aspect ratio, pad up to 640x640
    basic_transform = ScalePadding(target_size=(640, 640),
                                   padding_val=(103, 116, 123))
    coco = COCO(
        "/home/wangchao/public_dataset/coco/annotations/instances_val2017.json"
    )
    coco_predict_list = list()
    for img_id in tqdm(coco.imgs.keys()):
        file_name = coco.imgs[img_id]['file_name']
        img_path = os.path.join(img_root, file_name)
        img = cv.imread(img_path)
        # ori_img = img.copy()
        # letterbox the image; keep the scale ratio and padding offsets
        img, ratio, (left, top) = basic_transform.make_border(img)
        h, w = img.shape[:2]
        # BGR -> RGB, scale to [0, 1], normalize, then HWC -> CHW
        img_out = img[:, :, [2, 1, 0]].astype(np.float32) / 255.0
        img_out = ((img_out - np.array(rgb_mean)) /
                   np.array(rgb_std)).transpose(2, 0, 1).astype(np.float32)
        img_out = torch.from_numpy(
            np.ascontiguousarray(img_out)).unsqueeze(0).float().cuda()
        predicts = model(img_out)
        # clamp predicted boxes to the padded image before NMS
        for i in range(len(predicts)):
            predicts[i][:, [0, 2]] = predicts[i][:, [0, 2]].clamp(min=0, max=w)
            predicts[i][:, [1, 3]] = predicts[i][:, [1, 3]].clamp(min=0, max=h)
        box = non_max_suppression(predicts,
                                  conf_thresh=0.05,
                                  iou_thresh=0.5,
                                  max_det=300)[0]
        if box is None:
            continue
        # undo the letterbox: remove padding offsets, then rescale to the
        # original image size
        box[:, [0, 2]] = (box[:, [0, 2]] - left) / ratio[0]
        box[:, [1, 3]] = (box[:, [1, 3]] - top) / ratio[1]
        box = box.detach().cpu().numpy()
        # ret_img = draw_box(ori_img, box[:, [4, 5, 0, 1, 2, 3]], colors=coco_colors)
        # cv.imwrite(file_name, ret_img)
        # COCO expects [x, y, w, h]; copy so the conversion does not
        # mutate box in place
        coco_box = box[:, :4].copy()
        coco_box[:, 2:] = coco_box[:, 2:] - coco_box[:, :2]
        for p, b in zip(box.tolist(), coco_box.tolist()):
            coco_predict_list.append({
                'image_id': img_id,
                'category_id': coco_ids[int(p[5])],  # contiguous idx -> COCO id
                'bbox': [round(x, 3) for x in b],
                'score': round(p[4], 5)
            })
    with open("predicts.json", 'w') as file:
        json.dump(coco_predict_list, file)
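
predicts.json above is written in the standard COCO detection-results format, so it can be scored directly with pycocotools (the annotation path is the one used above):

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

# ground truth and the detections written by write_coco_json()
coco_gt = COCO("/home/wangchao/public_dataset/coco/annotations/instances_val2017.json")
coco_dt = coco_gt.loadRes("predicts.json")

coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()  # prints AP/AR, including mAP@[.5:.95] and mAP@.5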
Example #2
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

# COCODataSets, RetinaNet, non_max_suppression and coco_map are project-local:
# the dataset wrapper, the model, NMS, and the mAP computation used below.

@torch.no_grad()  # inference only: no gradients needed
def valid_model():
    model = RetinaNet()
    # load the EMA weights from the last checkpoint
    weights = torch.load("weights/0_retinanet_free_anchor_last.pth")['ema']
    model.load_state_dict(weights)
    model.cuda().eval()
    predict_list = list()
    target_list = list()
    vdata = COCODataSets(img_root="/home/huffman/data/val2017",
                         annotation_path="/home/huffman/data/annotations/instances_val2017.json",
                         use_crowd=False,
                         augments=False,
                         debug=False,
                         remove_blank=False,
                         img_size=640
                         )
    vloader = DataLoader(dataset=vdata,
                         batch_size=4,
                         num_workers=4,
                         collate_fn=vdata.collate_fn,
                         shuffle=False
                         )
    pbar = tqdm(vloader)
    for img_tensor, targets_tensor, _ in pbar:
        _, _, h, w = img_tensor.shape
        # scale normalized target boxes back to input-pixel coordinates
        targets_tensor[:, 3:] = targets_tensor[:, 3:] * torch.tensor(data=[w, h, w, h])
        img_tensor = img_tensor.cuda()
        targets_tensor = targets_tensor.cuda()
        predicts = model(img_tensor)

        # clamp predicted boxes to the input size before NMS
        for i in range(len(predicts)):
            predicts[i][:, [0, 2]] = predicts[i][:, [0, 2]].clamp(min=0, max=w)
            predicts[i][:, [1, 3]] = predicts[i][:, [1, 3]].clamp(min=0, max=h)
        predicts = non_max_suppression(predicts,
                                       conf_thresh=0.05,
                                       iou_thresh=0.5,
                                       max_det=300)
        for i, predict in enumerate(predicts):
            predict_list.append(predict)
            # column 0 of targets_tensor is the image index within the batch
            targets_sample = targets_tensor[targets_tensor[:, 0] == i][:, 2:]
            target_list.append(targets_sample)
    mp, mr, map50, map = coco_map(predict_list, target_list)
    print("map50: {:6.4f} | map: {:6.4f}".format(map50, map))
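
From the two loops above, coco_map appears to take, per image, a prediction tensor shaped [n, 6] (x1, y1, x2, y2, score, class; None when NMS keeps nothing) and a target tensor shaped [m, 5]. A small sanity check under that assumption (check_eval_inputs is a hypothetical helper, not part of the repo):

def check_eval_inputs(predict_list, target_list):
    # one prediction entry and one target entry per validation image
    assert len(predict_list) == len(target_list)
    for pred, tgt in zip(predict_list, target_list):
        if pred is not None:
            assert pred.shape[1] == 6  # x1, y1, x2, y2, score, class
        assert tgt.shape[1] == 5       # label column plus an xyxy box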
Example #3
    def val(self, epoch):
        predict_list = list()
        target_list = list()
        self.model.eval()
        # only rank 0 shows a progress bar in distributed training
        if self.local_rank == 0:
            pbar = tqdm(self.vloader)
        else:
            pbar = self.vloader
        for img_tensor, targets_tensor, _ in pbar:
            _, _, h, w = img_tensor.shape
            targets_tensor[:, 3:] = targets_tensor[:, 3:] * torch.tensor(
                data=[w, h, w, h])
            img_tensor = img_tensor.to(self.device)
            targets_tensor = targets_tensor.to(self.device)
            predicts = self.model(img_tensor)

            for i in range(len(predicts)):
                predicts[i][:, [0, 2]] = predicts[i][:, [0, 2]].clamp(min=0, max=w)
                predicts[i][:, [1, 3]] = predicts[i][:, [1, 3]].clamp(min=0, max=h)
            predicts = non_max_suppression(
                predicts,
                conf_thresh=self.val_cfg['conf_thresh'],
                iou_thresh=self.val_cfg['iou_thresh'],
                max_det=self.val_cfg['max_det'],
            )
            for i, predict in enumerate(predicts):
                predict_list.append(predict)
                targets_sample = targets_tensor[targets_tensor[:, 0] == i][:, 2:]
                target_list.append(targets_sample)
        mp, mr, map50, map = coco_map(predict_list, target_list)
        print(
            "epoch: {:2d}|local:{:d}|mp:{:6.4f}|mr:{:6.4f}|map50:{:6.4f}|map:{:6.4f}"
            .format(epoch + 1, self.local_rank, mp * 100, mr * 100,
                    map50 * 100, map * 100))
        last_weight_path = os.path.join(
            self.val_cfg['weight_path'],
            "{:d}_{:s}_last.pth".format(self.local_rank,
                                        self.cfg['model_name']))
        best_map_weight_path = os.path.join(
            self.val_cfg['weight_path'],
            "{:d}_{:s}_best_map.pth".format(self.local_rank,
                                            self.cfg['model_name']))
        best_map50_weight_path = os.path.join(
            self.val_cfg['weight_path'],
            "{:d}_{:s}_best_map50.pth".format(self.local_rank,
                                              self.cfg['model_name']))
        # unwrap DataParallel/DistributedDataParallel before saving weights
        model_state = (self.model.module.state_dict()
                       if is_parallel(self.model) else self.model.state_dict())

        ema_state = self.ema.ema.state_dict()
        ckpt = {
            "ori": model_state,
            "ema": ema_state,
            "map": map * 100,
            "epoch": epoch,
            "map50": map50 * 100
        }
        # only rank 0 writes checkpoints
        if self.local_rank != 0:
            return
        torch.save(ckpt, last_weight_path)
        if map > self.best_map:
            torch.save(ckpt, best_map_weight_path)
            self.best_map = map
        if map50 > self.best_map50:
            torch.save(ckpt, best_map50_weight_path)
            self.best_map50 = map50
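
is_parallel above is a small project helper; a minimal sketch of a typical implementation (this exact body is an assumption, not taken from the repo):

import torch.nn as nn

def is_parallel(model):
    # True when the model is wrapped by (Distributed)DataParallel,
    # in which case the raw module lives at model.module
    return type(model) in (nn.parallel.DataParallel,
                           nn.parallel.DistributedDataParallel)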