def do_visualization(dataloader, checkpoint_file, device):
    """Run the trained BoxHead over ``dataloader`` and save visualizations.

    Args:
        dataloader: iterable yielding dicts with 'images' (1,C,H,W) and 'index';
            batch size must be 1.
        checkpoint_file: path to the BoxHead checkpoint (.pth).
        device: torch.device to run inference on.
    """
    # Output directories for pre-/post-NMS visualizations.
    # NOTE(review): dir_prenms / dir_postnms are assumed to be module-level
    # globals defined elsewhere in this file — confirm.
    os.makedirs(dir_prenms, exist_ok=True)
    os.makedirs(dir_postnms, exist_ok=True)

    # =========================== Pretrained ===============================
    # Path where the given pretrained backbone/RPN model is saved.
    pretrained_path = '../pretrained/checkpoint680.pth'
    # BUGFIX: do not overwrite the `device` argument — honor the caller's choice.
    backbone, rpn = pretrained_models_680(pretrained_path)
    backbone = backbone.to(device)
    rpn = rpn.to(device)

    # ========================= Loading Model ==============================
    boxHead = BoxHead(Classes=3, P=7, device=device).to(device)
    # BUGFIX: map the checkpoint to CPU when CUDA is unavailable, consistent
    # with do_eval/compute_map; a bare torch.load of a GPU-saved checkpoint
    # fails on CPU-only machines.
    if torch.cuda.is_available():
        checkpoint = torch.load(checkpoint_file)
    else:
        checkpoint = torch.load(checkpoint_file,
                                map_location=torch.device('cpu'))
    print("[INFO] Weight loaded from checkpoint file: {}".format(checkpoint_file))
    boxHead.load_state_dict(checkpoint['model_state_dict'])
    boxHead.eval()  # set to eval mode

    # `_` instead of `iter`: avoid shadowing the builtin.
    for _, data in enumerate(tqdm(dataloader), 0):
        images = data['images'].to(device)
        index = data['index'][0]
        assert len(images) == 1  # visualization supports batch_size == 1 only
        visualize_img(images, index, backbone, rpn, boxHead)
# ===================== Example 2 =====================
def do_eval(dataloader,
            checkpoint_file,
            device,
            result_dir=None,
            keep_topK=200,
            keep_num_preNMS=50,
            keep_num_postNMS=5):
    """Run BoxHead inference over ``dataloader`` and post-process detections.

    Args:
        dataloader: iterable yielding dicts with keys 'images', 'labels',
            'masks', 'bbox' (lists of per-image tensors) and 'index'.
        checkpoint_file: path to the BoxHead checkpoint (.pth).
        device: torch.device used for inference.
        result_dir: if not None, this directory plus "PreNMS"/"PostNMS"
            subfolders are created.
        keep_topK: number of RPN proposals kept per image.
        keep_num_preNMS: detections kept before NMS.
        keep_num_postNMS: detections kept after NMS.
    """

    if result_dir is not None:
        os.makedirs(result_dir, exist_ok=True)
        # NOTE(review): these two are created in the current working directory,
        # not under result_dir — confirm that is intended.
        os.makedirs("PreNMS", exist_ok=True)
        os.makedirs("PostNMS", exist_ok=True)

    # =========================== Pretrained ===============================
    # Put the path were you save the given pretrained model
    pretrained_path = '../pretrained/checkpoint680.pth'
    backbone, rpn = pretrained_models_680(pretrained_path)
    backbone = backbone.to(device)
    rpn = rpn.to(device)
    # ========================= Loading Model ==============================
    boxHead = BoxHead(Classes=3, P=7, device=device).to(device)
    # Map GPU-saved weights onto CPU when CUDA is unavailable.
    if torch.cuda.is_available():
        checkpoint = torch.load(checkpoint_file)
    else:
        checkpoint = torch.load(checkpoint_file,
                                map_location=torch.device('cpu'))
    print("[INFO] Weight loaded from checkpoint file: {}".format(
        checkpoint_file))
    boxHead.load_state_dict(checkpoint['model_state_dict'])
    boxHead.eval()  # set to eval mode
    # ============================ Eval ================================
    for iter, data in enumerate(tqdm(dataloader), 0):
        img = data['images'].to(device)
        batch_size = img.shape[0]
        # NOTE(review): label_list/mask_list/bbox_list are unused in the code
        # visible here — presumably consumed by evaluation/saving code that
        # follows; confirm against the full file.
        label_list = [x.to(device) for x in data['labels']]
        mask_list = [x.to(device) for x in data['masks']]
        bbox_list = [x.to(device) for x in data['bbox']]
        # index_list = data['index']
        img_shape = (img.shape[2], img.shape[3])
        with torch.no_grad():
            # Backbone features -> RPN proposals (top keep_topK per image).
            backout = backbone(img)
            im_lis = ImageList(img, [(800, 1088)] * img.shape[0])
            rpnout = rpn(im_lis, backout)
            proposals = [proposal[0:keep_topK, :] for proposal in rpnout[0]]
            fpn_feat_list = list(backout.values())
            # ROI-align proposals against the FPN pyramid, then classify/regress.
            feature_vectors = boxHead.MultiScaleRoiAlign(
                fpn_feat_list, proposals)
            class_logits, box_pred = boxHead(feature_vectors)
            class_logits = torch.softmax(class_logits, dim=1)
            proposal_torch = torch.cat(proposals, dim=0)  # x1 y1 x2 y2
            # Convert proposals from corner (x1,y1,x2,y2) to center
            # (xc,yc,w,h) format, as expected by decode_output.
            proposal_xywh = torch.zeros_like(proposal_torch,
                                             device=proposal_torch.device)
            proposal_xywh[:,
                          0] = ((proposal_torch[:, 0] + proposal_torch[:, 2]) /
                                2)
            proposal_xywh[:,
                          1] = ((proposal_torch[:, 1] + proposal_torch[:, 3]) /
                                2)
            proposal_xywh[:, 2] = torch.abs(proposal_torch[:, 2] -
                                            proposal_torch[:, 0])
            proposal_xywh[:, 3] = torch.abs(proposal_torch[:, 3] -
                                            proposal_torch[:, 1])
            # Reduce per-class outputs to a single (prob, class, box) per ROI,
            # decode regression deltas, then threshold + NMS.
            result_prob, result_class, result_box = simplifyOutputs(
                class_logits, box_pred)
            box_decoded = decode_output(proposal_xywh, result_box)
            post_nms_prob, post_nms_class, post_nms_box = boxHead.postprocess_detections(
                result_prob,
                result_class,
                box_decoded,
                IOU_thresh=0.5,
                conf_thresh=0.5,
                keep_num_preNMS=keep_num_preNMS,
                keep_num_postNMS=keep_num_postNMS)
# ===================== Example 3 =====================
# Project-local imports (BoxHead model, visualization utils, pretrained loader).
from BoxHead import BoxHead
from utils import visual_bbox_mask, cv2
from pretrained_models import pretrained_models_680

# env variables
# Detect Google Colab so data paths can be redirected to the mounted Drive.
IN_COLAB = 'google' in sys.modules
COLAB_ROOT = "/content/drive/My Drive/CIS680_2019/Mask-RCNN"

if __name__ == "__main__":
    # Prefer GPU when available; free any cached CUDA memory up front.
    net_device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    torch.cuda.empty_cache()

    # Path where the given pretrained backbone/RPN model is saved.
    pretrained_path = 'model/checkpoint680.pth'
    backbone, rpn = pretrained_models_680(pretrained_path, device=net_device)

    # Dataset file paths and output directories.
    imgs_path = 'data/hw3_mycocodata_img_comp_zlib.h5'
    masks_path = 'data/hw3_mycocodata_mask_comp_zlib.h5'
    labels_path = 'data/hw3_mycocodata_labels_comp_zlib.npy'
    bboxes_path = 'data/hw3_mycocodata_bboxes_comp_zlib.npy'
    checkpoints_path = 'checkpoint_save/'
    images_path = 'out_images/'
    mAP_path = "mAP/"
    figures_path = 'figures/'
    # On Colab, rebase the data paths onto the mounted Drive folder.
    if IN_COLAB:
        imgs_path = os.path.join(COLAB_ROOT, imgs_path)
        masks_path = os.path.join(COLAB_ROOT, masks_path)
        labels_path = os.path.join(COLAB_ROOT, labels_path)
        bboxes_path = os.path.join(COLAB_ROOT, bboxes_path)
# ===================== Example 4 =====================
def compute_map(dataloader, checkpoint_file, device):
    """Compute per-class Average Precision (AP) of the BoxHead detector.

    Args:
        dataloader: iterable yielding dicts with keys 'images', 'labels',
            'bbox', 'index'; batch size must be 1.
        checkpoint_file: path to the BoxHead checkpoint (.pth).
        device: torch.device used for inference.
    """
    # =========================== Pretrained ===============================
    # Path where the given pretrained backbone/RPN model is saved.
    pretrained_path = '../pretrained/checkpoint680.pth'
    # BUGFIX: do not overwrite the `device` argument — honor the caller's choice.
    backbone, rpn = pretrained_models_680(pretrained_path)
    backbone = backbone.to(device)
    rpn = rpn.to(device)

    # ========================== Prep ============================
    # One tracker per foreground class (class ids 1..3).
    metric_trackers = [MetricTracker(i) for i in range(3)]

    # ========================= Loading Model ==============================
    boxHead = BoxHead(Classes=3, P=7, device=device).to(device)
    # Map GPU-saved weights onto CPU when CUDA is unavailable.
    if torch.cuda.is_available():
        checkpoint = torch.load(checkpoint_file)
    else:
        checkpoint = torch.load(checkpoint_file,
                                map_location=torch.device('cpu'))

    print("[INFO] Weight loaded from checkpoint file: {}".format(
        checkpoint_file))
    boxHead.load_state_dict(checkpoint['model_state_dict'])
    boxHead.eval()  # set to eval mode

    # `_` instead of `iter`: avoid shadowing the builtin.
    for _, data in enumerate(tqdm(dataloader), 0):
        images = data['images'].to(device)
        assert len(images) == 1, "Only support batch_size == 1"

        labels_gt_all = data['labels'][0].to(device)
        bbox_gt_all = data["bbox"][0].to(device)
        index = data['index'][0]

        prob, clas, boxes = run_inference(images, index, backbone, rpn,
                                          boxHead)

        for tracker_i in range(3):
            # Focus on one class (foreground class ids start at 1).
            target_class = tracker_i + 1

            labels_gt = labels_gt_all[labels_gt_all == target_class]
            bbox_gt = bbox_gt_all[labels_gt_all ==
                                  target_class]  #n,4   x,y,w,h

            clas_pred = clas[clas == target_class]  #   m
            prob_pred = prob[clas == target_class]  #   m
            boxes_pred = boxes[clas == target_class]  #  m,4  x1,y1,x2,y2

            # Convert predictions from corner (x1,y1,x2,y2) to center
            # (xc,yc,w,h) format so they match the ground-truth boxes for IOU.
            boxes_pred_xywh = torch.zeros_like(boxes_pred,
                                               dtype=boxes_pred.dtype,
                                               device=boxes_pred.device)
            boxes_pred_xywh[:, 0] = (boxes_pred[:, 0] + boxes_pred[:, 2]) / 2
            boxes_pred_xywh[:, 1] = (boxes_pred[:, 1] + boxes_pred[:, 3]) / 2
            boxes_pred_xywh[:, 2] = boxes_pred[:, 2] - boxes_pred[:, 0]
            boxes_pred_xywh[:, 3] = boxes_pred[:, 3] - boxes_pred[:, 1]

            # Mark each prediction as a true positive if it overlaps some
            # ground-truth box with IoU >= 0.5, and remember which one.
            N_gt = len(bbox_gt)  #n
            N_pred = len(clas_pred)  #m

            tp_indicator = torch.zeros((N_pred, ))  #m
            match_indice = torch.zeros((N_pred, ))  #m
            if (N_pred != 0) and (N_gt != 0):
                # IOU matrix
                iou_mat = torch.zeros((N_pred, N_gt))  # m,n
                for x in range(N_pred):
                    for y in range(N_gt):
                        iou_mat[x, y] = IOU(
                            torch.unsqueeze(boxes_pred_xywh[x, :], 0),
                            torch.unsqueeze(bbox_gt[y, :], 0))
                    if torch.max(iou_mat[x, :]) >= 0.5:
                        tp_indicator[x] = 1
                        # BUGFIX: dedicated name; the original reused `index`
                        # and clobbered the image-index variable above.
                        best_gt = torch.argmax(iou_mat[x, :])
                        match_indice[x] = best_gt

            metric_trackers[tracker_i].add_match(prob_pred, tp_indicator,
                                                 match_indice, N_gt)

    # =========================== Compute mAP ==============================
    for i in range(3):
        metric_trackers[i].compute_precision_recall()
        # NOTE(review): recall/precision are unused here — presumably left
        # over from PR-curve plotting; confirm before removing the call.
        recall, precision = metric_trackers[i].sorted_pr_curve()

        ap = metric_trackers[i].compute_ap()
        print("class_id: {}. ap: {}".format(i, ap))
        metric_trackers[i].reset()
# ===================== Example 5 =====================
    # reproducibility: fix all RNG seeds and force deterministic cuDNN kernels
    keep_topK_check = 20
    torch.random.manual_seed(1)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(0)
    batch_size = 1

    # dataset file paths (HDF5 images/masks, numpy labels/bboxes)
    imgs_path = '../data/hw3_mycocodata_img_comp_zlib.h5'
    masks_path = '../data/hw3_mycocodata_mask_comp_zlib.h5'
    labels_path = "../data/hw3_mycocodata_labels_comp_zlib.npy"
    bboxes_path = "../data/hw3_mycocodata_bboxes_comp_zlib.npy"

    # pretrained backbone/RPN checkpoint; prefer GPU 0 when available
    pretrained_path = '../pretrained/checkpoint680.pth'
    device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
    backbone, rpn = pretrained_models_680(pretrained_path)

    # we will need the ImageList from torchvision
    from torchvision.models.detection.image_list import ImageList

    paths = [imgs_path, masks_path, labels_path, bboxes_path]
    # load the data into data.Dataset
    dataset = BuildDataset(paths, augmentation=False)

    train_dataset, test_dataset = split_dataset(dataset)

    # shuffle=False keeps the iteration order reproducible for this check
    train_build_loader = BuildDataLoader(train_dataset, batch_size=batch_size, shuffle=False, num_workers=0)
    train_loader = train_build_loader.loader()

    # ============================ Train ================================
    box_head = BoxHead(device=device)