Example #1
def evaluate(model, path, iou_thres, conf_thres, nms_thres, img_size, batch_size):
    model.eval()

    # Get dataloader
    dataset = ListDataset(path, img_size=img_size, augment=False, multiscale=False)
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=False, num_workers=1, collate_fn=dataset.collate_fn
    )

    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor

    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    for batch_i, (_, imgs, targets) in enumerate(tqdm.tqdm(dataloader, desc="Detecting objects")):

        # Extract labels
        labels += targets[:, 1].tolist()
        # Rescale target
        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= img_size

        imgs = Variable(imgs.type(Tensor), requires_grad=False)

        with torch.no_grad():
            outputs = model(imgs)
            outputs = non_max_suppression(outputs, conf_thres=conf_thres, nms_thres=nms_thres)

        sample_metrics += get_batch_statistics(outputs, targets, iou_threshold=iou_thres)

    # Concatenate sample statistics
    true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))]
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)

    return precision, recall, AP, f1, ap_class
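
All of these variants lean on the same helper set (ListDataset, xywh2xyxy, non_max_suppression, get_batch_statistics, ap_per_class). Below is a minimal sketch of the imports they assume, plus the xywh2xyxy conversion reconstructed from how it is called above; the exact module providing the helpers is an assumption (the names match PyTorch-YOLOv3-style utils).

# Sketch of the assumed imports; the module paths for the helper
# functions are an assumption and depend on the surrounding repository.
import numpy as np
import torch
import tqdm
from torch.autograd import Variable

def xywh2xyxy(x):
    # Convert center-format (cx, cy, w, h) boxes to corner format
    # (x1, y1, x2, y2); body reconstructed from how it is used above.
    y = x.new(x.shape)
    y[..., 0] = x[..., 0] - x[..., 2] / 2
    y[..., 1] = x[..., 1] - x[..., 3] / 2
    y[..., 2] = x[..., 0] + x[..., 2] / 2
    y[..., 3] = x[..., 1] + x[..., 3] / 2
    return y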
Example #2
def evaluate(model, path, iou_thres, conf_thres, nms_thres, img_size,
             batch_size):
    model.eval()

    # Get dataloader
    dataset = ListDataset(path,
                          img_size=img_size,
                          augment=False,
                          multiscale=False)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=0,
                                             collate_fn=dataset.collate_fn)

    Tensor = (torch.cuda.FloatTensor
              if torch.cuda.is_available() else torch.FloatTensor)

    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    for batch_i, (_, _, imgs, targets) in enumerate(
            tqdm.tqdm(dataloader, desc="Detecting objects")):
        # Skip batches that contain no annotations
        if targets is None:
            continue
        # Extract labels
        labels += targets[:, 1].tolist()
        # Rescale targets to x1y1x2y2; YOLO outputs xywh, which is converted later in the IoU routine
        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= img_size

        imgs = Variable(imgs.type(Tensor), requires_grad=False)

        with torch.no_grad():
            outputs = model(imgs)
            outputs = non_max_suppression(outputs,
                                          conf_thres=conf_thres,
                                          nms_thres=nms_thres)

        sample_metrics += get_batch_statistics(outputs,
                                               targets,
                                               iou_threshold=iou_thres)

    # Concatenate sample statistics
    try:
        true_positives, pred_scores, pred_labels = [
            np.concatenate(x, 0) for x in list(zip(*sample_metrics))
        ]
        precision, recall, AP, f1, ap_class = ap_per_class(
            true_positives, pred_scores, pred_labels, labels)
    except ValueError as error:
        print('-----------------------------------------------')
        print(error)
        print('Model failed to detect any boxes in validation above threshold')
        print('Zeros passed for all metrics')
        print('-----------------------------------------------')
        precision, recall, f1 = (None, None, None)
        AP = np.array([0] * len(np.unique(labels)))
        ap_class = np.unique(labels).astype("int32")

    return precision, recall, AP, f1, ap_class
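
Because the except branch above returns None for precision, recall, and f1 while AP is a zero array, a caller has to guard before averaging. A hedged usage sketch; the path and threshold values are placeholders:

# Hypothetical caller for this variant; all values are placeholders.
precision, recall, AP, f1, ap_class = evaluate(
    model, path="data/valid.txt", iou_thres=0.5, conf_thres=0.5,
    nms_thres=0.5, img_size=416, batch_size=8)
if precision is not None:
    print("precision: %.4f, mAP: %.4f" % (precision.mean(), AP.mean()))
else:
    print("no detections above threshold; mAP: %.4f" % AP.mean())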
Example #3
def val(epoch,
        args,
        model,
        val_dataloader,
        iou_thresh,
        conf_thresh,
        nms_thresh,
        img_size,
        batch_size=8):
    global best_mAP
    print("Beginning validation on the dataset...")
    model.eval()
    Tensor = (torch.cuda.FloatTensor
              if torch.cuda.is_available() else torch.FloatTensor)
    labels = []
    sample_metrics = []

    for batch_i, (_, imgs, targets) in enumerate(
            tqdm(val_dataloader, desc="Detecting objects")):
        labels += targets[:, 1].tolist()
        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= img_size

        imgs = Variable(imgs.type(Tensor), requires_grad=False)
        with torch.no_grad():
            outputs = model(imgs)
            outputs = non_max_suppression(outputs,
                                          conf_thres=conf_thresh,
                                          nms_thres=nms_thresh)

        sample_metrics += get_batch_statistics(outputs,
                                               targets,
                                               iou_threshold=iou_thresh)

    tp, pred_scores, pred_labels = [
        np.concatenate(x, 0) for x in list(zip(*sample_metrics))
    ]
    precision, recall, AP, f1, ap_class = ap_per_class(tp, pred_scores,
                                                       pred_labels, labels)
    val_precision = precision.mean()
    val_recall = recall.mean()
    val_f1 = f1.mean()
    val_mAP = AP.mean()
    print("precision: %.3f, recall: %.3f, f1: %.3f, mAP: %.3f" %
          (val_precision, val_recall, val_f1, val_mAP))
    if val_mAP > best_mAP:
        best_mAP = val_mAP
        save_name = os.path.join(args.save_dir,
                                 "best_model_%.6f.pth" % best_mAP)

        state_dict = model.state_dict()
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].cpu()

        torch.save({"model": state_dict, "epoch": epoch + 1}, save_name)
        print("model has been saved to %s" % save_name)

    return precision, recall, AP, f1, ap_class
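
Since the checkpoint above is saved as a dict with "model" and "epoch" keys, restoring it would look roughly like the sketch below; the file name is a placeholder following the best_model_%.6f.pth pattern:

# Sketch of reloading a checkpoint written by val(); path is a placeholder.
checkpoint = torch.load("checkpoints/best_model_0.712345.pth",
                        map_location="cpu")
model.load_state_dict(checkpoint["model"])
start_epoch = checkpoint["epoch"]  # epoch to resume training from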
Example #4
def evaluate(model, path, iou_thres, conf_thres, nms_thres, img_size,
             batch_size, device):
    model.eval()

    # Get dataloader
    dataset = ListDataset(path,
                          img_size=img_size,
                          augment=False,
                          multiscale=False)
    dataloader = tc.utils.data.DataLoader(dataset,
                                          batch_size=batch_size,
                                          shuffle=False,
                                          num_workers=1,
                                          collate_fn=dataset.collate_fn)

    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)

    # Create the progress bar before the try block so tq is always
    # bound when the finally clause calls tq.close().
    tq = tqdm.tqdm(dataloader, desc='Detecting objects', ncols=100)
    try:
        for _, imgs, targets in tq:
            imgs = imgs.to(device)
            # Extract labels
            labels += targets[:, 1].tolist()
            # Rescale target
            targets[:, 2:] = xywh2xyxy(targets[:, 2:])
            targets[:, 2:] *= img_size
            with tc.no_grad():
                outputs = model(imgs)
                outputs = non_max_suppression(outputs,
                                              conf_thres=conf_thres,
                                              nms_thres=nms_thres)
            sample_metrics += get_batch_statistics(outputs,
                                                   targets,
                                                   iou_threshold=iou_thres)
    finally:
        tq.close()

    # Concatenate sample statistics
    true_positives, pred_scores, pred_labels = [
        np.concatenate(x, 0) for x in list(zip(*sample_metrics))
    ]
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives,
                                                       pred_scores,
                                                       pred_labels, labels)

    return precision, recall, AP, f1, ap_class
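
This variant aliases torch as tc and takes an explicit device. A minimal call sketch under that assumption; the path and thresholds are placeholders:

# This variant assumes torch is imported under the alias tc.
import torch as tc

device = tc.device("cuda" if tc.cuda.is_available() else "cpu")
precision, recall, AP, f1, ap_class = evaluate(
    model, path="data/valid.txt", iou_thres=0.5, conf_thres=0.5,
    nms_thres=0.5, img_size=416, batch_size=8, device=device)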
Example #5
def evaluate(args,
             model,
             model_cfg,
             test_loader,
             iou_thres=0.5,
             conf_thres=0.5,
             nms_thres=0.5):
    img_size = int(model_cfg[0]['width'])
    model.eval()
    Tensor = (torch.cuda.FloatTensor
              if torch.cuda.is_available() else torch.FloatTensor)
    labels, sample_metrics = [], []
    for batch_i, (_, imgs, targets) in enumerate(
            tqdm.tqdm(test_loader, desc="Detecting objects")):
        labels += targets[:, 1].tolist()
        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= img_size
        imgs = Variable(imgs.type(Tensor), requires_grad=False)
        with torch.no_grad():
            outputs = model(imgs)
            outputs = non_max_suppression(outputs,
                                          conf_thres=conf_thres,
                                          nms_thres=nms_thres)
        sample_metrics += get_batch_statistics(outputs,
                                               targets,
                                               iou_threshold=iou_thres)

    true_positives, pred_scores, pred_labels = [
        np.concatenate(x, 0) for x in list(zip(*sample_metrics))
    ]
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives,
                                                       pred_scores,
                                                       pred_labels, labels)

    return precision, recall, AP, f1, ap_class
Example #6
def _evaluate(model, dataloader, class_names, img_size, iou_thres, conf_thres,
              nms_thres, verbose):
    """Evaluate model on validation dataset.

    :param model: Model to evaluate
    :type model: models.Darknet
    :param dataloader: Dataloader provides the batches of images with targets
    :type dataloader: DataLoader
    :param class_names: List of class names
    :type class_names: [str]
    :param img_size: Size of each image dimension for yolo
    :type img_size: int
    :param iou_thres: IOU threshold required to qualify as detected
    :type iou_thres: float
    :param conf_thres: Object confidence threshold
    :type conf_thres: float
    :param nms_thres: IOU threshold for non-maximum suppression
    :type nms_thres: float
    :param verbose: If True, prints stats of model
    :type verbose: bool
    :return: Returns precision, recall, AP, f1, ap_class
    """
    model.eval()  # Set model to evaluation mode

    Tensor = (torch.cuda.FloatTensor
              if torch.cuda.is_available() else torch.FloatTensor)

    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    for _, imgs, targets in tqdm.tqdm(dataloader, desc="Validating"):
        # Extract labels
        labels += targets[:, 1].tolist()
        # Rescale target
        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= img_size

        imgs = Variable(imgs.type(Tensor), requires_grad=False)

        with torch.no_grad():
            outputs = to_cpu(model(imgs))
            outputs = non_max_suppression(outputs,
                                          conf_thres=conf_thres,
                                          iou_thres=nms_thres)

        sample_metrics += get_batch_statistics(outputs,
                                               targets,
                                               iou_threshold=iou_thres)

    if len(sample_metrics) == 0:  # No detections over whole validation set.
        print("---- No detections over whole validation set ----")
        return None

    # Concatenate sample statistics
    true_positives, pred_scores, pred_labels = [
        np.concatenate(x, 0) for x in list(zip(*sample_metrics))
    ]
    metrics_output = ap_per_class(true_positives, pred_scores, pred_labels,
                                  labels)

    print_eval_stats(metrics_output, class_names, verbose)

    return metrics_output
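
Given that ap_per_class returns (precision, recall, AP, f1, ap_class) in the other examples, consuming this variant's output looks roughly like the sketch below; note that _evaluate can return None, and class_names is assumed to be available:

# Sketch of consuming _evaluate's output; the thresholds are placeholders.
metrics_output = _evaluate(model, dataloader, class_names, img_size=416,
                           iou_thres=0.5, conf_thres=0.5, nms_thres=0.5,
                           verbose=True)
if metrics_output is not None:
    precision, recall, AP, f1, ap_class = metrics_output
    print("mAP: %.4f" % AP.mean())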
Example #7
def evaluate(model,
             dataset_des,
             thres,
             batch_size,
             init_dim_cluster,
             diagnosis_code=0):
    model.eval()
    iou_thres, conf_thres, nms_thres = thres

    # Get dataloader
    print("Begin loading validation dataset ...")
    t_load_data = time.time()
    dataset = torchvision.datasets.VOCDetection(root='data/VOC/',
                                                year=dataset_des[0],
                                                image_set=dataset_des[1],
                                                transforms=None,
                                                download=True)
    dataset_dict = trans_voc(dataset)
    dataset = ListDataset(dataset_dict)
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        pin_memory=True,
        collate_fn=dataset.collate_fn,
    )
    print("Finished loading validation dataset in {:.2f} s".format(
        time.time() - t_load_data))

    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    for i_batch, (img_ind, img, raw_targets, transform_params,
                  tar_boxes) in enumerate(loader):
        print("\n++++++++++ i_batch (val) {} ++++++++++".format(i_batch))
        batch_step_counter = 0

        # Extract labels: raw_targets -- [t_conf, t_cls, x, y, w, h, one_hot_t_cls(20)]
        labels += tar_boxes[:, 1].tolist()

        if len(img) != batch_size:
            print("Current batch size is smaller than opt.batch_size!")
            continue

        img = img.to('cuda')
        raw_targets = raw_targets.to('cuda')
        tar_boxes = tar_boxes.to('cuda')

        input_img = img

        with torch.no_grad():
            pred_conf_cls = model(input_img)
            pred_conf_cls = pred_conf_cls.permute(0, 2, 3, 1)
            pred_conf = torch.sigmoid(pred_conf_cls[:, :, :, 0])
            pred_cls = torch.sigmoid(pred_conf_cls[:, :, :, 1:])
            obj_mask = pred_conf > conf_thres
            obj_mask = obj_mask.byte().to('cuda')

        # NOTE: pred_bbox is only assigned for diagnosis_code 1 and 3;
        # with 0 or 2 the torch.cat below would raise a NameError.
        if diagnosis_code == 0:
            pass
        elif diagnosis_code == 1:
            # localization ground-truth
            pred_bbox = raw_targets[:, :, :, 2:6]
        elif diagnosis_code == 2:
            # classification ground-truth
            pass
        elif diagnosis_code == 3:
            # full ground-truth
            pred_bbox = raw_targets[:, :, :, 2:6]
            pred_conf = raw_targets[:, :, :, 0]
            pred_cls = raw_targets[:, :, :, 6:]

        # if i_batch < 20:
        #     im = Image.open("data/VOC/VOCdevkit/VOC2007/JPEGImages/{}".format(img_ind[0])).convert('RGB')
        #     plot_detection_result(im, img_ind, pred_bbox, pred_conf, pred_cls, transform_params, iou=0.9)

        pred_outputs = torch.cat((pred_bbox, pred_conf.unsqueeze(3), pred_cls),
                                 dim=3)
        b, w, h, d = pred_outputs.shape
        pred_outputs = pred_outputs.view(b, w * h, d)

        outputs = non_max_suppression(pred_outputs,
                                      conf_thres=conf_thres,
                                      nms_thres=nms_thres)
        sample_metrics += get_batch_statistics(outputs,
                                               tar_boxes,
                                               iou_threshold=iou_thres)

    # Concatenate sample statistics
    true_positives, pred_scores, pred_labels = [
        np.concatenate(x, 0) for x in list(zip(*sample_metrics))
    ]
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives,
                                                       pred_scores,
                                                       pred_labels, labels)

    return precision, recall, AP, f1, ap_class
Example #8
def evaluate(
    model,
    path: str,
    transform,
    iou_thres: float = 0.5,
    conf_thres: float = 0.5,
    nms_thres: float = 0.5,
    img_size: int = 416,
    batch_size: int = 8,
):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.eval()

    dataset = ListDataset(list_path=path,
                          transform=transform,
                          img_size=img_size,
                          num_samples=None)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=4,
        collate_fn=dataset.collate_fn,
    )

    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    for batch_i, (_, imgs, targets) in enumerate(
            tqdm.tqdm(dataloader, desc="Detecting objects")):
        labels += targets[:, 1].tolist()

        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= img_size

        imgs = imgs.requires_grad_(False).to(device)

        with torch.no_grad():
            outputs = model(imgs)
            outputs = non_max_suppression(prediction=outputs,
                                          conf_thres=conf_thres,
                                          nms_thres=nms_thres)

        sample_metrics += get_batch_statistics(outputs=outputs,
                                               targets=targets,
                                               iou_threshold=iou_thres)

    # Concatenate sample statistics
    if sample_metrics:
        true_positives, pred_scores, pred_labels = [
            np.concatenate(x, 0) for x in list(zip(*sample_metrics))
        ]
    else:
        # No detections at all: fall back to single zero entries so
        # ap_per_class still receives well-formed arrays.
        true_positives, pred_scores, pred_labels = [
            np.zeros(1), np.zeros(1), np.zeros(1)
        ]

    precision, recall, AP, f1, ap_class = ap_per_class(true_positives,
                                                       pred_scores,
                                                       pred_labels, labels)

    return precision, recall, AP, f1, ap_class
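
This last variant additionally expects a transform pipeline. A hedged call sketch; DEFAULT_TRANSFORMS is the name used by PyTorch-YOLOv3-style repos, but its import path and availability are assumptions, as are the paths and thresholds:

# Hypothetical call; transform, path, and thresholds are placeholders.
precision, recall, AP, f1, ap_class = evaluate(
    model,
    path="data/valid.txt",
    transform=DEFAULT_TRANSFORMS,  # assumed transform pipeline
    iou_thres=0.5,
    conf_thres=0.5,
    nms_thres=0.5,
    img_size=416,
    batch_size=8,
)
print("mAP: %.4f" % AP.mean())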