Example #1
def evaluate(cfg,
             data,
             weights=None,
             batch_size=16,
             workers=4,
             image_size=416,
             confidence_threshold=0.001,
             iou_threshold=0.6,  # for nms
             save_json=True,
             single_cls=False,
             augment=False,
             model=None,
             dataloader=None):
    # Initialize/load model and set device
    if model is None:
        device = select_device(args.device, batch_size=batch_size)
        verbose = args.task == "eval"

        # Initialize model
        model = Darknet(cfg, image_size).to(device)

        # Load weights
        if weights.endswith(".pth"):
            model.load_state_dict(torch.load(weights, map_location=device)["state_dict"])
        else:
            load_darknet_weights(model, weights)

        if device.type != "cpu" and torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)
    else:
        device = next(model.parameters()).device  # get model device
        verbose = False

    # Configure run
    data = parse_data_config(data)
    classes_num = 1 if single_cls else int(data["classes"])
    path = data["valid"]  # path to valid images
    names = load_classes(data["names"])  # class names
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    iouv = iouv[0].view(1)  # comment out this line for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if dataloader is None:
        dataset = LoadImagesAndLabels(path, image_size, batch_size, rect=True)
        batch_size = min(batch_size, len(dataset))
        dataloader = DataLoader(dataset,
                                batch_size=batch_size,
                                num_workers=workers,
                                pin_memory=True,
                                collate_fn=dataset.collate_fn)

    seen = 0
    model.eval()
    coco91class = coco80_to_coco91_class()
    s = ("%20s" + "%10s" * 6) % ("Class", "Images", "Targets", "P", "R", "[email protected]", "F1")
    p, r, f1, mp, mr, map, mf1, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3)
    json_dict, stats, ap, ap_class = [], [], [], []
    for batch_i, (images, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
        images = images.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        batch_size, _, height, width = images.shape  # batch size, channels, height, width
        whwh = torch.Tensor([width, height, width, height]).to(device)

        # Disable gradients
        with torch.no_grad():
            # Test-time augmentation: evaluate flipped and scaled copies of the batch
            if augment:
                fs_image = scale_image(images.flip(3), 0.9)  # flip-lr and scale
                s_image = scale_image(images, 0.7)  # scale
                images = torch.cat((images, fs_image, s_image), 0)

            # Run model
            start_time = time_synchronized()
            inference_outputs, training_outputs = model(images)
            t0 += time_synchronized() - start_time

            if augment:
                x = torch.split(inference_outputs, batch_size, dim=0)
                x[1][..., :4] /= 0.9  # scale
                x[1][..., 0] = width - x[1][..., 0]  # flip lr
                x[2][..., :4] /= 0.7  # scale
                inference_outputs = torch.cat(x, 1)

            # Compute loss
            if hasattr(model, "hyp"):  # if model has loss hyperparameters
                # GIoU, obj, cls
                loss += compute_loss(training_outputs, targets, model)[1][:3].cpu()

            # Run NMS
            start_time = time_synchronized()
            output = non_max_suppression(inference_outputs,
                                         confidence_threshold=confidence_threshold,
                                         iou_threshold=iou_threshold)
            t1 += time_synchronized() - start_time

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            label_num = len(labels)
            target_class = labels[:, 0].tolist() if label_num else []
            seen += 1

            if pred is None:
                if label_num:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool),
                                  torch.Tensor(),
                                  torch.Tensor(),
                                  target_class))
                continue

            # Clip boxes to image bounds
            clip_coords(pred, (height, width))

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(Path(paths[si]).stem.split("_")[-1])
                box = pred[:, :4].clone()  # xyxy
                # to original shape
                scale_coords(images[si].shape[1:], box, shapes[si][0], shapes[si][1])
                box = xyxy2xywh(box)  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    json_dict.append({"image_id": image_id,
                                      "category_id": coco91class[int(p[5])],
                                      "bbox": [round(x, 3) for x in b],
                                      "score": round(p[4], 5)})

            # Assign all predictions as incorrect
            correct = torch.zeros(len(pred), niou, dtype=torch.bool, device=device)
            if label_num:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                target_boxes = xywh2xyxy(labels[:, 1:5]) * whwh

                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero().view(-1)  # target indices
                    pi = (cls == pred[:, 5]).nonzero().view(-1)  # prediction indices

                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        # best ious, indices
                        ious, i = box_iou(pred[pi, :4], target_boxes[ti]).max(1)

                        # Append detections
                        for j in (ious > iouv[0]).nonzero():
                            d = ti[i[j]]  # detected target
                            if d not in detected:
                                detected.append(d)
                                correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                # all targets already located in image
                                if len(detected) == label_num:
                                    break

            # Append statistics (correct, conf, pcls, tcls)
            stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), target_class))

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats):
        p, r, ap, f1, ap_class = ap_per_class(*stats)
        if niou > 1:
            p, r, ap, f1 = p[:, 0], r[:, 0], ap.mean(1), ap[:, 0]  # [P, R, mAP@0.5:0.95, mAP@0.5]
        mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()
        # number of targets per class
        nt = np.bincount(stats[3].astype(np.int64), minlength=classes_num)
    else:
        nt = torch.zeros(1)

    # Print results
    context = "%20s" + "%10.3g" * 6  # print format
    print(context % ("all", seen, nt.sum(), mp, mr, map, mf1))

    # Print results per class
    if verbose and classes_num > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(context % (names[c], seen, nt[c], p[i], r[i], ap[i], f1[i]))

    # Print speeds
    if verbose:
        memory = torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0
        print(f"Inference memory: {memory:.1f} GB.")
        print(f"Speed:\n"
              f"Image size: ({image_size}x{image_size}) at batch_size: {batch_size}\n"
              f"\t- Inference {t0 / seen * 1E3:.1f}ms.\n"
              f"\t- NMS       {t1 / seen * 1E3:.1f}ms.\n"
              f"\t- Total     {(t0 + t1) / seen * 1E3:.1f}ms.\n")

    # Save JSON
    if save_json and map and len(json_dict):
        print("\nCOCO mAP with pycocotools...")
        imgIds = [int(Path(x).stem.split("_")[-1]) for x in dataloader.dataset.image_files]
        with open("results.json", "w") as file:
            json.dump(json_dict, file)

        # initialize COCO ground truth api
        cocoGt = COCO(glob.glob("data/coco2014/annotations/instances_val*.json")[0])
        cocoDt = cocoGt.loadRes("results.json")  # initialize COCO pred api

        cocoEval = COCOeval(cocoGt, cocoDt, "bbox")
        cocoEval.params.imgIds = imgIds  # [:32]  # only evaluate these images
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        mf1, map = cocoEval.stats[:2]  # update to pycocotools results (mAP@0.5:0.95, mAP@0.5)

    # Return results
    maps = np.zeros(classes_num) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map, mf1, *(loss.cpu() / len(dataloader)).tolist()), maps
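
A minimal usage sketch for the function above, called standalone rather than from a training loop. The cfg/data/weight paths and the module-level args namespace are placeholders for illustration; in the repository they come from the script's argument parser.

import argparse

# Hypothetical stand-in for the script-level parser that evaluate() reads
# when it has to build the model itself (args.device, args.task).
args = argparse.Namespace(device="", task="eval")

if __name__ == "__main__":
    (mp, mr, map50, mf1, *losses), maps = evaluate("cfgs/yolov3.cfg",     # placeholder model definition
                                                   "data/coco2014.data",  # placeholder dataset file
                                                   weights="weights/yolov3.pth",
                                                   batch_size=16,
                                                   image_size=416,
                                                   save_json=False)
    print(f"P={mp:.3f} R={mr:.3f} mAP@0.5={map50:.3f} F1={mf1:.3f}")
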
Example #2
                        type=str,
                        default="",
                        help="Model file weight path. (default: ``)")
    parser.add_argument("--device",
                        default="",
                        help="device id (i.e. 0 or 0,1 or cpu)")
    parser.add_argument("--single-cls",
                        action="store_true",
                        help="train as single-class dataset")
    args = parser.parse_args()
    args.weights = "weights/checkpoint.pth" if args.resume else args.weights

    print(args)

    device = select_device(args.device,
                           apex=mixed_precision,
                           batch_size=args.batch_size)
    if device.type == "cpu":
        mixed_precision = False

    try:
        os.makedirs("weights")
    except OSError:
        pass

    tb_writer = None
    if not args.evolve:
        try:
            # Start Tensorboard with "tensorboard --logdir=runs"
            from torch.utils.tensorboard import SummaryWriter
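
The excerpt breaks off inside the try block. A self-contained sketch of the usual pattern follows; the bare SummaryWriter() call and the ImportError fallback are assumptions, not taken from the snippet.

    tb_writer = None
    if not args.evolve:
        try:
            # Start Tensorboard with "tensorboard --logdir=runs"
            from torch.utils.tensorboard import SummaryWriter
            tb_writer = SummaryWriter()  # assumed: log to the default runs/ directory
        except ImportError:
            pass  # tensorboard not installed; continue without logging
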
Example #3
def detect(save_image=False):
    # (320, 192) or (416, 256) or (608, 352) for (height, width)
    image_size = (608, 352) if ONNX_EXPORT else args.image_size
    output = args.output
    source = args.source
    weights = args.weights
    view_image = args.view_image
    save_txt = args.save_txt

    camera = False
    if source == "0" or source.startswith("http") or source.endswith(".txt"):
        camera = True

    # Initialize
    device = select_device(device="cpu" if ONNX_EXPORT else args.device)
    if os.path.exists(output):
        shutil.rmtree(output)  # delete output folder
    os.makedirs(output)  # make new output folder

    # Initialize model
    model = Darknet(args.cfg, image_size)

    # Load weights
    if weights.endswith(".pth"):
        model.load_state_dict(
            torch.load(weights, map_location=device)["model"])
    else:
        load_darknet_weights(model, weights)

    # Second-stage classifier
    classify = False
    if classify:
        # init model
        model_classifier = load_classifier(name="resnet101", classes=2)
        # load model
        model_classifier.load_state_dict(
            torch.load("weights/resnet101.pth", map_location=device)["model"])
        model_classifier.to(device)
        model_classifier.eval()
    else:
        model_classifier = None

    # Move the model to the selected device
    model.to(device)
    # Set the model to evaluation mode
    model.eval()

    # Export mode
    if ONNX_EXPORT:
        model.fuse()
        image = torch.zeros((1, 3) + image_size)  # (1, 3, 608, 352)
        # *.onnx filename
        filename = args.weights.replace(args.weights.split(".")[-1], "onnx")
        torch.onnx.export(model,
                          image,
                          filename,
                          verbose=False,
                          opset_version=11)

        # Validate exported model
        import onnx
        model = onnx.load(filename)  # Load the ONNX model
        onnx.checker.check_model(model)  # Check that the IR is well formed
        # Print a human readable representation of the graph
        print(onnx.helper.printable_graph(model.graph))
        return

    # Set Dataloader
    video_path, video_writer = None, None
    if camera:
        view_image = True
        cudnn.benchmark = True
        dataset = LoadStreams(source, image_size=image_size)
    else:
        save_image = True
        dataset = LoadImages(source, image_size=image_size)

    # Get names and colors
    names = load_classes(args.names)
    colors = [[random.randint(0, 255) for _ in range(3)]
              for _ in range(len(names))]

    # Run inference
    start_time = time.time()
    # run once
    _ = model(torch.zeros((1, 3, image_size, image_size),
                          device=device)) if device.type != "cpu" else None
    for image_path, image, im0s, video_capture in dataset:
        image = torch.from_numpy(image).to(device)
        image = image.float()  # uint8 to fp16/32
        image /= 255.0  # 0 - 255 to 0.0 - 1.0
        if image.ndimension() == 3:
            image = image.unsqueeze(0)

        # Inference
        t1 = time_synchronized()
        predict = model(image, augment=args.augment)[0]
        t2 = time_synchronized()

        # Apply NMS
        predict = non_max_suppression(predict,
                                      args.confidence_threshold,
                                      args.iou_threshold,
                                      multi_label=False,
                                      classes=args.classes,
                                      agnostic=args.agnostic_nms)

        # Apply Classifier
        if classify:
            predict = apply_classifier(predict, model_classifier, image, im0s)

        # Process detections
        for i, detect in enumerate(predict):  # detections per image
            if camera:  # batch_size >= 1
                p, context, im0 = image_path[i], f"{i:g}: ", im0s[i]
            else:
                p, context, im0 = image_path, "", im0s

            save_path = str(Path(output) / Path(p).name)
            context += f"{image.shape[2]}*{image.shape[3]} "  # get image size
            if detect is not None and len(detect):
                # Rescale boxes from img_size to im0 size
                detect[:, :4] = scale_coords(image.shape[2:], detect[:, :4],
                                             im0.shape).round()

                # Print results
                for classes in detect[:, -1].unique():
                    # detections per class
                    number = (detect[:, -1] == classes).sum()
                    context += f"{number} {names[int(classes)]}s, "

                # Write results
                for *xyxy, confidence, classes in detect:
                    if save_txt:  # Write to file
                        with open(save_path + ".txt", "a") as files:
                            files.write(("%e " * 6 + "\n") %
                                        (*xyxy, classes, confidence))

                    if save_image or view_image:  # Add bbox to image
                        label = f"{names[int(classes)]} {confidence * 100:.2f}%"
                        plot_one_box(xyxy,
                                     im0,
                                     label=label,
                                     color=colors[int(classes)])

            # Stream results
            if view_image:
                cv2.imshow("camera", im0)
                if cv2.waitKey(1) == ord("q"):  # q to quit
                    raise StopIteration

            # Print time (inference + NMS)
            print(f"{context}Done. {t2 - t1:.3f}s")

            # Save results (image with detections)
            if save_image:
                if dataset.mode == "images":
                    cv2.imwrite(save_path, im0)
                else:
                    if video_path != save_path:  # new video
                        video_path = save_path
                        if isinstance(video_writer, cv2.VideoWriter):
                            video_writer.release()  # release previous video writer

                        fps = video_capture.get(cv2.CAP_PROP_FPS)
                        w = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
                        h = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        video_writer = cv2.VideoWriter(
                            save_path, cv2.VideoWriter_fourcc(*args.fourcc),
                            fps, (w, h))
                    video_writer.write(im0)

    print(f"Done. ({time.time() - start_time:.3f}s)")