Example 1
def evaluate(model, dataset_name, list_path, iou_thres, conf_thres, nms_thres,
             img_size, batch_size):
    """ Evaluate with the given model on the dataset specified by the given dataset_path to list of samples """
    model.eval()

    # Get dataloader
    dataset = ListDataset(dataset_name,
                          list_path,
                          img_size=img_size,
                          augment=False,
                          multiscale=False)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=1,
                                             collate_fn=dataset.collate_fn)

    Tensor = (torch.cuda.FloatTensor
              if torch.cuda.is_available() else torch.FloatTensor)

    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    for batch_i, (img_paths, imgs, targets) in enumerate(
            tqdm.tqdm(dataloader, desc="Validation round")):
        imgs = Variable(imgs.type(Tensor), requires_grad=False)
        labels += targets[:, 1].tolist()  # extract labels
        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= img_size  # rescale target

        # model forward
        with torch.no_grad():
            outputs = model(imgs)
            outputs = non_max_suppression(outputs,
                                          conf_thres=conf_thres,
                                          nms_thres=nms_thres)

        # evaluation stats
        sample_metrics += get_batch_statistics(outputs,
                                               targets,
                                               iou_threshold=iou_thres)

    # concatenate sample statistics
    true_positives, pred_scores, pred_labels = [
        np.concatenate(x, 0) for x in list(zip(*sample_metrics))
    ]
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives,
                                                       pred_scores,
                                                       pred_labels, labels)

    return precision, recall, AP, f1, ap_class
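
A hedged usage sketch for the function above; the config path, weights file, list file, and threshold values are placeholder assumptions, not values from the original code:

# Placeholder model setup and thresholds; adjust to your own configuration.
model = Darknet('config/yolov3.cfg').to(device)
model.load_darknet_weights('weights/yolov3.weights')
precision, recall, AP, f1, ap_class = evaluate(model,
                                               dataset_name='custom',
                                               list_path='data/custom/valid.txt',
                                               iou_thres=0.5,
                                               conf_thres=0.5,
                                               nms_thres=0.5,
                                               img_size=416,
                                               batch_size=8)
print(f'mAP: {AP.mean():.4f}')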
Example 2
def evaluate(model, path, iou_thres, conf_thres, nms_thres, img_size,
             batch_size):
    model.eval()

    # Get dataloader
    dataset = ListDataset(label_file=path,
                          img_size=img_size,
                          augment=False,
                          multiscale=False,
                          normalized_labels=False)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=1,
                                             collate_fn=dataset.collate_fn)

    Tensor = (torch.cuda.FloatTensor
              if torch.cuda.is_available() else torch.FloatTensor)

    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    for batch_i, (_, imgs, targets) in enumerate(
            tqdm.tqdm(dataloader, desc="Detecting objects")):

        # Extract labels
        labels += targets[:, 1].tolist()
        # Rescale target
        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= img_size

        imgs = Variable(imgs.type(Tensor), requires_grad=False)

        with torch.no_grad():
            outputs = model(imgs)
            outputs = non_max_suppression(outputs,
                                          conf_thres=conf_thres,
                                          nms_thres=nms_thres)

        sample_metrics += get_batch_statistics(outputs,
                                               targets,
                                               iou_threshold=iou_thres)

    # Concatenate sample statistics
    true_positives, pred_scores, pred_labels = [
        np.concatenate(x, 0) for x in list(zip(*sample_metrics))
    ]
    precision, recall, AP, f1, ap_class = ap_per_class(true_positives,
                                                       pred_scores,
                                                       pred_labels, labels)

    return precision, recall, AP, f1, ap_class
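
Both evaluation loops above convert targets with xywh2xyxy before rescaling. In the PyTorch-YOLOv3 codebases these examples come from, that helper maps center-format boxes (cx, cy, w, h) to corner format (x1, y1, x2, y2); a minimal sketch:

def xywh2xyxy(x):
    # Convert (cx, cy, w, h) boxes to (x1, y1, x2, y2) corner format.
    y = x.new(x.shape)
    y[..., 0] = x[..., 0] - x[..., 2] / 2  # left edge
    y[..., 1] = x[..., 1] - x[..., 3] / 2  # top edge
    y[..., 2] = x[..., 0] + x[..., 2] / 2  # right edge
    y[..., 3] = x[..., 1] + x[..., 3] / 2  # bottom edge
    return y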
Example 3
        if not fn.endswith('.jpg'):
            continue
        # input image
        img_path = os.path.join(opt.image_folder, fn)

        # single-image prediction -> plot bbox and save
        # detections_list holds the detections corresponding to each sample
        detections_list, detections_rescaled, inference_time = detect_img(
            img_path, model, device, opt.img_size, classes, opt.conf_thres,
            opt.nms_thres, output_dir, opt.save_plot)

        # label/targets
        label_path = img_path.replace('images',
                                      'labels').replace('.jpg', '.txt')
        _, targets = ListDataset(
            opt.dataset_name,
            'data/custom/train_valid/phantom_20/valid.txt').preprocess(
                img_path, label_path)
        labels += targets[:, 1].tolist()
        targets[:, 2:] = xywh2xyxy(targets[:, 2:])
        targets[:, 2:] *= opt.img_size  # rescale target

        # prediction speed
        inference_time_total += inference_time

        # validation score computation
        sample_metrics += get_batch_statistics(detections_list,
                                               targets,
                                               iou_threshold=opt.iou_thres)
        if detections_list[0] is not None:
            ious = sample_metrics[-1][-1]
            count += 1
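
Example 3 only accumulates per-image statistics inside the loop; a hedged sketch of the aggregation step that would typically follow it, mirroring the pattern in Examples 1 and 2:

# Hedged sketch: aggregate the per-image statistics collected above.
true_positives, pred_scores, pred_labels = [
    np.concatenate(x, 0) for x in list(zip(*sample_metrics))
]
precision, recall, AP, f1, ap_class = ap_per_class(true_positives,
                                                   pred_scores,
                                                   pred_labels, labels)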
Example 4
    class_names = load_classes(data_config["names"])

    # Initiate model
    model = Darknet(opt.model_def).to(device)
    model.apply(weights_init_normal)

    # If specified we start from checkpoint
    if opt.pretrained_weights:
        if opt.pretrained_weights.endswith(".pth"):
            model.load_state_dict(torch.load(opt.pretrained_weights))
        else:
            model.load_darknet_weights(opt.pretrained_weights)

    # Get dataloader
    dataset = ListDataset(train_path,
                          augment=True,
                          multiscale=opt.multiscale_training)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.n_cpu,
        pin_memory=True,
        collate_fn=dataset.collate_fn,
    )

    optimizer = torch.optim.Adam(model.parameters())

    metrics = [
        "grid_size",
        "loss",
Example 5
    class_names = load_classes(data_config["names"])

    # Initiate model
    model = Darknet(opt.model_def).to(device)
    model.apply(weights_init_normal)

    # If specified we start from checkpoint
    if opt.pretrained_weights:
        if opt.pretrained_weights.endswith(".pth"):
            model.load_state_dict(torch.load(opt.pretrained_weights))
        else:
            model.load_darknet_weights(opt.pretrained_weights)

    # Get dataloader
    dataset = ListDataset(label_file=train_path,
                          augment=True,
                          multiscale=opt.multiscale_training,
                          normalized_labels=False)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.n_cpu,
        pin_memory=True,
        collate_fn=dataset.collate_fn,
    )

    optimizer = torch.optim.Adam(model.parameters())

    metrics = [
        "grid_size",
        "loss",
Example 6
def transform_train(img, boxes, labels):
    img, boxes, labels = random_crop(img, boxes, labels)
    img, boxes = resize(img,
                        boxes,
                        size=(img_size, img_size),
                        random_interpolation=True)
    img, boxes = random_flip(img, boxes)
    img = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])(img)
    boxes, labels = box_coder.encode(boxes, labels)
    return img, boxes, labels


trainset = ListDataset(root='/usr/local/share/data/SynthText/',
                       list_file='data_train.txt',
                       transform=transform_train)


def transform_test(img, boxes, labels):
    img, boxes = resize(img, boxes, size=(img_size, img_size))
    img = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])(img)
    boxes, labels = box_coder.encode(boxes, labels)
    return img, boxes, labels


testset = ListDataset(root='/usr/local/share/data/SynthText/',
                      list_file='data_val.txt',
                      transform=transform_test)
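
A hedged sketch of wrapping these datasets in DataLoaders; the batch size and worker count are placeholder assumptions, and the default collate is assumed to suffice because box_coder.encode returns fixed-size tensors per sample:

trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=32,
                                          shuffle=True,
                                          num_workers=4)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=32,
                                         shuffle=False,
                                         num_workers=4)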