Example #1
def train(epochs: int) -> None:
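    # Trains a CenterNetV1 detector (EfficientNetBackbone + Anchors) on a
    # TrainDataset parameterized by object count/size ranges, using corner-map
    # targets (MkCornerMaps), AdamW, and a ModelLoader/BestWatcher pair that
    # tracks the best checkpoint on cfg.metric.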
    train_dataset = TrainDataset(
        cfg.input_size,
        object_count_range=cfg.object_count_range,
        object_size_range=cfg.object_size_range,
        num_samples=1024,
    )
    test_dataset = TrainDataset(
        cfg.input_size,
        object_count_range=cfg.object_count_range,
        object_size_range=cfg.object_size_range,
        num_samples=256,
    )
    backbone = EfficientNetBackbone(1,
                                    out_channels=cfg.channels,
                                    pretrained=True)
    model = CenterNetV1(
        channels=cfg.channels,
        backbone=backbone,
        out_idx=cfg.out_idx,
        box_depth=cfg.box_depth,
        anchors=Anchors(size=cfg.anchor_size),
    )
    mkmaps = MkCornerMaps()
    criterion = Criterion(
        box_weight=cfg.box_weight,
        heatmap_weight=cfg.heatmap_weight,
        mkmaps=mkmaps,
    )
    train_loader = DataLoader(train_dataset,
                              collate_fn=collate_fn,
                              batch_size=cfg.batch_size,
                              shuffle=True)
    test_loader = DataLoader(test_dataset,
                             collate_fn=collate_fn,
                             batch_size=cfg.batch_size * 2,
                             shuffle=True)
    optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.lr)
    visualize = Visualize(cfg.out_dir, "test", limit=2)

    model_loader = ModelLoader(
        out_dir=cfg.out_dir,
        key=cfg.metric[0],
        best_watcher=BestWatcher(mode=cfg.metric[1]),
    )
    to_boxes = ToBoxes(threshold=cfg.to_boxes_threshold, use_peak=cfg.use_peak)
    get_score = MeanPrecition()
    trainer = Trainer(
        model=model,
        train_loader=train_loader,
        test_loader=test_loader,
        model_loader=model_loader,
        optimizer=optimizer,
        visualize=visualize,
        criterion=criterion,
        device="cuda",
        get_score=get_score,
        to_boxes=to_boxes,
    )
    trainer(epochs)
Example #2
def train(epochs: int) -> None:
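    # Same TrainDataset setup as Example #1, but trains a single-class
    # EfficientDet (EfficientNetBackbone + Anchors with ratios) using Adam,
    # decoding predictions with ToBoxes and scoring with MeanPrecition.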
    train_dataset = TrainDataset(
        config.input_size,
        object_count_range=config.object_count_range,
        object_size_range=config.object_size_range,
        num_samples=1024,
    )
    test_dataset = TrainDataset(
        config.input_size,
        object_count_range=config.object_count_range,
        object_size_range=config.object_size_range,
        num_samples=256,
    )
    backbone = EfficientNetBackbone(2,
                                    out_channels=config.channels,
                                    pretrained=True)
    anchors = Anchors(size=config.anchor_size, ratios=config.anchor_ratios)
    model = EfficientDet(num_classes=1,
                         channels=config.channels,
                         backbone=backbone,
                         anchors=anchors)
    model_loader = ModelLoader(
        out_dir=config.out_dir,
        key=config.metric[0],
        best_watcher=BestWatcher(mode=config.metric[1]),
    )
    criterion = Criterion()
    optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
    visualize = Visualize("/store/efficientdet", "test", limit=2)
    get_score = MeanPrecition()
    to_boxes = ToBoxes(confidence_threshold=config.confidence_threshold)
    trainer = Trainer(
        model,
        DataLoader(
            train_dataset,
            collate_fn=collate_fn,
            batch_size=config.batch_size,
            shuffle=True,
        ),
        DataLoader(
            test_dataset,
            collate_fn=collate_fn,
            batch_size=config.batch_size * 2,
            shuffle=True,
        ),
        model_loader=model_loader,
        optimizer=optimizer,
        visualize=visualize,
        criterion=criterion,
        get_score=get_score,
        device="cuda",
        to_boxes=to_boxes,
    )
    trainer(epochs)
Example #3
def evaluate(limit: int = 100) -> None:
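    # Restores a trained EfficientDet via ModelLoader, runs Predictor over the
    # first `limit` samples of WheatDataset (test mode), merges overlapping
    # detections with BoxMerge, and prints the mean per-image precision
    # against the ground-truth boxes.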
    backbone = EfficientNetBackbone(config.effdet_id,
                                    out_channels=config.channels)
    anchors = Anchors(
        size=config.anchor_size,
        ratios=config.anchor_ratios,
        scales=config.anchor_scales,
    )
    model = EfficientDet(
        num_classes=1,
        channels=config.channels,
        backbone=backbone,
        anchors=anchors,
        out_ids=config.out_ids,
    )
    model_loader = ModelLoader(
        out_dir=config.out_dir,
        key=config.metric[0],
        best_watcher=BestWatcher(mode=config.metric[1]),
    )
    box_merge = BoxMerge(iou_threshold=config.iou_threshold,
                         confidence_threshold=config.final_threshold)
    dataset = Subset(
        WheatDataset(
            annot_file=config.annot_file,
            image_dir=config.train_image_dir,
            max_size=config.max_size,
            mode="test",
        ),
        list(range(limit)),
    )
    to_boxes = ToBoxes(confidence_threshold=config.confidence_threshold)
    data_loader = DataLoader(
        dataset=dataset,
        collate_fn=_collate_fn,
        batch_size=config.batch_size * 2,
        shuffle=False,
    )
    predictor = Predictor(
        model=model,
        loader=data_loader,
        model_loader=model_loader,
        device=config.device,
        box_merge=box_merge,
        to_boxes=to_boxes,
    )
    boxes_list, confs_list, ids = predictor()
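    # Each dataset sample's third element is taken as its ground-truth boxes;
    # the final score is the mean per-image precision over the subset.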
    gt_boxes_list = [dataset[i][2] for i in range(len(dataset))]
    get_score = MeanPrecition()
    score = np.mean([
        get_score(x, y.to(x.device)) for x, y in zip(boxes_list, gt_boxes_list)
    ])
    print(score)
Example #4
def train(epochs: int) -> None:
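    # Trains a single-class EfficientDet on WheatDataset using one stratified
    # k-fold split (config.fold_idx); the criterion combines PosLoss, SizeLoss
    # and LabelLoss, and test-time boxes go through ToBoxes and BoxMerge.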
    train_dataset = WheatDataset(
        image_dir=config.train_image_dir,
        annot_file=config.annot_file,
        max_size=config.max_size,
        mode="train",
    )
    test_dataset = WheatDataset(
        image_dir=config.train_image_dir,
        annot_file=config.annot_file,
        max_size=config.max_size,
        mode="test",
    )
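    # x[2] appears to be each image's ground-truth boxes, so images are
    # bucketed by box count (// 30) to stratify the k-fold split.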
    fold_keys = [x[2].shape[0] // 30 for x in test_dataset.rows]
    train_idx, test_idx = list(kfold(n_splits=config.n_splits, keys=fold_keys))[
        config.fold_idx
    ]

    train_loader = DataLoader(
        Subset(train_dataset, train_idx),
        batch_size=config.batch_size,
        drop_last=True,
        shuffle=True,
        collate_fn=collate_fn,
        num_workers=config.num_workers,
    )
    test_loader = DataLoader(
        Subset(test_dataset, test_idx),
        batch_size=config.batch_size,
        drop_last=False,
        shuffle=False,
        collate_fn=collate_fn,
        num_workers=config.num_workers,
    )
    backbone = EfficientNetBackbone(
        config.effdet_id, out_channels=config.channels, pretrained=config.pretrained
    )
    anchors = Anchors(
        size=config.anchor_size,
        ratios=config.anchor_ratios,
        scales=config.anchor_scales,
    )
    model = EfficientDet(
        num_classes=1,
        channels=config.channels,
        backbone=backbone,
        anchors=anchors,
        out_ids=config.out_ids,
    )
    model_loader = ModelLoader(
        out_dir=config.out_dir,
        key=config.metric[0],
        best_watcher=BestWatcher(mode=config.metric[1]),
    )
    box_merge = BoxMerge(
        iou_threshold=config.iou_threshold, confidence_threshold=config.final_threshold
    )
    criterion = Criterion(
        label_weight=config.label_weight,
        pos_loss=PosLoss(iou_threshold=config.pos_threshold),
        size_loss=SizeLoss(iou_threshold=config.size_threshold),
        label_loss=LabelLoss(iou_thresholds=config.label_thresholds),
    )
    visualize = Visualize(config.out_dir, "test", limit=5, show_probs=True)
    optimizer = torch.optim.AdamW(model.parameters(), lr=config.lr)
    to_boxes = ToBoxes(confidence_threshold=config.confidence_threshold)
    Trainer(
        model=model,
        train_loader=train_loader,
        test_loader=test_loader,
        model_loader=model_loader,
        optimizer=optimizer,
        visualize=visualize,
        device=config.device,
        criterion=criterion,
        get_score=MeanPrecition(),
        to_boxes=to_boxes,
        box_merge=box_merge,
    )(epochs)
Example #5
def train(epochs: int) -> None:
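    # Same k-fold WheatDataset setup as Example #4, but trains CenterNetV1
    # with heatmap targets built by MkMaps(sigma, mode) and decodes detections
    # with ToBoxes(threshold, use_peak) before BoxMerge.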
    train_dataset = WheatDataset(
        image_dir=config.train_image_dir,
        annot_file=config.annot_file,
        max_size=config.max_size,
        mode="train",
    )
    test_dataset = WheatDataset(
        image_dir=config.train_image_dir,
        annot_file=config.annot_file,
        max_size=config.max_size,
        mode="test",
    )
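    # As in Example #4, bucket images by box count (// 30) to stratify the
    # k-fold split.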
    fold_keys = [x[2].shape[0] // 30 for x in test_dataset.rows]
    train_idx, test_idx = list(kfold(n_splits=config.n_splits,
                                     keys=fold_keys))[config.fold_idx]

    train_loader = DataLoader(
        Subset(train_dataset, train_idx),
        batch_size=config.batch_size,
        drop_last=True,
        shuffle=True,
        collate_fn=collate_fn,
        num_workers=config.num_workers,
    )
    test_loader = DataLoader(
        Subset(test_dataset, test_idx),
        batch_size=config.batch_size,
        drop_last=False,
        shuffle=False,
        collate_fn=collate_fn,
        num_workers=config.num_workers,
    )
    backbone = EfficientNetBackbone(config.effdet_id,
                                    out_channels=config.channels,
                                    pretrained=config.pretrained)
    model = CenterNetV1(
        channels=config.channels,
        backbone=backbone,
        out_idx=config.out_idx,
        fpn_depth=config.fpn_depth,
        hm_depth=config.hm_depth,
        box_depth=config.box_depth,
    )
    model_loader = ModelLoader(
        out_dir=config.out_dir,
        key=config.metric[0],
        best_watcher=BestWatcher(mode=config.metric[1]),
    )
    box_merge = BoxMerge(iou_threshold=config.iou_threshold,
                         confidence_threshold=config.final_threshold)
    criterion = Criterion(
        heatmap_weight=config.heatmap_weight,
        box_weight=config.box_weight,
        mk_maps=MkMaps(
            sigma=config.sigma,
            mode=config.map_mode,
        ),
    )

    visualize = Visualize(config.out_dir,
                          "centernet",
                          limit=5,
                          show_probs=True)
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=config.lr,
    )
    to_boxes = ToBoxes(
        threshold=config.confidence_threshold,
        use_peak=config.use_peak,
    )
    Trainer(
        model=model,
        train_loader=train_loader,
        test_loader=test_loader,
        model_loader=model_loader,
        optimizer=optimizer,
        visualize=visualize,
        device=config.device,
        criterion=criterion,
        get_score=MeanPrecition(),
        to_boxes=to_boxes,
        box_merge=box_merge,
    )(epochs)
Example #6
def test_mean_precision() -> None:
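    # `preds` and `gts` are assumed to be module-level numpy arrays of
    # COCO-format boxes; both are converted to YOLO format for a 1024x1024
    # image before scoring with MeanPrecition.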
    pred_boxes = coco_to_yolo(CoCoBoxes(torch.from_numpy(preds)), (1024, 1024))
    gt_boxes = coco_to_yolo(CoCoBoxes(torch.from_numpy(gts)), (1024, 1024))
    fn = MeanPrecition()
    res = fn(pred_boxes, gt_boxes)
    assert res < 0.37