Example 1
import torch

# Config, create_wf_datasets, my_collate_fn, Detector and AP are
# project-specific helpers assumed to come from the surrounding repo.

def main(args):
    # Only the validation split is needed for evaluation.
    _, val_dataset = create_wf_datasets(Config.WF_DATASET_DIR)

    val_dataloader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=Config.BATCH_SIZE,
        num_workers=Config.DATALOADER_WORKER_NUM,
        shuffle=False,
        collate_fn=my_collate_fn
    )

    total = len(val_dataloader)
    detector = Detector(args.model)
    APs = []
    for index, data in enumerate(val_dataloader):
        # Run the detector on the whole batch at once.
        predictions = detector.forward(data)
        for i in range(len(predictions)):
            if predictions[i] is None:
                APs.append(0)
                continue
            prediction = predictions[i]
            gt = data[1][i]
            scale = data[3][i]

            # Rescale the ground-truth (x1, y1, x2, y2) boxes with the
            # per-image scale factors so they match the prediction coordinates.
            gt[:, 0] *= scale[0]
            gt[:, 1] *= scale[1]
            gt[:, 2] *= scale[0]
            gt[:, 3] *= scale[1]

            # Per-image average precision at an IoU threshold of 0.5;
            # AP() returns a mapping and the entry at key 1.0 is accumulated.
            ap = AP(prediction, gt, 0.5)
            APs.append(ap[1.0])

        print("{} / {}".format(index, total))

    print("mAP: {}".format(sum(APs)/len(APs)))
Example 2
import numpy as np
import torch

def evaluate(model):
    # Evaluate an in-memory model and return the mAP instead of printing it.
    _, val_dataset = create_wf_datasets(Config.WF_DATASET_DIR)

    val_dataloader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=1,
        num_workers=Config.DATALOADER_WORKER_NUM,
        shuffle=True,  # ordering does not affect the averaged result
        collate_fn=my_collate_fn
    )

    total = len(val_dataloader)
    detector = Detector(model)
    APs = []
    for index, data in enumerate(val_dataloader):
        predictions = detector.forward(data)
        for i in range(len(predictions)):
            if predictions[i] is None:
                APs.append(0)
                continue
            prediction = predictions[i]
            gt = np.array(data[1][i])

            ap = AP(prediction, gt, 0.5)
            APs.append(ap[1.0])

    # Mean AP over the validation set.
    return sum(APs) / len(APs)
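
Because evaluate() takes a live model and returns the mAP rather than printing it, it can serve as a validation hook inside a training loop. A sketch under that assumption; train_one_epoch is a hypothetical helper, not part of the examples:

# Hypothetical validation hook; train_one_epoch is an assumed helper.
model = Net()
for epoch in range(Config.EPOCHS):
    train_one_epoch(model)   # assumed training step, not shown above
    mAP = evaluate(model)    # Example 2's evaluation function
    print('epoch {}: validation mAP = {:.4f}'.format(epoch, mAP))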
Example 3
import torch

def main():
    # Pick the dataset pair selected in config.py.
    if Config.DATASETS == 'VOC':
        train_dataset, val_dataset = create_voc_datasets(
            Config.VOC_DATASET_DIR)
    elif Config.DATASETS == 'WF':
        train_dataset, val_dataset = create_wf_datasets(Config.WF_DATASET_DIR)
    else:
        raise RuntimeError('Select a dataset to train on in config.py.')

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=Config.BATCH_SIZE,
        num_workers=Config.DATALOADER_WORKER_NUM,
        shuffle=True,
        collate_fn=my_collate_fn)

    val_dataloader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=1,
        num_workers=Config.DATALOADER_WORKER_NUM,
        shuffle=False,
        collate_fn=my_collate_fn)

    model = Net()
    # Alternative optimizer, kept commented out for reference:
    # optimizer = torch.optim.Adam(
    #     params=model.parameters(),
    #     lr=Config.LEARNING_RATE, weight_decay=Config.WEIGHT_DECAY
    # )
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=Config.LEARNING_RATE,
                                weight_decay=Config.WEIGHT_DECAY)

    # Trainer wires up checkpoint resuming, logging and periodic saving.
    trainer = Trainer(optimizer,
                      model,
                      train_dataloader,
                      val_dataloader,
                      resume=Config.RESUME_FROM,
                      log_dir=Config.LOG_DIR,
                      persist_stride=Config.MODEL_SAVE_STRIDE,
                      max_epoch=Config.EPOCHS)
    trainer.train()
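
All three examples read their settings from a shared Config object. A hypothetical sketch of the fields they touch; the names come from the examples above, but every value is an illustrative placeholder:

# Hypothetical Config; values are placeholders, not taken from the project.
class Config:
    DATASETS = 'WF'                  # 'VOC' or 'WF'
    VOC_DATASET_DIR = './data/voc'
    WF_DATASET_DIR = './data/widerface'
    BATCH_SIZE = 8
    DATALOADER_WORKER_NUM = 4
    LEARNING_RATE = 1e-3
    WEIGHT_DECAY = 5e-4
    RESUME_FROM = None               # or a checkpoint path to resume from
    LOG_DIR = './logs'
    MODEL_SAVE_STRIDE = 1            # save a checkpoint every N epochs
    EPOCHS = 100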