Example #1
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            criterion=criterion,
            device=args.device,
        )
        print(f"train | mIoU: {train_miou:.3f} | accuracy: {train_acc:.3f} | loss: {train_loss:.3f}")
        val_loss, val_scores = eval_epoch(
            model=model,
            dataloader=val_loader,
            n_classes=args.n_classes,
            criterion=criterion,
            device=args.device,
        )
        val_miou, val_ious, val_acc = val_scores['mIoU'], val_scores['IoUs'], val_scores['accuracy']
        print(f"valid | mIoU: {val_miou:.3f} | accuracy: {val_acc:.3f} | loss: {val_loss:.3f}")
        recorder.update([train_miou, train_acc, train_loss, val_miou, val_acc, val_loss])
        recorder.save(args.record_path)
        # choose the model-selection metric: a single class IoU (e.g. "IoU_3"
        # picks class 3) or the mean IoU over all classes
        if args.metric.startswith("IoU"):
            metric = val_ious[int(args.metric.split('_')[1])]
        else:
            metric = val_miou
        model_saver.save_models(metric, epoch + 1, model,
                                ious={'train': train_ious, 'val': val_ious})

    print(f"best model at epoch {model_saver.best_epoch} with miou {model_saver.best_score:.5f}")


if __name__ == '__main__':
    arg_parser = Arguments()
    args = arg_parser.parse_args(verbose=True)
    train(args)
Example #2
    train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True)
    val_loader = DataLoader(val_set, batch_size=1, shuffle=False)

    # get test dataloader
    if s_info.test_type == "validation":
        test_set = val_set
    elif s_info.test_type == "TextSplit":
        split_file_path = f"{args.dataset_root}/splits/{s_info.test_split_file}"
        test_set = TextSplitDataset(args.img_dir, args.label_dir, split_file_path, transform_eval)
    elif s_info.test_type == "CSVSplit":
        split_file_path = f"{args.dataset_root}/splits/{s_info.split_file}"
        test_set = CSVSplitDataset(args.img_dir, args.label_dir, split_file_path,
                                   s_info.test_split_num, transform_eval, s_info.split_col_name)
    elif s_info.test_type == "folder":
        test_set = FolderDataset(s_info.test_img_dir, s_info.test_label_dir, transform_eval)
    else:
        raise NotImplementedError(s_info.test_type)
    test_loader = DataLoader(test_set, batch_size=1, shuffle=False)
    return train_loader, val_loader, test_loader


if __name__ == '__main__':
    from args import Arguments
    import sys
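    # inject the augmentation-test config as if it had been passed on the command line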
    sys.argv.extend(['--config', 'test_aug.yaml'])
    parser = Arguments()
    args = parser.parse_args(use_random_seed=False)
    args.train_repeat = 1
    train_loader, val_loader, test_loader = get_dataloaders(args)
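    # visual sanity check: preview several augmented versions of the first training sample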
    visualize_augmentations(train_loader.dataset, idx=0, n_samples=5)
Example #3
        dataloader = val_loader
    elif mode == 'test':
        dataloader = test_loader
    else:
        raise ValueError(f"{mode} not supported. Choose from 'val' or 'test'")
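    # restore the trained UNet-VGG16 weights; strict=False tolerates checkpoints
    # with missing or unexpected keys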
    model = UNetVgg16(n_classes=args.n_classes).to(args.device)
    model.load_state_dict(torch.load(args.model_path)['model_state_dict'],
                          strict=False)
    criterion = get_loss_fn(args.loss_type, args.ignore_index).to(args.device)
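    # pred_dir is forwarded only when save_pred is truthy: "save_pred and args.pred_dir"
    # evaluates to args.pred_dir when save_pred is True, else to the falsy save_pred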
    eval_loss, scores = eval_epoch(model=model,
                                   dataloader=dataloader,
                                   n_classes=args.n_classes,
                                   criterion=criterion,
                                   device=args.device,
                                   pred_dir=save_pred and args.pred_dir)
    miou, acc = scores['mIoU'], scores['accuracy']
    print(
        f"{mode} | mIoU: {miou:.3f} | accuracy: {acc:.3f} | loss: {eval_loss:.3f}"
    )
    return scores


if __name__ == '__main__':
    arg_parser = Arguments()
    arg_parser.parser.add_argument('--mode',
                                   '-m',
                                   choices=['val', 'test'],
                                   required=True)
    args = arg_parser.parse_args()
    evaluate(args, args.mode, save_pred=True)