Example #1
def load_data(config_file_path, val_proportion=0.10, top_k=-1):
    '''
    Loads all the data for the ESA Kelvin competition (train, val, test, baseline).
    Args:
        config_file_path: str, path to the JSON configuration file
        val_proportion: float, fraction of the training set held out for validation
        top_k: int, number of low-resolution images to read; the default (top_k=-1) reads all low-res images, sorted by clearance
    Returns:
        train_dataset: torch.utils.data.Dataset
        val_dataset: torch.utils.data.Dataset
        test_dataset: torch.utils.data.Dataset
        baseline_cpsnrs: dict, shift cPSNR scores of the ESA baseline
    '''

    with open(config_file_path, "r") as read_file:
        config = json.load(read_file)

    data_directory = config["paths"]["prefix"]
    baseline_cpsnrs = readBaselineCPSNR(
        os.path.join(data_directory, "norm.csv"))

    train_set_directories = getImageSetDirectories(
        os.path.join(data_directory, "train"))
    test_set_directories = getImageSetDirectories(
        os.path.join(data_directory, "test"))

    train_list, val_list = train_test_split(train_set_directories,
                                            test_size=val_proportion,
                                            random_state=1,
                                            shuffle=True)
    config["training"]["create_patches"] = False

    train_dataset = ImagesetDataset(imset_dir=train_list,
                                    config=config["training"],
                                    top_k=top_k)
    val_dataset = ImagesetDataset(imset_dir=val_list,
                                  config=config["training"],
                                  top_k=top_k)
    test_dataset = ImagesetDataset(imset_dir=test_set_directories,
                                   config=config["training"],
                                   top_k=top_k)
    return train_dataset, val_dataset, test_dataset, baseline_cpsnrs
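A minimal usage sketch for load_data, assuming the function and its helpers (readBaselineCPSNR, getImageSetDirectories, ImagesetDataset) are importable from the repository's own modules and that ImagesetDataset behaves as a standard torch Dataset (implements __len__). The config path and the top_k value below are illustrative, not values taken from the repository.

# Hypothetical config path and top_k; adjust to your setup.
train_dataset, val_dataset, test_dataset, baseline_cpsnrs = load_data(
    "config/config.json",   # assumed location of the JSON config
    val_proportion=0.10,
    top_k=9)                # read only the 9 clearest low-res views per image set

print(len(train_dataset), "training image sets")
print(len(val_dataset), "validation image sets")
print(len(baseline_cpsnrs), "baseline cPSNR entries")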
Example #2
def main(config):
    """
    Given a configuration, trains HRNet and ShiftNet for Multi-Frame Super Resolution (MFSR), and saves best model.
    Args:
        config: dict, configuration file
    """

    # Reproducibility options
    np.random.seed(0)  # RNG seeds
    torch.manual_seed(0)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Initialize the fusion network (HRNet) and the registration network (ShiftNet)
    # from the network configuration
    fusion_model = HRNet(config["network"])
    regis_model = ShiftNet()

    # One Adam optimizer over the parameters of both networks
    optimizer = optim.Adam(list(fusion_model.parameters()) +
                           list(regis_model.parameters()),
                           lr=config["training"]["lr"])
    # ESA dataset
    data_directory = config["paths"]["prefix"]

    baseline_cpsnrs = None
    if os.path.exists(os.path.join(data_directory, "norm.csv")):
        baseline_cpsnrs = readBaselineCPSNR(
            os.path.join(data_directory, "norm.csv"))

    train_set_directories = getImageSetDirectories(
        os.path.join(data_directory, "train"))

    val_proportion = config['training']['val_proportion']
    train_list, val_list = train_test_split(train_set_directories,
                                            test_size=val_proportion,
                                            random_state=1,
                                            shuffle=True)

    # Dataloaders
    batch_size = config["training"]["batch_size"]
    n_workers = config["training"]["n_workers"]
    n_views = config["training"]["n_views"]
    min_L = config["training"]["min_L"]  # minimum number of views
    beta = config["training"]["beta"]

    train_dataset = ImagesetDataset(imset_dir=train_list,
                                    config=config["training"],
                                    top_k=n_views,
                                    beta=beta)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=n_workers,
                                  collate_fn=collateFunction(min_L=min_L),
                                  pin_memory=True)

    config["training"]["create_patches"] = False
    val_dataset = ImagesetDataset(imset_dir=val_list,
                                  config=config["training"],
                                  top_k=n_views,
                                  beta=beta)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=1,
                                shuffle=False,
                                num_workers=n_workers,
                                collate_fn=collateFunction(min_L=min_L),
                                pin_memory=True)

    dataloaders = {'train': train_dataloader, 'val': val_dataloader}

    # Train model
    torch.cuda.empty_cache()

    # Optionally warm-start from previously saved weights, e.g.:
    # fusion_model.load_state_dict(torch.load("models/weights/<run_name>/HRNet.pth"))
    # regis_model.load_state_dict(torch.load("models/weights/<run_name>/ShiftNet.pth"))

    trainAndGetBestModel(fusion_model, regis_model, optimizer, dataloaders,
                         baseline_cpsnrs, config)
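A sketch of a driver for main(), assuming the configuration lives in a JSON file; the path "config/config.json" is an assumption, and the keys listed in the comment are only those read directly by main() above.

import json

if __name__ == "__main__":
    # Hypothetical config path; point this at the repository's actual JSON config.
    with open("config/config.json", "r") as read_file:
        config = json.load(read_file)

    # main() reads config["paths"]["prefix"], config["network"], and the
    # "training" keys lr, val_proportion, batch_size, n_workers, n_views,
    # min_L and beta (see the function above).
    main(config)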