Example #1
            "eps": 1e-5,
            "weight_decay": 1e-5
        },
    ],
    "scheduler":
    torch.optim.lr_scheduler.StepLR,
    "scheduler_params": {
        "step_size": 1,
        "gamma": 1.0
    },
    "acc_steps": [1, 200],
    "train_transform":
    torchvision.transforms.Compose(
        [ToComplex(),
         SimulateMeasurements(OpA),
         Jitter(1e1, 0.0, 1.0)]),
    "val_transform":
    torchvision.transforms.Compose(
        [ToComplex(), SimulateMeasurements(OpA)], ),
    "train_loader_params": {
        "shuffle": True,
        "num_workers": 8
    },
    "val_loader_params": {
        "shuffle": False,
        "num_workers": 8
    },
}

# ----- data configuration -----
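# Illustrative sketch (not part of the example above): ToComplex,
# SimulateMeasurements, and Jitter are project-specific callables rather than
# stock torchvision transforms. torchvision.transforms.Compose simply chains
# arbitrary callables, so the pattern can be reproduced with a hypothetical
# stand-in transform:
import torch
import torchvision


class AddNoise:
    """Hypothetical stand-in for a custom transform such as Jitter."""

    def __init__(self, scale):
        self.scale = scale

    def __call__(self, sample):
        # Each callable receives the output of the previous one.
        return sample + self.scale * torch.randn_like(sample)


demo_transform = torchvision.transforms.Compose([AddNoise(0.1), AddNoise(0.01)])
print(demo_transform(torch.zeros(2, 3)).shape)  # torch.Size([2, 3])
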
Example #2
        },
        {
            "lr": 5e-5,
            "eps": 1e-5,
            "weight_decay": 5e-3
        },
    ],
    "scheduler":
    torch.optim.lr_scheduler.StepLR,
    "scheduler_params": {
        "step_size": 1,
        "gamma": 1.0
    },
    "acc_steps": [1, 200],
    "train_transform":
    Jitter(2e0, 0.0, 1.0),
}

# ----- data prep -----
X_train, C_train, Y_train = [
    tmp.unsqueeze(-2).to(device)
    for tmp in load_dataset(config.set_params["path"], subset="train")
]

X_val, C_val, Y_val = [
    tmp.unsqueeze(-2).to(device)
    for tmp in load_dataset(config.set_params["path"], subset="val")
]

# ------ save hyperparameters -------
os.makedirs(train_params["save_path"][-1], exist_ok=True)
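# Illustrative sketch (an assumption, not shown in the snippet): the preloaded
# tensors could be batched with a TensorDataset, provided X_train, C_train and
# Y_train share the same first (sample) dimension; the tuple order is arbitrary.
from torch.utils.data import DataLoader, TensorDataset

train_set = TensorDataset(Y_train, X_train, C_train)
train_loader = DataLoader(train_set, batch_size=32, shuffle=True)  # batch size is illustrative

for y_batch, x_batch, c_batch in train_loader:
    pass  # forward/backward pass of the training loop would go here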
Example #3
     "eps": 1e-4,
     "weight_decay": 1e-6
 }],
 "scheduler":
 torch.optim.lr_scheduler.StepLR,
 "scheduler_params": {
     "step_size": 1,
     "gamma": 1.0
 },
 "acc_steps": [1],
 "train_transform":
 torchvision.transforms.Compose([
     CropOrPadAndResimulate((320, 320)),
     Flatten(0, -3),
     Normalize(reduction="mean", use_target=True),
     Jitter(1.5e2, 0.0, 1.0),
 ]),
 "val_transform":
 torchvision.transforms.Compose([
     CropOrPadAndResimulate((320, 320)),
     Flatten(0, -3),
     Normalize(reduction="mean", use_target=True),
 ], ),
 "train_loader_params": {
     "shuffle": True,
     "num_workers": 8
 },
 "val_loader_params": {
     "shuffle": False,
     "num_workers": 8
 },
Example #4
        },
        {
            "lr": 5e-5,
            "eps": 1e-5,
            "weight_decay": 1e-3
        },
    ],
    "scheduler":
    torch.optim.lr_scheduler.StepLR,
    "scheduler_params": {
        "step_size": 1,
        "gamma": 1.0
    },
    "acc_steps": [1, 200],
    "train_transform":
    Jitter(4e0, 0.0, 1.0),
}

# ----- data prep -----
X_train, C_train, Y_train = [
    tmp.unsqueeze(-2).to(device)
    for tmp in load_dataset(config.set_params["path"], subset="train")
]

X_val, C_val, Y_val = [
    tmp.unsqueeze(-2).to(device)
    for tmp in load_dataset(config.set_params["path"], subset="val")
]

# ------ save hyperparameters -------
os.makedirs(train_params["save_path"][-1], exist_ok=True)
Example #5
            "Fourier_UNet_it_jitter_v4_"
            "train_phase_{}".format((i + 1) % (train_phases + 1)),
        )
        for i in range(train_phases + 1)
    ],
    "save_epochs": 1,
    "optimizer": torch.optim.Adam,
    "optimizer_params": [
        {"lr": 5e-5, "eps": 2e-4, "weight_decay": 1e-4},
        {"lr": 5e-5, "eps": 2e-4, "weight_decay": 1e-4},
    ],
    "scheduler": torch.optim.lr_scheduler.StepLR,
    "scheduler_params": {"step_size": 1, "gamma": 1.0},
    "acc_steps": [1, 200],
    "train_transform": torchvision.transforms.Compose(
        [ToComplex(), SimulateMeasurements(OpA), Jitter(1e1, 0.0, 1.0)]
    ),
    "val_transform": torchvision.transforms.Compose(
        [ToComplex(), SimulateMeasurements(OpA)],
    ),
    "train_loader_params": {"shuffle": True, "num_workers": 8},
    "val_loader_params": {"shuffle": False, "num_workers": 8},
}
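# Note on the save_path pattern above: the index (i + 1) % (train_phases + 1)
# is plain modular arithmetic. With train_phases = 2 (a value assumed purely
# for illustration), i = 0, 1, 2 yields the suffixes train_phase_1,
# train_phase_2 and train_phase_0, so the last entry, save_path[-1], always
# points at the train_phase_0 directory.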

# ----- data configuration -----

train_data_params = {
    "path": config.DATA_PATH,
}
train_data = IPDataset
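# Illustrative sketch (assumption: IPDataset takes a subset name plus the
# keyword arguments in train_data_params; the actual constructor is not shown
# in this snippet). The usual next step would be to instantiate the dataset
# and build a loader from the parameters configured above.
from torch.utils.data import DataLoader

train_data = train_data("train", **train_data_params)
train_loader = DataLoader(
    train_data,
    batch_size=16,  # illustrative; the batch size is not part of this snippet
    **train_params["train_loader_params"],  # shuffle=True, num_workers=8
)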
Example #6
    "optimizer_params": [{
        "lr": 8e-5,
        "eps": 2e-4,
        "weight_decay": 5e-4
    }],
    "scheduler":
    torch.optim.lr_scheduler.StepLR,
    "scheduler_params": {
        "step_size": 1,
        "gamma": 1.0
    },
    "acc_steps": [1],
    "train_transform":
    torchvision.transforms.Compose(
        [SimulateMeasurements(OpA),
         Jitter(5e2, 0.0, 1.0)]),
    "val_transform":
    torchvision.transforms.Compose([SimulateMeasurements(OpA)], ),
    "train_loader_params": {
        "shuffle": True,
        "num_workers": 0
    },
    "val_loader_params": {
        "shuffle": False,
        "num_workers": 0
    },
}

# ----- data configuration -----

train_data_params = {
Example #7
            "Radon_UNet_jitter_v3_"
            "train_phase_{}".format((i + 1) % (train_phases + 1)),
        )
        for i in range(train_phases + 1)
    ],
    "save_epochs": 1,
    "optimizer": torch.optim.Adam,
    "optimizer_params": [
        {"lr": 2e-4, "eps": 1e-4, "weight_decay": 5e-4},
        {"lr": 5e-5, "eps": 1e-4, "weight_decay": 1e-4},
    ],
    "scheduler": torch.optim.lr_scheduler.StepLR,
    "scheduler_params": {"step_size": 1, "gamma": 1.0},
    "acc_steps": [1, 200],
    "train_transform": torchvision.transforms.Compose(
        [SimulateMeasurements(OpA), Jitter(5e2, 0.0, 1.0)]
    ),
    "val_transform": torchvision.transforms.Compose(
        [SimulateMeasurements(OpA)],
    ),
    "train_loader_params": {"shuffle": True, "num_workers": 0},
    "val_loader_params": {"shuffle": False, "num_workers": 0},
}

# ----- data configuration -----

train_data_params = {
    "path": config.DATA_PATH,
    "device": device,
}
train_data = IPDataset
Example #8
            config.RESULTS_PATH,
            "unet_jitter_"
            "train_phase_{}".format((i + 1) % (train_phases + 1)),
        )
        for i in range(train_phases + 1)
    ],
    "save_epochs": 1,
    "optimizer": torch.optim.Adam,
    "optimizer_params": [
        {"lr": 8e-5, "eps": 1e-5, "weight_decay": 1e-3},
        {"lr": 5e-5, "eps": 1e-5, "weight_decay": 1e-3},
    ],
    "scheduler": torch.optim.lr_scheduler.StepLR,
    "scheduler_params": {"step_size": 1, "gamma": 1.0},
    "acc_steps": [1, 200],
    "train_transform": Jitter(4e0, 0.0, 1.0),
}
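# Illustrative sketch (not the project's training loop): the two entries in
# optimizer_params and acc_steps suggest one setting per training phase, with
# acc_steps presumably controlling gradient accumulation. Consuming them side
# by side could look like this:
import torch

model = torch.nn.Linear(4, 4)  # stand-in; the real network is not part of this snippet

for phase, (opt_kwargs, acc) in enumerate(
    zip(train_params["optimizer_params"], train_params["acc_steps"]), start=1
):
    # One Adam/StepLR pair per phase, built from the per-phase entries above.
    optimizer = train_params["optimizer"](model.parameters(), **opt_kwargs)
    scheduler = train_params["scheduler"](optimizer, **train_params["scheduler_params"])
    print("phase {}: lr={}, acc_steps={}".format(phase, opt_kwargs["lr"], acc))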


# ----- data prep -----
X_train, C_train, Y_train = [
    tmp.unsqueeze(-2).to(device)
    for tmp in load_dataset(config.set_params["path"], subset="train")
]

X_val, C_val, Y_val = [
    tmp.unsqueeze(-2).to(device)
    for tmp in load_dataset(config.set_params["path"], subset="val")
]