Example #1
import os

import torch
# Compose and RangeTransform are assumed to come from batchgenerators here
from batchgenerators.transforms import Compose, RangeTransform

from delira.data_loading import DataManager, SequentialSampler
# Predictor is assumed to live in delira.training
from delira.training import Predictor
from delira_unet import UNetTorch

if __name__ == '__main__':

    # PARAMETERS TO CHANGE
    # TODO: Change paths and create suitable Dataset
    checkpoint_path = ""
    data_path = ""
    save_path = ""
    dset = None

    checkpoint_path = os.path.expanduser(checkpoint_path)
    data_path = os.path.expanduser(data_path)
    save_path = os.path.expanduser(save_path)

    transforms = Compose([RangeTransform((-1, 1))])

    # 3 input channels, 1 output channel, instance normalization
    model = UNetTorch(3, 1, norm_layer="Instance", per_class=False)
    model.load_state_dict(torch.load(checkpoint_path)["model"])
    model.eval()

    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    model.to(device)

    # batch size 1, 4 augmentation processes, deterministic sample order
    dmgr = DataManager(dset, 1, 4, transforms, sampler_cls=SequentialSampler)

    predictor = Predictor(model, ...)  # remaining arguments truncated in the source
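The listing breaks off at the Predictor setup. As a minimal sketch of how inference could continue from here, assuming delira's Predictor exposes the predict_data_mgr generator; the filename pattern is purely illustrative:

    # hedged continuation (assumption, not part of the original example):
    # iterate batch-wise predictions from the data manager and dump each
    # result to disk
    for idx, preds in enumerate(predictor.predict_data_mgr(dmgr)):
        torch.save(preds, os.path.join(save_path, "pred_%05d.pt" % idx))
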
Example #2

from functools import partial

import torch
# Compose, CopyTransform and RangeTransform are again assumed to come
# from batchgenerators
from batchgenerators.transforms import Compose, CopyTransform, RangeTransform

from delira import set_debug_mode
from delira.data_loading import DataManager, SequentialSampler
from delira_unet import UNetTorch

if __name__ == "__main__":
    checkpoint_path = ""
    data_path = ""
    save_path = ""

    # debug mode: delira runs data loading without multiprocessing,
    # which makes the pipeline easier to step through
    set_debug_mode(True)

    transforms = Compose([
        CopyTransform("data", "data_orig"),
        # HistogramEqualization(),
        RangeTransform((-1, 1)),
        # AddGridTransform(),
    ])

    img_size = (1024, 256)
    thresh = 0.5

    print("Load Model")
    model = torch.jit.load(checkpoint_path)
    model.eval()

    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
Example #3
import os

import torch

# AdversarialLoss, Parameters, PyTorchExperiment, RangeTransform, setup_data
# and create_optims are project-level helpers assumed to be imported elsewhere


def train(model_cls,
          model_kwargs: dict,
          outpath: str,
          data_path,
          exp_name=None,
          batchsize=64,
          num_epochs=1500,
          checkpoint_freq=10,
          additional_losses: dict = None,
          dset_type="mnist",
          key_mapping=None,
          create_optim_fn=None):

    if exp_name is None:
        exp_name = model_cls.__name__

    if additional_losses is None:
        additional_losses = {}

    if create_optim_fn is None:
        create_optim_fn = create_optims

    outpath = os.path.expanduser(outpath)

    losses = {"adversarial": AdversarialLoss()}
    losses.update(additional_losses)
    params = Parameters(
        fixed_params={
            "model": {
                **model_kwargs
            },
            "training": {
                "num_epochs": num_epochs,
                "batchsize": batchsize,
                "losses": losses,
                "val_metrics": {},
                "optimizer_cls": torch.optim.Adam,
                "optimizer_params": {
                    "lr": 0.001,
                    "betas": (0.5, 0.9995)
                },
                "scheduler_cls": None,
                "scheduler_params": {}
            }
        })
    # presumably: batch size, 4 augmentation processes, identical transforms
    # for the training and validation splits, dataset type
    data = setup_data(data_path, params.nested_get("batchsize"), 4,
                      RangeTransform(), RangeTransform(), dset_type)
    exp = PyTorchExperiment(params,
                            model_cls,
                            params.nested_get("num_epochs"),
                            name=exp_name,
                            save_path=outpath,
                            key_mapping=key_mapping,
                            optim_builder=create_optim_fn,
                            checkpoint_freq=checkpoint_freq,
                            gpu_ids=[0])

    model = exp.run(data["train"], data["val"])
    weight_dir = os.path.join(exp.save_path, "checkpoints", "run_00")

    return model, weight_dir
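
A hedged usage sketch for the train function above; the model class MyGAN and the key mapping are placeholders for illustration only, not from the source:

if __name__ == "__main__":
    # MyGAN stands in for any delira-compatible GAN model class; the
    # key_mapping depends on that model's forward signature
    model, weight_dir = train(MyGAN,
                              model_kwargs={"latent_dim": 100},
                              outpath="~/experiments/gan",
                              data_path="~/data",
                              dset_type="mnist",
                              key_mapping={"x": "data"})
    print("final weights stored in", weight_dir)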