    project_name = args.project_name
    patch_size = args.patchsize
    batch_size = args.batchsize
    num_imgs = args.numimgs
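    # model id is taken from a fixed position in the output directory path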
    modelid = args.outdir.split("/")[4]

    model_name = f"{args.outdir}/best_model.pth"
    if not os.path.exists(model_name):
        print(f"Can't find model {model_name}, exiting")
        sys.exit()

    # select the torch device (GPU or CPU) to run the model on
    print('Getting device:', flush=True)
    device = get_torch_device(args.gpuid)

    print('Loading checkpoint:', flush=True)
    # Load the checkpoint onto the CPU first, then move the model to the target device:
    # https://discuss.pytorch.org/t/saving-and-loading-torch-models-on-2-machines-with-different-number-of-gpu-devices/6666
    checkpoint = torch.load(model_name, map_location=lambda storage, loc: storage)

    print('Creating model:', flush=True)
    model = UNet(n_classes=checkpoint["n_classes"],
                 in_channels=checkpoint["in_channels"],
                 padding=checkpoint["padding"],
                 depth=checkpoint["depth"],
                 wf=checkpoint["wf"],
                 up_mode=checkpoint["up_mode"],
                 batch_norm=checkpoint["batch_norm"]).to(device)
    model.load_state_dict(checkpoint["model_dict"])
Example #2
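    # Windows does not handle multi-process DataLoader workers well, so use 0 there;
    # otherwise honor the requested count, falling back to all CPUs when -1 is given.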
    if os.name == "nt":
        numworkers = 0
    else:
        numworkers = args.numworkers if args.numworkers != -1 else os.cpu_count()

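    # UNet hyperparameters, hard-coded for this example (not read from a checkpoint)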
    n_classes = 3
    in_channels = 3
    padding = True
    depth = 5
    wf = 2
    up_mode = 'upsample'
    batch_norm = True

    print("Getting torch device:", flush=True)
    device = get_torch_device()

    print("Initializing model:", flush=True)
    model = UNet(n_classes=n_classes,
                 in_channels=in_channels,
                 padding=padding,
                 depth=depth,
                 wf=wf,
                 up_mode=up_mode,
                 batch_norm=batch_norm,
                 concat=True).to(device)

    print(f"total params: \t{sum(np.prod(p.size()) for p in model.parameters())}",
          flush=True)