def load_config(config_path, run_makelinks=False):
    """Reads config file and calculates additional dcm attributes such as
    slice volume. Returns a dictionary used for patient wide calculations
    such as TKV.

    Args:
        config_path (str): config file path
        run_makelinks (bool, optional): Creates symbolic links during the first run. Defaults to False.

    Returns:
        dataloader, model, device, binarize_func, save_dir (str), model_name (str), split (str)
    """

    if run_makelinks:
        makelinks()
    with open(config_path, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)

    model_config = config["_MODEL_CONFIG"]
    loader_to_eval = config["_LOADER_TO_EVAL"]
    split = config[loader_to_eval]["dataset"]["splitter_key"].lower()
    dataloader_config = config[loader_to_eval]
    saved_checkpoint = config["_MODEL_CHECKPOINT"]
    checkpoint_format = config["_NEW_CKP_FORMAT"]

    model = get_object_instance(model_config)()
    if saved_checkpoint is not None:
        load_model_data(saved_checkpoint, model, new_format=checkpoint_format)

    dataloader = get_object_instance(dataloader_config)()

    # TODO: support other metrics as needed
    # binarize_func = SigmoidBinarize(thresholds=[0.5])

    pred_process_config = config["_LOSSES_METRICS_CONFIG"]["criterions_dict"][
        "dice_metric"]["pred_process"]
    pred_process = get_object_instance(pred_process_config)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model.eval()

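    # model name = experiment directory name, two levels above the config
    # file (e.g. .../<experiment>/val/val.yaml -> <experiment>)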
    model_name = Path(config_path).parts[-3]

    save_dir = "./saved_inference"

    return (
        dataloader,
        model,
        device,
        pred_process,
        save_dir,
        model_name,
        split,
    )
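# Usage sketch; the config path is hypothetical, and run_makelinks=True is
# only needed once, to create the symbolic links.
(dataloader, model, device, pred_process,
 save_dir, model_name, split) = load_config(
    "experiments/<experiment>/val/val.yaml", run_makelinks=False)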
Example #2
def evaluate(config):
    model_config = config["_MODEL_CONFIG"]
    dataloader_config = config["_VAL_DATALOADER_CONFIG"]
    loss_metric_config = config["_LOSSES_METRICS_CONFIG"]
    model = get_object_instance(model_config)()
    dataloader = get_object_instance(dataloader_config)()
    loss_metric = get_object_instance(loss_metric_config)()

    return (
        model,
        dataloader,
        loss_metric,
    )  # returned for debugging/testing
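# Smoke-test sketch: `config` is a parsed YAML dict, as in the other
# examples on this page.
model, dataloader, loss_metric = evaluate(config)
print(type(model).__name__, len(dataloader.dataset))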
Example #3
def evaluate(config):
    model_config = config["_MODEL_CONFIG"]
    loader_to_eval = config["_LOADER_TO_EVAL"]
    dataloader_config = config[loader_to_eval]
    loss_metric_config = config["_LOSSES_METRICS_CONFIG"]
    results_path = config["_RESULTS_PATH"]
    saved_checkpoint = config["_MODEL_CHECKPOINT"]
    checkpoint_format = config["_NEW_CKP_FORMAT"]

    model = get_object_instance(model_config)()
    if saved_checkpoint is not None:
        load_model_data(saved_checkpoint, model, new_format=checkpoint_format)

    dataloader = get_object_instance(dataloader_config)()
    loss_metric = get_object_instance(loss_metric_config)()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    model = model.to(device)
    model.eval()
    all_losses_and_metrics = validate(dataloader, model, loss_metric, device)

    os.makedirs(results_path, exist_ok=True)
    with open("{}/val_results.json".format(results_path), "w") as fp:
        print(all_losses_and_metrics)
        json.dump(all_losses_and_metrics, fp, indent=4)

    # plotting check
    output_example_idx = (hasattr(dataloader.dataset, "output_idx")
                          and dataloader.dataset.output_idx)
    data_iter = iter(dataloader)
    if output_example_idx:
        inputs, labels, _ = next(data_iter)
    else:
        inputs, labels = next(data_iter)

    inputs = inputs.to(device)
    with torch.no_grad():  # inference only; no gradients needed
        preds = model(inputs)
    inputs = inputs.cpu()
    preds = preds.cpu()

    plot_figure_from_batch(inputs, preds)
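# Hypothetical driver for the function above; the YAML path is illustrative.
with open("experiments/<experiment>/val/val.yaml", "r") as f:
    config = yaml.load(f, Loader=yaml.FullLoader)
evaluate(config)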
Example #4
def evaluate(config):
    model_config = config["_MODEL_CONFIG"]
    loader_to_eval = config["_LOADER_TO_EVAL"]
    dataloader_config = config[loader_to_eval]
    saved_checkpoint = config["_MODEL_CHECKPOINT"]
    checkpoint_format = config["_NEW_CKP_FORMAT"]

    model = get_object_instance(model_config)()
    if saved_checkpoint is not None:
        load_model_data(saved_checkpoint, model, new_format=checkpoint_format)

    dataloader = get_object_instance(dataloader_config)()

    # TODO: support other metrics as needed
    binarize_func = SigmoidBinarize(thresholds=[0.5])

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model.eval()

    updated_dcm2attribs = calculate_dcm_voxel_volumes(dataloader, model,
                                                      device, binarize_func)

    return updated_dcm2attribs
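# Sketch of the intended use: the returned mapping (dcm -> updated attributes)
# can be aggregated per patient downstream, e.g. summing voxel volumes to get
# TKV. `config` is a parsed YAML dict as in the other examples.
dcm2attribs = evaluate(config)
print("processed {} dcm entries".format(len(dcm2attribs)))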
Example #5
from adpkd_segmentation.data.data_utils import (  # noqa
    int16_to_uint8,
    masks_to_colorimg,
)

# %%
# needed only once
# makelinks()

# %%
path = "./experiments/september06/random_split_new_data_less_albu/val/val.yaml"

with open(path, "r") as f:
    config = yaml.load(f, Loader=yaml.FullLoader)
dataloader_config = config["_VAL_DATALOADER_CONFIG"]
dataloader = get_object_instance(dataloader_config)()

# %%
# SET THIS INDEX to choose the image/label pair in the augmentations example
IMG_IDX = 180
dataset = dataloader.dataset
x, y, index = dataset[IMG_IDX]

# %%
print("Dataset Length: {}".format(len(dataset)))
print("image -> shape {},  dtype {}".format(x.shape, x.dtype))
print("mask -> shape {},  dtype {}".format(y.shape, y.dtype))

# %%
print("Image and Mask: \n")
image, mask = x[0, ...], y
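
# %%
# Display sketch, assuming matplotlib is installed; if the mask is one-hot
# over classes, masks_to_colorimg (imported above) collapses it to RGB first.
import matplotlib.pyplot as plt

fig, axes = plt.subplots(1, 2, figsize=(10, 5))
axes[0].imshow(image, cmap="gray")
axes[0].set_title("image")
axes[1].imshow(mask if mask.ndim == 2 else masks_to_colorimg(mask))
axes[1].set_title("mask")
plt.show()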
Example #6
def train(config, config_save_name):
    # reproducibility
    seed = config.get("_SEED", 42)
    random.seed(seed)
    torch.manual_seed(seed)
    np.random.seed(seed)

    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    model_config = config["_MODEL_CONFIG"]
    train_dataloader_config = config["_TRAIN_DATALOADER_CONFIG"]
    val_dataloader_config = config["_VAL_DATALOADER_CONFIG"]
    loss_metric_config = config["_LOSSES_METRICS_CONFIG"]
    experiment_dir = config["_EXPERIMENT_DIR"]
    checkpoints_dir = os.path.join(experiment_dir, CHECKPOINTS)
    results_dir = os.path.join(experiment_dir, RESULTS)
    tb_logs_dir_train = os.path.join(experiment_dir, TB_LOGS, "train")
    tb_logs_dir_val = os.path.join(experiment_dir, TB_LOGS, "val")
    config_out = os.path.join(experiment_dir, config_save_name)

    saved_checkpoint = config["_MODEL_CHECKPOINT"]
    checkpoint_format = config["_NEW_CKP_FORMAT"]
    loss_key = config["_OPTIMIZATION_LOSS"]
    optim_config = config["_OPTIMIZER"]
    lookahead_config = config["_LOOKAHEAD_OPTIM"]
    lr_scheduler_config = config["_LR_SCHEDULER"]
    experiment_data = config["_EXPERIMENT_DATA"]
    val_plotting_dict = config.get("_VAL_PLOTTING")

    model = get_object_instance(model_config)()
    global_step = 0
    if saved_checkpoint is not None:
        global_step = load_model_data(saved_checkpoint,
                                      model,
                                      new_format=checkpoint_format)
    train_loader = get_object_instance(train_dataloader_config)()
    val_loader = get_object_instance(val_dataloader_config)()

    print("Train dataset length: {}".format(len(train_loader.dataset)))
    print("Validation dataset length: {}".format(len(val_loader.dataset)))
    print("Valiation dataset patients:\n{}".format(
        val_loader.dataset.patients))

    loss_metric = get_object_instance(loss_metric_config)()
    optimizer_getter = get_object_instance(optim_config)
    lr_scheduler_getter = get_object_instance(lr_scheduler_config)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    os.makedirs(checkpoints_dir)
    os.makedirs(results_dir)
    os.makedirs(tb_logs_dir_train)
    os.makedirs(tb_logs_dir_val)
    with open(config_out, "w") as f:
        yaml.dump(config, f, default_flow_style=False)

    # create configs for val and test
    val_config, val_out_dir = create_config(config, "val")
    test_config, test_out_dir = create_config(config, "test")
    os.makedirs(val_out_dir)
    os.makedirs(test_out_dir)

    val_path = os.path.join(val_out_dir, "val.yaml")
    print("Creating evaluation config for val: {}".format(val_path))
    with open(val_path, "w") as f:
        yaml.dump(val_config, f, default_flow_style=False)

    test_path = os.path.join(test_out_dir, "test.yaml")
    print("Creating evaluation config for test: {}".format(test_path))
    with open(test_path, "w") as f:
        yaml.dump(test_config, f, default_flow_style=False)

    train_writer = SummaryWriter(tb_logs_dir_train)
    val_writer = SummaryWriter(tb_logs_dir_val)

    model_params = model.parameters()
    model_param_prep_config = config.get("_MODEL_PARAM_PREP")
    if model_param_prep_config is not None:
        model_prep = get_object_instance(model_param_prep_config)
        model_params = model_prep(model)

    optimizer = optimizer_getter(model_params)
    if lookahead_config["use_lookahead"]:
        optimizer = Lookahead(optimizer, **lookahead_config["params"])
    lr_scheduler = lr_scheduler_getter(optimizer)

    model = model.to(device)
    model.train()
    num_epochs = experiment_data["num_epochs"]
    batch_log_interval = experiment_data["batch_log_interval"]
    # "low" or "high"
    best_metric_type = experiment_data["best_metric_type"]
    saving_metric = experiment_data["saving_metric"]
    previous = float("inf") if best_metric_type == "low" else float("-inf")

    output_example_idx = (hasattr(train_loader.dataset, "output_idx")
                          and train_loader.dataset.output_idx)

    for epoch in range(num_epochs):
        for output in train_loader:
            if output_example_idx:
                x_batch, y_batch, index = output
                extra_dict = train_loader.dataset.get_extra_dict(index)
                extra_dict = tensor_dict_to_device(extra_dict, device)
            else:
                x_batch, y_batch = output
                extra_dict = None

            optimizer.zero_grad()
            x_batch = x_batch.to(device)
            y_batch = y_batch.to(device)
            y_batch_hat = model(x_batch)
            losses_and_metrics = loss_metric(y_batch_hat, y_batch, extra_dict)
            loss = losses_and_metrics[loss_key]
            loss.backward()
            optimizer.step()
            global_step += 1
            if global_step % batch_log_interval == 0:
                print("TRAIN:", get_losses_str(losses_and_metrics))
                tb_log_metrics(train_writer, losses_and_metrics, global_step)
                # TODO: add support for softmax processing
                prediction = torch.sigmoid(y_batch_hat)
                plot_fig_from_batch(
                    train_writer,
                    x_batch,
                    prediction,
                    y_batch,
                    global_step,
                )
            # lr change after each batch
            if lr_scheduler_getter.step_type == "after_batch":
                lr_scheduler.step()

        # done with one epoch
        # let's validate (use code from the validation script)
        model.eval()
        all_losses_and_metrics = validate(
            val_loader,
            model,
            loss_metric,
            device,
            plotting_func=plot_fig_from_batch,
            plotting_dict=val_plotting_dict,
            writer=val_writer,
            global_step=global_step,
            val_metric_to_check=saving_metric,
            output_losses_list=False,
        )

        print("Validation results for epoch {}".format(epoch))
        print("VAL:", get_losses_str(all_losses_and_metrics, tensors=False))
        model.train()

        current = all_losses_and_metrics[saving_metric]
        if is_better(current, previous, best_metric_type):
            print("Validation metric improved "
                  "at the end of epoch {}".format(epoch))
            previous = current
            save_val_metrics(all_losses_and_metrics, results_dir, epoch,
                             global_step)
            out_path = os.path.join(checkpoints_dir, "best_val_checkpoint.pth")
            save_model_data(out_path, model, global_step)

        tb_log_metrics(val_writer, all_losses_and_metrics, global_step)

        # learning rate schedule step at the end of epoch
        if lr_scheduler_getter.step_type != "after_batch":
            if lr_scheduler_getter.step_type == "use_val":
                lr_scheduler.step(all_losses_and_metrics[loss_key])
            elif lr_scheduler_getter.step_type == "use_epoch":
                lr_scheduler.step(epoch)
            else:
                lr_scheduler.step()

        # plot distinct learning rates in the order they appear in the optimizer
        lr_dict = OrderedDict()
        for param_group in optimizer.param_groups:
            lr = param_group.get("lr")
            lr_dict[lr] = None
        for idx, lr in enumerate(lr_dict):
            tb_log_metrics(val_writer, {"lr_{}".format(idx): lr}, global_step)
            tb_log_metrics(train_writer, {"lr_{}".format(idx): lr},
                           global_step)

    train_writer.close()
    val_writer.close()
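# Hypothetical entry point; the YAML path is illustrative and
# config_save_name is simply the filename for the saved config copy.
if __name__ == "__main__":
    with open("experiments/<experiment>/train.yaml", "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    train(config, config_save_name="train.yaml")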
Example #7
# %%
CONFIG = "experiments/september02/random_split_new_data_less_albu_10_more/val/val.yaml"  # noqa

with open(CONFIG, "r") as f:
    config = yaml.load(f, Loader=yaml.FullLoader)

# %%
model_config = config["_MODEL_CONFIG"]
dataloader_config = config["_VAL_DATALOADER_CONFIG"]
losses_config = config["_LOSSES_METRICS_CONFIG"]
saved_checkpoint = config["_MODEL_CHECKPOINT"]
# override
dataloader_config["batchsize"] = 1

# %%
model = get_object_instance(model_config)()
load_model_data(saved_checkpoint, model, new_format=True)
dataloader = get_object_instance(dataloader_config)()
loss_metric = get_object_instance(losses_config)()

# %%
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
model.eval()

# %%
averaged, all_losses_and_metrics = validate(dataloader,
                                            model,
                                            loss_metric,
                                            device,
                                            output_losses_list=True)
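
# %%
# Quick look at the averaged metrics; exact keys depend on
# _LOSSES_METRICS_CONFIG. Per-batch values are in all_losses_and_metrics.
for name, value in averaged.items():
    print("{}: {}".format(name, value))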