Example #1
0
    def test_gbp(self):
        """Run a guided-backprop (gbp) pass over the dataset via medcam, then clean up."""
        result_dir = os.path.join(self.current_path, 'results/resnet152/test_gbp')
        model = medcam.inject(self.model, output_dir=result_dir, backend='gbp',
                              evaluate=False, save_scores=False, save_maps=True, save_pickle=False)
        model.eval()
        loader = DataLoader(self.dataset, batch_size=1, shuffle=False)
        for batch in loader:
            _ = model(batch[0][0])

        # Release the injected model before touching the files it produced.
        del model
        gc.collect()
        torch.cuda.empty_cache()

        results_root = os.path.join(self.current_path, 'results/resnet152')
        if CLEAR and os.path.isdir(results_root):
            shutil.rmtree(results_root)
Example #2
0
    def test_gcampp(self):
        """Run Grad-CAM++ ('gcampp') with evaluation enabled on a segmentation model, then clean up."""
        out_dir = os.path.join(self.current_path, 'results/unet_seg/gcampp')
        model = medcam.inject(self.model, output_dir=out_dir, backend='gcampp', layer='full',
                              evaluate=True, save_scores=False, save_maps=True, save_pickle=False,
                              metric='wioa', label=lambda x: 0.5 < x)
        model.eval()
        loader = DataLoader(self.dataset, batch_size=1, shuffle=False)
        # Warm-up call on a single batch (medcam's test_run) before the evaluated loop.
        model.test_run(next(iter(loader))["img"])

        for batch in loader:
            _ = model(batch["img"], mask=batch["gt"])

        # Release the injected model before touching the files it produced.
        del model
        gc.collect()
        torch.cuda.empty_cache()

        results_root = os.path.join(self.current_path, 'results/unet_seg')
        if CLEAR and os.path.isdir(results_root):
            shutil.rmtree(results_root)
import numpy as np
import torch
from numpy import nper
from torch.utils.data import Dataset, DataLoader

from c3d import C3D
from custom_dataset import ucf_c3d
from medcam import medcam
from vist_util import *
# Build the C3D action-recognition model and restore weights from a local checkpoint.
model = C3D(num_classes=101, pretrained=True)
loadfrom = 'c3d46perc.dict'
model.load_state_dict(torch.load(loadfrom))
print(model)

# UCF clips dataset; only the unmodified ('original') kernel stream is requested.
full_dataset = ucf_c3d('video_annotations_resnet', 'G:\\C3D Data', kernels=['original'])

dataloader = DataLoader(full_dataset, batch_size=1, shuffle=True, num_workers=0, pin_memory=True)

# Inject medcam so each forward pass also returns a Grad-CAM attention map taken
# from layer 'conv5b'; maps are additionally persisted under "attention_maps".
model = medcam.inject(model, output_dir="attention_maps", layer='conv5b', backend='gcam', return_attention=True, save_maps=True)

model.eval()
for batch in dataloader:
    video_matrix = batch['original']
    action_list = batch['action']
    score, attention = model(video_matrix)
    # detach() is a no-op on grad-free tensors but prevents .numpy() from
    # raising if the attention map still requires grad — values are unchanged.
    attention = torch.squeeze(attention).detach().numpy()
    # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the exact equivalent of the old alias.
    video_matrix = torch.squeeze(video_matrix).numpy().astype(int)  # 3,16,112,112
    # Reorder from (C, T, H, W) to (T, H, W, C) for display.
    video_matrix = np.transpose(video_matrix, (1, 2, 3, 0))
    showbunch(attention)
    showbunch(video_matrix)
Example #4
0
def training_loop(
    training_data,
    validation_data,
    device,
    params,
    output_dir,
    testing_data=None,
    epochs=None,
):
    """
    The main training loop.

    Trains for up to ``epochs`` epochs with early stopping on validation loss,
    logging train/validation/testing metrics to CSV files in ``output_dir``,
    checkpointing the best model, and optionally exporting it to ONNX at the end.

    Args:
        training_data (pandas.DataFrame): The data to use for training.
        validation_data (pandas.DataFrame): The data to use for validation.
        device (str): The device to perform computations on.
        params (dict): The parameters dictionary.
        output_dir (str): The output directory.
        testing_data (pandas.DataFrame): The data to use for testing.
        epochs (int): The number of epochs to train; if None, take from params.
    """
    # Some autodetermined factors
    if epochs is None:
        epochs = params["num_epochs"]
    params["device"] = device
    params["output_dir"] = output_dir
    params["training_data"] = training_data
    params["validation_data"] = validation_data
    params["testing_data"] = testing_data
    testingDataDefined = True
    if params["testing_data"] is None:
        # testing_data = validation_data
        testingDataDefined = False

    # Defining our model here according to parameters mentioned in the configuration file
    print("Number of channels : ", params["model"]["num_channels"])

    (
        model,
        optimizer,
        train_dataloader,
        val_dataloader,
        scheduler,
        params,
    ) = create_pytorch_objects(params, training_data, validation_data, device)

    if testingDataDefined:
        test_dataloader = get_testing_loader(params)

    # Start training time here
    start_time = time.time()

    if not (os.environ.get("HOSTNAME") is None):
        print("Hostname :", os.environ.get("HOSTNAME"))

    # datetime object containing current date and time
    print("Initializing training at :", get_date_time(), flush=True)

    # Setup a few loggers for tracking
    train_logger = Logger(
        logger_csv_filename=os.path.join(output_dir, "logs_training.csv"),
        metrics=params["metrics"],
    )
    valid_logger = Logger(
        logger_csv_filename=os.path.join(output_dir, "logs_validation.csv"),
        metrics=params["metrics"],
    )
    test_logger = Logger(
        logger_csv_filename=os.path.join(output_dir, "logs_testing.csv"),
        metrics=params["metrics"],
    )
    train_logger.write_header(mode="train")
    valid_logger.write_header(mode="valid")
    test_logger.write_header(mode="test")

    # Optionally wrap the model with medcam so forward passes also yield
    # attention maps; kept disabled until explicitly enabled downstream.
    if "medcam" in params:
        model = medcam.inject(
            model,
            output_dir=os.path.join(
                output_dir, "attention_maps", params["medcam"]["backend"]
            ),
            backend=params["medcam"]["backend"],
            layer=params["medcam"]["layer"],
            save_maps=False,
            return_attention=True,
            enabled=False,
        )
        params["medcam_enabled"] = False

    # Setup a few variables for tracking
    best_loss = 1e7
    patience, start_epoch = 0, 0
    first_model_saved = False
    best_model_path = os.path.join(
        output_dir, params["model"]["architecture"] + best_model_path_end
    )

    # if previous model file is present, load it up
    if os.path.exists(best_model_path):
        try:
            main_dict = load_model(best_model_path, params["device"])
            version_check(params["version"], version_to_check=main_dict["version"])
            model.load_state_dict(main_dict["model_state_dict"])
            start_epoch = main_dict["epoch"]
            optimizer.load_state_dict(main_dict["optimizer_state_dict"])
            best_loss = main_dict["loss"]
            print("Previous model successfully loaded.")
        except Exception as e:
            # BUGFIX: the original caught only RuntimeWarning and constructed
            # (but never raised) a warning instance, so any real load failure
            # (RuntimeError, KeyError, ...) crashed the run instead of falling
            # back to a freshly-initialized model as intended.
            print("Previous model could not be loaded, initializing model. Error:", e)

    print("Using device:", device, flush=True)

    # Iterate for number of epochs
    for epoch in range(start_epoch, epochs):

        if params["track_memory_usage"]:

            file_to_write_mem = os.path.join(output_dir, "memory_usage.csv")
            if os.path.exists(file_to_write_mem):
                # append to previously generated file
                file_mode_mem = "a"
                outputToWrite_mem = ""
            else:
                # if file was absent, write header information
                file_mode_mem = "w"
                outputToWrite_mem = "Epoch,Memory_Total,Memory_Available,Memory_Percent_Free,Memory_Usage,"  # used to write output
                if params["device"] == "cuda":
                    outputToWrite_mem += "CUDA_active.all.peak,CUDA_active.all.current,CUDA_active.all.allocated"
                outputToWrite_mem += "\n"

            # psutil.virtual_memory() indices: 0=total, 1=available, 2=percent, 3=used
            mem = psutil.virtual_memory()
            outputToWrite_mem += (
                str(epoch)
                + ","
                + str(mem[0])
                + ","
                + str(mem[1])
                + ","
                + str(mem[2])
                + ","
                + str(mem[3])
            )
            if params["device"] == "cuda":
                mem_cuda = torch.cuda.memory_stats()
                outputToWrite_mem += (
                    ","
                    + str(mem_cuda["active.all.peak"])
                    + ","
                    + str(mem_cuda["active.all.current"])
                    + ","
                    + str(mem_cuda["active.all.allocated"])
                )
            outputToWrite_mem += ",\n"
            # Context manager ensures the handle is closed even if write fails
            # (the original used bare open/close).
            with open(file_to_write_mem, file_mode_mem) as file_mem:
                file_mem.write(outputToWrite_mem)

        # Printing times
        epoch_start_time = time.time()
        print("*" * 20)
        print("*" * 20)
        print("Starting Epoch : ", epoch)
        if params["verbose"]:
            print("Epoch start time : ", get_date_time())

        params["current_epoch"] = epoch

        epoch_train_loss, epoch_train_metric = train_network(
            model, train_dataloader, optimizer, params
        )
        epoch_valid_loss, epoch_valid_metric = validate_network(
            model, val_dataloader, scheduler, params, epoch, mode="validation"
        )

        patience += 1

        # Write the losses to a logger
        train_logger.write(epoch, epoch_train_loss, epoch_train_metric)
        valid_logger.write(epoch, epoch_valid_loss, epoch_valid_metric)

        if testingDataDefined:
            epoch_test_loss, epoch_test_metric = validate_network(
                model, test_dataloader, scheduler, params, epoch, mode="testing"
            )
            test_logger.write(epoch, epoch_test_loss, epoch_test_metric)

        if params["verbose"]:
            print("Epoch end time : ", get_date_time())
        epoch_end_time = time.time()
        print(
            "Time taken for epoch : ",
            (epoch_end_time - epoch_start_time) / 60,
            " mins",
            flush=True,
        )

        # Save a checkpoint on the first epoch and whenever validation improves;
        # improvement also resets the early-stopping patience counter.
        if not (first_model_saved) or (epoch_valid_loss <= torch.tensor(best_loss)):
            best_loss = epoch_valid_loss
            best_train_idx = epoch
            patience = 0

            model.eval()
            save_model(
                {
                    "epoch": best_train_idx,
                    "model_state_dict": model.state_dict(),
                    "optimizer_state_dict": optimizer.state_dict(),
                    "loss": best_loss,
                },
                model,
                params,
                best_model_path,
                onnx_export=False,
            )
            model.train()
            first_model_saved = True

        if params["model"]["save_at_every_epoch"]:
            save_model(
                {
                    "epoch": epoch,
                    "model_state_dict": model.state_dict(),
                    "optimizer_state_dict": optimizer.state_dict(),
                    "loss": epoch_valid_loss,
                },
                model,
                params,
                os.path.join(
                    output_dir,
                    params["model"]["architecture"]
                    + "_epoch_"
                    + str(epoch)
                    + ".pth.tar",
                ),
                onnx_export=False,
            )
            model.train()

        print("Current Best epoch: ", best_train_idx)

        if patience > params["patience"]:
            print(
                "Performance Metric has not improved for %d epochs, exiting training loop!"
                % (patience),
                flush=True,
            )
            break

    # End train time
    end_time = time.time()

    print(
        "Total time to finish Training : ",
        (end_time - start_time) / 60,
        " mins",
        flush=True,
    )

    # once the training is done, optimize the best model
    if os.path.exists(best_model_path):
        # ONNX export is skipped for architectures known not to export cleanly,
        # or when the config explicitly disables it.
        onnx_export = True
        if params["model"]["architecture"] in ["sdnet", "brain_age"]:
            onnx_export = False
        elif (
            "onnx_export" in params["model"] and params["model"]["onnx_export"] == False
        ):
            onnx_export = False

        if onnx_export:
            print("Optimizing best model.")

            try:
                # Reload the best checkpoint so the exported model matches it.
                main_dict = load_model(best_model_path, params["device"])
                version_check(params["version"], version_to_check=main_dict["version"])
                model.load_state_dict(main_dict["model_state_dict"])
                best_epoch = main_dict["epoch"]
                optimizer.load_state_dict(main_dict["optimizer_state_dict"])
                best_loss = main_dict["loss"]
                save_model(
                    {
                        "epoch": best_epoch,
                        "model_state_dict": model.state_dict(),
                        "optimizer_state_dict": optimizer.state_dict(),
                        "loss": best_loss,
                    },
                    model,
                    params,
                    best_model_path,
                    onnx_export,
                )
            except Exception as e:
                print("Best model could not be loaded, error: ", e)