Example #1
from pathlib import Path

# unique_name and write_json are helper functions from the author's PythonUtils package.
def write_JSON_records(path_model, list_images, destination):
    """
    Keep a simple JSON record of where the model came from.
    :param path_model: path to the model that generated the predictions
    :param list_images: list of the image paths used in the prediction
    :param destination: folder in which the JSON record is written
    :return: None
    """
    data = {}
    data["model"] = path_model
    data["images"] = list_images
    path_json = Path(destination) / (unique_name() + "_prediction_details.json")
    write_json(path_json, data)
    print("JSON record written for the prediction process.")
    def run(self, size_step=None, size_epoch=None):
        """
        Actually execute the pipeline once all stages are ready.
        :param size_step: steps per epoch; falls back to self.size_step when None
        :param size_epoch: number of epochs; falls back to self.size_epoch when None
        :return: paths of the saved final model and its weights, or None if the data was never loaded
        """
        if self.model_stage != stage.DataLoaded:
            print("Stage: the data model has not been loaded yet.")
            return None

        # Fall back to the instance defaults when the sizes were not passed in.
        if size_step is None:
            size_step = self.size_step
        if size_epoch is None:
            size_epoch = self.size_epoch

        # Record the input parameters used for the model BEFORE actually doing the training.
        self.record_settings()

        # Set the proper callbacks to write checkpoints and record results for TensorBoard.
        self.set_callbacks()

        self.model.fit_generator(
            self.train_data,
            steps_per_epoch=size_step,
            epochs=size_epoch,
            validation_data=self.test_data,
            validation_steps=size_step,
            callbacks=self.callbacks_list,
        )
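        # Note: fit_generator is deprecated in recent Keras/TensorFlow releases,
        # where model.fit accepts generators directly.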

        # Timestamp and save the final model as well as its weights
        timestamp = unique_name()

        name_final_model = os.path.join(
            self.path_model, f"{timestamp}_FinalModel_{__name__}.h5")
        self.model.save(name_final_model)

        name_final_model_weights = os.path.join(
            self.path_model, f"{timestamp}_FinalModelWeights_{__name__}.h5")
        self.model.save_weights(name_final_model_weights)

        self.model_stage = stage.Ran
        return name_final_model, name_final_model_weights
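
The stage gate at the top of run() implies an enum-like stage object. A minimal sketch reconstructed from the names used in this example (Initialized, DataLoaded, Ran); the actual definition is not shown here:

from enum import Enum, auto

class stage(Enum):
    Initialized = auto()  # constructor finished
    DataLoaded = auto()   # training/test data generators attached
    Ran = auto()          # fit completed and model artifacts saved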
Example #3
import os
import tempfile

def unpack_subject_zip(
    zip_file: str, temp_folder: str = tempfile.gettempdir()
) -> tempfile.TemporaryDirectory:
    """
    Unpack the given zip file into a temporary folder and return a reference to that folder.
    :param zip_file: path to the zip archive; the archive is deleted after extraction
    :param temp_folder: parent folder in which the temporary directory is created
    :return: the tempfile.TemporaryDirectory that holds the extracted content
    """

    # Create the temporary directory at the specified location; it is erased automatically once the TemporaryDirectory object is cleaned up.
    folder = tempfile.TemporaryDirectory(prefix=unique_name(), dir=temp_folder)

    logger.debug("Subject ZIP temporary location created at:" + folder.name)
    logger.debug("Unzipping to that location")
    # Unzip into the temporary directory.
    orthanc_query.flatUnZip(zip_file, folder.name)

    logger.debug("Unzip completed. ")

    # Remove the zip file now that its content has been extracted.
    os.remove(zip_file)
    logger.debug("Removed the zip archive.")
    return folder
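
Because the function returns the TemporaryDirectory object itself, the caller must keep a reference to it: the extracted files disappear once the object is cleaned up. A usage sketch with a hypothetical archive path and a hypothetical downstream step:

folder = unpack_subject_zip("/data/incoming/subject_0001.zip")
try:
    process_dicom_folder(folder.name)  # hypothetical downstream processing
finally:
    folder.cleanup()  # explicitly remove the extracted files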
Example #4
import os
import sys
from ast import literal_eval

from imageio import imsave
from pathlib import Path

path_current_file = Path(os.path.realpath(__file__))
# This file is located three folders deep: /, /model, /model/Kaggle_DCGAN_Dogs.
path_module = path_current_file.parents[2]
print(f"{path_module}")
sys.path.append(f"{path_module}")
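
# For reference, Path.parents is zero-indexed, so parents[2] climbs three levels.
# A quick sketch with a hypothetical layout:
#   p = Path("/repo/model/Kaggle_DCGAN_Dogs/train.py")
#   p.parents[0] == Path("/repo/model/Kaggle_DCGAN_Dogs")
#   p.parents[1] == Path("/repo/model")
#   p.parents[2] == Path("/repo")   # the module root appended to sys.path above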

# Some convenience function I already prebuilt: github.com/dyt811/PythonUtils/
from PythonUtils.PUFile import unique_name
from PythonUtils.PUFolder import recursive_list, create

path_log_run = path_module / Path("logs") / unique_name()

create(path_log_run)  # create the run-specific log folder if it doesn't already exist.

# dimensions_noise = 100
"""
Largely inspired from source: https://github.com/DataSnaek/DCGAN-Keras
"""


class DCGAN:
    def __init__(self, path_discriminator, path_generator, path_output,
                 img_size):
        # Key image properties.
        ...
    def __init__(
        self,
        input_shape,
        output_classes,
        train_data_path: Path,
        optimizer="adam",
        loss="mse",
        metrics=None,
        checkpoint_metric="val_loss",
        checkpoint_metric_mode="min",
    ):
        # Use these settings per constructor input.
        self.input_shape = input_shape
        self.output_classes = output_classes
        self.loss = loss
        self.optimizer = optimizer

        # Default metrics.
        if metrics is None:
            self.metrics = [
                "mae",
                "mse",
                "mape",
                "cosine",
                loss_SSIM,
                loss_mae_diff_SSIM_composite,
                loss_mse_diff_SSIM_composite,
                f1_metric,
            ]
        else:
            self.metrics = metrics

        self.checkpoint_metric = checkpoint_metric
        self.checkpoint_metric_mode = checkpoint_metric_mode

        self.train_data = None
        self.path_train_data: Path = train_data_path
        self.test_data = None  # fixme: independent data used for testing (optional?)
        self.callbacks_list = None

        self.model = None
        self.path_prediction = None

        # Default step and epoch sizes; easily overridden during the run stage.
        self.size_step = 256
        self.size_epoch = 500

        # Dynamically derive the project root from this file's location.
        this_file = os.path.realpath(__file__)
        # todo: this folder depth is hard coded; needs to be generalized.
        project_root = get_abspath(this_file, 2)

        # Log path.
        self.path_log, self.path_model = get_paths(project_root)

        # Log run path.
        self.path_log_run = os.path.join(self.path_log, unique_name() + __name__)

        # Create the Log run path.
        create(self.path_log_run)

        self.model_stage = stage.Initialized
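
    # A construction sketch; the class name ModelPipeline is hypothetical, as this
    # snippet does not show the enclosing class:
    #   model_wrapper = ModelPipeline(
    #       input_shape=(256, 256, 1),           # hypothetical input dimensions
    #       output_classes=2,
    #       train_data_path=Path("data/train"),  # hypothetical training data folder
    #   )
    #   model_wrapper.size_epoch = 50            # override the default of 500 before run()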
    def set_callbacks(self):
        """
        Register the training callbacks: model checkpoints (best loss, best F1,
        latest weights) and the TensorBoard updater.
        :return: None; the callbacks are stored in self.callbacks_list
        """
        # Timestamp-based name shared by all checkpoint files of this run.
        model_name = unique_name()

        checkpoint_last_best_model = os.path.join(
            self.path_model, f"{model_name}_LastBest_{__name__}.h5")
        checkpoint_last_model_weight = os.path.join(
            self.path_model, f"{model_name}_Weights_{__name__}.h5")
        checkpoint_best_loss_model_weight = os.path.join(
            self.path_model, f"{model_name}_LastBestLossWeights_{__name__}.h5")
        checkpoint_best_f1_model_weight = os.path.join(
            self.path_model, f"{model_name}_LastBestF1Weights_{__name__}.h5")

        # Checkpoint for saving the LAST BEST MODEL.
        callback_save_best_loss_model = ModelCheckpoint(
            checkpoint_last_best_model,
            monitor=self.checkpoint_metric,
            verbose=1,
            save_best_only=True,
            mode=self.checkpoint_metric_mode,
        )

        # Checkpoint for saving the LAST BEST MODEL WEIGHTS only without saving the full model.
        callback_save_best_loss_model_weights = ModelCheckpoint(
            checkpoint_best_loss_model_weight,
            monitor=self.checkpoint_metric,
            verbose=1,
            save_best_only=True,
            save_weights_only=True,
            mode=self.checkpoint_metric_mode,
        )

        # Checkpoint for saving the BEST F1 MODEL WEIGHTS only, without saving the full model.
        callback_save_best_f1_model_weights = ModelCheckpoint(
            checkpoint_best_f1_model_weight,
            monitor="val_f1_metric",
            verbose=1,
            save_best_only=True,
            save_weights_only=True,
            mode="max",
        )

        # Checkpoint for saving the LATEST MODEL WEIGHT.
        callback_save_model_weights = ModelCheckpoint(
            checkpoint_last_model_weight, verbose=1, save_weights_only=True)

        # Callback for updating TensorBoard.
        callback_tensorboard = TensorBoard(log_dir=self.path_log_run,
                                           histogram_freq=0,
                                           write_images=True)

        self.callbacks_list = [
            callback_tensorboard,  # always update the tensorboard
            callback_save_model_weights,  # always save the latest model weights.
            callback_save_best_loss_model,  # always save best loss model
            callback_save_best_loss_model_weights,
            callback_save_best_f1_model_weights,
        ]
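
Note how each monitor pairs with a mode: the val_loss checkpoints use mode="min" (save when the loss decreases) while the F1 checkpoint uses mode="max" (save when the score increases). The intended call order, sketched with a hypothetical pipeline instance:

pipeline.set_callbacks()                    # populates pipeline.callbacks_list
pipeline.run(size_step=128, size_epoch=10)  # hands callbacks_list to fit_generator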
Example #7
            check_new_data = True
            sleep_until(timeobject(hour=19))

        except (NotImplementedError, ValueError):
            # except (ValueError, AssertionError, IOError, OSError, AssertionError, MachineError, ConnectionError):

            current_study_UID = current_import_process.orthanc_list_all_StudiesUIDs[
                current_import_process.orthanc_index_current_study]
            logger.critical(
                f"A critical error was encountered, aborting the scan of subject {current_study_UID}"
            )

            from DICOMTransit.settings import config_get

            zip_path = config_get("ZipPath")
            name_log = os.path.join(
                zip_path, "StateMachineDump_" + unique_name() + ".pickle")
            with open(name_log, "wb") as f:
                # Pickle the 'data' dictionary using the highest protocol available.
                pickle.dump(current_import_process.machine, f,
                            pickle.HIGHEST_PROTOCOL)
            logger.warning(
                f"A finite state machine pickle dump has been made at {name_log}"
            )
            logger.warning("Check that path for more detail. ")
            current_import_process.critical_error = True
            current_import_process.trigger_wrap(
                TR_ProcessNextSubject
            )  # When ONE subject impedes the flow, move on to the next one (without checking for new data)!
            check_new_data = False
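
sleep_until and timeobject are not defined in this snippet. A minimal sketch of one possible implementation, assuming timeobject is an alias for datetime.time:

import time
from datetime import datetime, time as timeobject, timedelta

def sleep_until(target: timeobject) -> None:
    """Block until the next occurrence of the given wall-clock time."""
    now = datetime.now()
    target_dt = datetime.combine(now.date(), target)
    if target_dt <= now:
        target_dt += timedelta(days=1)  # target already passed today; wait until tomorrow
    time.sleep((target_dt - now).total_seconds())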