Example #1
def main():

    parser = cli.parse_command_line()
    args = parser.parse_args()

    if args.task == "train" and args.mode == "slice":
        args.mode_task = "cnn"

    commandline = parser.parse_known_args()

    arguments = vars(args)

    if arguments['task'] not in ('preprocessing', 'extract', 'generate',
                                 'tsvtool', 'quality_check', 'classify'):
        commandline_to_json(commandline)
        with open(path.join(args.output_dir, 'environment.txt'), 'w') as text_file:
            text_file.write('Version of python: %s \n' % sys.version)
            text_file.write('Version of pytorch: %s \n' % torch.__version__)

    if arguments['task'] in ['train', 'quality_check']:
        if not args.use_cpu and not torch.cuda.is_available():
            raise ValueError(
                "No GPU is available. Please add the -cpu flag to run on CPU.")

    args.func(args)
Example #2
def main():

    parser = cli.parse_command_line()
    args = parser.parse_args()

    if args.task == "train" and args.mode == "slice":
        args.mode_task = "cnn"

    commandline = parser.parse_known_args()

    arguments = vars(args)

    if arguments['task'] not in ('preprocessing', 'extract', 'generate',
                                 'tsvtool', 'quality_check', 'classify'):
        commandline_to_json(commandline)
        write_requirements_version(args.output_dir)

    if hasattr(args, "use_cpu"):
        if not args.use_cpu and not torch.cuda.is_available():
            raise ValueError(
                "No GPU is available. Please add the -cpu flag to run on CPU.")

    args.func(args)
Example #3
File: main.py Project: basrie/AD-DL
def main():

    parser = cli.parse_command_line()
    args = parser.parse_args()

    commandline = parser.parse_known_args()

    if args.train_autoencoder:
        model_type = 'autoencoder'
    else:
        model_type = 'cnn'

    commandline_to_json(commandline, model_type)

    args.func(args)
Example #4
def main():

    parser = cli.parse_command_line()
    args = parser.parse_args()
    
    print(args)
    commandline = parser.parse_known_args()

    if hasattr(args, 'train_autoencoder'):
        model_type = 'autoencoder'
    else:
        model_type = 'cnn'

    arguments = vars(args)

    if arguments['task'] != 'preprocessing':
        commandline_to_json(commandline, model_type)

    args.func(args)
Example #5
        # Look up the optimizer class named in params and instantiate it on the
        # trainable parameters only.
        optimizer = getattr(torch.optim, params.optimizer)(
            filter(lambda x: x.requires_grad, model.parameters()),
            lr=params.learning_rate,
            weight_decay=params.weight_decay)
        setattr(params, 'beginning_epoch', 0)

        # Define output directories
        log_dir = os.path.join(params.output_dir, 'fold-%i' % fi,
                               'tensorboard_logs')
        model_dir = os.path.join(params.output_dir, 'fold-%i' % fi, 'models')

        print('Beginning the training task')
        train(model, train_loader, valid_loader, criterion, optimizer, False,
              log_dir, model_dir, params)

        test_cnn(train_loader, "train", fi, criterion, options)
        test_cnn(valid_loader, "validation", fi, criterion, options)

    total_time = time() - total_time
    print("Total time of computation: %d s" % total_time)


if __name__ == "__main__":
    commandline = parser.parse_known_args()
    commandline_to_json(commandline)
    options = commandline[0]
    if commandline[1]:
        raise Exception("unknown arguments: %s" % commandline[1])
    train_CNN_bad_data_split(options)
Example #6
                                valid_df,
                                metrics_valid,
                                fi,
                                dataset='validation',
                                selection=selection)

            soft_voting_to_tsvs(params.output_dir,
                                fi,
                                dataset='train',
                                selection=selection,
                                selection_threshold=params.selection_threshold)
            soft_voting_to_tsvs(params.output_dir,
                                fi,
                                dataset='validation',
                                selection=selection,
                                selection_threshold=params.selection_threshold)
            torch.cuda.empty_cache()

    total_time = time() - total_time
    print("Total time of computation: %d s" % total_time)


if __name__ == "__main__":
    commandline = parser.parse_known_args()
    commandline_to_json(commandline, "CNN")
    options = commandline[0]
    if commandline[1]:
        raise Exception("unknown arguments: %s" % commandline[1])
    train_CNN_bad_data_split(options)
Example #7
def inference_from_model(caps_dir,
                         tsv_path,
                         model_path=None,
                         json_file=None,
                         prefix=None,
                         labels=True,
                         gpu=True,
                         num_workers=0,
                         batch_size=1,
                         prepare_dl=False,
                         selection_metrics=None,
                         diagnoses=None,
                         logger=None,
                         multi_cohort=False):
    """
    Inference from previously trained model.

    This functions uses a previously trained model to classify the input(s).
    The model is stored in the variable model_path and it assumes the folder
    structure given by the training stage. Particullary to have a prediction at
    image level, it assumes that results of the validation set are stored in
    the model_path folder in order to perform soft-voiting at the slice/patch
    level and also for multicnn.

    Args:
        caps_dir: folder containing the tensor files (.pt version of MRI)
        tsv_path: file with the name of the MRIs to process (single or multiple)
        model_path: file with the model (pth format).
        json_file: file containing the training parameters.
        prefix: prefix of all classification outputs.
        labels: by default is True. If False no metrics tsv files will be written.
        measurements.tsv
        gpu: if true, it uses gpu.
        num_workers: num_workers used in DataLoader
        batch_size: batch size of the DataLoader
        prepare_dl: if true, uses extracted patches/slices otherwise extract them
        on-the-fly.
        selection_metrics: list of metrics to find best models to be evaluated.
        diagnoses: list of diagnoses to be tested if tsv_path is a folder.
        logger: Logger instance.
        multi_cohort (bool): If True caps_directory is the path to a TSV file linking cohort names and paths.

    Returns:
        Files written in the output folder with prediction results and metrics. By
        default the output folder is named cnn_classification and it is inside the
        model_folder.

    Raises:


    """
    import argparse
    import logging

    if logger is None:
        logger = logging

    parser = argparse.ArgumentParser()
    parser.add_argument("model_path",
                        type=str,
                        help="Path to the trained model folder.")
    options = parser.parse_args([model_path])
    options = read_json(options, json_path=json_file)

    logger.debug("Load model with these options:")
    logger.debug(options)

    # Overwrite options with user input
    options.use_cpu = not gpu
    options.nproc = num_workers
    options.batch_size = batch_size
    if diagnoses is not None:
        options.diagnoses = diagnoses

    options = translate_parameters(options)

    if options.mode_task == "multicnn":
        num_cnn = compute_num_cnn(caps_dir, tsv_path, options, "test")
    else:
        num_cnn = None
    # Define the path
    currentDirectory = pathlib.Path(model_path)
    # Search for 'fold-*' pattern
    currentPattern = "fold-*"

    # Loop over the folds found in the model folder
    for fold_dir in currentDirectory.glob(currentPattern):
        fold = int(str(fold_dir).split("-")[-1])
        out_path = join(fold_dir, 'models')

        for selection_metric in selection_metrics:

            if options.mode_task == 'multicnn':
                for cnn_dir in listdir(out_path):
                    if not exists(
                            join(out_path, cnn_dir, "best_%s" %
                                 selection_metric, 'model_best.pth.tar')):
                        raise FileNotFoundError(
                            errno.ENOENT, strerror(errno.ENOENT),
                            join(out_path, cnn_dir,
                                 "best_%s" % selection_metric,
                                 'model_best.pth.tar'))

            else:
                full_model_path = join(out_path, "best_%s" % selection_metric)
                if not exists(join(full_model_path, 'model_best.pth.tar')):
                    raise FileNotFoundError(
                        errno.ENOENT, strerror(errno.ENOENT),
                        join(full_model_path, 'model_best.pth.tar'))

            performance_dir = join(fold_dir, 'cnn_classification',
                                   'best_%s' % selection_metric)

            makedirs(performance_dir, exist_ok=True)

            commandline_to_json(
                {
                    "output_dir": model_path,
                    "caps_dir": caps_dir,
                    "tsv_path": tsv_path,
                    "prefix": prefix,
                    "labels": labels
                },
                filename=f"commandline_classify-{prefix}")

            # Launch the corresponding function, depending on the mode.
            inference_from_model_generic(caps_dir,
                                         tsv_path,
                                         out_path,
                                         options,
                                         prefix,
                                         currentDirectory,
                                         fold,
                                         "best_%s" % selection_metric,
                                         labels=labels,
                                         num_cnn=num_cnn,
                                         logger=logger,
                                         multi_cohort=multi_cohort,
                                         prepare_dl=prepare_dl)

            # Soft voting
            if hasattr(options, 'selection_threshold'):
                selection_thresh = options.selection_threshold
            else:
                selection_thresh = 0.8

            # Write files at the image level (for patch, roi and slice).
            # It assumes the existence of validation files to perform soft-voting.
            if options.mode in ["patch", "roi", "slice"]:
                soft_voting_to_tsvs(currentDirectory,
                                    fold,
                                    "best_%s" % selection_metric,
                                    options.mode,
                                    prefix,
                                    num_cnn=num_cnn,
                                    selection_threshold=selection_thresh,
                                    use_labels=labels,
                                    logger=logger)

            logger.info("Prediction results and metrics are written in the "
                        "following folder: %s" % performance_dir)