Example #1
def test_cnn(data_loader, subset_name, split, criterion, options):

    for selection in ["best_acc", "best_loss"]:
        # Load the best model saved during training
        model = init_model(options.model,
                           gpu=options.gpu,
                           dropout=options.dropout)
        model, best_epoch = load_model(model,
                                       os.path.join(options.output_dir,
                                                    'best_model_dir',
                                                    "fold_%i" % split, 'CNN',
                                                    selection),
                                       gpu=options.gpu,
                                       filename='model_best.pth.tar')

        results_df, metrics = test(model, data_loader, options.gpu, criterion,
                                   options.mode)
        print("Slice level balanced accuracy is %f" %
              metrics['balanced_accuracy'])

        mode_level_to_tsvs(options.output_dir,
                           results_df,
                           metrics,
                           split,
                           selection,
                           options.mode,
                           dataset=subset_name)
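The project-specific `test` helper above returns a results DataFrame and a metrics dict; the loop it wraps is the standard PyTorch evaluation pattern. A self-contained sketch of that pattern with a stand-in model and random data (nothing below is the project's actual code):

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

model = nn.Linear(10, 2)  # stand-in classifier
dataset = TensorDataset(torch.randn(64, 10), torch.randint(0, 2, (64,)))
loader = DataLoader(dataset, batch_size=16)

model.eval()
hits = torch.zeros(2)
counts = torch.zeros(2)
with torch.no_grad():
    for inputs, labels in loader:
        preds = model(inputs).argmax(dim=1)
        for c in range(2):
            mask = labels == c
            hits[c] += (preds[mask] == c).sum()
            counts[c] += mask.sum()

# Balanced accuracy = mean of the per-class recalls
balanced_accuracy = (hits / counts.clamp(min=1)).mean().item()
print("balanced accuracy is %f" % balanced_accuracy)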
Example #2
def test_cnn(output_dir,
             data_loader,
             subset_name,
             split,
             criterion,
             cnn_index,
             model_options,
             gpu=False):
    for selection in ["best_balanced_accuracy", "best_loss"]:
        # Load the best model saved during training
        model = create_model(model_options.model,
                             gpu,
                             dropout=model_options.dropout)
        model, best_epoch = load_model(model,
                                       os.path.join(output_dir,
                                                    'fold-%i' % split,
                                                    'models',
                                                    'cnn-%i' % cnn_index,
                                                    selection),
                                       gpu=gpu,
                                       filename='model_best.pth.tar')

        results_df, metrics = test(model, data_loader, gpu, criterion,
                                   model_options.mode)
        print("%s level balanced accuracy is %f" %
              (model_options.mode, metrics['balanced_accuracy']))

        mode_level_to_tsvs(output_dir,
                           results_df,
                           metrics,
                           split,
                           selection,
                           model_options.mode,
                           dataset=subset_name,
                           cnn_index=cnn_index)
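Relative to Example #1, this variant evaluates one member of a multi-CNN ensemble, so the checkpoint path gains a `cnn-%i` level and folds are named `fold-%i` rather than `fold_%i`. A quick sketch of the directory layout the `load_model` call expects (the concrete values are illustrative):

import os

output_dir, split, cnn_index, selection = 'results', 0, 2, 'best_loss'
model_dir = os.path.join(output_dir, 'fold-%i' % split, 'models',
                         'cnn-%i' % cnn_index, selection)
print(model_dir)  # results/fold-0/models/cnn-2/best_loss (POSIX separators)
# load_model then looks for model_best.pth.tar inside model_dir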
Example #3
def group_backprop(options):

    main_logger = return_logger(options.verbose, "main process")
    options = translate_parameters(options)

    fold_list = [
        fold for fold in os.listdir(options.model_path)
        if fold.startswith("fold-")
    ]
    if len(fold_list) == 0:
        raise ValueError("No folds were found at path %s" % options.model_path)

    for fold in fold_list:
        main_logger.info(fold)
        for selection in options.selection:
            results_path = path.join(options.model_path, fold, 'gradients',
                                     selection, options.name)

            model_options = argparse.Namespace()
            model_options = read_json(
                model_options, path.join(options.model_path,
                                         'commandline.json'))
            model_options = translate_parameters(model_options)
            model_options.gpu = options.gpu

            if options.tsv_path is None:
                options.tsv_path = model_options.tsv_path
            if options.input_dir is None:
                options.input_dir = model_options.input_dir
            if options.target_diagnosis is None:
                options.target_diagnosis = options.diagnosis

            criterion = get_criterion(model_options.loss)

            # Data management (remove data not well predicted by the CNN)
            training_df = load_data_test(options.tsv_path, [options.diagnosis],
                                         baseline=options.baseline)
            training_df.reset_index(drop=True, inplace=True)

            # Model creation
            _, all_transforms = get_transforms(
                model_options.mode,
                minmaxnormalization=model_options.minmaxnormalization)
            data_example = return_dataset(model_options.mode,
                                          options.input_dir,
                                          training_df,
                                          model_options.preprocessing,
                                          train_transformations=None,
                                          all_transformations=all_transforms,
                                          params=options)

            model = create_model(model_options, data_example.size)
            model_dir = os.path.join(options.model_path, fold, 'models',
                                     selection)
            model, best_epoch = load_model(model,
                                           model_dir,
                                           gpu=options.gpu,
                                           filename='model_best.pth.tar')
            options.output_dir = results_path
            commandline_to_json(options, logger=main_logger)

            # Keep only subjects who were correctly / wrongly predicted by the network
            training_df = sort_predicted(model,
                                         training_df,
                                         options.input_dir,
                                         model_options,
                                         criterion,
                                         options.keep_true,
                                         batch_size=options.batch_size,
                                         num_workers=options.num_workers,
                                         gpu=options.gpu)

            if len(training_df) > 0:

                # Save the tsv files used for the saliency maps
                training_df.to_csv(path.join(results_path, 'data.tsv'),
                                   sep='\t',
                                   index=False)

                data_train = return_dataset(model_options.mode,
                                            options.input_dir,
                                            training_df,
                                            model_options.preprocessing,
                                            train_transformations=None,
                                            all_transformations=all_transforms,
                                            params=options)

                train_loader = DataLoader(data_train,
                                          batch_size=options.batch_size,
                                          shuffle=True,
                                          num_workers=options.num_workers,
                                          pin_memory=True)

                interpreter = VanillaBackProp(model, gpu=options.gpu)

                cum_map = 0
                for data in train_loader:
                    if options.gpu:
                        input_batch = data['image'].cuda()
                    else:
                        input_batch = data['image']

                    maps = interpreter.generate_gradients(
                        input_batch,
                        data_train.diagnosis_code[options.target_diagnosis])
                    cum_map += maps.sum(axis=0)

                mean_map = cum_map / len(data_train)

                if len(data_train.size) == 4:
                    if options.nifti_template_path is not None:
                        image_nii = nib.load(options.nifti_template_path)
                        affine = image_nii.affine
                    else:
                        affine = np.eye(4)

                    mean_map_nii = nib.Nifti1Image(mean_map[0], affine)
                    nib.save(mean_map_nii, path.join(results_path,
                                                     "map.nii.gz"))
                    np.save(path.join(results_path, "map.npy"), mean_map[0])
                else:
                    jpg_path = path.join(results_path, "map.jpg")
                    plt.imshow(mean_map[0],
                               cmap="coolwarm",
                               vmin=-options.vmax,
                               vmax=options.vmax)
                    plt.colorbar()
                    plt.savefig(jpg_path)
                    plt.close()
                    numpy_path = path.join(results_path, "map.npy")
                    np.save(numpy_path, mean_map[0])
            else:
                main_logger.warning("There are no subjects for the given options")
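`VanillaBackProp` is the project's wrapper, but the underlying technique is plain vanilla backpropagation: the gradient of the target-class score with respect to the input, accumulated over the dataset and averaged. A self-contained sketch with a toy model (the model and shapes are stand-ins):

import torch
import torch.nn as nn

def vanilla_gradients(model, inputs, target_class):
    """Gradient of the target-class score w.r.t. each input in the batch."""
    inputs = inputs.clone().requires_grad_(True)
    model.eval()
    score = model(inputs)[:, target_class].sum()
    model.zero_grad()
    score.backward()
    return inputs.grad.detach()

model = nn.Sequential(nn.Flatten(), nn.Linear(64, 2))  # toy classifier over 8x8 inputs
batch = torch.randn(4, 1, 8, 8)
cum_map = vanilla_gradients(model, batch, target_class=1).sum(dim=0)
mean_map = cum_map / len(batch)  # group-level average, as in group_backprop above
print(mean_map.shape)  # torch.Size([1, 8, 8])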
Example #4
def test_cnn(output_dir, data_loader, subset_name, split, criterion,
             cnn_index, model_options, gpu=False, train_begin_time=None):
    metric_dict = {}
    for selection in ["best_balanced_accuracy", "best_loss"]:
        # Load the best model saved during training
        if model_options.model in ('UNet3D', 'ResidualUNet3D',
                                   'UNet3D_add_more_fc',
                                   'ResidualUNet3D_add_more_fc'):
            print('********** init %s model for test! **********' % model_options.model)
            model = create_model(model_options.model,
                                 gpu=model_options.gpu,
                                 dropout=model_options.dropout,
                                 device_index=model_options.device,
                                 in_channels=model_options.in_channels,
                                 out_channels=model_options.out_channels,
                                 f_maps=model_options.f_maps,
                                 layer_order=model_options.layer_order,
                                 num_groups=model_options.num_groups,
                                 num_levels=model_options.num_levels)
        elif model_options.model in ('VoxCNN', 'ConvNet3D'):
            print('********** init %s model for test! **********' % model_options.model)
            model = create_model(model_options.model,
                                 gpu=model_options.gpu,
                                 device_index=model_options.device)
        elif 'gcn' in model_options.model:
            print('********** init {}-{} model for test! **********'.format(
                model_options.model, model_options.gnn_type))
            model = create_model(model_options.model,
                                 gpu=model_options.gpu,
                                 device_index=model_options.device,
                                 gnn_type=model_options.gnn_type,
                                 gnn_dropout=model_options.gnn_dropout,
                                 gnn_dropout_adj=model_options.gnn_dropout_adj,
                                 gnn_non_linear=model_options.gnn_non_linear,
                                 gnn_undirected=model_options.gnn_undirected,
                                 gnn_self_loop=model_options.gnn_self_loop,
                                 gnn_threshold=model_options.gnn_threshold)
        elif model_options.model == 'ROI_GCN':
            print('********** init ROI_GCN model for test! **********')
            model = create_model(model_options.model,
                                 gpu=model_options.gpu,
                                 device_index=model_options.device,
                                 gnn_type=model_options.gnn_type,
                                 gnn_dropout=model_options.gnn_dropout,
                                 gnn_dropout_adj=model_options.gnn_dropout_adj,
                                 gnn_non_linear=model_options.gnn_non_linear,
                                 gnn_undirected=model_options.gnn_undirected,
                                 gnn_self_loop=model_options.gnn_self_loop,
                                 gnn_threshold=model_options.gnn_threshold,
                                 nodel_vetor_layer=model_options.nodel_vetor_layer,
                                 classify_layer=model_options.classify_layer,
                                 num_node_features=model_options.num_node_features,
                                 num_class=model_options.num_class,
                                 roi_size=model_options.roi_size,
                                 num_nodes=model_options.num_nodes,
                                 gnn_pooling_layers=model_options.gnn_pooling_layers,
                                 global_sort_pool_k=model_options.global_sort_pool_k,
                                 layers=model_options.layers,
                                 shortcut_type=model_options.shortcut_type,
                                 use_nl=model_options.use_nl,
                                 dropout=model_options.dropout,
                                 device=model_options.device)
        elif model_options.model == 'SwinTransformer3d':
            print('********** init SwinTransformer3d model for test! **********')
            model = create_model(model_options.model,
                                 gpu=model_options.gpu,
                                 dropout=model_options.dropout,
                                 device_index=model_options.device,
                                 sw_patch_size=model_options.sw_patch_size,
                                 window_size=model_options.window_size,
                                 mlp_ratio=model_options.mlp_ratio,
                                 drop_rate=model_options.drop_rate,
                                 attn_drop_rate=model_options.attn_drop_rate,
                                 drop_path_rate=model_options.drop_path_rate,
                                 qk_scale=model_options.qk_scale,
                                 embed_dim=model_options.embed_dim,
                                 depths=model_options.depths,
                                 num_heads=model_options.num_heads,
                                 qkv_bias=model_options.qkv_bias,
                                 ape=model_options.ape,
                                 patch_norm=model_options.patch_norm)
        else:
            model = create_model(model_options.model,
                                 gpu=model_options.gpu,
                                 dropout=model_options.dropout,
                                 device_index=model_options.device)
        model, best_epoch = load_model(model,
                                       os.path.join(output_dir,
                                                    'fold-%i' % split,
                                                    'models',
                                                    'cnn-%i' % cnn_index,
                                                    selection),
                                       gpu=gpu,
                                       filename='model_best.pth.tar',
                                       device_index=model_options.device)

        results_df, metrics = test(model, data_loader, gpu, criterion,
                                   model_options.mode,
                                   device_index=model_options.device,
                                   train_begin_time=train_begin_time)
        print("[%s]: %s level balanced accuracy is %f" %
              (timeSince(train_begin_time), model_options.mode,
               metrics['balanced_accuracy']))
        print('[{}]: {}_{}_result_df:'.format(
            timeSince(train_begin_time), subset_name, selection))
        print(results_df)
        print('[{}]: {}_{}_metrics:\n{}'.format(
            timeSince(train_begin_time), subset_name, selection, metrics))
        # Collect the metrics once, then log them to wandb and return them
        metric_temp_dict = {
            '{}_{}_{}_single_model'.format(subset_name, name, selection):
                metrics[name]
            for name in ('accuracy', 'balanced_accuracy', 'sensitivity',
                         'specificity', 'ppv', 'npv', 'total_loss')
        }
        wandb.log(metric_temp_dict)

        mode_level_to_tsvs(output_dir, results_df, metrics, split, selection,
                           model_options.mode, dataset=subset_name,
                           cnn_index=cnn_index)
        metric_dict.update(metric_temp_dict)
    return metric_dict
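The per-selection metrics are flattened into one dict with keys of the form `{subset}_{metric}_{selection}_single_model`, so results for several subsets and both selection criteria can coexist in a single wandb run. A small sketch of the key scheme (the values are made up):

metrics = {'accuracy': 0.84, 'balanced_accuracy': 0.82, 'total_loss': 0.41}
subset_name, selection = 'validation', 'best_loss'
metric_dict = {'{}_{}_{}_single_model'.format(subset_name, name, selection): value
               for name, value in metrics.items()}
print(metric_dict)
# {'validation_accuracy_best_loss_single_model': 0.84, ...}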
Example #5
def main(options):
    # Initialize the model
    model = create_model(options.network, options.gpu)
    transformations = transforms.Compose([MinMaxNormalization()])

    # Define loss and optimizer
    loss = torch.nn.CrossEntropyLoss()

    if options.split is None:
        fold_iterator = range(options.n_splits)
    else:
        fold_iterator = [options.split]

    # Loop on folds
    for fi in fold_iterator:
        print("Fold %i" % fi)

        if options.dataset == 'validation':
            _, test_df = load_data(options.diagnosis_tsv_path,
                                   options.diagnoses,
                                   fi,
                                   n_splits=options.n_splits,
                                   baseline=True)
        else:
            test_df = load_data_test(options.diagnosis_tsv_path,
                                     options.diagnoses)

        for n in range(options.num_cnn):

            dataset = MRIDataset_patch(options.caps_directory,
                                       test_df,
                                       options.patch_size,
                                       options.patch_stride,
                                       transformations=transformations,
                                       patch_index=n,
                                       prepare_dl=options.prepare_dl)

            test_loader = DataLoader(dataset,
                                     batch_size=options.batch_size,
                                     shuffle=False,
                                     num_workers=options.num_workers,
                                     pin_memory=True)

            # Load the best model saved during training
            model, best_epoch = load_model(
                model,
                os.path.join(options.output_dir, 'best_model_dir',
                             "fold_%i" % fi, 'cnn-%i' % n, options.selection),
                options.gpu,
                filename='model_best.pth.tar')

            results_df, metrics = test(model, test_loader, options.gpu, loss)
            print("Patch level balanced accuracy is %f" %
                  metrics['balanced_accuracy'])

            # write the test results into the tsv files
            patch_level_to_tsvs(options.output_dir,
                                results_df,
                                metrics,
                                fi,
                                options.selection,
                                dataset=options.dataset,
                                cnn_index=n)

        print("Selection threshold: ", options.selection_threshold)
        soft_voting_to_tsvs(options.output_dir,
                            fi,
                            options.selection,
                            dataset=options.dataset,
                            num_cnn=options.num_cnn,
                            selection_threshold=options.selection_threshold)
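`soft_voting_to_tsvs` is project code; a common way to realize patch-level soft voting is to average each subject's per-patch class probabilities and pick the class with the highest mean. A minimal pandas sketch of that idea (the column names and numbers are hypothetical, not the project's TSV schema):

import pandas as pd

# Hypothetical per-patch probabilities for two subjects
df = pd.DataFrame({
    'participant_id': ['sub-01'] * 3 + ['sub-02'] * 3,
    'proba_CN': [0.7, 0.6, 0.8, 0.3, 0.2, 0.4],
    'proba_AD': [0.3, 0.4, 0.2, 0.7, 0.8, 0.6],
})
subject_probs = df.groupby('participant_id')[['proba_CN', 'proba_AD']].mean()
predictions = subject_probs.idxmax(axis=1)
print(predictions)  # sub-01 -> proba_CN, sub-02 -> proba_AD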
Example #6
def individual_backprop(options):

    main_logger = return_logger(options.verbose, "main process")
    options = translate_parameters(options)

    fold_list = [
        fold for fold in os.listdir(options.model_path)
        if fold.startswith("fold-")
    ]
    if len(fold_list) == 0:
        raise ValueError("No folds were found at path %s" % options.model_path)

    model_options = argparse.Namespace()
    model_options = read_json(
        model_options, path.join(options.model_path, 'commandline.json'))
    model_options = translate_parameters(model_options)
    model_options.gpu = options.gpu

    if model_options.network_type == "multicnn":
        raise NotImplementedError(
            "The interpretation of multi-CNN is not implemented.")

    if options.tsv_path is None and options.input_dir is None:
        options.multi_cohort = model_options.multi_cohort
    if options.tsv_path is None:
        options.tsv_path = model_options.tsv_path
    if options.input_dir is None:
        options.input_dir = model_options.input_dir
    if options.target_diagnosis is None:
        options.target_diagnosis = options.diagnosis

    for fold in fold_list:
        main_logger.info(fold)
        for selection in options.selection:
            results_path = path.join(options.model_path, fold, 'gradients',
                                     selection, options.name)

            criterion = get_criterion(model_options.loss)

            # Data management (remove data not well predicted by the CNN)
            training_df = load_data_test(options.tsv_path, [options.diagnosis],
                                         baseline=options.baseline,
                                         multi_cohort=options.multi_cohort)
            training_df.reset_index(drop=True, inplace=True)

            # Model creation
            _, all_transforms = get_transforms(
                model_options.mode,
                minmaxnormalization=model_options.minmaxnormalization)
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                data_example = return_dataset(
                    model_options.mode,
                    options.input_dir,
                    training_df,
                    model_options.preprocessing,
                    train_transformations=None,
                    all_transformations=all_transforms,
                    prepare_dl=options.prepare_dl,
                    multi_cohort=options.multi_cohort,
                    params=model_options)

            model = create_model(model_options, data_example.size)
            model_dir = os.path.join(options.model_path, fold, 'models',
                                     selection)
            model, best_epoch = load_model(model,
                                           model_dir,
                                           gpu=options.gpu,
                                           filename='model_best.pth.tar')
            options.output_dir = results_path
            commandline_to_json(options, logger=main_logger)

            # Keep only subjects who were correctly / wrongly predicted by the network
            training_df = sort_predicted(model,
                                         training_df,
                                         options.input_dir,
                                         model_options,
                                         criterion,
                                         options.keep_true,
                                         batch_size=options.batch_size,
                                         num_workers=options.num_workers,
                                         gpu=options.gpu)

            if len(training_df) > 0:

                # Save the tsv files used for the saliency maps
                training_df.to_csv(path.join(results_path, 'data.tsv'),
                                   sep='\t',
                                   index=False)

                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    data_train = return_dataset(
                        model_options.mode,
                        options.input_dir,
                        training_df,
                        model_options.preprocessing,
                        train_transformations=None,
                        all_transformations=all_transforms,
                        prepare_dl=options.prepare_dl,
                        multi_cohort=options.multi_cohort,
                        params=model_options)

                train_loader = DataLoader(data_train,
                                          batch_size=options.batch_size,
                                          shuffle=True,
                                          num_workers=options.num_workers,
                                          pin_memory=True)

                interpreter = VanillaBackProp(model, gpu=options.gpu)

                for data in train_loader:
                    if options.gpu:
                        input_batch = data['image'].cuda()
                    else:
                        input_batch = data['image']

                    map_np = interpreter.generate_gradients(
                        input_batch,
                        data_train.diagnosis_code[options.target_diagnosis])
                    # The last batch may be smaller than batch_size
                    for i in range(input_batch.size(0)):
                        single_path = path.join(results_path,
                                                data['participant_id'][i],
                                                data['session_id'][i])
                        os.makedirs(single_path, exist_ok=True)

                        if len(data_train.size) == 4:
                            if options.nifti_template_path is not None:
                                image_nii = nib.load(
                                    options.nifti_template_path)
                                affine = image_nii.affine
                            else:
                                affine = np.eye(4)

                            map_nii = nib.Nifti1Image(map_np[i, 0, :, :, :],
                                                      affine)
                            nib.save(map_nii,
                                     path.join(single_path, "map.nii.gz"))
                        else:
                            jpg_path = path.join(single_path, "map.jpg")
                            plt.imshow(map_np[i, 0, :, :],
                                       cmap="coolwarm",
                                       vmin=-options.vmax,
                                       vmax=options.vmax)
                            plt.colorbar()
                            plt.savefig(jpg_path)
                            plt.close()
                        np.save(path.join(single_path, "map.npy"), map_np[i])
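The NIfTI output in these examples uses only standard nibabel calls; a self-contained sketch with a synthetic volume, falling back to an identity affine exactly as the examples do when no template is given:

import numpy as np
import nibabel as nib

volume = np.random.rand(32, 32, 32).astype(np.float32)  # stand-in saliency map
affine = np.eye(4)  # identity fallback, as above
nib.save(nib.Nifti1Image(volume, affine), "map.nii.gz")
np.save("map.npy", volume)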