Example #1
def print_clf_table(bin_labels: bool, metrics: list):
    """Print the classifier test results as a LaTeX table."""
    config = get_config()

    if bin_labels:
        labels = ['Finding']
    else:
        labels = ['Lung Opacity', 'Pleural Effusion', 'Support Devices']

    params = Params()
    params.bin_labels = bin_labels

    dir_clf = Path(os.getcwd()) / f'data/clfs/{config["dir_clf"]}'

    clf_eval_results_path = dir_clf / f'clf_test_results{"_bin_label" if bin_labels else ""}.json'

    with open(clf_eval_results_path, 'r') as json_file:
        clf_eval_results = json.load(json_file)

    # Short codes for the three modalities: frontal (PA), lateral, text.
    mods = {'PA': 'F', 'Lateral': 'L', 'text': 'T'}

    # Build every non-empty combination of the modality names.
    # NOTE: labels, params, and subsets are built here but never used below.
    subsets = []
    for n in range(1, len(mods) + 1):
        for subset in combinations(mods, n):
            subsets.append(''.join(subset))

    df = pd.DataFrame(df_builder(clf_eval_results, metrics))
    df = df.reset_index(drop=True)
    df_tex = df.to_latex(index=False, escape=False)
    # Strip the booktabs rules from the generated LaTeX.
    df_tex = df_tex.replace(r'\toprule', '')
    df_tex = df_tex.replace(r'\bottomrule', '')
    print(bold_max_value(df, df_tex))
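A minimal usage sketch; the metric name below is an assumption, and the call expects data/clfs/<dir_clf>/clf_test_results_bin_label.json under the working directory:

# Hypothetical invocation; 'mean_AP' is a guessed metric key.
print_clf_table(bin_labels=True, metrics=['mean_AP'])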
Example #2
def get_random_perf():
    """Return the random-baseline score for the configured eval metric."""
    config = get_config()
    dir_clf = Path(__file__).parent.parent / f'data/clfs/{config["dir_clf"]}'
    clf_result_path = dir_clf / 'clf_test_results_bin_label.json'
    with open(clf_result_path, 'r') as jsonfile:
        clf_results = json.load(jsonfile)
    return clf_results['random_perf'][config['eval_metric']][0]
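get_random_perf indexes clf_results['random_perf'][config['eval_metric']][0], so the results file must contain at least the following structure; a sketch with placeholder values, assuming eval_metric is 'mean_AP':

# Minimal placeholder shape of clf_test_results_bin_label.json:
clf_results = {'random_perf': {'mean_AP': [0.123]}}
print(clf_results['random_perf']['mean_AP'][0])  # -> 0.123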
Example #3
def print_lr_table(bin_labels: bool, metrics):
    """Print the lr evaluation results as a LaTeX table."""
    if bin_labels:
        labels = ['Finding']
    else:
        labels = ['Lung Opacity', 'Pleural Effusion', 'Support Devices']

    lr_eval_results_path = (
        Path(os.getcwd())
        / f'data/lr_eval_results{"_bin_label" if bin_labels else ""}.json'
    )

    with open(lr_eval_results_path, 'r') as json_file:
        lr_eval_results = json.load(json_file)

    mods = {'PA': 'F', 'Lateral': 'L', 'text': 'T'}

    # Build every non-empty combination of the modality names.
    subsets = []
    for n in range(1, len(mods) + 1):
        for subset in combinations(mods, n):
            subsets.append(''.join(subset))

    config = get_config()

    df = pd.DataFrame(df_builder(lr_eval_results, mods, metrics))

    df = df.reset_index(drop=True)
    df_tex = df.to_latex(index=False, escape=False)
    df_tex = df_tex.replace(r'\toprule', '')
    df_tex = df_tex.replace(r'\bottomrule', '')
    print(bold_max_value(df, df_tex))
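A standalone sketch of the subset-building idiom used above. Iterating combinations over a dict walks its keys, so the joined entries concatenate the full modality names, not the one-letter codes:

from itertools import combinations

mods = {'PA': 'F', 'Lateral': 'L', 'text': 'T'}
subsets = []
for n in range(1, len(mods) + 1):
    for subset in combinations(mods, n):
        subsets.append(''.join(subset))
print(subsets)
# ['PA', 'Lateral', 'text', 'PALateral', 'PAtext', 'Lateraltext', 'PALateraltext']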
Example #4
def print_rand_perf():
    """Return the random-baseline mean AP for the 'Finding' label, rounded to 3 decimals."""
    config = get_config()
    dir_clf = Path(os.getcwd()) / f'data/clfs/{config["dir_clf"]}'

    clf_eval_results_path = dir_clf / 'clf_test_results_bin_label.json'

    with open(clf_eval_results_path, 'r') as json_file:
        clf_eval_results = json.load(json_file)
    return round(clf_eval_results['random_perf']['mean_AP_Finding'][0], 3)
Example #5
# HK, 18.01.21
import json
import os
from pathlib import Path

import numpy as np
import pandas as pd

from prepare.utils import get_config
from scripts.utils import bold_max_value, get_random_perf

label = 'Finding'

config = get_config()
gen_eval_results_path = Path(os.getcwd()) / 'data/gen_eval_results.json'

with open(gen_eval_results_path, 'r') as json_file:
    gen_eval_results = json.load(json_file)

mods_mapping = {'PA': 'F', 'Lateral': 'L', 'text': 'T'}


def df_builder(cond_values: dict):
    """Yield a single table row with the rounded score for each modality."""
    label_row = {
        'MODEL': 'MoPoE',
        'Metric': config['eval_metric'].replace('_', ' ')
    }
    for k, v in cond_values.items():
        label_row[mods_mapping[k]] = np.round(v, 3)
    yield label_row
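Because df_builder yields a single row dict, pandas can consume the generator directly; a sketch with placeholder scores (the keys must match mods_mapping):

# Placeholder conditional-generation scores, keyed by modality name.
cond_values = {'PA': 0.5123, 'Lateral': 0.4987, 'text': 0.6012}
df = pd.DataFrame(df_builder(cond_values))  # columns: MODEL, Metric, F, L, T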
Example #6
def make_cond_gen_fig(nbr_samples=3):
    """Create conditional-generation figures for every non-empty modality subset."""
    import mimic
    from mimic.utils import utils
    from mimic.utils.experiment import MimicExperiment
    from mimic.utils.filehandling import set_paths
    log.info(f'Generating cond gen fig with nbr_samples={nbr_samples}')
    config = get_config()

    # set seed
    SEED = config['seed']
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    random.seed(SEED)

    # NOTE: the experiment directory is hard-coded here instead of taken from
    # config['experiment_dir_bin'].
    experiment_dir = 'binary_labels-True_beta-0.01_weighted_sampler-False_class_dim-128_text_gen_lastlayer-softmax_2021_02_10_14_56_27_974859'
    experiment_path = (
        Path(__file__).parent.parent / f'data/vae_model/{experiment_dir}'
    )
    flags_path = experiment_path / 'flags.rar'
    FLAGS = torch.load(flags_path)
    FLAGS.save_figure = True
    FLAGS.dir_cond_gen = Path(__file__).parent.parent / 'data/cond_gen'

    FLAGS = set_paths(FLAGS)
    FLAGS.use_clf = False
    FLAGS.binary_labels = False
    state_dict_path = experiment_path / 'checkpoints/0149/mm_vae'

    mimic_experiment = MimicExperiment(flags=FLAGS)
    mimic_experiment.mm_vae.to(FLAGS.device)
    mimic_experiment.mm_vae.load_state_dict(
        state_dict=torch.load(state_dict_path))
    mimic_experiment.mm_vae.eval()

    mimic_experiment.modalities['text'].plot_img_size = torch.Size(
        [1, 256, 128])

    samples = mimic_experiment.test_samples
    model = mimic_experiment.mm_vae
    mods = mimic_experiment.modalities
    subsets = mimic_experiment.subsets

    Path(mimic_experiment.flags.dir_cond_gen).mkdir(exist_ok=True)

    def create_cond_gen_plot(in_mods='Lateral_PA'):
        subset = subsets[in_mods]
        # Gather per-modality lists of input and output samples for plotting.
        plot = {
            **{f'in_{mod}': [] for mod in mimic_experiment.modalities},
            **{f'out_{mod}': [] for mod in mimic_experiment.modalities},
        }

        for idx in range(nbr_samples):
            sample = samples[idx]

            i_batch = {
                mod.name: sample[mod.name].unsqueeze(0)
                for mod in subset
            }
            latents = model.inference(i_batch, num_samples=1)
            c_in = latents['subsets'][in_mods]
            c_rep = utils.reparameterize(mu=c_in[0], logvar=c_in[1])
            cond_mod_in = {
                'content': c_rep,
                'style': {k: None
                          for k in mimic_experiment.modalities}
            }
            cond_gen_samples = model.generate_from_latents(cond_mod_in)
            for mod_key, mod in mods.items():
                plot[f'in_{mod_key}'].append(
                    mod.plot_data(mimic_experiment,
                                  sample[mod_key].squeeze(0)))
                plot[f'out_{mod_key}'].append(
                    mod.plot_data(mimic_experiment,
                                  cond_gen_samples[mod_key].squeeze(0)))

        rec = torch.Tensor()

        # First concatenate all the input images, then all the output images.
        for which, modalities in {'in': mods, 'out': mods}.items():
            for mod in modalities:
                for idx in range(nbr_samples):
                    if mod == 'text':
                        img = plot[f'{which}_{mod}'][idx].cpu().unsqueeze(0)
                    else:
                        img = plot[f'{which}_{mod}'][idx].cpu()
                        # Pad the image modalities so they fit the wider text rectangle.
                        m = nn.ZeroPad2d((64, 64, 0, 0))
                        img = m(img.squeeze()).unsqueeze(0).unsqueeze(0)
                    rec = torch.cat((rec, img), 0)

        out_path = Path(
            mimic_experiment.flags.dir_cond_gen
        ) / f'{in_mods}{"_small" if nbr_samples < 5 else ""}.png'
        log.info(f'Saving image to {out_path}')

        _ = mimic.utils.plot.create_fig(out_path,
                                        img_data=rec,
                                        num_img_row=nbr_samples,
                                        save_figure=True)

    # The empty string key is the empty subset; skip it.
    for in_mod in mimic_experiment.subsets:
        if in_mod:
            create_cond_gen_plot(in_mod)
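For reference, utils.reparameterize (called in create_cond_gen_plot) takes mu and logvar, which matches the standard reparameterization trick; a minimal stand-in, as an illustration rather than the mimic implementation:

import torch

def reparameterize(mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    # z = mu + sigma * eps with eps ~ N(0, I), so sampling stays differentiable.
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std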
Example #7
def get_model_flags():
    """Load the saved flags of the binary-label VAE experiment."""
    config = get_config()
    flags_path = (
        Path(os.getcwd())
        / f'data/vae_model/{config["experiment_dir_bin"]}/flags.rar'
    )
    return torch.load(flags_path)
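A hypothetical usage sketch, mirroring Example #6, where the loaded flags object is mutated before building the experiment:

FLAGS = get_model_flags()
FLAGS.save_figure = True  # the same fields toggled in make_cond_gen_fig above
FLAGS.use_clf = False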