Example 1
def get_training_results():
    X, Y = m.get_dataset(m.dataset1_filepath)
    baseline = m.get_score(X, Y)
    # print(train_one(X, Y, 16, 0.1))
    res = np.empty((len(error_rate), len(no_of_hidden_nodes)), dtype=float)

    for i in range(len(error_rate)):
        for j in range(len(no_of_hidden_nodes)):
            print("Training for %d hidden nodes, %f corruption level" % (
                no_of_hidden_nodes[j], error_rate[i]))
            ari = train_one(X, Y, no_of_hidden_nodes[j], error_rate[i])
            res[i][j] = ari

    plot_result(res, baseline)
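
The function above assumes module-level grids (error_rate, no_of_hidden_nodes) and helpers (m, train_one, plot_result) defined elsewhere in the file. A minimal sketch of how the grids might be declared and the sweep kicked off; the values below are purely illustrative and not taken from the original project:

import numpy as np

# Illustrative module-level setup (hypothetical values, not from the original project):
error_rate = [0.0, 0.1, 0.3, 0.5]        # corruption levels to sweep
no_of_hidden_nodes = [16, 32, 64, 128]   # hidden-layer sizes to sweep

if __name__ == '__main__':
    get_training_results()   # runs the full grid and plots the ARI scores against the baseline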
Example 2
    def __init__(self, input_path, name):
        """
        Helper class for evaluating a model.

        :param input_path: The directory that served as the model's output path; it should contain the 'config.json' file.
        :param name: The name of the experiment.
        """

        self.model = None
        self.name = name
        self.input_path = input_path
        self.output_path = input_path.parent.joinpath('eval')
        self.metrics_path = self.output_path.joinpath('metrics')
        self.output_path.mkdir(exist_ok=True, parents=True)
        self.metrics_path.mkdir(exist_ok=True, parents=True)

        self.config = self._config()
        self.checkpoints = self._checkpoints()
        self.database = self._load_database()
        self.dataset = get_dataset(database=self.database, config=self.config)
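
Only the constructor body is shown, so the enclosing class name is not part of the snippet. A minimal usage sketch, assuming the class is called Evaluator (a hypothetical name) and that a training run has already written its output, including config.json, to the given directory:

from pathlib import Path

# 'Evaluator' and the path below are hypothetical; the snippet only shows __init__.
evaluator = Evaluator(input_path=Path('runs/experiment_01/output'),
                      name='experiment_01')
# Evaluation artifacts end up next to the model output, under
# runs/experiment_01/eval and runs/experiment_01/eval/metrics.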
Example 3

if __name__ == '__main__':
    import torch
    from torch.utils import data

    import network
    from main import get_argparser, get_dataset

    opts = get_argparser().parse_args()
    opts.path_txt = {
        'images_train': '/raid/dataset/cvpr/images_train.txt',
        'labels_train': '/raid/dataset/cvpr/labels_train.txt',
        'images_val': '/raid/dataset/cvpr/images_val.txt',
        'labels_val': '/raid/dataset/cvpr/labels_val.txt',
    }
    opts.num_classes = 18

    _, val_dst = get_dataset(opts)

    val_loader = data.DataLoader(val_dst,
                                 batch_size=opts.val_batch_size,
                                 shuffle=True,
                                 num_workers=0,
                                 drop_last=True)

    model = network.deeplabv3plus_resnet101(num_classes=opts.num_classes,
                                            output_stride=opts.output_stride)
    para_dict = torch.load(
        './checkpoints/latest_deeplabv3plus_resnet101_cvpr_os16.pth')
    model.load_state_dict(para_dict['model_state'])

    # net_str = str(net)
    # import pdb
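
The original example ends right after the checkpoint is loaded. A minimal sketch of how the restored model might be evaluated on val_loader, assuming standard per-pixel integer labels; the device handling, the accuracy metric, and the function name are assumptions, not part of the original script:

import torch


def evaluate(model, loader):
    """Assumed evaluation loop (not in the original snippet): plain pixel accuracy."""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for images, labels in loader:
            images, labels = images.to(device), labels.to(device)
            preds = model(images).argmax(dim=1)   # (N, H, W) class indices
            correct += (preds == labels).sum().item()
            total += labels.numel()
    return correct / max(total, 1)


# e.g. print("val pixel accuracy: %.4f" % evaluate(model, val_loader))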
Example 4
#!/usr/bin/env python
# coding: utf-8

# This script is a quick sanity check that everything works as expected.
try:
    import cPickle as pickle  # Python 2
except ImportError:
    import pickle  # Python 3
import os.path
from main import get_dataset
from similarities import ItemSimilarity
from models import ItemPreferenceDataModel
from metrics import manhattan_distances
#from recommenders import ItemCFRecommender as Recommender

data = get_dataset()
model = ItemPreferenceDataModel(data)
sim = ItemSimilarity(model, manhattan_distances)
dump_file = 'cache_' + sim.__class__.__name__ + '_' + \
                sim.metrix.__name__

def save_result(sim, filename):
    with open(filename, 'wb') as f:
        pickle.dump(sim.similarities, f)


def load_similarity(sim, filename):
    if os.path.exists(filename):
        with open(filename, 'rb') as f:
            sim.similarities = pickle.load(f)
    else:
        sim.similarities = {}

load_similarity(sim, dump_file)
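
The script only restores the cached similarities; a short hedged sketch of the complementary step, persisting them again after they have been (re)computed so the next run can reuse the cache:

# Assumed follow-up (not in the original snippet): write the cache back out
# once sim.similarities has been filled in.
if sim.similarities:
    save_result(sim, dump_file)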
Example 5
def loop_main_and_plot(components, scoring, dataset, query_server=True,
                       force=False, sparsityThreshold=0.000005,
                       memory=Memory(cachedir='nilearn_cache'), **kwargs):
    """
    Loop main.py to plot summaries of WB vs hemi ICA components
    """
    out_dir = op.join('ica_imgs', dataset, 'analyses')

    # Get the data once.
    images, term_scores = get_dataset(dataset, max_images=200,  # for testing
                                      query_server=query_server)

    # Initialize master DFs
    (wb_master, R_master, L_master) = (pd.DataFrame() for i in range(3))

    for c in components:
        print("Running analysis with %d components" % c)
        (wb_summary, R_sparsity, L_sparsity) = load_or_generate_summary(
            images=images, term_scores=term_scores, n_components=c,
            scoring=scoring, dataset=dataset, force=force,
            sparsityThreshold=sparsityThreshold, memory=memory)
        # Append them to master DFs
        wb_master = wb_master.append(wb_summary)
        R_master = R_master.append(R_sparsity)
        L_master = L_master.append(L_sparsity)

        ### Generate component-specific plots ###
        # Save component-specific images in the component dir
        comp_outdir = op.join(out_dir, str(c))

        # 1) Relationship between positive and negative HPI in wb components
        out_path = op.join(comp_outdir, "1_PosNegHPI_%dcomponents.png" % c)

        hpi_signs = ['pos', 'neg', 'abs']
        # set color to be proportional to the symmetry in the sparsity (Pos-Neg/Abs),
        # and set size to be proportional to the total sparsity (Abs)
        color = (wb_summary['posTotal'] - wb_summary['negTotal']) / wb_summary['absTotal']
        size = wb_summary['absTotal'] / 20.0
        ax = wb_summary.plot.scatter(x='posHPI', y='negHPI', c=color, s=size,
                                     xlim=(-1.1, 1.1), ylim=(-1.1, 1.1),
                                     colormap='Reds', colorbar=True, figsize=(7, 6))
        title = ax.set_title("\n".join(wrap("The relationship between HPI on "
                                            "positive and negative side: "
                                            "n_components = %d" % c, 60)))
        ax.spines['right'].set_color('none')
        ax.spines['top'].set_color('none')
        ax.yaxis.set_ticks_position('left')
        ax.yaxis.set_label_coords(-0.1, 0.5)
        ax.spines['left'].set_position(('data', 0))
        ax.xaxis.set_ticks_position('bottom')
        ax.spines['bottom'].set_position(('data', 0))
        ticks = [-1.1, -1.0, -0.5, 0, 0.5, 1.0, 1.1]
        labels = ['L', '-1.0', '-0.5', '0', '0.5', '1.0', 'R']
        plt.setp(ax, xticks=ticks, xticklabels=labels, yticks=ticks, yticklabels=labels)
        f = plt.gcf()
        title.set_y(1.05)
        f.subplots_adjust(top=0.8)
        cax = f.get_axes()[1]
        cax.set_ylabel('Balance between pos/neg (anti-correlated network)',
                       rotation=270, labelpad=20)

        save_and_close(out_path)

        # 2) Relationship between HPI and SAS in wb components
        out_path = op.join(comp_outdir, "2_HPIvsSAS_%dcomponents.png" % c)

        fh, axes = plt.subplots(1, 3, sharey=True, figsize=(18, 6))
        fh.suptitle("The relationship between HPI values and SAS: "
                    "n_components = %d" % c, fontsize=16)
        hpi_sign_colors = {'pos': 'r', 'neg': 'b', 'abs': 'g'}
        for ax, sign in zip(axes, hpi_signs):
            ax.scatter(wb_summary['%sHPI' % sign], wb_summary['wb_SAS'],
                       c=hpi_sign_colors[sign], s=wb_summary['%sTotal' % sign] / 20.0)
            ax.set_xlabel("%s HPI" % sign)
            ax.set_xlim(-1.1, 1.1)
            ax.set_ylim(0, 1)
            ax.spines['right'].set_color('none')
            ax.spines['top'].set_color('none')
            ax.yaxis.set_ticks_position('left')
            ax.spines['left'].set_position(('data', 0))
            ax.xaxis.set_ticks_position('bottom')
            ax.spines['bottom'].set_position(('data', 0))
            plt.setp(ax, xticks=ticks, xticklabels=labels)
        fh.text(0.04, 0.5, "Spatial Asymmetry Score", va='center', rotation='vertical')

        save_and_close(out_path)

    ### Generate plots over a range of specified n_components ###
    # 1) HPI-for pos, neg, and abs in wb components
    out_path = op.join(out_dir, '1_wb_HPI.png')

    fh, axes = plt.subplots(1, 3, sharex=True, sharey=True, figsize=(18, 6))
    fh.suptitle("Hemispheric Participation Index for each component", fontsize=16)
    hpi_styles = {'pos': ['r', 'lightpink', 'above %g' % sparsityThreshold],
                  'neg': ['b', 'lightblue', 'below -%g' % sparsityThreshold],
                  'abs': ['g', 'lightgreen', 'with abs value above %g' % sparsityThreshold]}
    by_comp = wb_master.groupby("n_comp")
    for ax, sign in zip(axes, hpi_signs):
        mean, sd = by_comp.mean()["%sHPI" % sign], by_comp.std()["%sHPI" % sign]
        ax.fill_between(components, mean + sd, mean - sd, linewidth=0,
                        facecolor=hpi_styles[sign][1], alpha=0.5)
        size = wb_master['%sTotal' % (sign)] / 20.0
        ax.scatter(wb_master.n_comp, wb_master["%sHPI" % sign], label=sign,
                   c=hpi_styles[sign][0], s=size)
        ax.plot(components, mean, c=hpi_styles[sign][0])
        ax.set_xlim((0, components[-1] + 5))
        ax.set_ylim((-1, 1))
        ax.set_xticks(components)
        ax.set_ylabel("HPI ((R-L)/(R+L)) for # of voxels %s" % hpi_styles[sign][2])
    fh.text(0.5, 0.04, "# of components", ha="center")

    save_and_close(out_path, fh=fh)

    # 2) SAS for wb components
    fh, ax = plt.subplots(1, 1, figsize=(18, 6))
    fh.suptitle("Spatial Asymmetry Score for each component", fontsize=16)
    sas_mean, sas_sd = by_comp.mean()["wb_SAS"], by_comp.std()["wb_SAS"]
    ax.fill_between(components, sas_mean + sas_sd, sas_mean - sas_sd,
                    linewidth=0, facecolor='lightgrey', alpha=0.5)
    size = wb_master["absTotal"] / 20.0
    ax.scatter(wb_master.n_comp, wb_master["wb_SAS"], c='grey', s=size)
    ax.plot(components, sas_mean, c='grey')
    ax.set_xlim((0, components[-1] + 5))
    ax.set_ylim((-1, 1))
    ax.set_xticks(components)
    ax.set_ylabel("SAS (higher values indicate asymmetry)")

    out_path = op.join(out_dir, '2_wb_SAS.png')
    save_and_close(out_path, fh=fh)
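
For context, a minimal sketch of how loop_main_and_plot might be driven; the component counts, scoring name, and dataset below are placeholders rather than values from the original project, and the Memory import path may differ in the original code:

from joblib import Memory  # import path assumed; the original signature only uses Memory(...)

# Hypothetical driver call; every argument value here is a placeholder.
loop_main_and_plot(components=[5, 10, 20],
                   scoring='correlation',
                   dataset='neurovault',
                   query_server=False,
                   memory=Memory(cachedir='nilearn_cache'))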
Example 6
    import warnings
    from argparse import ArgumentParser

    # Look for image computation errors
    warnings.simplefilter('ignore', DeprecationWarning)
    warnings.simplefilter('error', RuntimeWarning)  # Detect bad NV images

    # Arg parsing
    parser = ArgumentParser(description="Really?")
    parser.add_argument('check', nargs='?', default='data',
                        choices=('data', 'metadata'))
    parser.add_argument('--offline', action='store_true', default=False)
    parser.add_argument('--dataset', nargs='?', default='neurovault',
                        choices=['neurovault', 'abide', 'nyu'])
    args = vars(parser.parse_args())

    # Alias args
    check = args.pop('check')
    query_server = not args.pop('offline')
    dataset = args.pop('dataset')
    if dataset == 'neurovault':
        args['fetch_terms'] = False
    images = get_dataset(query_server=query_server, dataset=dataset, **args)[0]

    if check == 'data':
        qc_image_data(images=images, dataset=dataset)
    else:
        qc_image_metadata(images=images)

    plt.show()
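
As a quick illustration of the argument handling above, parse_args can also be fed an explicit argv list; the values here are only an example:

# Illustrative only: simulate an offline metadata check from the command line.
example_args = vars(parser.parse_args(['metadata', '--offline', '--dataset', 'nyu']))
# -> {'check': 'metadata', 'offline': True, 'dataset': 'nyu'}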
Example 7
def main_ic_loop(components, scoring,
                 dataset, query_server=True, force=False,
                 memory=Memory(cachedir='nilearn_cache'), **kwargs):
    # $FIX Test with just 'wb' and 'rl' matching until 'lr' matching is fixed
    # match_methods = ['wb', 'rl', 'lr']
    match_methods = ['wb', 'rl']
    out_dir = op.join('ica_imgs', dataset)
    mean_scores, unmatched = [], []

    # Get the data once.
    images, term_scores = get_dataset(
        dataset, query_server=query_server)

    for match_method in match_methods:
        print("Plotting results for %s matching method" % match_method)
        mean_score_d, num_unmatched_d = {}, {}
        for c in components:
            print("Running analysis with %d components" % c)
            # main analysis is run for each component and match method:
            # plotting for component comparisons is done only if force=True
            img_d, score_mats_d, sign_mats_d = do_main_analysis(
                    dataset=dataset, images=images, term_scores=term_scores,
                    key=match_method, force=force, plot=force,
                    n_components=c, scoring=scoring, **kwargs)

            # Get the mean dissimilarity score and the number of unmatched
            # components for each comparison in score_mats_d
            for comp in score_mats_d:
                score_mat, sign_mat = score_mats_d[comp], sign_mats_d[comp]
                # For ("wb", "RL-forced") and ("wb", "RL-unforced")
                if "forced" in comp[1]:
                    if "-forced" in comp[1]:
                        match, unmatch = get_match_idx_pair(score_mat, sign_mat, force=True)
                    elif "-unforced" in comp[1]:
                        match, unmatch = get_match_idx_pair(score_mat, sign_mat, force=False)
                        n_unmatched = unmatch["idx"].shape[1] if unmatch["idx"] is not None else 0
                        um_label = "unmatched RL"
                    mean_score = score_mat[match["idx"][0], match["idx"][1]].mean()
                    score_label = "%s" % (" vs ".join(comp))
                    # Store values in respective dict
                    if c == components[0]:
                        mean_score_d[score_label] = [mean_score]
                        if "-unforced" in comp[1]:
                            num_unmatched_d[um_label] = [n_unmatched]
                    else:
                        mean_score_d[score_label].append(mean_score)
                        if "-unforced" in comp[1]:
                            num_unmatched_d[um_label].append(n_unmatched)

                # For ("wb", "R"), ("wb", "L") --wb matching or ("R", "L") --rl matching
                else:
                    for force_match in [True, False]:
                        match, unmatch = get_match_idx_pair(score_mat, sign_mat, force=force_match)
                        mean_score = score_mat[match["idx"][0], match["idx"][1]].mean()
                        if force_match:
                            score_label = "%s%s" % (" vs ".join(comp), "-forced")
                            n_unmatched = None
                        else:
                            score_label = "%s%s" % (" vs ".join(comp), "-unforced")
                            n_unmatched = unmatch["idx"].shape[1] if unmatch["idx"] is not None else 0
                            um_label = "unmatched %s" % comp[1]
                        # Store values in respective dict
                        if c == components[0]:
                            mean_score_d[score_label] = [mean_score]
                            if not force_match:
                                num_unmatched_d[um_label] = [n_unmatched]
                        else:
                            mean_score_d[score_label].append(mean_score)
                            if not force_match:
                                num_unmatched_d[um_label].append(n_unmatched)

        # Store vals as df
        ms_df = pd.DataFrame(mean_score_d, index=components)
        um_df = pd.DataFrame(num_unmatched_d, index=components)
        mean_scores.append(ms_df)
        unmatched.append(um_df)
        # Save combined df
        combined = pd.concat([ms_df, um_df], axis=1)
        out = op.join(out_dir, '%s-matching_simscores.csv' % match_method)
        combined.to_csv(out)

    # We now have scores for all matching methods; plot them.
    fh, axes = plt.subplots(1, len(match_methods), sharex=True, sharey=True, figsize=(18, 6))
    fh.suptitle("Average dissimilarity scores for the best-match pairs", fontsize=16)
    labels = ["wb vs R-unforced", "wb vs L-unforced", "R vs L-unforced", "wb vs RL-unforced",
              "wb vs R-forced", "wb vs L-forced", "R vs L-forced", "wb vs RL-forced",
              "unmatched R", "unmatched L", "unmatched RL"]
    styles = ["r-", "b-", "m-", "g-",
              "r:", "b:", "m:", "g:",
              "r--", "b--", "m--"]

    for i, ax in enumerate(axes):
        ax2 = ax.twinx()
        ms_df, um_df = mean_scores[i], unmatched[i]
        for label, style in zip(labels, styles):
            if label in ms_df.columns:
                ms_df[label].plot(ax=ax, style=style)
            elif label in um_df.columns:
                um_df[label].plot(ax=ax2, style=style)
        ax.set_title("%s-matching" % (match_methods[i]))
        # Shrink current axis by 30%
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
        ax2.set_position([box.x0, box.y0, box.width * 0.75, box.height])
        # Put the legends to the right of the current axis
        ax.legend(loc='lower left', bbox_to_anchor=(1.3, 0.5))
        ax2.legend(loc='upper left', bbox_to_anchor=(1.3, 0.5))
    fh.text(0.5, 0.04, "# of components", ha="center")
    fh.text(0.05, 0.5, "mean %s scores" % scoring, va='center', rotation='vertical')
    fh.text(0.95, 0.5, "# of unmatched R- or L- components", va='center', rotation=-90)

    out_path = op.join(out_dir, '%s_simscores.png' % scoring)
    save_and_close(out_path, fh=fh)